repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
Nstats/cs_capsule | [
"e45a8518a41117d4b5f105bcc2c96a3d621e40ea"
] | [
"examples/evaluate.py"
] | [
"#*#*#*./examples/evaluate.py\n\"\"\"Official evaluation script for SQuAD version 2.0.\n\nIn addition to basic functionality, we also compute additional statistics and\nplot precision-recall curves if an additional na_prob.json file is provided.\nThis file is expected to map question ID's to the model's predicted probability\nthat a question is unanswerable.\n\"\"\"\nimport argparse\nimport collections\nimport json\nimport numpy as np\nimport os\nimport re\nimport string\nimport sys\n\nOPTS = None\ndef parse_args():\n parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')\n parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')\n parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')\n parser.add_argument('--out-file', '-o', metavar='eval.json',\n help='Write accuracy metrics to file (default is stdout).')\n parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',\n help='Model estimates of probability of no answer.')\n parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,\n help='Predict \"\" if no-answer probability exceeds this (default = 1.0).')\n parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,\n help='Save precision-recall curves to directory.')\n parser.add_argument('--verbose', '-v', action='store_true')\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\ndef make_qid_to_has_ans(dataset):\n qid_to_has_ans = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid_to_has_ans[qa['id']] = bool(qa['answers'])\n return qid_to_has_ans\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\ndef get_tokens(s):\n if not s: return []\n return normalize_answer(s).split()\n\ndef compute_exact(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\ndef compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold) #答案list\n pred_toks = get_tokens(a_pred)\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)###c.Counter([1,2,33])& c.Counter([2,3,4]) res: Counter({2: 1})\n num_same = sum(common.values()) ###所有出现的相同词的总个数\n if len(gold_toks) == 0 or len(pred_toks) == 0: ##无答案问题直接对比\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks) #准确率\n recall = 1.0 * num_same / len(gold_toks) #召回率\n f1 = (2 * precision * recall) / (precision + recall) #f1\n return f1\n\ndef get_raw_scores(dataset, preds):\n exact_scores = {}\n f1_scores = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid = qa['id']\n gold_answers = [a['text'] for a in qa['answers']\n if normalize_answer(a['text'])]\n if not gold_answers:\n # For unanswerable questions, only correct answer is empty string\n gold_answers = ['']\n if qid not in preds:\n print('Missing prediction for %s' % qid)\n continue\n a_pred = preds[qid]\n # Take max over all gold answers\n 
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\n f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)\n return exact_scores, f1_scores\n\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\n new_scores = {}\n for qid, s in scores.items():\n pred_na = na_probs[qid] > na_prob_thresh ##有答案变成了无答案的情况\n if pred_na:\n new_scores[qid] = float(not qid_to_has_ans[qid])\n else:\n new_scores[qid] = s\n return new_scores\n\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\n if not qid_list:\n total = len(exact_scores)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores.values()) / total),\n ('f1', 100.0 * sum(f1_scores.values()) / total),\n ('total', total),\n ])\n else:\n total = len(qid_list)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\n ('total', total),\n ])\n\ndef merge_eval(main_eval, new_eval, prefix):\n for k in new_eval:\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\n\ndef plot_pr_curve(precisions, recalls, out_image, title):\n plt.step(recalls, precisions, color='b', alpha=0.2, where='post')\n plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.xlim([0.0, 1.05])\n plt.ylim([0.0, 1.05])\n plt.title(title)\n plt.savefig(out_image)\n plt.clf()\n\ndef make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,\n out_image=None, title=None):\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n true_pos = 0.0\n cur_p = 1.0\n cur_r = 0.0\n precisions = [1.0]\n recalls = [0.0]\n avg_prec = 0.0\n for i, qid in enumerate(qid_list):\n if qid_to_has_ans[qid]:\n true_pos += scores[qid]\n cur_p = true_pos / float(i+1)\n cur_r = true_pos / float(num_true_pos)\n if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:\n # i.e., if we can put a threshold after this point\n avg_prec += cur_p * (cur_r - recalls[-1])\n precisions.append(cur_p)\n recalls.append(cur_r)\n if out_image:\n plot_pr_curve(precisions, recalls, out_image, title)\n return {'ap': 100.0 * avg_prec}\n\ndef run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,\n qid_to_has_ans, out_image_dir):\n if out_image_dir and not os.path.exists(out_image_dir):\n os.makedirs(out_image_dir)\n num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)\n if num_true_pos == 0:\n return\n pr_exact = make_precision_recall_eval(\n exact_raw, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_exact.png'),\n title='Precision-Recall curve for Exact Match score')\n pr_f1 = make_precision_recall_eval(\n f1_raw, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_f1.png'),\n title='Precision-Recall curve for F1 score')\n oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}\n pr_oracle = make_precision_recall_eval(\n oracle_scores, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_oracle.png'),\n title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)')\n merge_eval(main_eval, pr_exact, 'pr_exact')\n merge_eval(main_eval, pr_f1, 'pr_f1')\n merge_eval(main_eval, pr_oracle, 'pr_oracle')\n\ndef histogram_na_prob(na_probs, qid_list, image_dir, name):\n if not qid_list:\n return\n x = [na_probs[k] for k in qid_list]\n weights = np.ones_like(x) / float(len(x))\n plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))\n plt.xlabel('Model probability of no-answer')\n plt.ylabel('Proportion of dataset')\n plt.title('Histogram of no-answer probability: %s' % name)\n plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))\n plt.clf()\n\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\n cur_score = num_no_ans\n best_score = cur_score\n best_thresh = 0.0\n qid_list = sorted(na_probs, key=lambda k: na_probs[k]) #从小到大按照diff排序\n for i, qid in enumerate(qid_list):\n if qid not in scores: continue\n if qid_to_has_ans[qid]:\n diff = scores[qid]\n else:\n if preds[qid]:\n diff = -1\n else:\n diff = 0\n cur_score += diff\n if cur_score > best_score:\n best_score = cur_score\n best_thresh = na_probs[qid]\n return 100.0 * best_score / len(scores), best_thresh\n\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)\n main_eval['best_exact'] = best_exact\n main_eval['best_exact_thresh'] = exact_thresh\n main_eval['best_f1'] = best_f1\n main_eval['best_f1_thresh'] = f1_thresh\n\ndef main():\n with open(OPTS.data_file) as f:\n dataset_json = json.load(f)\n dataset = dataset_json['data']\n with open(OPTS.pred_file) as f:\n preds = json.load(f)\n if OPTS.na_prob_file:\n with open(OPTS.na_prob_file) as f:\n na_probs = json.load(f)\n else:\n na_probs = {k: 0.0 for k in preds}\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = get_raw_scores(dataset, preds) #得到每个答案的extract和f1 list\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n OPTS.na_prob_thresh)\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n OPTS.na_prob_thresh)\n\n\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n if has_ans_qids:\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\n merge_eval(out_eval, has_ans_eval, 'HasAns')\n if no_ans_qids:\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\n merge_eval(out_eval, no_ans_eval, 'NoAns')\n if OPTS.na_prob_file:\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)\n if OPTS.na_prob_file and OPTS.out_image_dir:\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,\n qid_to_has_ans, OPTS.out_image_dir)\n histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')\n histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')\n if OPTS.out_file:\n with open(OPTS.out_file, 'w') as f:\n json.dump(out_eval, f)\n else:\n print(json.dumps(out_eval, indent=2))\n\n\ndef judgeOnline(data_file,pred_file,na_prob_file,output_dir,epoch,train_steps):\n\n if not os.path.exists(os.path.join(output_dir,\"eval_res\")):\n os.makedirs(os.path.join(output_dir,\"eval_res\"))\n output = os.path.join(output_dir,\"eval_res\")\n\n 
out_file = os.path.join(output,\"eval.json\")\n out_image_dir = None\n na_prob_thresh = 1.0\n\n with open(data_file) as f:\n\n dataset_json = json.load(f)\n dataset = dataset_json['data']\n with open(pred_file) as f:\n preds = json.load(f)\n with open(na_prob_file) as f:\n na_probs = json.load(f)\n\n\n exact_raw, f1_raw = get_raw_scores(dataset, preds)\n\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False 区分dev中的有无答案\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] #有答案的问题\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] #无答案的问题\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)###这里没用因为默认的是1.0 详情可参考该函数\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)###这里没用因为默认的是1.0 详情可参考该函数\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n if has_ans_qids:\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\n merge_eval(out_eval, has_ans_eval, 'HasAns')\n if no_ans_qids:\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\n merge_eval(out_eval, no_ans_eval, 'NoAns')\n if na_prob_file: ##如果给出null_odds.json文件\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) ##获取最好的thresh\n if na_prob_file and out_image_dir:\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,\n qid_to_has_ans, out_image_dir)\n histogram_na_prob(na_probs, has_ans_qids, out_image_dir, 'hasAns')\n histogram_na_prob(na_probs, no_ans_qids, out_image_dir, 'noAns')\n if out_file:\n with open(out_file, 'a') as fout:\n fout.write(\"epoch:{} steps:{} evaluation res:{}\\n\".format(epoch,train_steps,json.dumps(out_eval, sort_keys=True, indent=2)))\n import logging\n logger = logging.getLogger(__name__)\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO )\n logger.info(\"write evaluation result to \" + out_file + \"OK!\")\n else:\n print(json.dumps(out_eval, indent=2))\n return out_eval\n\nif __name__ == '__main__':\n OPTS = parse_args()\n if OPTS.out_image_dir:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n print(vars(OPTS))\n main()"
] | [
[
"numpy.ones_like",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.step",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yeralin/qiskit-terra | [
"251930a7b5d83af121ea0f3aafb33a54a1860e14",
"251930a7b5d83af121ea0f3aafb33a54a1860e14",
"251930a7b5d83af121ea0f3aafb33a54a1860e14",
"251930a7b5d83af121ea0f3aafb33a54a1860e14"
] | [
"qiskit/circuit/library/standard_gates/s.py",
"qiskit/circuit/library/standard_gates/rx.py",
"qiskit/extensions/quantum_initializer/uc.py",
"test/python/quantum_info/test_pauli.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The S and Sdg gate.\"\"\"\n\nimport numpy\nfrom qiskit.qasm import pi\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister\n\n\nclass SGate(Gate):\n r\"\"\"Single qubit S gate (Z**0.5).\n\n It induces a :math:`\\pi/2` phase, and is sometimes called the P gate (phase).\n\n This is a Clifford gate and a square-root of Pauli-Z.\n\n **Matrix Representation:**\n\n .. math::\n\n S = \\begin{pmatrix}\n 1 & 0 \\\\\n 0 & i\n \\end{pmatrix}\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───┐\n q_0: ┤ S ├\n └───┘\n\n Equivalent to a :math:`\\pi/2` radian rotation about the Z axis.\n \"\"\"\n\n def __init__(self, label=None):\n \"\"\"Create new S gate.\"\"\"\n super().__init__('s', 1, [], label=label)\n\n def _define(self):\n \"\"\"\n gate s a { u1(pi/2) a; }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import U1Gate\n q = QuantumRegister(1, 'q')\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U1Gate(pi / 2), [q[0]], [])\n ]\n qc._data = rules\n self.definition = qc\n\n def inverse(self):\n \"\"\"Return inverse of S (SdgGate).\"\"\"\n return SdgGate()\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the S gate.\"\"\"\n return numpy.array([[1, 0],\n [0, 1j]], dtype=complex)\n\n\nclass SdgGate(Gate):\n r\"\"\"Single qubit S-adjoint gate (~Z**0.5).\n\n It induces a :math:`-\\pi/2` phase.\n\n This is a Clifford gate and a square-root of Pauli-Z.\n\n **Matrix Representation:**\n\n .. math::\n\n Sdg = \\begin{pmatrix}\n 1 & 0 \\\\\n 0 & -i\n \\end{pmatrix}\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌─────┐\n q_0: ┤ Sdg ├\n └─────┘\n\n Equivalent to a :math:`\\pi/2` radian rotation about the Z axis.\n \"\"\"\n\n def __init__(self, label=None):\n \"\"\"Create new Sdg gate.\"\"\"\n super().__init__('sdg', 1, [], label=label)\n\n def _define(self):\n \"\"\"\n gate sdg a { u1(-pi/2) a; }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import U1Gate\n q = QuantumRegister(1, 'q')\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U1Gate(-pi / 2), [q[0]], [])\n ]\n qc._data = rules\n self.definition = qc\n\n def inverse(self):\n \"\"\"Return inverse of Sdg (SGate).\"\"\"\n return SGate()\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the Sdg gate.\"\"\"\n return numpy.array([[1, 0],\n [0, -1j]], dtype=complex)\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Rotation around the X axis.\"\"\"\n\nimport math\nimport numpy\nfrom qiskit.qasm import pi\nfrom qiskit.circuit.controlledgate import ControlledGate\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister\n\n\nclass RXGate(Gate):\n r\"\"\"Single-qubit rotation about the X axis.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───────┐\n q_0: ┤ Rx(ϴ) ├\n └───────┘\n\n **Matrix Representation:**\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n RX(\\theta) = exp(-i \\th X) =\n \\begin{pmatrix}\n \\cos{\\th} & -i\\sin{\\th} \\\\\n -i\\sin{\\th} & \\cos{\\th}\n \\end{pmatrix}\n \"\"\"\n\n def __init__(self, theta, label=None):\n \"\"\"Create new RX gate.\"\"\"\n super().__init__('rx', 1, [theta], label=label)\n\n def _define(self):\n \"\"\"\n gate rx(theta) a {r(theta, 0) a;}\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .r import RGate\n q = QuantumRegister(1, 'q')\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (RGate(self.params[0], 0), [q[0]], [])\n ]\n qc._data = rules\n self.definition = qc\n\n def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):\n \"\"\"Return a (mutli-)controlled-RX gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n if num_ctrl_qubits == 1:\n gate = CRXGate(self.params[0], label=label, ctrl_state=ctrl_state)\n gate.base_gate.label = self.label\n return gate\n return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)\n\n def inverse(self):\n r\"\"\"Return inverted RX gate.\n\n :math:`RX(\\lambda)^{\\dagger} = RX(-\\lambda)`\n \"\"\"\n return RXGate(-self.params[0])\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the RX gate.\"\"\"\n cos = math.cos(self.params[0] / 2)\n sin = math.sin(self.params[0] / 2)\n return numpy.array([[cos, -1j * sin],\n [-1j * sin, cos]], dtype=complex)\n\n\nclass CRXGate(ControlledGate):\n r\"\"\"Controlled-RX gate.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n q_0: ────■────\n ┌───┴───┐\n q_1: ┤ Rx(ϴ) ├\n └───────┘\n\n **Matrix representation:**\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n CRX(\\lambda)\\ q_0, q_1 =\n I \\otimes |0\\rangle\\langle 0| + RX(\\theta) \\otimes |1\\rangle\\langle 1| =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & \\cos{\\th} & 0 & -i\\sin{\\th} \\\\\n 0 & 0 & 1 & 0 \\\\\n 0 & -i\\sin{\\th} & 0 & \\cos{\\th}\n \\end{pmatrix}\n\n .. note::\n\n In Qiskit's convention, higher qubit indices are more significant\n (little endian convention). In many textbooks, controlled gates are\n presented with the assumption of more significant qubits as control,\n which in our case would be q_1. Thus a textbook matrix for this\n gate will be:\n\n .. 
parsed-literal::\n ┌───────┐\n q_0: ┤ Rx(ϴ) ├\n └───┬───┘\n q_1: ────■────\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n CRX(\\theta)\\ q_1, q_0 =\n |0\\rangle\\langle0| \\otimes I + |1\\rangle\\langle1| \\otimes RX(\\theta) =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 \\\\\n 0 & 0 & \\cos{\\th} & -i\\sin{\\th} \\\\\n 0 & 0 & -i\\sin{\\th} & \\cos{\\th}\n \\end{pmatrix}\n \"\"\"\n\n def __init__(self, theta, label=None, ctrl_state=None):\n \"\"\"Create new CRX gate.\"\"\"\n super().__init__('crx', 2, [theta], num_ctrl_qubits=1,\n label=label, ctrl_state=ctrl_state)\n self.base_gate = RXGate(theta)\n\n def _define(self):\n \"\"\"\n gate cu3(theta,phi,lambda) c, t\n { u1(pi/2) t;\n cx c,t;\n u3(-theta/2,0,0) t;\n cx c,t;\n u3(theta/2,-pi/2,0) t;\n }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import U1Gate\n from .u3 import U3Gate\n from .x import CXGate\n q = QuantumRegister(2, 'q')\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U1Gate(pi / 2), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (U3Gate(-self.params[0] / 2, 0, 0), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (U3Gate(self.params[0] / 2, -pi / 2, 0), [q[1]], [])\n ]\n qc._data = rules\n self.definition = qc\n\n def inverse(self):\n \"\"\"Return inverse RX gate (i.e. with the negative rotation angle).\"\"\"\n return CRXGate(-self.params[0])\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the CRX gate.\"\"\"\n half_theta = float(self.params[0]) / 2\n cos = numpy.cos(half_theta)\n isin = 1j * numpy.sin(half_theta)\n if self.ctrl_state:\n return numpy.array([[1, 0, 0, 0],\n [0, cos, 0, -isin],\n [0, 0, 1, 0],\n [0, -isin, 0, cos]],\n dtype=complex)\n else:\n return numpy.array([[cos, 0, -isin, 0],\n [0, 1, 0, 0],\n [-isin, 0, cos, 0],\n [0, 0, 0, 1]],\n dtype=complex)\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# The structure of the code is based on Emanuel Malvetti's semester thesis at\n# ETH in 2018, which was supervised by Raban Iten and Prof. Renato Renner.\n\n# pylint: disable=invalid-name\n# pylint: disable=missing-param-doc\n# pylint: disable=missing-type-doc\n\n\"\"\"\nUniformly controlled gates (also called multiplexed gates).\n\nThese gates can have several control qubits and a single target qubit.\nIf the k control qubits are in the state |i> (in the computational basis),\na single-qubit unitary U_i is applied to the target qubit.\n\nThis gate is represented by a block-diagonal matrix, where each block is a\n2x2 unitary:\n\n [[U_0, 0, ...., 0],\n [0, U_1, ...., 0],\n .\n .\n [0, 0, ...., U_(2^k-1)]]\n\"\"\"\n\nimport cmath\nimport math\n\nimport numpy as np\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.library.standard_gates.h import HGate\nfrom qiskit.quantum_info.operators.predicates import is_unitary_matrix\nfrom qiskit.circuit.quantumregister import QuantumRegister\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.synthesis import OneQubitEulerDecomposer\n\n_EPS = 1e-10 # global variable used to chop very small numbers to zero\n_DECOMPOSER1Q = OneQubitEulerDecomposer('U3')\n\n\nclass UCGate(Gate):\n \"\"\"Uniformly controlled gate (also called multiplexed gate).\n The decomposition is based on: https://arxiv.org/pdf/quant-ph/0410066.pdf.\n \"\"\"\n\n def __init__(self, gate_list, up_to_diagonal=False):\n \"\"\"UCGate Gate initializer.\n\n Args:\n gate_list (list[ndarray]): list of two qubit unitaries [U_0,...,U_{2^k-1}],\n where each single-qubit unitary U_i is given as a 2*2 numpy array.\n\n up_to_diagonal (bool): determines if the gate is implemented up to a diagonal.\n or if it is decomposed completely (default: False).\n If the UCGate u is decomposed up to a diagonal d, this means that the circuit\n implements a unitary u' such that d.u'=u.\n\n Raises:\n QiskitError: in case of bad input to the constructor\n \"\"\"\n # check input format\n if not isinstance(gate_list, list):\n raise QiskitError(\"The single-qubit unitaries are not provided in a list.\")\n for gate in gate_list:\n if not gate.shape == (2, 2):\n raise QiskitError(\"The dimension of a controlled gate is not equal to (2,2).\")\n if not gate_list:\n raise QiskitError(\"The gate list cannot be empty.\")\n\n # Check if number of gates in gate_list is a positive power of two\n num_contr = math.log2(len(gate_list))\n if num_contr < 0 or not num_contr.is_integer():\n raise QiskitError(\"The number of controlled single-qubit gates is not a \"\n \"non-negative power of 2.\")\n\n # Check if the single-qubit gates are unitaries\n for gate in gate_list:\n if not is_unitary_matrix(gate, _EPS):\n raise QiskitError(\"A controlled gate is not unitary.\")\n\n # Create new gate.\n super().__init__(\"multiplexer\", int(num_contr) + 1, gate_list)\n self.up_to_diagonal = up_to_diagonal\n\n def _get_diagonal(self):\n # Important: for a control list q_controls = 
[q[0],...,q_[k-1]] the\n # diagonal gate is provided in the computational basis of the qubits\n # q[k-1],...,q[0],q_target, decreasingly ordered with respect to the\n # significance of the qubit in the computational basis\n _, diag = self._dec_ucg()\n return diag\n\n def _define(self):\n ucg_circuit, _ = self._dec_ucg()\n self.definition = ucg_circuit\n\n def _dec_ucg(self):\n \"\"\"\n Call to create a circuit that implements the uniformly controlled gate. If\n up_to_diagonal=True, the circuit implements the gate up to a diagonal gate and\n the diagonal gate is also returned.\n \"\"\"\n diag = np.ones(2 ** self.num_qubits).tolist()\n q = QuantumRegister(self.num_qubits)\n q_controls = q[1:]\n q_target = q[0]\n circuit = QuantumCircuit(q)\n # If there is no control, we use the ZYZ decomposition\n if not q_controls:\n theta, phi, lamb = _DECOMPOSER1Q.angles(self.params[0])\n circuit.u3(theta, phi, lamb, q)\n return circuit, diag\n # If there is at least one control, first,\n # we find the single qubit gates of the decomposition.\n (single_qubit_gates, diag) = self._dec_ucg_help()\n # Now, it is easy to place the C-NOT gates and some Hadamards and Rz(pi/2) gates\n # (which are absorbed into the single-qubit unitaries) to get back the full decomposition.\n for i, gate in enumerate(single_qubit_gates):\n # Absorb Hadamards and Rz(pi/2) gates\n if i == 0:\n squ = HGate().to_matrix().dot(gate)\n elif i == len(single_qubit_gates) - 1:\n squ = gate.dot(UCGate._rz(np.pi / 2)).dot(HGate().to_matrix())\n else:\n squ = HGate().to_matrix().dot(gate.dot(UCGate._rz(np.pi / 2))).dot(\n HGate().to_matrix())\n # Add single-qubit gate\n circuit.squ(squ, q_target)\n # The number of the control qubit is given by the number of zeros at the end\n # of the binary representation of (i+1)\n binary_rep = np.binary_repr(i + 1)\n num_trailing_zeros = len(binary_rep) - len(binary_rep.rstrip('0'))\n q_contr_index = num_trailing_zeros\n # Add C-NOT gate\n if not i == len(single_qubit_gates) - 1:\n circuit.cx(q_controls[q_contr_index], q_target)\n if not self.up_to_diagonal:\n # Important: the diagonal gate is given in the computational basis of the qubits\n # q[k-1],...,q[0],q_target (ordered with decreasing significance),\n # where q[i] are the control qubits and t denotes the target qubit.\n circuit.diagonal(diag.tolist(), q)\n return circuit, diag\n\n def _dec_ucg_help(self):\n \"\"\"\n This method finds the single qubit gate arising in the decomposition of UCGates given in\n https://arxiv.org/pdf/quant-ph/0410066.pdf.\n \"\"\"\n single_qubit_gates = [gate.astype(complex) for gate in self.params]\n diag = np.ones(2 ** self.num_qubits, dtype=complex)\n num_contr = self.num_qubits - 1\n for dec_step in range(num_contr):\n num_ucgs = 2 ** dec_step\n # The decomposition works recursively and the following loop goes over the different\n # UCGates that arise in the decomposition\n for ucg_index in range(num_ucgs):\n len_ucg = 2 ** (num_contr - dec_step)\n for i in range(int(len_ucg / 2)):\n shift = ucg_index * len_ucg\n a = single_qubit_gates[shift + i]\n b = single_qubit_gates[shift + len_ucg // 2 + i]\n # Apply the decomposition for UCGates given in equation (3) in\n # https://arxiv.org/pdf/quant-ph/0410066.pdf\n # to demultiplex one control of all the num_ucgs uniformly-controlled gates\n # with log2(len_ucg) uniform controls\n v, u, r = self._demultiplex_single_uc(a, b)\n # replace the single-qubit gates with v,u (the already existing ones\n # are not needed any more)\n single_qubit_gates[shift + i] = v\n 
single_qubit_gates[shift + len_ucg // 2 + i] = u\n # Now we decompose the gates D as described in Figure 4 in\n # https://arxiv.org/pdf/quant-ph/0410066.pdf and merge some of the gates\n # into the UCGates and the diagonal at the end of the circuit\n\n # Remark: The Rz(pi/2) rotation acting on the target qubit and the Hadamard\n # gates arising in the decomposition of D are ignored for the moment (they will\n # be added together with the C-NOT gates at the end of the decomposition\n # (in the method dec_ucg()))\n if ucg_index < num_ucgs - 1:\n # Absorb the Rz(pi/2) rotation on the control into the UC-Rz gate and\n # merge the UC-Rz rotation with the following UCGate,\n # which hasn't been decomposed yet.\n k = shift + len_ucg + i\n single_qubit_gates[k] = \\\n single_qubit_gates[k].dot(UCGate._ct(r)) * \\\n UCGate._rz(np.pi / 2).item((0, 0))\n k = k + len_ucg // 2\n single_qubit_gates[k] = \\\n single_qubit_gates[k].dot(r) * UCGate._rz(np.pi / 2).item((1, 1))\n else:\n # Absorb the Rz(pi/2) rotation on the control into the UC-Rz gate and merge\n # the trailing UC-Rz rotation into a diagonal gate at the end of the circuit\n for ucg_index_2 in range(num_ucgs):\n shift_2 = ucg_index_2 * len_ucg\n k = 2 * (i + shift_2)\n diag[k] = diag[k] * UCGate._ct(r).item((0, 0)) * \\\n UCGate._rz(np.pi / 2).item((0, 0))\n diag[k + 1] = diag[k + 1] * UCGate._ct(r).item((1, 1)) * UCGate._rz(\n np.pi / 2).item((0, 0))\n k = len_ucg + k\n diag[k] *= r.item((0, 0)) * UCGate._rz(np.pi / 2).item((1, 1))\n diag[k + 1] *= r.item((1, 1)) * UCGate._rz(np.pi / 2).item((1, 1))\n return single_qubit_gates, diag\n\n def _demultiplex_single_uc(self, a, b):\n \"\"\"\n This method implements the decomposition given in equation (3) in\n https://arxiv.org/pdf/quant-ph/0410066.pdf.\n The decomposition is used recursively to decompose uniformly controlled gates.\n a,b = single qubit unitaries\n v,u,r = outcome of the decomposition given in the reference mentioned above\n (see there for the details).\n \"\"\"\n # The notation is chosen as in https://arxiv.org/pdf/quant-ph/0410066.pdf.\n x = a.dot(UCGate._ct(b))\n det_x = np.linalg.det(x)\n x11 = x.item((0, 0)) / cmath.sqrt(det_x)\n phi = cmath.phase(det_x)\n r1 = cmath.exp(1j / 2 * (np.pi / 2 - phi / 2 - cmath.phase(x11)))\n r2 = cmath.exp(1j / 2 * (np.pi / 2 - phi / 2 + cmath.phase(x11) + np.pi))\n r = np.array([[r1, 0], [0, r2]], dtype=complex)\n d, u = np.linalg.eig(r.dot(x).dot(r))\n # If d is not equal to diag(i,-i), then we put it into this \"standard\" form\n # (see eq. (13) in https://arxiv.org/pdf/quant-ph/0410066.pdf) by interchanging\n # the eigenvalues and eigenvectors.\n if abs(d[0] + 1j) < _EPS:\n d = np.flip(d, 0)\n u = np.flip(u, 1)\n d = np.diag(np.sqrt(d))\n v = d.dot(UCGate._ct(u)).dot(UCGate._ct(r)).dot(b)\n return v, u, r\n\n @staticmethod\n def _ct(m):\n return np.transpose(np.conjugate(m))\n\n @staticmethod\n def _rz(alpha):\n return np.array([[np.exp(1j * alpha / 2), 0], [0, np.exp(-1j * alpha / 2)]])\n\n\ndef uc(self, gate_list, q_controls, q_target, up_to_diagonal=False):\n \"\"\"Attach a uniformly controlled gates (also called multiplexed gates) to a circuit.\n\n The decomposition was introduced by Bergholm et al. 
in\n https://arxiv.org/pdf/quant-ph/0410066.pdf.\n\n Args:\n gate_list (list[ndarray]): list of two qubit unitaries [U_0,...,U_{2^k-1}],\n where each single-qubit unitary U_i is a given as a 2*2 array\n q_controls (QuantumRegister|list[(QuantumRegister,int)]): list of k control qubits.\n The qubits are ordered according to their significance in the computational basis.\n For example if q_controls=[q[1],q[2]] (with q = QuantumRegister(2)),\n the unitary U_0 is performed if q[1] and q[2] are in the state zero, U_1 is\n performed if q[2] is in the state zero and q[1] is in the state one, and so on\n q_target (QuantumRegister|(QuantumRegister,int)): target qubit, where we act on with\n the single-qubit gates.\n up_to_diagonal (bool): If set to True, the uniformly controlled gate is decomposed up\n to a diagonal gate, i.e. a unitary u' is implemented such that there exists a\n diagonal gate d with u = d.dot(u'), where the unitary u describes the uniformly\n controlled gate\n\n Returns:\n QuantumCircuit: the uniformly controlled gate is attached to the circuit.\n\n Raises:\n QiskitError: if the list number of control qubits does not correspond to the provided\n number of single-qubit unitaries; if an input is of the wrong type\n \"\"\"\n\n if isinstance(q_controls, QuantumRegister):\n q_controls = q_controls[:]\n if isinstance(q_target, QuantumRegister):\n q_target = q_target[:]\n if len(q_target) == 1:\n q_target = q_target[0]\n else:\n raise QiskitError(\"The target qubit is a QuantumRegister containing more than\"\n \" one qubit.\")\n # Check if q_controls has type \"list\"\n if not isinstance(q_controls, list):\n raise QiskitError(\"The control qubits must be provided as a list\"\n \" (also if there is only one control qubit).\")\n # Check if gate_list has type \"list\"\n if not isinstance(gate_list, list):\n raise QiskitError(\"The single-qubit unitaries are not provided in a list.\")\n # Check if number of gates in gate_list is a positive power of two\n num_contr = math.log2(len(gate_list))\n if num_contr < 0 or not num_contr.is_integer():\n raise QiskitError(\"The number of controlled single-qubit gates is not a non negative\"\n \" power of 2.\")\n # Check if number of control qubits does correspond to the number of single-qubit rotations\n if num_contr != len(q_controls):\n raise QiskitError(\"Number of controlled gates does not correspond to the number of\"\n \" control qubits.\")\n return self.append(UCGate(gate_list, up_to_diagonal), [q_target] + q_controls)\n\n\nQuantumCircuit.uc = uc\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\n\"\"\"Quick program to test the qi tools modules.\"\"\"\n\nimport unittest\nfrom copy import deepcopy\nimport numpy as np\n\nfrom qiskit.quantum_info import Pauli, pauli_group\nfrom qiskit.test import QiskitTestCase\n\n\nclass TestPauliAPI(QiskitTestCase):\n \"\"\"Tests for Pauli class API.\"\"\"\n\n def check(self, result):\n \"\"\"checks for result to be a Pauli 'IY' \"\"\"\n self.assertIsInstance(result, Pauli)\n self.assertEqual(result.num_qubits, 2)\n self.assertEqual(result.to_label(), 'IY')\n\n def test_ndarray_bool(self):\n \"\"\"Test creation from np.bool.\"\"\"\n x = np.asarray([1, 0]).astype(np.bool)\n z = np.asarray([1, 0]).astype(np.bool)\n pauli = Pauli(x=x, z=z)\n self.check(pauli)\n\n def test_ndarray_int(self):\n \"\"\"Test creation from np.int.\"\"\"\n x = np.asarray([2, 0]).astype(np.int)\n z = np.asarray([2, 0]).astype(np.int)\n pauli = Pauli(x=x, z=z)\n self.check(pauli)\n\n def test_list(self):\n \"\"\"Test creation from lists.\"\"\"\n pauli = Pauli(x=[1, 0], z=[1, 0])\n self.check(pauli)\n\n def test_tuple(self):\n \"\"\"Test creation from tuples.\"\"\"\n pauli = Pauli(x=(1, 0), z=(1, 0))\n self.check(pauli)\n\n def test_mix(self):\n \"\"\"Test creation from tuples and list.\"\"\"\n pauli = Pauli(x=(1, 0), z=[1, 0])\n self.check(pauli)\n\n\nclass TestPauli(QiskitTestCase):\n \"\"\"Tests for Pauli class.\"\"\"\n\n def setUp(self):\n \"\"\"Setup.\"\"\"\n z = np.asarray([1, 0, 1, 0]).astype(np.bool)\n x = np.asarray([1, 1, 0, 0]).astype(np.bool)\n self.ref_p = Pauli(z, x)\n self.ref_label = 'IZXY'\n self.ref_matrix = np.array([[0. + 0.j, 0. + 0.j, 0. + 0.j, 0. - 1.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 1.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. - 1.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 1.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 1.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. - 1.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 1.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. - 1.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. 
+ 0.j, 0. + 0.j, 0. + 0.j, 0. - 1.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 1.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. - 1.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 1.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 1.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. - 1.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 1.j, 0. + 0.j, 0. + 0.j],\n [0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j,\n 0. - 1.j, 0. + 0.j, 0. + 0.j, 0. + 0.j]])\n\n def test_create_from_label(self):\n \"\"\"Test creation from pauli label.\"\"\"\n label = 'IZXY'\n pauli = Pauli(label=label)\n\n self.assertEqual(pauli, self.ref_p)\n self.assertEqual(pauli.to_label(), self.ref_label)\n self.assertEqual(len(pauli), 4)\n\n def test_create_from_z_x(self):\n \"\"\"Test creation for boolean vector.\"\"\"\n self.assertEqual(self.ref_p.to_label(), 'IZXY')\n self.assertEqual(len(self.ref_p), 4)\n\n def test_repr(self):\n \"\"\"Test __repr__.\"\"\"\n p = repr(self.ref_p)\n self.assertEqual(p, \"Pauli(z=[True, False, True, False], x=[True, True, False, False])\")\n\n def test_random_pauli(self):\n \"\"\"Test random pauli creation.\"\"\"\n length = 4\n q = Pauli.random(length, seed=42)\n self.log.info(q)\n self.assertEqual(q.num_qubits, length)\n self.assertEqual(len(q.z), length)\n self.assertEqual(len(q.x), length)\n self.assertEqual(len(q.to_label()), length)\n self.assertEqual(len(q.to_matrix()), 2 ** length)\n\n def test_mul(self):\n \"\"\"Test multiplication.\"\"\"\n p1 = self.ref_p\n p2 = Pauli.from_label('ZXXI')\n p3 = p1 * p2\n self.assertEqual(len(p3), 4)\n self.assertEqual(p3.to_label(), 'ZYIY')\n\n def test_imul(self):\n \"\"\"Test in-place multiplication.\"\"\"\n p1 = self.ref_p\n p2 = Pauli.from_label('ZXXI')\n p3 = deepcopy(p2)\n p2 *= p1\n self.assertTrue(p2 != p3)\n self.assertEqual(p2.to_label(), 'ZYIY')\n\n def test_equality_equal(self):\n \"\"\"Test equality operator: equal Paulis.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n self.assertTrue(p1 == p2)\n self.assertEqual(p1.to_label(), self.ref_label)\n self.assertEqual(p2.to_label(), self.ref_label)\n\n def test_equality_different(self):\n \"\"\"Test equality operator: different Paulis.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n\n p2.update_z(True, 1)\n self.assertFalse(p1 == p2)\n self.assertEqual(p1.to_label(), self.ref_label)\n self.assertEqual(p2.to_label(), 'IZYY')\n\n def test_inequality_equal(self):\n \"\"\"Test inequality operator: equal Paulis.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n\n self.assertFalse(p1 != p2)\n\n def test_inequality_different(self):\n \"\"\"Test inequality operator: different Paulis.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n p2.update_x(False, 1)\n self.assertTrue(p1 != p2)\n 
self.assertEqual(p2.to_label(), 'IZIY')\n\n def test_update_z(self):\n \"\"\"Test update_z method.\"\"\"\n updated_z = np.asarray([0, 0, 0, 0]).astype(np.bool)\n self.ref_p.update_z(updated_z)\n np.testing.assert_equal(self.ref_p.z, np.asarray([False, False, False, False]))\n self.assertEqual(self.ref_p.to_label(), 'IIXX')\n\n def test_update_z_2(self):\n \"\"\"Test update_z method, update partial z.\"\"\"\n updated_z = np.asarray([0, 1]).astype(np.bool)\n self.ref_p.update_z(updated_z, [0, 1])\n np.testing.assert_equal(self.ref_p.z, np.asarray([False, True, True, False]))\n self.assertEqual(self.ref_p.to_label(), 'IZYX')\n\n def test_update_x(self):\n \"\"\"Test update_x method.\"\"\"\n updated_x = np.asarray([0, 1, 0, 1]).astype(np.bool)\n self.ref_p.update_x(updated_x)\n np.testing.assert_equal(self.ref_p.x, np.asarray([False, True, False, True]))\n self.assertEqual(self.ref_p.to_label(), 'XZXZ')\n\n def test_update_x_2(self):\n \"\"\"Test update_x method, update partial x.\"\"\"\n updated_x = np.asarray([0, 1]).astype(np.bool)\n self.ref_p.update_x(updated_x, [1, 2])\n np.testing.assert_equal(self.ref_p.x, np.asarray([True, False, True, False]))\n self.assertEqual(self.ref_p.to_label(), 'IYIY')\n\n def test_to_matrix(self):\n \"\"\"Test pauli to matrix.\"\"\"\n np.testing.assert_allclose(self.ref_p.to_matrix(), self.ref_matrix)\n\n def test_delete_qubit(self):\n \"\"\"Test deleting single qubit.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n\n p2.delete_qubits(0)\n self.assertTrue(p1 != p2)\n self.assertEqual(len(p2), 3)\n self.assertEqual(p2.to_label(), 'IZX')\n\n def test_delete_qubits(self):\n \"\"\"Test deleting multiple qubits.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n\n p2.delete_qubits([0, 2])\n self.assertTrue(p1 != p2)\n self.assertEqual(len(p2), 2)\n self.assertEqual(p2.to_label(), 'IX')\n\n def test_append_pauli_labels(self):\n \"\"\"Test appending paulis via labels.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n\n p2.append_paulis(pauli_labels=['Z', 'Y', 'I'])\n self.assertTrue(p1 != p2)\n self.assertEqual(len(p2), 7)\n self.assertEqual(p2.to_label(), 'IYZ' + self.ref_label)\n\n def test_append_paulis(self):\n \"\"\"Test appending paulis via pauli object.\"\"\"\n p1 = self.ref_p\n p2 = deepcopy(p1)\n\n p2.append_paulis(paulis=p1)\n self.assertTrue(p1 != p2)\n self.assertEqual(len(p2), 8)\n self.assertEqual(p2.to_label(), self.ref_label + self.ref_label)\n\n def test_insert_pauli_labels_1(self):\n \"\"\"Test inserting paulis via labels.\"\"\"\n p2 = deepcopy(self.ref_p)\n\n p2.insert_paulis(indices=[1, 2], pauli_labels=['Y', 'I'])\n self.assertTrue(self.ref_p != p2)\n self.assertEqual(len(p2), 6)\n self.assertEqual(p2.to_label(), 'IZIXYY')\n\n def test_insert_pauli_labels_2(self):\n \"\"\"Test inserting paulis via labels.\"\"\"\n p2 = deepcopy(self.ref_p)\n\n p2.insert_paulis(indices=[3, 2], pauli_labels=['Y', 'I'])\n self.assertTrue(self.ref_p != p2)\n self.assertEqual(len(p2), 6)\n self.assertEqual(p2.to_label(), 'IYZIXY')\n\n def test_insert_paulis(self):\n \"\"\"Test inserting paulis via pauli object.\"\"\"\n p1 = deepcopy(self.ref_p)\n\n new_p = Pauli.from_label('XY')\n\n p1.insert_paulis(indices=[0], paulis=new_p)\n\n self.assertTrue(p1 != self.ref_p)\n self.assertEqual(len(p1), 6)\n self.assertEqual(p1.to_label(), self.ref_label + 'XY')\n\n def test_kron(self):\n \"\"\"Test kron production.\"\"\"\n p1 = deepcopy(self.ref_p)\n p2 = self.ref_p\n p2.kron(p1)\n self.assertTrue(p1 != p2)\n self.assertEqual(len(p2), 8)\n self.assertEqual(p2.to_label(), self.ref_label + 
self.ref_label)\n\n def test_pauli_single(self):\n \"\"\"Test pauli single.\"\"\"\n num_qubits = 5\n pz = Pauli.pauli_single(num_qubits, 2, 'Z')\n self.assertTrue(pz.to_label(), 'IIIZI')\n\n py = Pauli.pauli_single(num_qubits, 4, 'Y')\n self.assertTrue(py.to_label(), 'IYIII')\n\n px = Pauli.pauli_single(num_qubits, 3, 'X')\n self.assertTrue(px.to_label(), 'IIXII')\n\n def test_pauli_group(self):\n \"\"\"Test pauli group.\"\"\"\n self.log.info(\"Group in tensor order:\")\n expected = ['III', 'XII', 'YII', 'ZII', 'IXI', 'XXI', 'YXI', 'ZXI', 'IYI', 'XYI', 'YYI',\n 'ZYI', 'IZI', 'XZI', 'YZI', 'ZZI', 'IIX', 'XIX', 'YIX', 'ZIX', 'IXX', 'XXX',\n 'YXX', 'ZXX', 'IYX', 'XYX', 'YYX', 'ZYX', 'IZX', 'XZX', 'YZX', 'ZZX', 'IIY',\n 'XIY', 'YIY', 'ZIY', 'IXY', 'XXY', 'YXY', 'ZXY', 'IYY', 'XYY', 'YYY', 'ZYY',\n 'IZY', 'XZY', 'YZY', 'ZZY', 'IIZ', 'XIZ', 'YIZ', 'ZIZ', 'IXZ', 'XXZ', 'YXZ',\n 'ZXZ', 'IYZ', 'XYZ', 'YYZ', 'ZYZ', 'IZZ', 'XZZ', 'YZZ', 'ZZZ']\n grp = pauli_group(3, case='tensor')\n for j in grp:\n self.log.info('==== j (tensor order) ====')\n self.log.info(j.to_label())\n self.assertEqual(expected.pop(0)[::-1], j.to_label())\n\n self.log.info(\"Group in weight order:\")\n expected = ['III', 'XII', 'YII', 'ZII', 'IXI', 'IYI', 'IZI', 'IIX', 'IIY', 'IIZ', 'XXI',\n 'YXI', 'ZXI', 'XYI', 'YYI', 'ZYI', 'XZI', 'YZI', 'ZZI', 'XIX', 'YIX', 'ZIX',\n 'IXX', 'IYX', 'IZX', 'XIY', 'YIY', 'ZIY', 'IXY', 'IYY', 'IZY', 'XIZ', 'YIZ',\n 'ZIZ', 'IXZ', 'IYZ', 'IZZ', 'XXX', 'YXX', 'ZXX', 'XYX', 'YYX', 'ZYX', 'XZX',\n 'YZX', 'ZZX', 'XXY', 'YXY', 'ZXY', 'XYY', 'YYY', 'ZYY', 'XZY', 'YZY', 'ZZY',\n 'XXZ', 'YXZ', 'ZXZ', 'XYZ', 'YYZ', 'ZYZ', 'XZZ', 'YZZ', 'ZZZ']\n grp = pauli_group(3, case='weight')\n for j in grp:\n self.log.info('==== j (weight order) ====')\n self.log.info(j.to_label())\n self.assertEqual(expected.pop(0)[::-1], j.to_label())\n\n def test_sgn_prod(self):\n \"\"\"Test sgn prod.\"\"\"\n p1 = Pauli(np.array([False]), np.array([True]))\n p2 = Pauli(np.array([True]), np.array([True]))\n\n self.log.info(\"sign product:\")\n p3, sgn = Pauli.sgn_prod(p1, p2)\n self.log.info(\"p1: %s\", p1.to_label())\n self.log.info(\"p2: %s\", p2.to_label())\n self.log.info(\"p3: %s\", p3.to_label())\n self.log.info(\"sgn_prod(p1, p2): %s\", str(sgn))\n self.assertEqual(p1.to_label(), 'X')\n self.assertEqual(p2.to_label(), 'Y')\n self.assertEqual(p3.to_label(), 'Z')\n self.assertEqual(sgn, 1j)\n\n self.log.info(\"sign product reverse:\")\n p3, sgn = Pauli.sgn_prod(p2, p1) # pylint: disable=arguments-out-of-order\n self.log.info(\"p2: %s\", p2.to_label())\n self.log.info(\"p1: %s\", p1.to_label())\n self.log.info(\"p3: %s\", p3.to_label())\n self.log.info(\"sgn_prod(p2, p1): %s\", str(sgn))\n self.assertEqual(p1.to_label(), 'X')\n self.assertEqual(p2.to_label(), 'Y')\n self.assertEqual(p3.to_label(), 'Z')\n self.assertEqual(sgn, -1j)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.cos",
"numpy.sin"
],
[
"numpy.sqrt",
"numpy.ones",
"numpy.linalg.det",
"numpy.binary_repr",
"numpy.exp",
"numpy.array",
"numpy.flip",
"numpy.conjugate"
],
[
"numpy.asarray",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
krumo/SPIN | [
"0e2f17e70f06de46e062683ea6d5b233eeaa73c1"
] | [
"spin/models/smpl.py"
] | [
"import torch\nimport numpy as np\nimport smplx\nfrom smplx import SMPL as _SMPL\nfrom smplx.body_models import ModelOutput\nfrom smplx.lbs import vertices2joints\n\nimport spin.config as config\nimport spin.constants as constants\n\nclass SMPL(_SMPL):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SMPL, self).__init__(*args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))\n self.joint_map = torch.tensor(joints, dtype=torch.long)\n\n def forward(self, *args, **kwargs):\n kwargs['get_skin'] = True\n smpl_output = super(SMPL, self).forward(*args, **kwargs)\n extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)\n joints = torch.cat([smpl_output.joints, extra_joints], dim=1)\n joints = smpl_output.joints\n # print(smpl_output.joints.shape)\n # joints = joints[:, self.joint_map, :]\n output = ModelOutput(vertices=smpl_output.vertices,\n global_orient=smpl_output.global_orient,\n body_pose=smpl_output.body_pose,\n joints=joints,\n betas=smpl_output.betas,\n full_pose=smpl_output.full_pose)\n return output\n"
] | [
[
"numpy.load",
"torch.cat",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yketa/UBC---Spring-2018---code | [
"b065544639a483dda48cda89bcbb11c1772232aa"
] | [
"analysis/coarse_graining.py"
] | [
"\"\"\"\nModule coarse_graining implements a Gaussian coarse-graining adapted from\nIlling et al., Phys. Rev. Lett. 117, 208002 (2016) following Goldhirsch and\nGoldenberg, Eur. Phys. J. E 9, 245–251 (2002).\n\"\"\"\n\nimport numpy as np\n\nclass GaussianCG:\n \"\"\"\n Gaussian coarse-graining.\n \"\"\"\n\n def __init__(self, sigma, r_cut):\n \"\"\"\n Parameters\n ----------\n sigma : float\n Length scale of Gaussian function.\n r_cut : float\n Coarse-graining cut-off radius.\n \"\"\"\n\n self.sigma = sigma # length scale of Gaussian function\n self.r_cut = r_cut # coarse-graining cut-off radius\n\n def function(self, r):\n \"\"\"\n Parameters\n ----------\n r : float\n Radius.\n\n Returns\n -------\n phi : float\n Coarse-graining factor at radius r.\n \"\"\"\n\n if r > self.r_cut: return 0 # coarse-graining function is zero after cut-off\n\n Dg = 2*np.pi*(self.sigma**2)*(1 -\n np.exp(-0.5*((self.r_cut/self.sigma)**2))) # normalisation factor\n return np.exp(-0.5*((r/self.sigma)**2))/Dg # coarse-graining factor\n\n def factors(self, positions):\n \"\"\"\n Parameters\n ----------\n positions : float array\n Coordinates at which coarse-graining is desired.\n\n Returns\n -------\n CGfactors : Numpy float array\n Coarse-graining factors at positions.\n \"\"\"\n\n return np.array(list(map(\n lambda r: self.function(r),\n np.sqrt(np.sum(positions**2, axis=-1))\n ))) # coarse graining factors at positions\n\nclass SquareUniformCG:\n \"\"\"\n Square uniform coarse-graining.\n \"\"\"\n\n def __init__(self, dL):\n \"\"\"\n Parameters\n ----------\n dL : float\n Length of square box on which to average.\n \"\"\"\n\n self.dL = dL # averaging square length\n\n def function(self, position):\n \"\"\"\n Parameters\n ----------\n position : float array\n Coordinates.\n\n Returns\n -------\n phi : float\n Coarse-graining factor at position position.\n \"\"\"\n\n if (np.abs(np.array(position)) > self.dL/2).any(): return 0 # coarse-graining function is zero outside square\n return 1 # is one in\n\n def factors(self, positions):\n \"\"\"\n Parameters\n ----------\n positions : float array\n Coordinates at which coarse-graining is desired.\n\n Returns\n -------\n CGfactors : Numpy float array\n Coarse-graining factors at positions.\n \"\"\"\n\n CGfactors = np.array(list(map(\n lambda position:\n self.function(position),\n positions\n )))\n sumCGfactors = np.sum(CGfactors)\n if np.sum(CGfactors) == 0: return 0\n return CGfactors/sumCGfactors # coarse graining factors at positions\n\nclass CoarseGraining:\n \"\"\"\n Enables unique calculation of coarse-graining factors and then calculation\n of coarse-graining avergages.\n \"\"\"\n\n def __init__(self, factors_function, positions):\n \"\"\"\n Parameters\n ----------\n factors_function : function\n Function of array of coordinates which returns coarse-graining\n factors at these coordinates.\n positions : float array\n Coordinates at which coarse-graining is desired.\n \"\"\"\n\n self.CGfactors = np.array(factors_function(positions)) # coarse-graining factors at positions\n\n def average(self, var):\n \"\"\"\n Coarse-graining averaging.\n\n Parameters\n ----------\n var : float array\n Values of variable to coarse-grain at different positions from\n point at which coarse-graining is desired.\n\n Returns\n -------\n average : float\n Coarse-grained variable.\n \"\"\"\n\n return np.sum(\n np.transpose(np.array(self.CGfactors,\n ndmin=len(np.array(var).shape)))\n *np.array(var), axis=0) # coarse-grained variable\n"
] | [
[
"numpy.array",
"numpy.exp",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Nexuscompute/Cirq | [
"640ef8f82d6a56ec95361388ce7976e096cca906",
"640ef8f82d6a56ec95361388ce7976e096cca906",
"640ef8f82d6a56ec95361388ce7976e096cca906",
"640ef8f82d6a56ec95361388ce7976e096cca906",
"640ef8f82d6a56ec95361388ce7976e096cca906"
] | [
"cirq-core/cirq/work/observable_measurement_data_test.py",
"cirq-core/cirq/neutral_atoms/neutral_atom_devices.py",
"cirq-core/cirq/ops/global_phase_op.py",
"cirq-core/cirq/sim/density_matrix_simulator_test.py",
"examples/bb84.py"
] | [
"# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport dataclasses\nimport datetime\nimport time\n\nimport numpy as np\nimport pytest\n\nimport cirq\nimport cirq.work as cw\nfrom cirq.work.observable_measurement_data import (\n _check_and_get_real_coef,\n _obs_vals_from_measurements,\n _stats_from_measurements,\n)\nfrom cirq.work.observable_settings import _MeasurementSpec\n\n\ndef test_get_real_coef():\n q0 = cirq.LineQubit(0)\n assert _check_and_get_real_coef(cirq.Z(q0) * 2, atol=1e-8) == 2\n assert _check_and_get_real_coef(cirq.Z(q0) * complex(2.0), atol=1e-8) == 2\n with pytest.raises(ValueError):\n _check_and_get_real_coef(cirq.Z(q0) * 2.0j, atol=1e-8)\n\n\ndef test_obs_vals_from_measurements():\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n obs = cirq.Z(a) * cirq.Z(b) * 10\n vals = _obs_vals_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)\n should_be = [10, -10, -10, 10]\n np.testing.assert_equal(vals, should_be)\n\n\ndef test_stats_from_measurements():\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n obs = cirq.Z(a) * cirq.Z(b) * 10\n mean, err = _stats_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)\n\n # The mean is zero since our bitstrings have balanced even- and odd-\n # parity cases.\n assert mean == 0\n\n # Since we multiplied our observable by 10, the standard deviation is\n # 10 [each obs val deviates by 10]. 
The variance is 10**2 and the\n # squared-standard-error-of-the-mean can be found by dividing by the\n # number of samples minus 1.\n assert err == 10**2 / (4 - 1)\n\n\ndef test_observable_measured_result():\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n omr = cw.ObservableMeasuredResult(\n setting=cw.InitObsSetting(\n init_state=cirq.Z(a) * cirq.Z(b), observable=cirq.Y(a) * cirq.Y(b)\n ),\n mean=0,\n variance=5**2,\n repetitions=4,\n circuit_params={'phi': 52},\n )\n assert omr.stddev == 5\n assert omr.observable == cirq.Y(a) * cirq.Y(b)\n assert omr.init_state == cirq.Z(a) * cirq.Z(b)\n\n cirq.testing.assert_equivalent_repr(omr)\n\n assert omr.as_dict() == {\n 'init_state': cirq.Z(a) * cirq.Z(b),\n 'observable': cirq.Y(a) * cirq.Y(b),\n 'mean': 0,\n 'variance': 25,\n 'repetitions': 4,\n 'param.phi': 52,\n }\n omr2 = dataclasses.replace(\n omr,\n circuit_params={\n 'phi': 52,\n 'observable': 3.14, # this would be a bad but legal parameter name\n 'param.phi': -1,\n },\n )\n assert omr2.as_dict() == {\n 'init_state': cirq.Z(a) * cirq.Z(b),\n 'observable': cirq.Y(a) * cirq.Y(b),\n 'mean': 0,\n 'variance': 25,\n 'repetitions': 4,\n 'param.phi': 52,\n 'param.observable': 3.14,\n 'param.param.phi': -1,\n }\n\n\[email protected]()\ndef example_bsa() -> 'cw.BitstringAccumulator':\n \"\"\"Test fixture to create an (empty) example BitstringAccumulator\"\"\"\n q0, q1 = cirq.LineQubit.range(2)\n setting = cw.InitObsSetting(\n init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)\n )\n meas_spec = _MeasurementSpec(\n max_setting=setting, circuit_params={'beta': 0.123, 'gamma': 0.456}\n )\n bsa = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[\n setting,\n cw.InitObsSetting(init_state=setting.init_state, observable=cirq.X(q0)),\n cw.InitObsSetting(init_state=setting.init_state, observable=cirq.Y(q1)),\n ],\n qubit_to_index={q0: 0, q1: 1},\n )\n return bsa\n\n\ndef test_bitstring_accumulator(example_bsa):\n # test initialization\n assert example_bsa.bitstrings.shape == (0, 2)\n assert example_bsa.chunksizes.shape == (0,)\n assert example_bsa.timestamps.shape == (0,)\n\n # test consume_results\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n example_bsa.consume_results(bitstrings)\n assert example_bsa.bitstrings.shape == (4, 2)\n assert example_bsa.chunksizes.shape == (1,)\n assert example_bsa.timestamps.shape == (1,)\n assert example_bsa.n_repetitions == 4\n\n with pytest.raises(ValueError):\n example_bsa.consume_results(bitstrings.astype(int))\n\n # test results\n results = list(example_bsa.results)\n assert len(results) == 3\n for r in results:\n assert r.repetitions == 4\n\n # test records\n for r in example_bsa.records:\n assert isinstance(r, dict)\n assert 'repetitions' in r\n assert r['repetitions'] == 4\n\n\ndef test_bitstring_accumulator_strings(example_bsa):\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n example_bsa.consume_results(bitstrings)\n\n q0, q1 = cirq.LineQubit.range(2)\n settings = cw.observables_to_settings(\n [cirq.X(q0), cirq.Y(q1), cirq.X(q0) * cirq.Y(q1)], qubits=[q0, q1]\n )\n\n strings_should_be = [\n '+Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577',\n '+Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577',\n '+Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577',\n ]\n for setting, ssb in zip(settings, strings_should_be):\n assert example_bsa.summary_string(setting) == ssb, ssb\n\n assert (\n str(example_bsa)\n == \"\"\"Accumulator +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)); 
4 repetitions\n +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577\n +Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577\n +Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577\"\"\"\n )\n\n\ndef test_bitstring_accumulator_equality():\n et = cirq.testing.EqualsTester()\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n chunksizes = np.asarray([4])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n obs = cirq.Z(a) * cirq.Z(b) * 10\n setting = cw.InitObsSetting(init_state=cirq.Z(a) * cirq.Z(b), observable=obs)\n meas_spec = _MeasurementSpec(setting, {})\n\n cirq.testing.assert_equivalent_repr(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings.copy(),\n chunksizes=chunksizes.copy(),\n timestamps=timestamps.copy(),\n )\n )\n\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings.copy(),\n chunksizes=chunksizes.copy(),\n timestamps=timestamps.copy(),\n ),\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings.copy(),\n chunksizes=chunksizes.copy(),\n timestamps=timestamps.copy(),\n ),\n )\n\n time.sleep(1)\n timestamps = np.asarray([datetime.datetime.now()])\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(setting, {'a': 2}),\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n\n bitstrings = bitstrings.copy()\n bitstrings[0] = [1, 1]\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n chunksizes = np.asarray([2, 2])\n timestamps = np.asarray(list(timestamps) * 2)\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n\n\ndef _get_ZZ_Z_Z_bsa_constructor_args():\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n chunksizes = np.asarray([4])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n settings = list(\n cw.observables_to_settings(\n [cirq.Z(a) * cirq.Z(b) * 7, cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]\n )\n )\n meas_spec = _MeasurementSpec(settings[0], {})\n return {\n 'meas_spec': meas_spec,\n 'simul_settings': settings,\n 'qubit_to_index': qubit_to_index,\n 'bitstrings': bitstrings,\n 'chunksizes': chunksizes,\n 'timestamps': timestamps,\n }\n\n\ndef test_bitstring_accumulator_stats():\n kwargs = _get_ZZ_Z_Z_bsa_constructor_args()\n settings = kwargs['simul_settings']\n a, b = kwargs['qubit_to_index']\n\n bsa = cw.BitstringAccumulator(**kwargs)\n\n # There are three observables, each with mean 0 because\n # the four 2-bit strings have even numbers of a) ones in the\n # first position b) ones in the second position c) even parity\n # 
pairs.\n np.testing.assert_allclose([0, 0, 0], bsa.means())\n\n # Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)\n # where xbar and ybar are 0, per above. Each individual observed\n # value is +-1, so (x-xbar)(y-bar) is +-1 (neglecting observable coefficients)\n # For off-diagonal elements, there are two +1 and two -1 terms for each entry\n # so the total contribution is zero, and the matrix is diagonal\n should_be = np.array([[4 * 7**2, 0, 0], [0, 4 * 5**2, 0], [0, 0, 4 * 3**2]])\n should_be = should_be / (4 - 1) # covariance formula\n should_be = should_be / 4 # cov of the distribution of sample mean\n np.testing.assert_allclose(should_be, bsa.covariance())\n\n for setting, var in zip(settings, [4 * 7**2, 4 * 5**2, 4 * 3**2]):\n np.testing.assert_allclose(0, bsa.mean(setting))\n np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))\n np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))\n\n bad_obs = [cirq.X(a) * cirq.X(b)]\n bad_setting = list(cw.observables_to_settings(bad_obs, qubits=[a, b]))[0]\n with pytest.raises(ValueError):\n bsa.mean(bad_setting)\n\n\ndef test_bitstring_accumulator_stats_2():\n bitstrings = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], np.uint8)\n chunksizes = np.asarray([4])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n settings = list(cw.observables_to_settings([cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]))\n meas_spec = _MeasurementSpec(settings[0], {})\n\n bsa = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n\n # There are three observables, each with mean 0 because\n # the four 2-bit strings have even numbers of a) ones in the\n # first position b) ones in the second position.\n np.testing.assert_allclose([0, 0], bsa.means())\n\n # Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)\n # where xbar and ybar are 0, per above. 
Each individual observed\n # value is +-1, so (x-xbar)(y-bar) is +-1 (neglecting observable coefficients)\n # In this case, the measurements are perfectly correlated.\n should_be = 4 * np.array([[5 * 5, 5 * 3], [3 * 5, 3 * 3]])\n should_be = should_be / (4 - 1) # covariance formula\n should_be = should_be / 4 # cov of the distribution of sample mean\n np.testing.assert_allclose(should_be, bsa.covariance())\n\n for setting, var in zip(settings, [4 * 5**2, 4 * 3**2]):\n np.testing.assert_allclose(0, bsa.mean(setting))\n np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))\n np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))\n\n\ndef test_bitstring_accumulator_errors():\n q0, q1 = cirq.LineQubit.range(2)\n settings = cw.observables_to_settings(\n [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]\n )\n grouped_settings = cw.group_settings_greedy(settings)\n max_setting = list(grouped_settings.keys())[0]\n simul_settings = grouped_settings[max_setting]\n\n with pytest.raises(ValueError):\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings,\n qubit_to_index={q0: 0, q1: 1},\n bitstrings=np.array([[0, 1], [0, 1]]),\n chunksizes=np.array([2]),\n )\n\n with pytest.raises(ValueError):\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings,\n qubit_to_index={q0: 0, q1: 1},\n bitstrings=np.array([[0, 1], [0, 1]]),\n chunksizes=np.array([3]),\n timestamps=[datetime.datetime.now()],\n )\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings[:1],\n qubit_to_index={q0: 0, q1: 1},\n )\n with pytest.raises(ValueError):\n bsa.covariance()\n with pytest.raises(ValueError):\n bsa.variance(simul_settings[0])\n with pytest.raises(ValueError):\n bsa.mean(simul_settings[0])\n\n bsa.consume_results(np.array([[0, 0]], dtype=np.uint8))\n assert bsa.covariance().shape == (1, 1)\n\n\ndef test_flatten_grouped_results():\n q0, q1 = cirq.LineQubit.range(2)\n settings = cw.observables_to_settings(\n [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]\n )\n grouped_settings = cw.group_settings_greedy(settings)\n bsas = []\n for max_setting, simul_settings in grouped_settings.items():\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings,\n qubit_to_index={q0: 0, q1: 1},\n )\n bsa.consume_results(np.array([[0, 0], [0, 0], [0, 0]], dtype=np.uint8))\n bsas.append(bsa)\n\n results = cw.flatten_grouped_results(bsas)\n assert len(results) == 4\n for res in results:\n # We pass all 0's to each consume_results, so everything is 1 +- 0\n assert res.mean == 1\n assert res.variance == 0\n assert res.repetitions == 3\n\n\ndef _get_mock_readout_calibration(qa_0=90, qa_1=10, qb_0=91, qb_1=9):\n # Mock readout correction results by constructing a BitstringAccumulator\n # with two <Z> measurements\n q1_ro = np.array([0] * qa_0 + [1] * qa_1)\n q2_ro = np.array([0] * qb_0 + [1] * qb_1)\n rs = np.random.RandomState(52)\n rs.shuffle(q1_ro)\n rs.shuffle(q2_ro)\n ro_bitstrings = np.vstack((q1_ro, q2_ro)).T\n assert ro_bitstrings.shape == (100, 2)\n chunksizes = np.asarray([100])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n ro_settings = list(cw.observables_to_settings([cirq.Z(a), cirq.Z(b)], qubits=[a, b]))\n 
(ro_meas_spec_setting,) = list(\n cw.observables_to_settings([cirq.Z(a) * cirq.Z(b)], qubits=[a, b])\n )\n ro_meas_spec = _MeasurementSpec(ro_meas_spec_setting, {})\n\n ro_bsa = cw.BitstringAccumulator(\n meas_spec=ro_meas_spec,\n simul_settings=ro_settings,\n qubit_to_index=qubit_to_index,\n bitstrings=ro_bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n return ro_bsa, ro_settings, ro_meas_spec_setting\n\n\ndef test_readout_correction():\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n ro_bsa, ro_settings, ro_meas_spec_setting = _get_mock_readout_calibration()\n\n # observables range from 1 to -1 while bitstrings range from 0 to 1\n assert ro_bsa.mean(ro_settings[0]) == 0.8\n assert ro_bsa.mean(ro_settings[1]) == 0.82\n assert np.isclose(ro_bsa.mean(ro_meas_spec_setting), 0.8 * 0.82, atol=0.05)\n\n bitstrings = np.array(\n [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 1], [1, 1]], dtype=np.uint8\n )\n chunksizes = np.asarray([len(bitstrings)])\n timestamps = np.asarray([datetime.datetime.now()])\n qubit_to_index = {a: 0, b: 1}\n settings = list(\n cw.observables_to_settings([cirq.X(a) * cirq.Y(b), cirq.X(a), cirq.Y(b)], qubits=[a, b])\n )\n meas_spec = _MeasurementSpec(settings[0], {})\n\n # First, make one with no readout correction\n bsa1 = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n\n # [XY: one excitation, X: one excitation, Y: two excitations]\n np.testing.assert_allclose([1 - 1 / 4, 1 - 1 / 4, 1 - 2 / 4], bsa1.means())\n np.testing.assert_allclose([0.75, 0.75, 0.5], bsa1.means())\n\n # Turn on readout correction\n bsa2 = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n readout_calibration=ro_bsa,\n )\n\n # Readout correction increases variance\n for setting in settings:\n assert bsa2.variance(setting) > bsa1.variance(setting)\n\n np.testing.assert_allclose(\n [0.75 / (0.8 * 0.82), 0.75 / 0.8, 0.5 / 0.82], bsa2.means(), atol=0.01\n )\n\n # Variance becomes singular when readout error is 50/50\n ro_bsa_50_50, _, _ = _get_mock_readout_calibration(qa_0=50, qa_1=50)\n bsa3 = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n readout_calibration=ro_bsa_50_50,\n )\n with pytest.raises(ZeroDivisionError):\n bsa3.means()\n\n assert bsa3.variance(settings[1]) == np.inf\n\n\ndef test_readout_correction_errors():\n kwargs = _get_ZZ_Z_Z_bsa_constructor_args()\n settings = kwargs['simul_settings']\n ro_bsa, _, _ = _get_mock_readout_calibration()\n kwargs['readout_calibration'] = ro_bsa\n bsa = cw.BitstringAccumulator(**kwargs)\n\n # Variance becomes singular as the estimated value approaches zero\n np.testing.assert_allclose(bsa.means(), [0, 0, 0])\n assert bsa.variance(settings[0]) == np.inf\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport collections\nfrom typing import Any, Iterable, cast, DefaultDict, TYPE_CHECKING\nfrom numpy import sqrt\nfrom cirq import devices, ops, circuits, value\nfrom cirq.devices.grid_qubit import GridQubit\nfrom cirq.ops import raw_types\nfrom cirq.value import Duration\nfrom cirq.neutral_atoms.neutral_atom_gateset import NeutralAtomGateset\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef _subgate_if_parallel_gate(gate: 'cirq.Gate') -> 'cirq.Gate':\n \"\"\"Returns gate.sub_gate if gate is a ParallelGate, else returns gate\"\"\"\n return gate.sub_gate if isinstance(gate, ops.ParallelGate) else gate\n\n\[email protected]_equality\nclass NeutralAtomDevice(devices.Device):\n \"\"\"A device with qubits placed on a grid.\"\"\"\n\n def __init__(\n self,\n measurement_duration: 'cirq.DURATION_LIKE',\n gate_duration: 'cirq.DURATION_LIKE',\n control_radius: float,\n max_parallel_z: int,\n max_parallel_xy: int,\n max_parallel_c: int,\n qubits: Iterable[GridQubit],\n ) -> None:\n \"\"\"Initializes the description of the AQuA device.\n\n Args:\n measurement_duration: the maximum duration of a measurement.\n gate_duration: the maximum duration of a gate\n control_radius: the maximum distance between qubits for a controlled\n gate. Distance is measured in units of the indices passed into\n the GridQubit constructor.\n max_parallel_z: The maximum number of qubits that can be acted on\n in parallel by a Z gate\n max_parallel_xy: The maximum number of qubits that can be acted on\n in parallel by a local XY gate\n max_parallel_c: the maximum number of qubits that can be acted on in\n parallel by a controlled gate. 
Must be less than or equal to the\n lesser of max_parallel_z and max_parallel_xy\n qubits: Qubits on the device, identified by their x, y location.\n Must be of type GridQubit\n\n Raises:\n ValueError: if the wrong qubit type is provided or if invalid\n parallel parameters are provided\n \"\"\"\n self._measurement_duration = Duration(measurement_duration)\n self._gate_duration = Duration(gate_duration)\n self._control_radius = control_radius\n self._max_parallel_z = max_parallel_z\n self._max_parallel_xy = max_parallel_xy\n if max_parallel_c > min(max_parallel_z, max_parallel_xy):\n raise ValueError(\n \"max_parallel_c must be less than or equal to the\"\n \"min of max_parallel_z and max_parallel_xy\"\n )\n self._max_parallel_c = max_parallel_c\n self.xy_gateset_all_allowed = ops.Gateset(\n ops.ParallelGateFamily(ops.XPowGate),\n ops.ParallelGateFamily(ops.YPowGate),\n ops.ParallelGateFamily(ops.PhasedXPowGate),\n unroll_circuit_op=False,\n )\n self.controlled_gateset = ops.Gateset(\n ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CZPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),\n unroll_circuit_op=False,\n )\n self.gateset = NeutralAtomGateset(max_parallel_z, max_parallel_xy)\n for q in qubits:\n if not isinstance(q, GridQubit):\n raise ValueError(f'Unsupported qubit type: {q!r}')\n self.qubits = frozenset(qubits)\n\n self._metadata = devices.GridDeviceMetadata(\n [(a, b) for a in self.qubits for b in self.qubits if a.is_adjacent(b)], self.gateset\n )\n\n @property\n def metadata(self) -> devices.GridDeviceMetadata:\n return self._metadata\n\n def qubit_list(self):\n return [qubit for qubit in self.qubits]\n\n def duration_of(self, operation: ops.Operation):\n \"\"\"Provides the duration of the given operation on this device.\n\n Args:\n operation: the operation to get the duration of\n\n Returns:\n The duration of the given operation on this device\n\n Raises:\n ValueError: If the operation provided doesn't correspond to a native\n gate\n \"\"\"\n self.validate_operation(operation)\n if isinstance(operation, ops.GateOperation):\n if isinstance(operation.gate, ops.MeasurementGate):\n return self._measurement_duration\n return self._gate_duration\n\n def validate_gate(self, gate: ops.Gate):\n \"\"\"Raises an error if the provided gate isn't part of the native gate set.\n\n Args:\n gate: the gate to validate\n\n Raises:\n ValueError: If the given gate is not part of the native gate set.\n \"\"\"\n if gate not in self.gateset:\n if isinstance(gate, (ops.CNotPowGate, ops.CZPowGate, ops.CCXPowGate, ops.CCZPowGate)):\n raise ValueError('controlled gates must have integer exponents')\n raise ValueError(f'Unsupported gate: {gate!r}')\n\n def validate_operation(self, operation: ops.Operation):\n \"\"\"Raises an error if the given operation is invalid on this device.\n\n Args:\n operation: the operation to validate\n\n Raises:\n ValueError: If the operation is not valid\n \"\"\"\n if not isinstance(operation, ops.GateOperation):\n raise ValueError(f'Unsupported operation: {operation!r}')\n\n # All qubits the operation acts on must be on the device\n for q in operation.qubits:\n if q not in self.qubits:\n raise ValueError(f'Qubit not on device: {q!r}')\n\n if operation not in self.gateset and not (\n operation in self.xy_gateset_all_allowed and len(operation.qubits) == len(self.qubits)\n ):\n raise ValueError(f'Unsupported operation: {operation!r}')\n\n if operation in self.controlled_gateset:\n if 
len(operation.qubits) > self._max_parallel_c:\n raise ValueError(\n 'Too many qubits acted on in parallel by a controlled gate operation'\n )\n for p in operation.qubits:\n for q in operation.qubits:\n if self.distance(p, q) > self._control_radius:\n raise ValueError(f\"Qubits {p!r}, {q!r} are too far away\")\n\n def validate_moment(self, moment: circuits.Moment):\n \"\"\"Raises an error if the given moment is invalid on this device.\n\n Args:\n moment: The moment to validate\n\n Raises:\n ValueError: If the given moment is invalid\n \"\"\"\n super().validate_moment(moment)\n\n CATEGORIES = {\n 'Z': (ops.ZPowGate,),\n 'XY': (ops.XPowGate, ops.YPowGate, ops.PhasedXPowGate),\n 'controlled': (ops.CNotPowGate, ops.CZPowGate, ops.CCXPowGate, ops.CCZPowGate),\n 'measure': (ops.MeasurementGate,),\n }\n\n categorized_ops: DefaultDict = collections.defaultdict(list)\n for op in moment.operations:\n assert isinstance(op, ops.GateOperation)\n for k, v in CATEGORIES.items():\n assert isinstance(v, tuple)\n gate = _subgate_if_parallel_gate(op.gate)\n if isinstance(gate, v):\n categorized_ops[k].append(op)\n\n for k in ['Z', 'XY', 'controlled']:\n if len(set(_subgate_if_parallel_gate(op.gate) for op in categorized_ops[k])) > 1:\n raise ValueError(f\"Non-identical simultaneous {k} gates\")\n\n num_parallel_xy = sum([len(op.qubits) for op in categorized_ops['XY']])\n num_parallel_z = sum([len(op.qubits) for op in categorized_ops['Z']])\n has_measurement = len(categorized_ops['measure']) > 0\n controlled_qubits_lists = [op.qubits for op in categorized_ops['controlled']]\n\n if sum([len(l) for l in controlled_qubits_lists]) > self._max_parallel_c:\n raise ValueError(\"Too many qubits acted on by controlled gates\")\n if controlled_qubits_lists and (num_parallel_xy or num_parallel_z):\n raise ValueError(\n \"Can't perform non-controlled operations at same time as controlled operations\"\n )\n if self._are_qubit_lists_too_close(*controlled_qubits_lists):\n raise ValueError(\"Interacting controlled operations\")\n\n if num_parallel_z > self._max_parallel_z:\n raise ValueError(\"Too many simultaneous Z gates\")\n\n if num_parallel_xy > self._max_parallel_xy and num_parallel_xy != len(self.qubits):\n raise ValueError(\"Bad number of simultaneous XY gates\")\n\n if has_measurement:\n if controlled_qubits_lists or num_parallel_z or num_parallel_xy:\n raise ValueError(\"Measurements can't be simultaneous with other operations\")\n\n def _are_qubit_lists_too_close(self, *qubit_lists: Iterable[raw_types.Qid]) -> bool:\n if len(qubit_lists) < 2:\n return False\n if len(qubit_lists) == 2:\n a, b = qubit_lists\n return any(self.distance(p, q) <= self._control_radius for p in a for q in b)\n return any(\n self._are_qubit_lists_too_close(a, b) for a, b in itertools.combinations(qubit_lists, 2)\n )\n\n def validate_circuit(self, circuit: circuits.AbstractCircuit):\n \"\"\"Raises an error if the given circuit is invalid on this device.\n\n A circuit is invalid if any of its moments are invalid or if there is a\n non-empty moment after a moment with a measurement.\n\n Args:\n circuit: The circuit to validate\n\n Raises:\n ValueError: If the given circuit can't be run on this device\n \"\"\"\n super().validate_circuit(circuit)\n\n # Measurements must be in the last non-empty moment\n has_measurement_occurred = False\n for moment in circuit:\n if has_measurement_occurred:\n if len(moment.operations) > 0:\n raise ValueError(\"Non-empty moment after measurement\")\n for operation in moment.operations:\n if 
isinstance(operation.gate, ops.MeasurementGate):\n has_measurement_occurred = True\n\n def _value_equality_values_(self) -> Any:\n return (\n self._measurement_duration,\n self._gate_duration,\n self._max_parallel_z,\n self._max_parallel_xy,\n self._max_parallel_c,\n self._control_radius,\n self.qubits,\n )\n\n def __repr__(self) -> str:\n return (\n 'cirq.NeutralAtomDevice('\n f'measurement_duration={self._measurement_duration!r}, '\n f'gate_duration={self._gate_duration!r}, '\n f'max_parallel_z={self._max_parallel_z!r}, '\n f'max_parallel_xy={self._max_parallel_xy!r}, '\n f'max_parallel_c={self._max_parallel_c!r}, '\n f'control_radius={self._control_radius!r}, '\n f'qubits={sorted(self.qubits)!r})'\n )\n\n def neighbors_of(self, qubit: 'cirq.GridQubit') -> Iterable['cirq.GridQubit']:\n \"\"\"Returns the qubits that the given qubit can interact with.\"\"\"\n possibles = [\n GridQubit(qubit.row + 1, qubit.col),\n GridQubit(qubit.row - 1, qubit.col),\n GridQubit(qubit.row, qubit.col + 1),\n GridQubit(qubit.row, qubit.col - 1),\n ]\n return [e for e in possibles if e in self.qubits]\n\n def distance(self, p: 'cirq.Qid', q: 'cirq.Qid') -> float:\n p = cast(GridQubit, p)\n q = cast(GridQubit, q)\n return sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)\n\n def __str__(self) -> str:\n diagram = circuits.TextDiagramDrawer()\n\n for q in self.qubits:\n diagram.write(q.col, q.row, str(q))\n for q2 in self.neighbors_of(q):\n diagram.grid_line(q.col, q.row, q2.col, q2.row)\n\n return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)\n\n def _repr_pretty_(self, p: Any, cycle: bool):\n \"\"\"iPython (Jupyter) pretty print.\"\"\"\n p.text(\"cirq.NeutralAtomDevice(...)\" if cycle else self.__str__())\n",
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A no-qubit global phase operation.\"\"\"\nfrom typing import AbstractSet, Any, Dict, Sequence, Tuple, TYPE_CHECKING, Union\n\nimport numpy as np\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq._compat import deprecated_class\nfrom cirq.type_workarounds import NotImplementedType\nfrom cirq.ops import gate_operation, raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality(approximate=True)\n@deprecated_class(deadline='v0.16', fix='Use cirq.global_phase_operation')\nclass GlobalPhaseOperation(gate_operation.GateOperation):\n def __init__(self, coefficient: value.Scalar, atol: float = 1e-8) -> None:\n gate = GlobalPhaseGate(coefficient, atol)\n super().__init__(gate, [])\n\n def with_qubits(self, *new_qubits) -> 'GlobalPhaseOperation':\n if new_qubits:\n raise ValueError(f'{self!r} applies to 0 qubits but new_qubits={new_qubits!r}.')\n return self\n\n @property\n def coefficient(self) -> value.Scalar:\n return self.gate.coefficient # type: ignore\n\n @coefficient.setter\n def coefficient(self, coefficient: value.Scalar):\n # coverage: ignore\n self.gate._coefficient = coefficient # type: ignore\n\n def __str__(self) -> str:\n return str(self.coefficient)\n\n def __repr__(self) -> str:\n return f'cirq.GlobalPhaseOperation({self.coefficient!r})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['coefficient'])\n\n\[email protected]_equality(approximate=True)\nclass GlobalPhaseGate(raw_types.Gate):\n def __init__(self, coefficient: 'cirq.TParamValComplex', atol: float = 1e-8) -> None:\n if not isinstance(coefficient, sympy.Basic) and abs(1 - abs(coefficient)) > atol:\n raise ValueError(f'Coefficient is not unitary: {coefficient!r}')\n self._coefficient = coefficient\n\n @property\n def coefficient(self) -> 'cirq.TParamValComplex':\n return self._coefficient\n\n def _value_equality_values_(self) -> Any:\n return self.coefficient\n\n def _has_unitary_(self) -> bool:\n return not self._is_parameterized_()\n\n def __pow__(self, power) -> 'cirq.GlobalPhaseGate':\n if isinstance(power, (int, float)):\n return GlobalPhaseGate(self.coefficient**power)\n return NotImplemented\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if not self._has_unitary_():\n return NotImplemented\n return np.array([[self.coefficient]])\n\n def _apply_unitary_(\n self, args: 'cirq.ApplyUnitaryArgs'\n ) -> Union[np.ndarray, NotImplementedType]:\n if not self._has_unitary_():\n return NotImplemented\n args.target_tensor *= self.coefficient\n return args.target_tensor\n\n def _has_stabilizer_effect_(self) -> bool:\n return True\n\n def __str__(self) -> str:\n return str(self.coefficient)\n\n def __repr__(self) -> str:\n return f'cirq.GlobalPhaseGate({self.coefficient!r})'\n\n def _op_repr_(self, qubits: Sequence['cirq.Qid']) -> str:\n return f'cirq.global_phase_operation({self.coefficient!r})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return 
protocols.obj_to_dict_helper(self, ['coefficient'])\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return tuple()\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.coefficient)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.coefficient)\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolver', recursive: bool\n ) -> 'cirq.GlobalPhaseGate':\n coefficient = protocols.resolve_parameters(self.coefficient, resolver, recursive)\n return GlobalPhaseGate(coefficient=coefficient)\n\n\ndef global_phase_operation(\n coefficient: 'cirq.TParamValComplex', atol: float = 1e-8\n) -> 'cirq.GateOperation':\n \"\"\"Creates an operation that represents a global phase on the state.\"\"\"\n return GlobalPhaseGate(coefficient, atol)()\n",
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nimport random\nfrom typing import Type\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nimport sympy\n\nimport cirq\nimport cirq.testing\n\n\nclass _TestMixture(cirq.Gate):\n def __init__(self, gate_options):\n self.gate_options = gate_options\n\n def _qid_shape_(self):\n return cirq.qid_shape(self.gate_options[0], ())\n\n def _mixture_(self):\n return [(1 / len(self.gate_options), cirq.unitary(g)) for g in self.gate_options]\n\n\nclass _TestDecomposingChannel(cirq.Gate):\n def __init__(self, channels):\n self.channels = channels\n\n def _qid_shape_(self):\n return tuple(d for chan in self.channels for d in cirq.qid_shape(chan))\n\n def _decompose_(self, qubits):\n return [chan.on(q) for chan, q in zip(self.channels, qubits)]\n\n\ndef test_invalid_dtype():\n with pytest.raises(ValueError, match='complex'):\n cirq.DensityMatrixSimulator(dtype=np.int32)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_no_measurements(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n\n circuit = cirq.Circuit(cirq.X(q0), cirq.X(q1))\n with pytest.raises(ValueError, match=\"no measurements\"):\n simulator.run(circuit)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_no_results(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n\n circuit = cirq.Circuit(cirq.X(q0), cirq.X(q1))\n with pytest.raises(ValueError, match=\"no measurements\"):\n simulator.run(circuit)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_empty_circuit(dtype: Type[np.number], split: bool):\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with pytest.raises(ValueError, match=\"no measurements\"):\n simulator.run(cirq.Circuit())\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_bit_flips(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0), cirq.measure(q1)\n )\n result = simulator.run(circuit)\n np.testing.assert_equal(result.measurements, {'q(0)': [[b0]], 'q(1)': [[b1]]})\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_bit_flips_with_dephasing(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in 
[0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0), cirq.measure(q1)\n )\n result = simulator.run(circuit)\n np.testing.assert_equal(result.measurements, {'q(0)': [[b0]], 'q(1)': [[b1]]})\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_qudit_increments(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((3, 4))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1, 2]:\n for b1 in [0, 1, 2, 3]:\n circuit = cirq.Circuit(\n [cirq.XPowGate(dimension=3)(q0)] * b0,\n [cirq.XPowGate(dimension=4)(q1)] * b1,\n cirq.measure(q0),\n cirq.measure(q1),\n )\n result = simulator.run(circuit)\n np.testing.assert_equal(\n result.measurements, {'q(0) (d=3)': [[b0]], 'q(1) (d=4)': [[b1]]}\n )\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_not_channel_op(dtype: Type[np.number], split: bool):\n class BadOp(cirq.Operation):\n def __init__(self, qubits):\n self._qubits = qubits\n\n @property\n def qubits(self):\n return self._qubits\n\n def with_qubits(self, *new_qubits):\n # coverage: ignore\n return BadOp(self._qubits)\n\n q0 = cirq.LineQubit(0)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n circuit = cirq.Circuit([BadOp([q0])])\n with pytest.raises(TypeError):\n simulator.simulate(circuit)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_mixture(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(cirq.bit_flip(0.5)(q0), cirq.measure(q0), cirq.measure(q1))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n result = simulator.run(circuit, repetitions=100)\n np.testing.assert_equal(result.measurements['q(1)'], [[0]] * 100)\n # Test that we get at least one of each result. Probability of this test\n # failing is 2 ** (-99).\n q0_measurements = set(x[0] for x in result.measurements['q(0)'].tolist())\n assert q0_measurements == {0, 1}\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_qudit_mixture(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((3, 2))\n mixture = _TestMixture(\n [\n cirq.XPowGate(dimension=3) ** 0,\n cirq.XPowGate(dimension=3),\n cirq.XPowGate(dimension=3) ** 2,\n ]\n )\n circuit = cirq.Circuit(mixture(q0), cirq.measure(q0), cirq.measure(q1))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n result = simulator.run(circuit, repetitions=100)\n np.testing.assert_equal(result.measurements['q(1) (d=2)'], [[0]] * 100)\n # Test that we get at least one of each result. 
Probability of this test\n # failing is about 3 * (2/3) ** 100.\n q0_measurements = set(x[0] for x in result.measurements['q(0) (d=3)'].tolist())\n assert q0_measurements == {0, 1, 2}\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_channel(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(\n cirq.X(q0), cirq.amplitude_damp(0.5)(q0), cirq.measure(q0), cirq.measure(q1)\n )\n\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n result = simulator.run(circuit, repetitions=100)\n np.testing.assert_equal(result.measurements['q(1)'], [[0]] * 100)\n # Test that we get at least one of each result. Probability of this test\n # failing is 2 ** (-99).\n q0_measurements = set(x[0] for x in result.measurements['q(0)'].tolist())\n assert q0_measurements == {0, 1}\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_decomposable_channel(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n\n circuit = cirq.Circuit(\n cirq.X(q0),\n _TestDecomposingChannel([cirq.amplitude_damp(0.5), cirq.amplitude_damp(0)]).on(q0, q1),\n cirq.measure(q0),\n cirq.measure(q1),\n )\n\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n result = simulator.run(circuit, repetitions=100)\n np.testing.assert_equal(result.measurements['q(1)'], [[0]] * 100)\n # Test that we get at least one of each result. Probability of this test\n # failing is 2 ** (-99).\n q0_measurements = set(x[0] for x in result.measurements['q(0)'].tolist())\n assert q0_measurements == {0, 1}\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_qudit_channel(dtype: Type[np.number], split: bool):\n class TestChannel(cirq.Gate):\n def _qid_shape_(self):\n return (3,)\n\n def _kraus_(self):\n return [\n np.array([[1, 0, 0], [0, 0.5**0.5, 0], [0, 0, 0.5**0.5]]),\n np.array([[0, 0.5**0.5, 0], [0, 0, 0], [0, 0, 0]]),\n np.array([[0, 0, 0], [0, 0, 0.5**0.5], [0, 0, 0]]),\n ]\n\n q0, q1 = cirq.LineQid.for_qid_shape((3, 4))\n circuit = cirq.Circuit(\n cirq.XPowGate(dimension=3)(q0) ** 2,\n TestChannel()(q0),\n TestChannel()(q0),\n cirq.measure(q0),\n cirq.measure(q1),\n )\n\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n result = simulator.run(circuit, repetitions=100)\n np.testing.assert_equal(result.measurements['q(1) (d=4)'], [[0]] * 100)\n # Test that we get at least one of each result. 
Probability of this test\n # failing is about (3/4) ** 100.\n q0_measurements = set(x[0] for x in result.measurements['q(0) (d=3)'].tolist())\n assert q0_measurements == {0, 1, 2}\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_measure_at_end_no_repetitions(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0), cirq.measure(q1)\n )\n result = simulator.run(circuit, repetitions=0)\n np.testing.assert_equal(\n result.measurements, {'q(0)': np.empty([0, 1]), 'q(1)': np.empty([0, 1])}\n )\n assert result.repetitions == 0\n assert mock_sim.call_count == 0\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_repetitions_measure_at_end(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0), cirq.measure(q1)\n )\n result = simulator.run(circuit, repetitions=3)\n np.testing.assert_equal(\n result.measurements, {'q(0)': [[b0]] * 3, 'q(1)': [[b1]] * 3}\n )\n assert result.repetitions == 3\n assert mock_sim.call_count == 8\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_qudits_repetitions_measure_at_end(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n for b0 in [0, 1]:\n for b1 in [0, 1, 2]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0),\n cirq.XPowGate(dimension=3)(q1) ** b1,\n cirq.measure(q0),\n cirq.measure(q1),\n )\n result = simulator.run(circuit, repetitions=3)\n np.testing.assert_equal(\n result.measurements, {'q(0) (d=2)': [[b0]] * 3, 'q(1) (d=3)': [[b1]] * 3}\n )\n assert result.repetitions == 3\n assert mock_sim.call_count == 12\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_measurement_not_terminal_no_repetitions(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0),\n (cirq.X**b1)(q1),\n cirq.measure(q0),\n cirq.measure(q1),\n cirq.H(q0),\n cirq.H(q1),\n )\n result = simulator.run(circuit, repetitions=0)\n np.testing.assert_equal(\n result.measurements, {'q(0)': np.empty([0, 1]), 'q(1)': np.empty([0, 1])}\n )\n assert result.repetitions == 0\n assert mock_sim.call_count == 0\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_repetitions_measurement_not_terminal(dtype: Type[np.number], split: bool):\n q0, q1 = 
cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0),\n (cirq.X**b1)(q1),\n cirq.measure(q0),\n cirq.measure(q1),\n cirq.H(q0),\n cirq.H(q1),\n )\n result = simulator.run(circuit, repetitions=3)\n np.testing.assert_equal(\n result.measurements, {'q(0)': [[b0]] * 3, 'q(1)': [[b1]] * 3}\n )\n assert result.repetitions == 3\n assert mock_sim.call_count == 16\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_qudits_repetitions_measurement_not_terminal(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n for b0 in [0, 1]:\n for b1 in [0, 1, 2]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0),\n cirq.XPowGate(dimension=3)(q1) ** b1,\n cirq.measure(q0),\n cirq.measure(q1),\n cirq.H(q0),\n cirq.XPowGate(dimension=3)(q1) ** (-b1),\n )\n result = simulator.run(circuit, repetitions=3)\n np.testing.assert_equal(\n result.measurements, {'q(0) (d=2)': [[b0]] * 3, 'q(1) (d=3)': [[b1]] * 3}\n )\n assert result.repetitions == 3\n assert mock_sim.call_count == 24\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_param_resolver(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X ** sympy.Symbol('b0'))(q0),\n (cirq.X ** sympy.Symbol('b1'))(q1),\n cirq.measure(q0),\n cirq.measure(q1),\n )\n param_resolver = {'b0': b0, 'b1': b1}\n result = simulator.run(circuit, param_resolver=param_resolver) # type: ignore\n np.testing.assert_equal(result.measurements, {'q(0)': [[b0]], 'q(1)': [[b1]]})\n # pylint: disable=line-too-long\n np.testing.assert_equal(result.params, cirq.ParamResolver(param_resolver)) # type: ignore\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_correlations(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1), cirq.measure(q0, q1))\n for _ in range(10):\n result = simulator.run(circuit)\n bits = result.measurements['q(0),q(1)'][0]\n assert bits[0] == bits[1]\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_measure_multiple_qubits(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0, q1))\n result = simulator.run(circuit, repetitions=3)\n np.testing.assert_equal(result.measurements, {'q(0),q(1)': [[b0, b1]] * 3})\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_measure_multiple_qudits(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 
3))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1, 2]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), cirq.XPowGate(dimension=3)(q1) ** b1, cirq.measure(q0, q1)\n )\n result = simulator.run(circuit, repetitions=3)\n np.testing.assert_equal(result.measurements, {'q(0) (d=2),q(1) (d=3)': [[b0, b1]] * 3})\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_run_sweeps_param_resolvers(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X ** sympy.Symbol('b0'))(q0),\n (cirq.X ** sympy.Symbol('b1'))(q1),\n cirq.measure(q0),\n cirq.measure(q1),\n )\n params = [\n cirq.ParamResolver({'b0': b0, 'b1': b1}),\n cirq.ParamResolver({'b0': b1, 'b1': b0}),\n ]\n results = simulator.run_sweep(circuit, params=params)\n\n assert len(results) == 2\n np.testing.assert_equal(results[0].measurements, {'q(0)': [[b0]], 'q(1)': [[b1]]})\n np.testing.assert_equal(results[1].measurements, {'q(0)': [[b1]], 'q(1)': [[b0]]})\n assert results[0].params == params[0]\n assert results[1].params == params[1]\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_no_circuit(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n circuit = cirq.Circuit()\n result = simulator.simulate(circuit, qubit_order=[q0, q1])\n expected = np.zeros((4, 4))\n expected[0, 0] = 1.0\n np.testing.assert_almost_equal(result.final_density_matrix, expected)\n assert len(result.measurements) == 0\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n circuit = cirq.Circuit(cirq.H(q0), cirq.H(q1))\n result = simulator.simulate(circuit, qubit_order=[q0, q1])\n np.testing.assert_almost_equal(result.final_density_matrix, np.ones((4, 4)) * 0.25)\n assert len(result.measurements) == 0\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_qudits(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n circuit = cirq.Circuit(cirq.H(q0), cirq.XPowGate(dimension=3)(q1) ** 2)\n result = simulator.simulate(circuit, qubit_order=[q1, q0])\n expected = np.zeros((6, 6))\n expected[4:, 4:] = np.ones((2, 2)) / 2\n np.testing.assert_almost_equal(result.final_density_matrix, expected)\n assert len(result.measurements) == 0\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_reset_one_qubit_does_not_affect_partial_trace_of_other_qubits(\n dtype: Type[np.number], split: bool\n):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n circuit = cirq.Circuit(cirq.H(q0), cirq.CX(q0, q1), cirq.reset(q0))\n result = simulator.simulate(circuit)\n expected = np.zeros((4, 4), dtype=dtype)\n expected[0, 0] = 0.5\n expected[1, 1] = 0.5\n 
np.testing.assert_almost_equal(result.final_density_matrix, expected)\n\n\[email protected](\n 'dtype,circuit',\n itertools.product(\n [np.complex64, np.complex128],\n [cirq.testing.random_circuit(cirq.LineQubit.range(4), 5, 0.9) for _ in range(20)],\n ),\n)\ndef test_simulate_compare_to_state_vector_simulator(dtype: Type[np.number], circuit):\n qubits = cirq.LineQubit.range(4)\n pure_result = (\n cirq.Simulator(dtype=dtype).simulate(circuit, qubit_order=qubits).density_matrix_of()\n )\n mixed_result = (\n cirq.DensityMatrixSimulator(dtype=dtype)\n .simulate(circuit, qubit_order=qubits)\n .final_density_matrix\n )\n assert mixed_result.shape == (16, 16)\n np.testing.assert_almost_equal(mixed_result, pure_result, decimal=6)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_bit_flips(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0), cirq.measure(q1)\n )\n result = simulator.simulate(circuit)\n np.testing.assert_equal(result.measurements, {'q(0)': [b0], 'q(1)': [b1]})\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[b0 * 2 + b1, b0 * 2 + b1] = 1.0\n np.testing.assert_equal(result.final_density_matrix, expected_density_matrix)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_qudit_increments(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1, 2]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0),\n (cirq.XPowGate(dimension=3)(q1),) * b1,\n cirq.measure(q0),\n cirq.measure(q1),\n )\n result = simulator.simulate(circuit)\n np.testing.assert_equal(result.measurements, {'q(0) (d=2)': [b0], 'q(1) (d=3)': [b1]})\n expected_density_matrix = np.zeros(shape=(6, 6))\n expected_density_matrix[b0 * 3 + b1, b0 * 3 + b1] = 1.0\n np.testing.assert_allclose(result.final_density_matrix, expected_density_matrix)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_initial_state(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1))\n result = simulator.simulate(circuit, initial_state=1)\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[b0 * 2 + 1 - b1, b0 * 2 + 1 - b1] = 1.0\n np.testing.assert_equal(result.final_density_matrix, expected_density_matrix)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulation_state(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1))\n args = simulator._create_simulation_state(initial_state=1, qubits=(q0, q1))\n result = simulator.simulate(circuit, initial_state=args)\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[b0 * 2 + 1 - b1, b0 * 2 + 
1 - b1] = 1.0\n np.testing.assert_equal(result.final_density_matrix, expected_density_matrix)\n\n\ndef test_simulate_tps_initial_state():\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator()\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1))\n result = simulator.simulate(circuit, initial_state=cirq.KET_ZERO(q0) * cirq.KET_ONE(q1))\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[b0 * 2 + 1 - b1, b0 * 2 + 1 - b1] = 1.0\n np.testing.assert_equal(result.final_density_matrix, expected_density_matrix)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_initial_qudit_state(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((3, 4))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1, 2]:\n for b1 in [0, 1, 2, 3]:\n circuit = cirq.Circuit(\n cirq.XPowGate(dimension=3)(q0) ** b0, cirq.XPowGate(dimension=4)(q1) ** b1\n )\n result = simulator.simulate(circuit, initial_state=6)\n expected_density_matrix = np.zeros(shape=(12, 12))\n expected_density_matrix[\n (b0 + 1) % 3 * 4 + (b1 + 2) % 4, (b0 + 1) % 3 * 4 + (b1 + 2) % 4\n ] = 1.0\n np.testing.assert_allclose(\n result.final_density_matrix, expected_density_matrix, atol=1e-15\n )\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_qubit_order(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1))\n result = simulator.simulate(circuit, qubit_order=[q1, q0])\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[2 * b1 + b0, 2 * b1 + b0] = 1.0\n np.testing.assert_equal(result.final_density_matrix, expected_density_matrix)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_param_resolver(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X ** sympy.Symbol('b0'))(q0), (cirq.X ** sympy.Symbol('b1'))(q1)\n )\n resolver = cirq.ParamResolver({'b0': b0, 'b1': b1})\n result = simulator.simulate(circuit, param_resolver=resolver)\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[2 * b0 + b1, 2 * b0 + b1] = 1.0\n np.testing.assert_equal(result.final_density_matrix, expected_density_matrix)\n assert result.params == resolver\n assert len(result.measurements) == 0\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_measure_multiple_qubits(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit((cirq.X**b0)(q0), (cirq.X**b1)(q1), cirq.measure(q0, q1))\n result = simulator.simulate(circuit)\n np.testing.assert_equal(result.measurements, {'q(0),q(1)': [b0, b1]})\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_measure_multiple_qudits(dtype: 
Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1, 2]:\n circuit = cirq.Circuit(\n (cirq.X**b0)(q0), cirq.XPowGate(dimension=3)(q1) ** b1, cirq.measure(q0, q1)\n )\n result = simulator.simulate(circuit)\n np.testing.assert_equal(result.measurements, {'q(0) (d=2),q(1) (d=3)': [b0, b1]})\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_sweeps_param_resolver(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for b0 in [0, 1]:\n for b1 in [0, 1]:\n circuit = cirq.Circuit(\n (cirq.X ** sympy.Symbol('b0'))(q0), (cirq.X ** sympy.Symbol('b1'))(q1)\n )\n params = [\n cirq.ParamResolver({'b0': b0, 'b1': b1}),\n cirq.ParamResolver({'b0': b1, 'b1': b0}),\n ]\n results = simulator.simulate_sweep(circuit, params=params)\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[2 * b0 + b1, 2 * b0 + b1] = 1.0\n np.testing.assert_equal(results[0].final_density_matrix, expected_density_matrix)\n\n expected_density_matrix = np.zeros(shape=(4, 4))\n expected_density_matrix[2 * b1 + b0, 2 * b1 + b0] = 1.0\n np.testing.assert_equal(results[1].final_density_matrix, expected_density_matrix)\n\n assert results[0].params == params[0]\n assert results[1].params == params[1]\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_moment_steps(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(cirq.H(q0), cirq.H(q1), cirq.H(q0), cirq.H(q1))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for i, step in enumerate(simulator.simulate_moment_steps(circuit)):\n assert cirq.qid_shape(step) == (2, 2)\n if i == 0:\n np.testing.assert_almost_equal(step.density_matrix(), np.ones((4, 4)) / 4)\n else:\n np.testing.assert_almost_equal(step.density_matrix(), np.diag([1, 0, 0, 0]), decimal=6)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_moment_steps_qudits(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n circuit = cirq.Circuit(\n cirq.XPowGate(dimension=2)(q0),\n cirq.XPowGate(dimension=3)(q1),\n cirq.reset(q1),\n cirq.XPowGate(dimension=3)(q1),\n )\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for i, step in enumerate(simulator.simulate_moment_steps(circuit)):\n assert cirq.qid_shape(step) == (2, 3)\n if i == 0:\n np.testing.assert_almost_equal(step.density_matrix(), np.diag([0, 0, 0, 0, 1, 0]))\n elif i == 1:\n np.testing.assert_almost_equal(step.density_matrix(), np.diag([0, 0, 0, 1, 0, 0]))\n else:\n np.testing.assert_almost_equal(step.density_matrix(), np.diag([0, 0, 0, 0, 1, 0]))\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_moment_steps_empty_circuit(dtype: Type[np.number], split: bool):\n circuit = cirq.Circuit()\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n step = None\n for step in simulator.simulate_moment_steps(circuit):\n pass\n assert np.allclose(step.density_matrix(), np.array([[1]]))\n assert not cirq.qid_shape(step)\n\n\[email protected]('dtype', 
[np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_moment_steps_sample(dtype: Type[np.number], split: bool):\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for i, step in enumerate(simulator.simulate_moment_steps(circuit)):\n if i == 0:\n samples = step.sample([q0, q1], repetitions=10)\n for sample in samples:\n assert np.array_equal(sample, [True, False]) or np.array_equal(\n sample, [False, False]\n )\n else:\n samples = step.sample([q0, q1], repetitions=10)\n for sample in samples:\n assert np.array_equal(sample, [True, True]) or np.array_equal(\n sample, [False, False]\n )\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_moment_steps_sample_qudits(dtype: Type[np.number], split: bool):\n class TestGate(cirq.Gate):\n \"\"\"Swaps the 2nd qid |0> and |2> states when the 1st is |1>.\"\"\"\n\n def _qid_shape_(self):\n return (2, 3)\n\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs):\n args.available_buffer[..., 1, 0] = args.target_tensor[..., 1, 2]\n args.target_tensor[..., 1, 2] = args.target_tensor[..., 1, 0]\n args.target_tensor[..., 1, 0] = args.available_buffer[..., 1, 0]\n return args.target_tensor\n\n q0, q1 = cirq.LineQid.for_qid_shape((2, 3))\n circuit = cirq.Circuit(cirq.H(q0), TestGate()(q0, q1))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for i, step in enumerate(simulator.simulate_moment_steps(circuit)):\n if i == 0:\n samples = step.sample([q0, q1], repetitions=10)\n for sample in samples:\n assert np.array_equal(sample, [True, 0]) or np.array_equal(sample, [False, 0])\n else:\n samples = step.sample([q0, q1], repetitions=10)\n for sample in samples:\n assert np.array_equal(sample, [True, 2]) or np.array_equal(sample, [False, 0])\n\n\[email protected]('dtype', [np.complex64, np.complex128])\[email protected]('split', [True, False])\ndef test_simulate_moment_steps_intermediate_measurement(dtype: Type[np.number], split: bool):\n q0 = cirq.LineQubit(0)\n circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0), cirq.H(q0))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype, split_untangled_states=split)\n for i, step in enumerate(simulator.simulate_moment_steps(circuit)):\n if i == 1:\n result = int(step.measurements['q(0)'][0])\n expected = np.zeros((2, 2))\n expected[result, result] = 1\n np.testing.assert_almost_equal(step.density_matrix(), expected)\n if i == 2:\n expected = np.array([[0.5, 0.5 * (-1) ** result], [0.5 * (-1) ** result, 0.5]])\n np.testing.assert_almost_equal(step.density_matrix(), expected)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\ndef test_simulate_expectation_values(dtype):\n # Compare with test_expectation_from_state_vector_two_qubit_states\n # in file: cirq/ops/linear_combinations_test.py\n q0, q1 = cirq.LineQubit.range(2)\n psum1 = cirq.Z(q0) + 3.2 * cirq.Z(q1)\n psum2 = -1 * cirq.X(q0) + 2 * cirq.X(q1)\n c1 = cirq.Circuit(cirq.I(q0), cirq.X(q1))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype)\n result = simulator.simulate_expectation_values(c1, [psum1, psum2])\n assert cirq.approx_eq(result[0], -2.2, atol=1e-6)\n assert cirq.approx_eq(result[1], 0, atol=1e-6)\n\n c2 = cirq.Circuit(cirq.H(q0), cirq.H(q1))\n result = simulator.simulate_expectation_values(c2, [psum1, psum2])\n assert cirq.approx_eq(result[0], 0, atol=1e-6)\n assert 
cirq.approx_eq(result[1], 1, atol=1e-6)\n\n psum3 = cirq.Z(q0) + cirq.X(q1)\n c3 = cirq.Circuit(cirq.I(q0), cirq.H(q1))\n result = simulator.simulate_expectation_values(c3, psum3)\n assert cirq.approx_eq(result[0], 2, atol=1e-6)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\ndef test_simulate_noisy_expectation_values(dtype):\n q0 = cirq.LineQubit(0)\n psums = [cirq.Z(q0), cirq.X(q0)]\n c1 = cirq.Circuit(cirq.X(q0), cirq.amplitude_damp(gamma=0.1).on(q0))\n simulator = cirq.DensityMatrixSimulator(dtype=dtype)\n result = simulator.simulate_expectation_values(c1, psums)\n # <Z> = (gamma - 1) + gamma = -0.8\n assert cirq.approx_eq(result[0], -0.8, atol=1e-6)\n assert cirq.approx_eq(result[1], 0, atol=1e-6)\n\n c2 = cirq.Circuit(cirq.H(q0), cirq.depolarize(p=0.3).on(q0))\n result = simulator.simulate_expectation_values(c2, psums)\n assert cirq.approx_eq(result[0], 0, atol=1e-6)\n # <X> = (1 - p) + (-p / 3) = 0.6\n assert cirq.approx_eq(result[1], 0.6, atol=1e-6)\n\n\[email protected]('dtype', [np.complex64, np.complex128])\ndef test_simulate_expectation_values_terminal_measure(dtype):\n q0 = cirq.LineQubit(0)\n circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0))\n obs = cirq.Z(q0)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype)\n with pytest.raises(ValueError):\n _ = simulator.simulate_expectation_values(circuit, obs)\n\n results = {-1: 0, 1: 0}\n for _ in range(100):\n result = simulator.simulate_expectation_values(\n circuit, obs, permit_terminal_measurements=True\n )\n if cirq.approx_eq(result[0], -1, atol=1e-6):\n results[-1] += 1\n if cirq.approx_eq(result[0], 1, atol=1e-6):\n results[1] += 1\n\n # With a measurement after H, the Z-observable expects a specific state.\n assert results[-1] > 0\n assert results[1] > 0\n assert results[-1] + results[1] == 100\n\n circuit = cirq.Circuit(cirq.H(q0))\n results = {0: 0}\n for _ in range(100):\n result = simulator.simulate_expectation_values(\n circuit, obs, permit_terminal_measurements=True\n )\n if cirq.approx_eq(result[0], 0, atol=1e-6):\n results[0] += 1\n\n # Without measurement after H, the Z-observable is indeterminate.\n assert results[0] == 100\n\n\[email protected]('dtype', [np.complex64, np.complex128])\ndef test_simulate_expectation_values_qubit_order(dtype):\n q0, q1, q2 = cirq.LineQubit.range(3)\n circuit = cirq.Circuit(cirq.H(q0), cirq.H(q1), cirq.X(q2))\n obs = cirq.X(q0) + cirq.X(q1) - cirq.Z(q2)\n simulator = cirq.DensityMatrixSimulator(dtype=dtype)\n\n result = simulator.simulate_expectation_values(circuit, obs)\n assert cirq.approx_eq(result[0], 3, atol=1e-6)\n\n # Adjusting the qubit order has no effect on the observables.\n result_flipped = simulator.simulate_expectation_values(circuit, obs, qubit_order=[q1, q2, q0])\n assert cirq.approx_eq(result_flipped[0], 3, atol=1e-6)\n\n\ndef test_density_matrix_simulator_state_eq_deprecated():\n with cirq.testing.assert_deprecated('no longer used', deadline='v0.16', count=4):\n q0, q1 = cirq.LineQubit.range(2)\n eq = cirq.testing.EqualsTester()\n eq.add_equality_group(\n cirq.DensityMatrixSimulatorState(\n density_matrix=np.ones((2, 2)) * 0.5, qubit_map={q0: 0}\n ),\n cirq.DensityMatrixSimulatorState(\n density_matrix=np.ones((2, 2)) * 0.5, qubit_map={q0: 0}\n ),\n )\n eq.add_equality_group(\n cirq.DensityMatrixSimulatorState(density_matrix=np.eye(2) * 0.5, qubit_map={q0: 0})\n )\n eq.add_equality_group(\n cirq.DensityMatrixSimulatorState(\n density_matrix=np.eye(2) * 0.5, qubit_map={q0: 0, q1: 1}\n )\n )\n\n\ndef 
test_density_matrix_simulator_state_qid_shape():\n with cirq.testing.assert_deprecated('no longer used', deadline='v0.16', count=2):\n q0, q1 = cirq.LineQubit.range(2)\n assert cirq.qid_shape(\n cirq.DensityMatrixSimulatorState(\n density_matrix=np.ones((4, 4)) / 4, qubit_map={q0: 0, q1: 1}\n )\n ) == (2, 2)\n q0, q1 = cirq.LineQid.for_qid_shape((3, 4))\n assert cirq.qid_shape(\n cirq.DensityMatrixSimulatorState(\n density_matrix=np.ones((12, 12)) / 12, qubit_map={q0: 0, q1: 1}\n )\n ) == (3, 4)\n\n\ndef test_density_matrix_simulator_state_repr():\n with cirq.testing.assert_deprecated('no longer used', deadline='v0.16'):\n q0 = cirq.LineQubit(0)\n assert (\n repr(\n cirq.DensityMatrixSimulatorState(\n density_matrix=np.ones((2, 2)) * 0.5, qubit_map={q0: 0}\n )\n )\n == \"cirq.DensityMatrixSimulatorState(density_matrix=\"\n \"np.array([[0.5, 0.5], [0.5, 0.5]]), \"\n \"qubit_map={cirq.LineQubit(0): 0})\"\n )\n\n\ndef test_density_matrix_step_result_repr():\n q0 = cirq.LineQubit(0)\n assert (\n repr(\n cirq.DensityMatrixStepResult(\n sim_state=cirq.DensityMatrixSimulationState(\n initial_state=np.ones((2, 2)) * 0.5, qubits=[q0]\n )\n )\n )\n == \"cirq.DensityMatrixStepResult(sim_state=cirq.DensityMatrixSimulationState(\"\n \"initial_state=np.array([[(0.5+0j), (0.5+0j)], [(0.5+0j), (0.5+0j)]], dtype=np.complex64), \"\n \"qid_shape=(2,), qubits=(cirq.LineQubit(0),), \"\n \"classical_data=cirq.ClassicalDataDictionaryStore()), dtype=np.complex64)\"\n )\n\n\ndef test_density_matrix_trial_result_eq():\n q0 = cirq.LineQubit(0)\n final_simulator_state = cirq.DensityMatrixSimulationState(\n initial_state=np.ones((2, 2)) * 0.5, qubits=[q0]\n )\n eq = cirq.testing.EqualsTester()\n eq.add_equality_group(\n cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({}),\n measurements={},\n final_simulator_state=final_simulator_state,\n ),\n cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({}),\n measurements={},\n final_simulator_state=final_simulator_state,\n ),\n )\n eq.add_equality_group(\n cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({'s': 1}),\n measurements={},\n final_simulator_state=final_simulator_state,\n )\n )\n eq.add_equality_group(\n cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({'s': 1}),\n measurements={'m': np.array([[1]])},\n final_simulator_state=final_simulator_state,\n )\n )\n\n\ndef test_density_matrix_trial_result_qid_shape():\n q0, q1 = cirq.LineQubit.range(2)\n final_simulator_state = cirq.DensityMatrixSimulationState(\n initial_state=np.ones((4, 4)) / 4, qubits=[q0, q1]\n )\n assert cirq.qid_shape(\n cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({}),\n measurements={},\n final_simulator_state=final_simulator_state,\n )\n ) == (2, 2)\n q0, q1 = cirq.LineQid.for_qid_shape((3, 4))\n final_simulator_state = cirq.DensityMatrixSimulationState(\n initial_state=np.ones((12, 12)) / 12, qubits=[q0, q1]\n )\n assert cirq.qid_shape(\n cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({}),\n measurements={},\n final_simulator_state=final_simulator_state,\n )\n ) == (3, 4)\n\n\ndef test_density_matrix_trial_result_repr():\n q0 = cirq.LineQubit(0)\n dtype = np.complex64\n final_simulator_state = cirq.DensityMatrixSimulationState(\n available_buffer=[],\n qid_shape=(2,),\n prng=np.random.RandomState(0),\n qubits=[q0],\n initial_state=np.ones((2, 2), dtype=dtype) * 0.5,\n dtype=dtype,\n )\n trial_result = cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({'s': 1}),\n measurements={'m': np.array([[1]], dtype=np.int32)},\n 
final_simulator_state=final_simulator_state,\n )\n expected_repr = (\n \"cirq.DensityMatrixTrialResult(\"\n \"params=cirq.ParamResolver({'s': 1}), \"\n \"measurements={'m': np.array([[1]], dtype=np.int32)}, \"\n \"final_simulator_state=cirq.DensityMatrixSimulationState(\"\n \"initial_state=np.array([[(0.5+0j), (0.5+0j)], [(0.5+0j), (0.5+0j)]], dtype=np.complex64), \"\n \"qid_shape=(2,), \"\n \"qubits=(cirq.LineQubit(0),), \"\n \"classical_data=cirq.ClassicalDataDictionaryStore()))\"\n )\n assert repr(trial_result) == expected_repr\n assert eval(expected_repr) == trial_result\n\n\nclass XAsOp(cirq.Operation):\n def __init__(self, q):\n # coverage: ignore\n self.q = q\n\n @property\n def qubits(self):\n # coverage: ignore\n return (self.q,)\n\n def with_qubits(self, *new_qubits):\n # coverage: ignore\n return XAsOp(new_qubits[0])\n\n def _kraus_(self):\n # coverage: ignore\n return cirq.kraus(cirq.X)\n\n\ndef test_works_on_operation():\n class XAsOp(cirq.Operation):\n def __init__(self, q):\n # coverage: ignore\n self.q = q\n\n @property\n def qubits(self):\n # coverage: ignore\n return (self.q,)\n\n def with_qubits(self, *new_qubits):\n raise NotImplementedError()\n\n def _kraus_(self):\n # coverage: ignore\n return cirq.kraus(cirq.X)\n\n s = cirq.DensityMatrixSimulator()\n c = cirq.Circuit(XAsOp(cirq.LineQubit(0)))\n np.testing.assert_allclose(s.simulate(c).final_density_matrix, np.diag([0, 1]), atol=1e-8)\n\n\ndef test_works_on_pauli_string_phasor():\n a, b = cirq.LineQubit.range(2)\n c = cirq.Circuit(np.exp(0.5j * np.pi * cirq.X(a) * cirq.X(b)))\n sim = cirq.DensityMatrixSimulator()\n result = sim.simulate(c).final_density_matrix\n np.testing.assert_allclose(result.reshape(4, 4), np.diag([0, 0, 0, 1]), atol=1e-8)\n\n\ndef test_works_on_pauli_string():\n a, b = cirq.LineQubit.range(2)\n c = cirq.Circuit(cirq.X(a) * cirq.X(b))\n sim = cirq.DensityMatrixSimulator()\n result = sim.simulate(c).final_density_matrix\n np.testing.assert_allclose(result.reshape(4, 4), np.diag([0, 0, 0, 1]), atol=1e-8)\n\n\ndef test_density_matrix_trial_result_str():\n q0 = cirq.LineQubit(0)\n dtype = np.complex64\n final_simulator_state = cirq.DensityMatrixSimulationState(\n available_buffer=[],\n qid_shape=(2,),\n prng=np.random.RandomState(0),\n qubits=[q0],\n initial_state=np.ones((2, 2), dtype=dtype) * 0.5,\n dtype=dtype,\n )\n result = cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({}), measurements={}, final_simulator_state=final_simulator_state\n )\n\n # numpy varies whitespace in its representation for different versions\n # Eliminate whitespace to harden tests against this variation\n result_no_whitespace = str(result).replace('\\n', '').replace(' ', '')\n assert result_no_whitespace == (\n 'measurements:(nomeasurements)'\n 'qubits:(cirq.LineQubit(0),)'\n 'finaldensitymatrix:[[0.5+0.j0.5+0.j][0.5+0.j0.5+0.j]]'\n )\n\n\ndef test_density_matrix_trial_result_repr_pretty():\n q0 = cirq.LineQubit(0)\n dtype = np.complex64\n final_simulator_state = cirq.DensityMatrixSimulationState(\n available_buffer=[],\n qid_shape=(2,),\n prng=np.random.RandomState(0),\n qubits=[q0],\n initial_state=np.ones((2, 2), dtype=dtype) * 0.5,\n dtype=dtype,\n )\n result = cirq.DensityMatrixTrialResult(\n params=cirq.ParamResolver({}), measurements={}, final_simulator_state=final_simulator_state\n )\n\n fake_printer = cirq.testing.FakePrinter()\n result._repr_pretty_(fake_printer, cycle=False)\n # numpy varies whitespace in its representation for different versions\n # Eliminate whitespace to harden tests against this 
variation\n result_no_whitespace = fake_printer.text_pretty.replace('\\n', '').replace(' ', '')\n assert result_no_whitespace == (\n 'measurements:(nomeasurements)'\n 'qubits:(cirq.LineQubit(0),)'\n 'finaldensitymatrix:[[0.5+0.j0.5+0.j][0.5+0.j0.5+0.j]]'\n )\n\n cirq.testing.assert_repr_pretty(result, \"cirq.DensityMatrixTrialResult(...)\", cycle=True)\n\n\ndef test_run_sweep_parameters_not_resolved():\n a = cirq.LineQubit(0)\n simulator = cirq.DensityMatrixSimulator()\n circuit = cirq.Circuit(cirq.XPowGate(exponent=sympy.Symbol('a'))(a), cirq.measure(a))\n with pytest.raises(ValueError, match='symbols were not specified'):\n _ = simulator.run_sweep(circuit, cirq.ParamResolver({}))\n\n\ndef test_simulate_sweep_parameters_not_resolved():\n a = cirq.LineQubit(0)\n simulator = cirq.DensityMatrixSimulator()\n circuit = cirq.Circuit(cirq.XPowGate(exponent=sympy.Symbol('a'))(a), cirq.measure(a))\n with pytest.raises(ValueError, match='symbols were not specified'):\n _ = simulator.simulate_sweep(circuit, cirq.ParamResolver({}))\n\n\ndef test_random_seed():\n a = cirq.NamedQubit('a')\n circuit = cirq.Circuit(cirq.X(a) ** 0.5, cirq.measure(a))\n\n sim = cirq.DensityMatrixSimulator(seed=1234)\n result = sim.run(circuit, repetitions=10)\n assert np.all(\n result.measurements['a']\n == [[False], [True], [False], [True], [True], [False], [False], [True], [True], [True]]\n )\n\n sim = cirq.DensityMatrixSimulator(seed=np.random.RandomState(1234))\n result = sim.run(circuit, repetitions=10)\n assert np.all(\n result.measurements['a']\n == [[False], [True], [False], [True], [True], [False], [False], [True], [True], [True]]\n )\n\n\ndef test_random_seed_does_not_modify_global_state_terminal_measurements():\n a = cirq.NamedQubit('a')\n circuit = cirq.Circuit(cirq.X(a) ** 0.5, cirq.measure(a))\n\n sim = cirq.DensityMatrixSimulator(seed=1234)\n result1 = sim.run(circuit, repetitions=50)\n\n sim = cirq.DensityMatrixSimulator(seed=1234)\n _ = np.random.random()\n _ = random.random()\n result2 = sim.run(circuit, repetitions=50)\n\n assert result1 == result2\n\n\ndef test_random_seed_does_not_modify_global_state_non_terminal_measurements():\n a = cirq.NamedQubit('a')\n circuit = cirq.Circuit(\n cirq.X(a) ** 0.5, cirq.measure(a, key='a0'), cirq.X(a) ** 0.5, cirq.measure(a, key='a1')\n )\n\n sim = cirq.DensityMatrixSimulator(seed=1234)\n result1 = sim.run(circuit, repetitions=50)\n\n sim = cirq.DensityMatrixSimulator(seed=1234)\n _ = np.random.random()\n _ = random.random()\n result2 = sim.run(circuit, repetitions=50)\n\n assert result1 == result2\n\n\ndef test_random_seed_terminal_measurements_deterministic():\n a = cirq.NamedQubit('a')\n circuit = cirq.Circuit(cirq.X(a) ** 0.5, cirq.measure(a, key='a'))\n sim = cirq.DensityMatrixSimulator(seed=1234)\n result1 = sim.run(circuit, repetitions=30)\n result2 = sim.run(circuit, repetitions=30)\n assert np.all(\n result1.measurements['a']\n == [\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [1],\n ]\n )\n assert np.all(\n result2.measurements['a']\n == [\n [1],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [1],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n [1],\n [1],\n [1],\n ]\n )\n\n\ndef test_random_seed_non_terminal_measurements_deterministic():\n a = cirq.NamedQubit('a')\n circuit = cirq.Circuit(\n cirq.X(a) ** 0.5, 
cirq.measure(a, key='a'), cirq.X(a) ** 0.5, cirq.measure(a, key='b')\n )\n sim = cirq.DensityMatrixSimulator(seed=1234)\n result = sim.run(circuit, repetitions=30)\n assert np.all(\n result.measurements['a']\n == [\n [0],\n [0],\n [1],\n [0],\n [1],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [1],\n ]\n )\n assert np.all(\n result.measurements['b']\n == [\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n [1],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n ]\n )\n\n\ndef test_simulate_with_invert_mask():\n q0, q1, q2, q3, q4 = cirq.LineQid.for_qid_shape((2, 3, 3, 3, 4))\n c = cirq.Circuit(\n cirq.XPowGate(dimension=2)(q0),\n cirq.XPowGate(dimension=3)(q2),\n cirq.XPowGate(dimension=3)(q3) ** 2,\n cirq.XPowGate(dimension=4)(q4) ** 3,\n cirq.measure(q0, q1, q2, q3, q4, key='a', invert_mask=(True,) * 4),\n )\n assert np.all(cirq.DensityMatrixSimulator().run(c).measurements['a'] == [[0, 1, 0, 2, 3]])\n\n\ndef test_simulate_noise_with_terminal_measurements():\n q = cirq.LineQubit(0)\n circuit1 = cirq.Circuit(cirq.measure(q))\n circuit2 = circuit1 + cirq.I(q)\n\n simulator = cirq.DensityMatrixSimulator(noise=cirq.X)\n result1 = simulator.run(circuit1, repetitions=10)\n result2 = simulator.run(circuit2, repetitions=10)\n\n assert result1 == result2\n\n\ndef test_simulate_noise_with_subcircuit_measurements():\n q = cirq.LineQubit(0)\n circuit1 = cirq.Circuit(cirq.measure(q))\n circuit2 = cirq.Circuit(cirq.CircuitOperation(cirq.Circuit(cirq.measure(q)).freeze()))\n\n simulator = cirq.DensityMatrixSimulator(noise=cirq.X)\n result1 = simulator.run(circuit1, repetitions=10)\n result2 = simulator.run(circuit2, repetitions=10)\n\n assert result1 == result2\n\n\ndef test_nonmeasuring_subcircuits_do_not_cause_sweep_repeat():\n q = cirq.LineQubit(0)\n circuit = cirq.Circuit(\n cirq.CircuitOperation(cirq.Circuit(cirq.H(q)).freeze()), cirq.measure(q, key='x')\n )\n simulator = cirq.DensityMatrixSimulator()\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n simulator.run(circuit, repetitions=10)\n assert mock_sim.call_count == 2\n\n\ndef test_measuring_subcircuits_cause_sweep_repeat():\n q = cirq.LineQubit(0)\n circuit = cirq.Circuit(\n cirq.CircuitOperation(cirq.Circuit(cirq.measure(q)).freeze()), cirq.measure(q, key='x')\n )\n simulator = cirq.DensityMatrixSimulator()\n with mock.patch.object(simulator, '_core_iterator', wraps=simulator._core_iterator) as mock_sim:\n simulator.run(circuit, repetitions=10)\n assert mock_sim.call_count == 11\n\n\ndef test_density_matrix_copy():\n sim = cirq.DensityMatrixSimulator(split_untangled_states=False)\n\n q = cirq.LineQubit(0)\n circuit = cirq.Circuit(cirq.H(q), cirq.H(q))\n\n matrices = []\n for step in sim.simulate_moment_steps(circuit):\n matrices.append(step.density_matrix(copy=True))\n assert all(np.isclose(np.trace(x), 1.0) for x in matrices)\n for x, y in itertools.combinations(matrices, 2):\n assert not np.shares_memory(x, y)\n\n # If the density matrix is not copied, then applying second Hadamard\n # causes old state to be modified.\n matrices = []\n traces = []\n for step in sim.simulate_moment_steps(circuit):\n matrices.append(step.density_matrix(copy=False))\n traces.append(np.trace(step.density_matrix(copy=False)))\n assert any(not np.isclose(np.trace(x), 1.0) for x in 
matrices)\n assert all(np.isclose(x, 1.0) for x in traces)\n assert all(not np.shares_memory(x, y) for x, y in itertools.combinations(matrices, 2))\n\n\ndef test_final_density_matrix_is_not_last_object():\n sim = cirq.DensityMatrixSimulator()\n\n q = cirq.LineQubit(0)\n initial_state = np.array([[1, 0], [0, 0]], dtype=np.complex64)\n circuit = cirq.Circuit(cirq.wait(q))\n result = sim.simulate(circuit, initial_state=initial_state)\n assert result.final_density_matrix is not initial_state\n assert not np.shares_memory(result.final_density_matrix, initial_state)\n np.testing.assert_equal(result.final_density_matrix, initial_state)\n\n\ndef test_density_matrices_same_with_or_without_split_untangled_states():\n sim = cirq.DensityMatrixSimulator(split_untangled_states=False)\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(cirq.H(q0), cirq.CX.on(q0, q1), cirq.reset(q1))\n result1 = sim.simulate(circuit).final_density_matrix\n sim = cirq.DensityMatrixSimulator()\n result2 = sim.simulate(circuit).final_density_matrix\n assert np.allclose(result1, result2)\n\n\ndef test_large_untangled_okay():\n circuit = cirq.Circuit()\n for i in range(59):\n for _ in range(9):\n circuit.append(cirq.X(cirq.LineQubit(i)))\n circuit.append(cirq.measure(cirq.LineQubit(i)))\n\n # Validate this can't be allocated with entangled state\n with pytest.raises(MemoryError, match='Unable to allocate'):\n _ = cirq.DensityMatrixSimulator(split_untangled_states=False).simulate(circuit)\n\n # Validate a simulation run\n result = cirq.DensityMatrixSimulator().simulate(circuit)\n assert set(result._final_simulator_state.qubits) == set(cirq.LineQubit.range(59))\n # _ = result.final_density_matrix hangs (as expected)\n\n # Validate a trial run and sampling\n result = cirq.DensityMatrixSimulator().run(circuit, repetitions=1000)\n assert len(result.measurements) == 59\n assert len(result.measurements['q(0)']) == 1000\n assert (result.measurements['q(0)'] == np.full(1000, 1)).all()\n\n\ndef test_separated_states_str_does_not_merge():\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(cirq.measure(q0), cirq.measure(q1), cirq.X(q0))\n\n result = cirq.DensityMatrixSimulator().simulate(circuit)\n assert (\n str(result)\n == \"\"\"measurements: q(0)=0 q(1)=0\n\nqubits: (cirq.LineQubit(0),)\nfinal density matrix:\n[[0.+0.j 0.+0.j]\n [0.+0.j 1.+0.j]]\n\nqubits: (cirq.LineQubit(1),)\nfinal density matrix:\n[[1.+0.j 0.+0.j]\n [0.+0.j 0.+0.j]]\n\nphase:\nfinal density matrix:\n[[1.+0.j]]\"\"\"\n )\n\n\ndef test_unseparated_states_str():\n q0, q1 = cirq.LineQubit.range(2)\n circuit = cirq.Circuit(cirq.measure(q0), cirq.measure(q1), cirq.X(q0))\n\n result = cirq.DensityMatrixSimulator(split_untangled_states=False).simulate(circuit)\n assert (\n str(result)\n == \"\"\"measurements: q(0)=0 q(1)=0\n\nqubits: (cirq.LineQubit(0), cirq.LineQubit(1))\nfinal density matrix:\n[[0.+0.j 0.+0.j 0.+0.j 0.+0.j]\n [0.+0.j 0.+0.j 0.+0.j 0.+0.j]\n [0.+0.j 0.+0.j 1.+0.j 0.+0.j]\n [0.+0.j 0.+0.j 0.+0.j 0.+0.j]]\"\"\"\n )\n\n\ndef test_sweep_unparameterized_prefix_not_repeated_even_non_unitaries():\n q = cirq.LineQubit(0)\n\n class NonUnitaryOp(cirq.Operation):\n count = 0\n\n def _act_on_(self, sim_state):\n self.count += 1\n return True\n\n def with_qubits(self, qubits):\n pass\n\n @property\n def qubits(self):\n return (q,)\n\n simulator = cirq.DensityMatrixSimulator()\n params = [cirq.ParamResolver({'a': 0}), cirq.ParamResolver({'a': 1})]\n\n op1 = NonUnitaryOp()\n op2 = NonUnitaryOp()\n circuit = cirq.Circuit(op1, 
cirq.XPowGate(exponent=sympy.Symbol('a'))(q), op2)\n simulator.simulate_sweep(program=circuit, params=params)\n assert op1.count == 1\n assert op2.count == 2\n",
"# pylint: disable=wrong-or-nonexistent-copyright-notice\n\"\"\" Example program to demonstrate BB84 QKD Protocol\n\nBB84 [1] is a quantum key distribution (QKD) protocol developed by\nCharles Bennett and Gilles Brassard in 1984. It was the first quantum\ncryptographic protocol, using the laws of quantum mechanics (specifically,\nno-cloning) to provide provably secure key generation.\n\nBB84 relies on the fact that it is impossible to gain information\ndistinguishing two non-orthogonal states without disturbing the signal.\n\nThe scheme involves two parties Alice and Bob connected by a classical\ncommunication channel. In addition to this, Alice can also prepare\nqubits in a particular state and send them to Bob using a unidirectional\nquantum channel.\n\nAlice generates two random binary strings a and b of the same length n.\nThe string a encodes the state and the string b encodes the basis.\nShe then prepares n qubits according to the following prescription:\n\n|q[i]⟩ = |0⟩ if a[i] == 0 and b[i] == 0\n|q[i]⟩ = |1⟩ if a[i] == 1 and b[i] == 0\n|q[i]⟩ = |+⟩ if a[i] == 0 and b[i] == 1\n|q[i]⟩ = |-⟩ if a[i] == 1 and b[i] == 1\n\nwhere |+/-⟩ = 1/sqrt(2)*(|0⟩+/-|1⟩).\n\nAlice sends her qubits to Bob. Bob then generates a random binary string\nc of length n. He measures the qubit |q[i]⟩ in the {|0⟩, |1⟩} basis\n(computational basis) if c[i] == 0 and in the {|+⟩,|-⟩} basis\n(Hadamard basis) if c[i] == 1 and stores the result in a string m.\nAlice and Bob then announce the strings b and c, which encode\nthe random basis choices of Alice and Bob respectively.\n\nThe strings a and m match in the places where b and c are the same.\nThis happens because the state was measured in the same basis in\nwhich it was prepared. For the remaining bits, the results are\nuncorrelated. The bits from strings a and m where the bases match\ncan be used as a key for cryptography.\n\nBB84 is secure against intercept-and-resend attacks. The no-cloning\ntheorem [2] guarantees that a qubit that is in an unknown state to\nbegin with cannot be copied or cloned. Thus, any measurement will\ndestroy the initial state of the qubit. Suppose an eavesdropper Eve\nintercepts all of Alice's qubits, measures them in a randomly chosen\nbasis, prepares another qubit in the state that she measured and resends\nit to Bob. The state Eve measures is not necessarily the state Alice\nprepared, and hence, Alice and Bob will not measure the same outcome\nfor that qubit even if their basis choices match. 
Thus, Alice and Bob\ncan detect eavesdropping by comparing a few bits from their\nobtained keys.\n\n[1]: https://en.wikipedia.org/wiki/BB84\n[2]: https://en.wikipedia.org/wiki/No-cloning_theorem\n\n === Example output ===\n\nSimulating non-eavesdropped protocol\n\n0: ───X───M───────────\n\n1: ───H───H───M───────\n\n2: ───X───H───M───────\n\n3: ───X───H───M───────\n\n4: ───X───H───M───────\n\n5: ───X───H───H───M───\n\n6: ───H───M───────────\n\n7: ───H───H───M───────\n\nAlice's basis: CHCCCHCH\nBob's basis: CHHHHHHH\nAlice's bits: 10111100\nBases match:: XX___X_X\nExpected key: 1010\nActual key: 1010\n\nSimulating eavesdropped protocol\n\n0: ───H───M───────────H───M───────────\n\n1: ───H───M───────────H───H───M───────\n\n2: ───X───H───H───M───X───H───H───M───\n\n3: ───H───M───────────H───M───────────\n\n4: ───M───────────────M───────────────\n\n5: ───X───H───M───────X───H───M───────\n\n6: ───H───M───────────X───H───M───────\n\n7: ───X───H───H───M───X───H───M───────\n\nAlice's basis: HCHCCHCH\nBob's basis: HHHCCHCC\nAlice's bits: 00100101\nBases match:: X_XXXXX_\nExpected key: 010010\nActual key: 111011\n\n\"\"\"\nimport numpy as np\nimport cirq\n\n\ndef main(num_qubits=8):\n # Setup non-eavesdropped protocol\n print('Simulating non-eavesdropped protocol')\n qubits = cirq.LineQubit.range(num_qubits)\n alice_basis = [np.random.randint(0, 2) for _ in range(num_qubits)]\n alice_state = [np.random.randint(0, 2) for _ in range(num_qubits)]\n bob_basis = [np.random.randint(0, 2) for _ in range(num_qubits)]\n\n expected_key = bitstring(\n [alice_state[i] for i in range(num_qubits) if alice_basis[i] == bob_basis[i]]\n )\n\n circuit = make_bb84_circ(num_qubits, alice_basis, bob_basis, alice_state)\n\n # Run simulations.\n repetitions = 1\n\n result = cirq.Simulator().run(program=circuit, repetitions=repetitions)\n result_bitstring = bitstring([int(result.measurements[str(q)]) for q in qubits])\n\n # Take only qubits where bases match\n obtained_key = ''.join(\n [result_bitstring[i] for i in range(num_qubits) if alice_basis[i] == bob_basis[i]]\n )\n\n assert expected_key == obtained_key, \"Keys don't match\"\n print(circuit)\n print_results(alice_basis, bob_basis, alice_state, expected_key, obtained_key)\n\n # Setup eavesdropped protocol\n print('Simulating eavesdropped protocol')\n np.random.seed(200) # Seed random generator for consistent results\n alice_basis = [np.random.randint(0, 2) for _ in range(num_qubits)]\n alice_state = [np.random.randint(0, 2) for _ in range(num_qubits)]\n bob_basis = [np.random.randint(0, 2) for _ in range(num_qubits)]\n eve_basis = [np.random.randint(0, 2) for _ in range(num_qubits)]\n\n expected_key = bitstring(\n [alice_state[i] for i in range(num_qubits) if alice_basis[i] == bob_basis[i]]\n )\n\n # Eve intercepts the qubits\n\n alice_eve_circuit = make_bb84_circ(num_qubits, alice_basis, eve_basis, alice_state)\n\n # Run simulations.\n repetitions = 1\n result = cirq.Simulator().run(program=alice_eve_circuit, repetitions=repetitions)\n eve_state = [int(result.measurements[str(q)]) for q in qubits]\n\n eve_bob_circuit = make_bb84_circ(num_qubits, eve_basis, bob_basis, eve_state)\n\n # Run simulations.\n repetitions = 1\n result = cirq.Simulator().run(program=eve_bob_circuit, repetitions=repetitions)\n result_bitstring = bitstring([int(result.measurements[str(q)]) for q in qubits])\n\n # Take only qubits where bases match\n obtained_key = ''.join(\n [result_bitstring[i] for i in range(num_qubits) if alice_basis[i] == bob_basis[i]]\n )\n\n assert expected_key != 
obtained_key, \"Keys shouldn't match\"\n\n circuit = alice_eve_circuit + eve_bob_circuit\n print(circuit)\n print_results(alice_basis, bob_basis, alice_state, expected_key, obtained_key)\n\n\ndef make_bb84_circ(num_qubits, alice_basis, bob_basis, alice_state):\n\n qubits = cirq.LineQubit.range(num_qubits)\n\n circuit = cirq.Circuit()\n\n # Alice prepares her qubits\n alice_enc = []\n for index, _ in enumerate(alice_basis):\n if alice_state[index] == 1:\n alice_enc.append(cirq.X(qubits[index]))\n if alice_basis[index] == 1:\n alice_enc.append(cirq.H(qubits[index]))\n\n circuit.append(alice_enc)\n\n # Bob measures the received qubits\n bob_basis_choice = []\n for index, _ in enumerate(bob_basis):\n if bob_basis[index] == 1:\n bob_basis_choice.append(cirq.H(qubits[index]))\n\n circuit.append(bob_basis_choice)\n circuit.append(cirq.measure_each(*qubits))\n\n return circuit\n\n\ndef bitstring(bits):\n return ''.join(str(int(b)) for b in bits)\n\n\ndef print_results(alice_basis, bob_basis, alice_state, expected_key, obtained_key):\n num_qubits = len(alice_basis)\n basis_match = ''.join(\n ['X' if alice_basis[i] == bob_basis[i] else '_' for i in range(num_qubits)]\n )\n alice_basis_str = \"\".join(['C' if alice_basis[i] == 0 else \"H\" for i in range(num_qubits)])\n bob_basis_str = \"\".join(['C' if bob_basis[i] == 0 else \"H\" for i in range(num_qubits)])\n\n print(f'Alice\\'s basis:\\t{alice_basis_str}')\n print(f'Bob\\'s basis:\\t{bob_basis_str}')\n print(f'Alice\\'s bits:\\t{bitstring(alice_state)}')\n print(f'Bases match::\\t{basis_match}')\n print(f'Expected key:\\t{expected_key}')\n print(f'Actual key:\\t{obtained_key}')\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.sqrt",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.array",
"numpy.vstack"
],
[
"numpy.sqrt"
],
[
"numpy.array"
],
[
"numpy.diag",
"numpy.testing.assert_equal",
"numpy.random.random",
"numpy.allclose",
"numpy.array_equal",
"numpy.shares_memory",
"numpy.eye",
"numpy.empty",
"numpy.ones",
"numpy.all",
"numpy.testing.assert_almost_equal",
"numpy.full",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros",
"numpy.trace",
"numpy.random.RandomState",
"numpy.isclose"
],
[
"numpy.random.seed",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexcornier/INSEE | [
"a5dc6e1267834754ac1cd1331203b5e835828946"
] | [
"request.py"
] | [
"#================================================================\n# Ensemble de requêtes SQL sur une base de données SQL\n# hébergées sur un serveur local postgresql\n#\n# Modules pythons nécessaires\n# psycopg2 (SQL connection)\n# pandas (DataFrame et HTML)\n# matplotlib\n# jinja2 (styles HTML)\n#\n# Alexandre Cornier - 2020\n#================================================================\n\nimport psycopg2\nimport pandas as pd\nimport webbrowser\nimport pathlib\n\n# Interrupteur d'affichage console\nbconsole = False # pas d'affichage console par défaut\n\n#---------------------------- Connection à la Base de Données ------------------------------------\nconnection = psycopg2.connect(\"host=localhost port=5432 dbname=cremi user=postgres password=Audierne\")\ncur = connection.cursor()\n\n#-------------------------------------- Fonctions ------------------------------------------------\n\n# Affichage HTML des résultats dans le navigateur\ndef affiche_html(titre_question, question, fichier, resultat_html):\n # Préparation de l'entête du fichier HTML\n header = \"\"\"<!DOCTYPE html>\n <html>\n <head>\n <title>\"\"\" + titre_question + \"\"\"</title>\n </head>\n <body>\n \n <h1>\"\"\" + titre_question + \"\"\"</h1>\n <p>\"\"\" + question + \"\"\"</p>\n \"\"\"\n\n footer = \"\"\"\n </body>\n </html>\"\"\"\n\n # write html to file\n text_file = open(fichier, \"w\")\n text_file.write(header)\n text_file.write(resultat_html)\n text_file.write(footer)\n text_file.close()\n\n # open report.html in browser\n current_path = pathlib.Path(__file__).parent.absolute()\n fichier = \"file://\" + str(current_path) + \"/\" + fichier\n webbrowser.open(fichier)\n\n\n# Question 1\ndef listeRegions():\n cur.execute(\"\"\"SELECT reg, libelle FROM regions ORDER BY reg\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code région', 'Région'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Région\" else '' for i in x])\n .hide_index()\n .render())\n\n affiche_html(\"Question 1\", \"Régions présentes dans la base de données\",\\\n \"question_01.html\", html)\n\n if (bconsole):\n print(\"les régions présentes dans la base de données sont : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 2\ndef listeDepartement():\n cur.execute(\"\"\"SELECT dep, libelle FROM departements ORDER BY dep\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code département', 'Département'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .hide_index()\n .render())\n\n affiche_html(\"Question 2\", \"Départements présents dans la base de données\",\\\n \"question_02.html\", html)\n\n if (bconsole):\n print(\"les départements présents dans la 
base de données sont : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 3\ndef choixRegions():\n print(\"Donnez le nom de la région :\")\n choix = input().capitalize()\n cur.execute(\"\"\"SELECT * FROM regionsocial WHERE region = '%s' \"\"\" % choix)\n\n lst = []\n for info in cur.fetchall():\n lst=[[\"Numéro\", info[0]],\n [\"Taux de pauvreté (%)\", info[2]],\n [\"Part des jeunes non insérés (%) en 2014\", info[3]],\n [\"Part des jeunes non insérés (%) en 2009\", info[4]],\n [\"Poids de l'économie sociale dans les emplois salariés du territoire (%)\", info[5]]]\n\n df = pd.DataFrame(lst, columns=['Information', 'Valeur'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .set_properties(subset=[\"Valeur\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 3\", \"Informations concernant la régione \" + choix,\\\n \"question_03.html\", html)\n\n if (bconsole):\n print(\"-------------- Informations concernant\", choix, \"--------------\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 4\ndef choix_departement_theme(): \n print(\"Donnez le nom du département :\")\n choix1 = input().capitalize()\n print(\"Choisissez un thème : 1.Social ou 2.Environnement (par défaut)\")\n choix2 = input()\n\n lst = []\n if choix2 == \"1\" or choix2.lower() == \"social\":\n cur.execute(\"\"\"SELECT * FROM departementsocial WHERE departements = '%s' \"\"\" % choix1)\n\n for info in cur.fetchall():\n lst = [[\"Numéro\", info[0]],\n [\"Espérance de vie des hommes à la naissance en 2015 (années)\", info[2]],\n [\"Espérance de vie des hommes à la naissance en 2010 (années)\", info[3]],\n [\"Espérance de vie des femmes à la naissance en 2015 (années)\", info[4]],\n [\"Espérance de vie des femmes à la naissance en 2010 (années)\", info[5]],\n [\"Part de la population éloignée de plus de 7 mn des services de santé de proximité (%) en 2016\", info[6]],\n [\"Part de la population estimée en zone inondable (%)\", info[7]]]\n\n df = pd.DataFrame(lst, columns=['Information', 'Valeur'])\n\n df[\"Valeur\"] = pd.to_numeric(df[\"Valeur\"], errors='coerce')\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .format({\"Valeur\": \"{:.1f}\"})\n .set_properties(subset=[\"Valeur\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 4a\",\\\n \"Informations sociales concernant le département \" + choix1,\\\n \"question_04a.html\", html)\n\n if (bconsole):\n df[\"Valeur\"] = df[\"Valeur\"].map(\"{:.1f}\".format)\n print(\"-------------- Informations concernant\", choix1, \"--------------\")\n print(df)\n\n else :\n cur.execute(\"\"\"SELECT * FROM departementenvironnement WHERE departements = '%s' \"\"\" % choix1)\n\n for info in cur.fetchall():\n lst = [[\"Numéro\", info[0]],\n [\"Taux de valorisation matière et organique (%) en 2013\", 
info[2]],\n [\"Taux de valorisation matière et organique (%) en 2009\", info[3]],\n [\"Part de surfaces artificialisées (%) en 2012\", info[4]],\n [\"Part de surfaces artificialisées (%) en 2006\", info[5]],\n [\"Part de l'agriculture biologique dans la surface agricole totale (%) en 2016\", info[6]],\n [\"Part de l'agriculture biologique dans la surface agricole totale (%) en 2010\", info[7]],\n [\"Production de granulats (tonnes) en 2014\", info[8]],\n [\"Production de granulats (tonnes) en 2009\", info[9]],\n [\"Eolien (%) en 2015\", info[10]],\n [\"Eolien (%) en 2010\", info[11]],\n [\"Photovoltaïque (%) en 2015\", info[12]],\n [\"Photovoltaïque (%) en 2010\", info[13]],\n [\"Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2015\",info[14]],\n [\"Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2010\",info[15]]]\n\n df = pd.DataFrame(lst, columns=['Information', 'Valeur'])\n\n df[\"Valeur\"] = pd.to_numeric(df[\"Valeur\"], errors='coerce')\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .format({\"Valeur\": \"{:.1f}\"})\n .set_properties(subset=[\"Valeur\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 4b\",\\\n \"Informations environnementales concernant le département \" + choix1,\\\n \"question_04b.html\", html)\n\n if (bconsole):\n df[\"Valeur\"] = df[\"Valeur\"].map(\"{:.1f}\".format)\n print(\"-------------- Informations concernant\", choix1, \"--------------\")\n print(df)\n\n if (bconsole):\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 5\ndef typeEnergie():\n print(\"Choisissez un type d'energie : 1.Eolien, 2.Photovoltaique ou 3.Autre\")\n choix = input()\n\n if choix == \"1\" or choix.lower() == \"eolien\":\n cur.execute(\"\"\"SELECT nb, departements, eolien2015 - eolien2010 AS croissance FROM departementenvironnement\n WHERE eolien2015 > eolien2010\n ORDER BY eolien2015 - eolien2010 DESC\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Croissance\"])\n .format({\"Croissance\": \"{:.1f}pts\"})\n .set_properties(subset=[\"Croissance\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 5a\",\\\n \"Départements où la part de l'énergie éolienne a augmenté entre les deux années de référence\",\\\n \"question_05a.html\", html)\n\n if (bconsole):\n df[\"Croissance\"] = df[\"Croissance\"].map(\"{:.1f}pts\".format)\n print(\n \"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : \")\n print(df)\n\n if choix == \"2\" or choix.lower() == \"photovoltaique\":\n 
cur.execute(\"\"\"SELECT nb, departements, photovoltaique2015 - photovoltaique2010 AS croissance FROM departementenvironnement\n WHERE photovoltaique2015 > photovoltaique2010\n ORDER BY photovoltaique2015 - photovoltaique2010 DESC\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Croissance\"])\n .format({\"Croissance\": \"{:.1f}pts\"})\n .set_properties(subset=[\"Croissance\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 5b\",\\\n \"Départements où la part de l'énergie photovoltaïque a augmenté entre les deux années de référence\",\\\n \"question_05b.html\", html)\n\n if (bconsole):\n df[\"Croissance\"] = df[\"Croissance\"].map(\"{:.1f}pts\".format)\n print(\"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : \")\n print(df)\n\n if choix == \"3\" or choix.lower() == \"autre\":\n cur.execute(\"\"\"SELECT nb, departements, autre2015 - autre2010 AS croissance FROM departementenvironnement\n WHERE autre2015 > autre2010\n ORDER BY autre2015 - autre2010 DESC\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Croissance\"])\n .format({\"Croissance\": \"{:.1f}pts\"})\n .set_properties(subset=[\"Croissance\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 5c\",\\\n \"Départements où la part des énergies renouvelables autres a augmenté entre les deux années de référence\",\\\n \"question_05c.html\", html)\n\n if (bconsole):\n df[\"Croissance\"] = df[\"Croissance\"].map(\"{:.1f}pts\".format)\n print(\"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : \")\n print(df)\n\n if (bconsole):\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 6\ndef tonnes():\n cur.execute(\"\"\"SELECT departements.reg, regions.libelle AS region, departements.libelle AS departement\n FROM departements, regions \n WHERE departements.reg\n IN (SELECT departements.reg from departements\n INNER JOIN departementenvironnement\n ON departements.dep = departementenvironnement.nb\n INNER JOIN regions\n ON departements.reg = regions.reg\n GROUP BY departements.reg\n\t\t\tHAVING SUM(prodgranulat2014) > 25000000\n\t\t\tAND SUM(prodgranulat2014) <> 'NaN')\n\t\tORDER BY region, departement\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code 
région', 'Région', 'Département'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .hide_index()\n .render())\n\n affiche_html(\"Question 6\",\\\n \"Départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014\",\\\n \"question_06.html\", html)\n\n if (bconsole):\n print(\"les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014 sont :\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 7\ndef topFive():\n cur.execute(\"\"\"SELECT nb, departements, eolien2015 FROM departementenvironnement \n ORDER BY nullif(eolien2015, 'NaN')\n DESC nulls last LIMIT 5\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code département', 'Département', \"Part de l'énergie éolienne en 2015\"])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Part de l'énergie éolienne en 2015\"])\n .format({\"Part de l'énergie éolienne en 2015\": \"{:.1f}%\"})\n .set_properties(subset=[\"Part de l'énergie éolienne en 2015\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 7\",\\\n \"Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015\",\\\n \"question_07.html\", html)\n\n if (bconsole):\n df[\"Part de l'énergie éolienne en 2015\"] = df[\"Part de l'énergie éolienne en 2015\"].map(\"{:.1f}%\".format)\n print(\"Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015 sont :\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 8\ndef weak():\n cur.execute(\"\"\"SELECT regions.reg, regions.libelle AS region,\n departements.libelle AS departement, departementenvironnement.valorisationorga2013\n FROM departements\n INNER JOIN regions\n ON departements.reg = regions.reg\n INNER JOIN departementenvironnement\n ON departements.dep = departementenvironnement.nb\n ORDER BY nullif(valorisationorga2013, 'NaN') nulls last LIMIT 1\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code région', 'Région', 'Département', 'Valorisation en 2013'])\n\n # Formattage des valeurs\n df[\"Valorisation en 2013\"] = df[\"Valorisation en 2013\"].map(\"{:.1f}\".format)\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': 
[('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Région\" else '' for i in x])\n .set_properties(subset=[\"Valorisation en 2013\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 8\",\\\n \"Région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013\",\\\n \"question_08.html\", html)\n\n if (bconsole):\n print(\"La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013 est :\")\n print(\"Reg, Région, Département, Valorisation2013\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 9\ndef bestPopMin():\n cur.execute(\"\"\"SELECT departementenvironnement.departements, departementenvironnement.agriculturebio2016 \n FROM departementenvironnement\n INNER JOIN departementsocial\n ON departementenvironnement.departements = departementsocial.departements\n ORDER BY nullif(popeloignee7min, 'NaN') DESC nulls last LIMIT 1\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Département', \"Part de l'agriculture biologique\"])\n\n # Formattage des valeurs\n df[\"Part de l'agriculture biologique\"] = df[\"Part de l'agriculture biologique\"].map(\"{:.1f}%\".format)\n\n titre_html = \"Part en 2016 (en %) de l’agriculture biologique dans la surface agricole totale du département<br>\" +\\\n \"contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité\"\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Part de l'agriculture biologique\" else '' for i in x])\n .set_properties(subset=[\"Part de l'agriculture biologique\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 9\", titre_html, \"question_09.html\", html)\n\n if (bconsole):\n print(\"En 2016, la part (en %) de l’agriculture biologique dans la surface agricole totale du département\")\n print(\"contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité est : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 10\ndef pauvrete():\n cur.execute(\"\"\"SELECT pauvrete,region \n FROM regionsocial \n WHERE jeunesnoninseres2014 > 30\n AND pauvrete <> 'NaN'\n ORDER BY nullif(pauvrete, 'NaN') DESC nulls last\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Pauvreté', 'Région'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Pauvreté\" else '' for i in x])\n .format({\"Pauvreté\": \"{:.2f}%\"})\n .set_properties(subset=[\"Pauvreté\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 10\",\\\n \"Taux de pauvreté 
connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014\",\\\n \"question_10.html\", html)\n\n if (bconsole):\n df[\"Pauvreté\"] = df[\"Pauvreté\"].map(\"{:.2f}%\".format)\n print(\"Le taux de pauvreté connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 sont : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 11\ndef poids_eco():\n cur.execute(\"\"\"SELECT regions.reg, regions.libelle, poidseco,\n AVG(photovoltaique2015) AS photovoltaique2015,\n AVG(agriculturebio2016) AS agriculturebio2016\n FROM departements\n INNER JOIN departementenvironnement\n ON departements.dep = departementenvironnement.nb\n INNER JOIN regionsocial\n ON departements.reg = regionsocial.nb\n INNER JOIN regions\n ON departements.reg = regions.reg\n GROUP BY poidseco, regions.reg\n HAVING AVG(photovoltaique2015) >= 10\n AND AVG(photovoltaique2015) <> 'NaN'\n AND AVG(agriculturebio2016) >= 5\n AND AVG(agriculturebio2016) <> 'NaN'\n ORDER BY poidseco\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code région', 'Région', \"Poids de l'économie sociale\",\\\n \"Part moyenne du photovoltaïque\", \"Part moyenne de l'agriculture Bio\"])\n\n # Conversion string vers float pour le formattage\n df[\"Part moyenne du photovoltaïque\"] = pd.to_numeric(df[\"Part moyenne du photovoltaïque\"], errors='coerce').fillna(0)\n df[\"Part moyenne de l'agriculture Bio\"] = pd.to_numeric(df[\"Part moyenne de l'agriculture Bio\"], errors=\"coerce\").fillna(0)\n\n titre_html = \"Poids de l'économie sociale en 2015 dans les emplois salariés de la région<br>\" +\\\n \"dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque<br>\" +\\\n \"et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%\"\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .set_properties(subset=[\"Poids de l'économie sociale\", \"Part moyenne du photovoltaïque\",\n \"Part moyenne de l'agriculture Bio\"], **{'text-align': 'right'})\n .hide_index()\n .background_gradient(cmap='Blues', subset=[\"Poids de l'économie sociale\"])\n .format({\"Poids de l'économie sociale\": \"{:.1f}%\"})\n .format({\"Part moyenne du photovoltaïque\": \"{:.1f}%\"})\n .format({\"Part moyenne de l'agriculture Bio\": \"{:.1f}%\"})\n .render())\n\n affiche_html(\"Question 11\", titre_html, \"question_11.html\", html)\n\n if (bconsole):\n df[\"Poids de l'économie sociale\"] = df[\"Poids de l'économie sociale\"].map(\"{:.1f}%\".format)\n df[\"Part moyenne du photovoltaïque\"] = df[\"Part moyenne du photovoltaïque\"].map(\"{:.1f}%\".format)\n df[\"Part moyenne de l'agriculture Bio\"] = df[\"Part moyenne de l'agriculture Bio\"].map(\"{:.1f}%\".format)\n print(\"Poids de l'économie sociale en 2015 dans les emplois salariés de la région\")\n print(\"dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque\")\n print(\"et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au 
menu\")\n input()\n\n\ndef menu():\n print (\"\")\n print (\"------------------------------------ Projet INSEE -----------------------------------\")\n print (\"\")\n print (\"1...Afficher la liste des régions\")\n print (\"2...Afficher la liste des départements\")\n print (\"3...Demander à l’utilisateur de choisir une région et afficher les données de la region choisie\")\n print (\"4...Demander à l’utilisateur de choisir un département et un thème : social ou environnemental,\")\n print (\" | et afficher les données demandées pour le departement choisi\")\n print (\"5...demander à l’utilisateur de choisir un type d’énergie (éolien, photovoltaïque, autre)\")\n print (\" | et en fonction de ce choix retourner la liste des départements où la part de cette énergie a augmenté\")\n print (\" | entre les deux années de référence, classés de la plus forte augmentation à la plus faible.\")\n print (\"6...les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014\")\n print (\"7...les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015\")\n print (\"8...La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013\")\n print (\"9...La part (en %) de l’agriculture biologique dans la surface agricole totale du département contenant\")\n print (\" | le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité en 2016\")\n print (\"10..Le taux de pauvreté en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 \")\n print (\"11..Le poids de l'économie sociale dans les emplois salariés de la région dont la source de la puissance électrique\")\n print (\" | en énergies renouvelables provenait à au moins 10% de l’énergie photovoltaïque et dont la part\")\n print (\" | de l’agriculture biologique dans la surface agricole totale était d’au moins 5% en 2015\")\n print (\"\")\n print (\"0...Quitter\")\n print (\"-------------------------------------------------------------------------------------\")\n\n\n#----------------------------------------- MAIN --------------------------------------------------\n\n# Demande d'affichae console ou non, HTML seul par défaut\n\nprint(\"Souhaitez-vous afficher les résultats dans la console,\")\nprint(\"en plus de la création des fichiers HTML ?\")\nprint(\" (O Oui / N Non)\")\nchoix = input()\n\nif (choix[0].lower() == \"o\"):\n bconsole = True\n\n# Menu principal\n\nwhile True:\n menu()\n print(\"Chosissez un numéro de question pour avoir la réponse :\")\n choix = input()\n\n if (choix == \"1\"):\n listeRegions()\n elif (choix == \"2\"):\n listeDepartement()\n elif (choix == \"3\"):\n choixRegions()\n elif (choix == \"4\"):\n choix_departement_theme()\n elif (choix == \"5\"):\n typeEnergie()\n elif (choix == \"6\"):\n tonnes()\n elif (choix == \"7\"):\n topFive()\n elif (choix == \"8\"):\n weak()\n elif (choix == \"9\"):\n bestPopMin()\n elif (choix == \"10\"):\n pauvrete()\n elif (choix == \"11\"):\n poids_eco()\n elif (choix == \"0\"):\n break\n else:\n print (\"Choix invalide\")\n\n# fermeture \"propre\" du curseur et de la connection\ncur.close()\nconnection.close()"
] | [
[
"pandas.to_numeric",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
myelintek/results | [
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330",
"11c38436a158c453e3011f8684570f7a55c03330"
] | [
"v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/metrics_hook_test.py",
"v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/official/amoeba_net/model_builder.py",
"v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/models/revnet.py",
"v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/data_generators/problem_test.py",
"v0.5.0/nvidia/submission/code/translation/pytorch/fairseq/data/data_utils.py",
"v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/rl/model_rl_experiment_player.py",
"v0.5.0/google/cloud_v100x8/code/resnet/benchmarks/scripts/tf_cnn_benchmarks/cnn_util.py",
"v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/video/reward_confusion.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for metrics_hook.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport shutil\nfrom tensor2tensor.utils import metrics_hook\n\nimport tensorflow as tf\n\n\nclass DummyHook(metrics_hook.MetricsBasedHook):\n\n def _process_metrics(self, global_step, metrics):\n if metrics:\n assert \"\" in metrics\n assert isinstance(metrics[\"\"], dict)\n if metrics[\"\"]:\n assert \"global_step_1\" in metrics[\"\"]\n self.test_metrics = metrics\n if global_step >= 40:\n return True\n\n\nclass MetricsHookTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.base_checkpoint_dir = tf.test.get_temp_dir()\n shutil.rmtree(cls.base_checkpoint_dir, ignore_errors=True)\n\n def ckpt_dir(self, name):\n return os.path.join(self.base_checkpoint_dir, name)\n\n @contextlib.contextmanager\n def sess(self, hook, ckpt_dir):\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=ckpt_dir,\n save_checkpoint_secs=0,\n save_summaries_steps=10,\n hooks=[hook]) as sess:\n self._sess = sess\n yield sess\n\n def flush(self):\n self._sess._hooks[1]._summary_writer.flush()\n\n def testStop(self):\n global_step = tf.train.create_global_step()\n tf.summary.scalar(\"global_step\", global_step)\n incr_global_step = tf.assign_add(global_step, 1)\n\n ckpt_dir = self.ckpt_dir(\"stop\")\n dummy = DummyHook(ckpt_dir, every_n_steps=10)\n with self.sess(dummy, ckpt_dir) as sess:\n for _ in range(20):\n sess.run(incr_global_step)\n\n # Summary files should now have 2 global step values in them\n self.flush()\n\n # Run for 10 more so that the hook gets triggered again\n for _ in range(10):\n sess.run(incr_global_step)\n\n # Check that the metrics have actually been collected.\n self.assertTrue(\"\" in dummy.test_metrics)\n metrics = dummy.test_metrics[\"\"]\n self.assertTrue(\"global_step_1\" in metrics)\n steps, vals = metrics[\"global_step_1\"]\n self.assertTrue(len(steps) == len(vals))\n self.assertTrue(len(steps) >= 2)\n\n # Run for 10 more so that the hook triggers stoppage\n for _ in range(10):\n sess.run(incr_global_step)\n\n with self.assertRaisesRegexp(RuntimeError, \"after should_stop requested\"):\n sess.run(incr_global_step)\n\n def testEarlyStoppingHook(self):\n global_step = tf.train.create_global_step()\n counter = tf.get_variable(\"count\", initializer=0, dtype=tf.int32)\n tf.summary.scalar(\"count\", counter)\n incr_global_step = tf.assign_add(global_step, 1)\n incr_counter = tf.assign_add(counter, 1)\n\n # Stop if the global step has not gone up by more than 1 in 20 steps.\n\n ckpt_dir = self.ckpt_dir(\"early\")\n stop_hook = metrics_hook.EarlyStoppingHook(\n ckpt_dir,\n \"count_1\",\n num_plateau_steps=20,\n plateau_delta=1.,\n plateau_decrease=False,\n every_n_steps=10)\n with self.sess(stop_hook, ckpt_dir) as sess:\n for _ in range(20):\n sess.run((incr_global_step, 
incr_counter))\n\n # Summary files should now have 2 values in them\n self.flush()\n\n # Run for more steps so that the hook gets triggered and we verify that we\n # don't stop.\n for _ in range(30):\n sess.run((incr_global_step, incr_counter))\n\n self.flush()\n\n # Run without incrementing the counter\n for _ in range(40):\n sess.run(incr_global_step)\n\n # Metrics should be written such that now the counter has gone >20 steps\n # without being incremented.\n self.flush()\n\n # Check that we ask for stop\n with self.assertRaisesRegexp(RuntimeError, \"after should_stop requested\"):\n for _ in range(30):\n sess.run(incr_global_step)\n\n def testPlateauOpHook(self):\n global_step = tf.train.create_global_step()\n counter = tf.get_variable(\"count\", initializer=0, dtype=tf.int32)\n indicator = tf.get_variable(\"indicator\", initializer=0, dtype=tf.int32)\n tf.summary.scalar(\"count\", counter)\n incr_global_step = tf.assign_add(global_step, 1)\n incr_counter = tf.assign_add(counter, 1)\n incr_indicator = tf.assign_add(indicator, 1)\n\n # Stop if the global step has not gone up by more than 1 in 20 steps.\n\n ckpt_dir = self.ckpt_dir(\"plateauop\")\n stop_hook = metrics_hook.PlateauOpHook(\n ckpt_dir,\n \"count_1\",\n incr_indicator,\n num_plateau_steps=20,\n plateau_delta=1.,\n plateau_decrease=False,\n every_n_steps=10)\n with self.sess(stop_hook, ckpt_dir) as sess:\n for _ in range(20):\n sess.run((incr_global_step, incr_counter))\n\n # Summary files should now have 2 values in them\n self.flush()\n\n # Run for more steps so that the hook gets triggered and we verify that we\n # don't stop.\n for _ in range(30):\n sess.run((incr_global_step, incr_counter))\n\n self.flush()\n\n # Run without incrementing the counter\n for _ in range(30):\n sess.run(incr_global_step)\n self.flush()\n\n self.assertTrue(sess.run(indicator) < 1)\n\n # Metrics should be written such that now the counter has gone >20 steps\n # without being incremented.\n # Check that we run the incr_indicator op several times\n for _ in range(3):\n for _ in range(10):\n sess.run(incr_global_step)\n self.flush()\n\n self.assertTrue(sess.run(indicator) > 1)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Constructs a generic image model based on the hparams the user passes in.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport numpy as np\nimport tensorflow as tf\n\nimport network_utils\n\n\narg_scope = tf.contrib.framework.arg_scope\nslim = tf.contrib.slim\n\n\ndef _build_loss(loss_fn, loss_name, logits, end_points, labels,\n add_summary=False):\n \"\"\"Compute total loss based on the specified loss function.\"\"\"\n # Collect all losses explicitly to sum up the total_loss.\n losses = []\n\n # Whethere to add aux loss is controled in network_fn. Once an aux head is\n # built, an aux loss would be added here automatically.\n aux_head_endpoint = None\n if 'AuxLogits' in end_points:\n # For Inception/Genet aux head.\n aux_head_endpoint = end_points['AuxLogits']\n elif 'aux_logits' in end_points:\n # For AmoebaNet aux head.\n aux_head_endpoint = end_points['aux_logits'],\n if aux_head_endpoint:\n aux_loss = loss_fn(\n labels,\n tf.squeeze(aux_head_endpoint, axis=[0]),\n weights=0.4,\n scope='aux_loss')\n tf.logging.info('Adding to aux loss.')\n if add_summary:\n tf.summary.scalar('losses/aux_loss', aux_loss)\n\n losses.append(aux_loss)\n\n # Add the empirical loss.\n primary_loss = loss_fn(labels, logits, weights=1.0, scope=loss_name)\n losses.append(primary_loss)\n\n # Add regularization losses.\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if reg_losses:\n fp32_reg_losses = []\n for reg_loss in reg_losses:\n fp32_reg_losses.append(tf.cast(reg_loss, tf.float32))\n reg_loss = tf.add_n(fp32_reg_losses, name='regularization_loss')\n losses.append(reg_loss)\n\n total_loss = tf.add_n(losses, name='total_loss')\n if add_summary:\n tf.summary.scalar('losses/' + loss_name, primary_loss)\n tf.summary.scalar('losses/regularization_loss', reg_loss)\n tf.summary.scalar('losses/total_loss', total_loss)\n\n return total_loss\n\n\ndef build_softmax_loss(logits,\n end_points,\n labels,\n label_smoothing=0.1,\n add_summary=True):\n loss_fn = functools.partial(\n tf.losses.softmax_cross_entropy, label_smoothing=label_smoothing)\n return _build_loss(\n loss_fn=loss_fn,\n loss_name='softmax_loss',\n logits=logits,\n end_points=end_points,\n labels=labels,\n add_summary=add_summary)\n\n\ndef compute_flops_per_example(batch_size):\n # TODO(ereal): remove this function and other unnecessary reporting.\n options = tf.profiler.ProfileOptionBuilder.float_operation()\n options['output'] = 'none'\n flops = (\n tf.profiler.profile(\n tf.get_default_graph(),\n options=options\n ).total_float_ops / batch_size)\n return flops\n\n\ndef build_learning_rate(initial_lr,\n lr_decay_type,\n global_step,\n decay_factor=None,\n decay_steps=None,\n stepwise_epoch=None,\n total_steps=None,\n add_summary=True,\n 
warmup_steps=0):\n \"\"\"Build learning rate.\"\"\"\n if lr_decay_type == 'exponential':\n assert decay_steps is not None\n assert decay_factor is not None\n lr = tf.train.exponential_decay(\n initial_lr, global_step, decay_steps, decay_factor, staircase=True)\n elif lr_decay_type == 'cosine':\n assert total_steps is not None\n lr = 0.5 * initial_lr * (\n 1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))\n elif lr_decay_type == 'constant':\n lr = initial_lr\n elif lr_decay_type == 'stepwise':\n assert stepwise_epoch is not None\n boundaries = [\n 10 * stepwise_epoch,\n 20 * stepwise_epoch,\n ]\n values = [initial_lr, initial_lr * 0.1, initial_lr * 0.01]\n lr = tf.train.piecewise_constant(global_step, boundaries, values)\n else:\n assert False, 'Unknown lr_decay_type : %s' % lr_decay_type\n\n # By default, warmup_steps_fraction = 0.0 which means no warmup steps.\n tf.logging.info('Learning rate warmup_steps: %d' % warmup_steps)\n warmup_lr = (\n initial_lr * tf.cast(global_step, tf.float32) / tf.cast(\n warmup_steps, tf.float32))\n lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)\n\n if add_summary:\n tf.summary.scalar('learning_rate', lr)\n\n return lr\n\n\ndef _build_aux_head(net, end_points, num_classes, hparams, scope):\n \"\"\"Auxiliary head used for all models across all datasets.\"\"\"\n aux_scaling = 1.0\n # TODO(huangyp): double check aux_scaling with vrv@.\n if hasattr(hparams, 'aux_scaling'):\n aux_scaling = hparams.aux_scaling\n tf.logging.info('aux scaling: {}'.format(aux_scaling))\n with tf.variable_scope(scope, custom_getter=network_utils.bp16_getter):\n aux_logits = tf.identity(net)\n with tf.variable_scope('aux_logits'):\n aux_logits = slim.avg_pool2d(\n aux_logits, [5, 5], stride=3, padding='VALID')\n aux_logits = slim.conv2d(aux_logits, int(128 * aux_scaling),\n [1, 1], scope='proj')\n aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn0')\n aux_logits = tf.nn.relu(aux_logits)\n # Shape of feature map before the final layer.\n shape = aux_logits.shape\n if hparams.data_format == 'NHWC':\n shape = shape[1:3]\n else:\n shape = shape[2:4]\n aux_logits = slim.conv2d(aux_logits, int(768 * aux_scaling),\n shape, padding='VALID')\n aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn1')\n aux_logits = tf.nn.relu(aux_logits)\n aux_logits = tf.contrib.layers.flatten(aux_logits)\n aux_logits = slim.fully_connected(aux_logits, num_classes)\n end_point_name = (\n 'aux_logits' if 'aux_logits' not in end_points else 'aux_logits_2')\n end_points[end_point_name] = tf.cast(aux_logits, tf.float32)\n\n\ndef _imagenet_stem(inputs, hparams, stem_cell, filter_scaling_rate):\n \"\"\"Stem used for models trained on ImageNet.\"\"\"\n\n # 149 x 149 x 32\n num_stem_filters = hparams.stem_reduction_size\n with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter):\n net = slim.conv2d(\n inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',\n padding='VALID')\n net = network_utils.batch_norm(net, scope='conv0_bn')\n tf.logging.info('imagenet_stem shape: {}'.format(net.shape))\n # Run the reduction cells\n cell_outputs = [None, net]\n filter_scaling = 1.0 / (filter_scaling_rate**hparams.num_stem_cells)\n for cell_num in range(hparams.num_stem_cells):\n net = stem_cell(\n net,\n scope='cell_stem_{}'.format(cell_num),\n filter_scaling=filter_scaling,\n stride=2,\n prev_layer=cell_outputs[-2],\n cell_num=cell_num)\n cell_outputs.append(net)\n filter_scaling *= filter_scaling_rate\n tf.logging.info('imagenet_stem net shape 
at reduction layer {}: {}'.format(\n cell_num, net.shape))\n # Only include cells in the cell_outputs.\n return net, cell_outputs\n\n\ndef _basic_stem(inputs, hparams):\n num_stem_filters = hparams.stem_reduction_size\n with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter):\n net = slim.conv2d(\n inputs, num_stem_filters, [3, 3], stride=1, scope='conv0',\n padding='VALID')\n net = network_utils.batch_norm(net, scope='conv0_bn')\n tf.logging.info('basic_stem shape: {}'.format(net.shape))\n return net, [None, net]\n\n\ndef network_arg_scope(weight_decay=5e-5,\n batch_norm_decay=0.9997,\n batch_norm_epsilon=1e-3,\n is_training=True,\n data_format='NHWC',\n num_shards=None,\n distributed_group_size=1):\n \"\"\"Defines the default arg scope for the AmoebaNet ImageNet model.\n\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n batch_norm_decay: Decay for batch norm moving average.\n batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n in batch norm.\n is_training: whether is training or not.\n Useful for fine-tuning a model with different num_classes.\n data_format: Input data format.\n num_shards: Number of shards in the job\n distributed_group_size: Size of the group to average for batch norm.\n Returns:\n An `arg_scope` to use for the AmoebaNet Model.\n \"\"\"\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': batch_norm_decay,\n # epsilon to prevent 0s in variance.\n 'epsilon': batch_norm_epsilon,\n 'scale': True,\n 'moving_vars': 'moving_vars',\n 'is_training': is_training,\n 'data_format': data_format,\n 'num_shards': num_shards,\n 'distributed_group_size': distributed_group_size,\n }\n weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)\n weights_initializer = tf.contrib.layers.variance_scaling_initializer(\n mode='FAN_OUT')\n with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],\n weights_regularizer=weights_regularizer,\n weights_initializer=weights_initializer):\n with arg_scope([slim.fully_connected],\n activation_fn=None, scope='FC'):\n with arg_scope([slim.conv2d, slim.separable_conv2d],\n activation_fn=None, biases_initializer=None):\n with arg_scope([network_utils.batch_norm], **batch_norm_params):\n with arg_scope(\n [slim.dropout, network_utils.drop_path], is_training=is_training):\n with arg_scope([slim.avg_pool2d,\n slim.max_pool2d,\n slim.conv2d,\n slim.separable_conv2d,\n network_utils.factorized_reduction,\n network_utils.global_avg_pool,\n network_utils.get_channel_index,\n network_utils.get_channel_dim],\n data_format=data_format) as sc:\n return sc\n\n\ndef build_network(inputs,\n num_classes,\n is_training=True,\n hparams=None):\n \"\"\"Builds an image model.\n\n Builds a model the takes inputs and return logits and endpoints.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of classes needed to be predicted by the model. 
If None,\n only returns the feature vector endpoints after global_pool.\n is_training: whether is training or not.\n Useful for fine-tuning a model with different num_classes.\n hparams: hparams used to construct the imagenet model.\n\n Returns:\n a list containing 'logits', 'aux_logits' Tensors.\n\n Raises:\n ValueError: upon invalid hparams.\n \"\"\"\n total_num_cells = (hparams.num_cells +\n hparams.num_reduction_layers +\n hparams.num_stem_cells)\n normal_cell = network_utils.BaseCell(\n hparams.reduction_size, hparams.normal_cell_operations,\n hparams.normal_cell_used_hiddenstates,\n hparams.normal_cell_hiddenstate_indices, hparams.drop_connect_keep_prob,\n total_num_cells, hparams.drop_path_burn_in_steps)\n reduction_cell = network_utils.BaseCell(\n hparams.reduction_size, hparams.reduction_cell_operations,\n hparams.reduction_cell_used_hiddenstates,\n hparams.reduction_cell_hiddenstate_indices,\n hparams.drop_connect_keep_prob, total_num_cells,\n hparams.drop_path_burn_in_steps)\n num_shards = hparams.num_shards\n distributed_group_size = hparams.distributed_group_size\n assert distributed_group_size == 1 or hparams.use_tpu\n sc = network_arg_scope(weight_decay=hparams.weight_decay,\n batch_norm_decay=hparams.batch_norm_decay,\n batch_norm_epsilon=hparams.batch_norm_epsilon,\n is_training=is_training,\n data_format=hparams.data_format,\n num_shards=num_shards,\n distributed_group_size=distributed_group_size)\n with arg_scope(sc):\n return _build_network_base(inputs,\n normal_cell=normal_cell,\n reduction_cell=reduction_cell,\n num_classes=num_classes,\n hparams=hparams,\n is_training=is_training)\n\n\ndef _build_network_base(images,\n normal_cell,\n reduction_cell,\n num_classes,\n hparams,\n is_training):\n \"\"\"Constructs a AmoebaNet image model.\"\"\"\n if hparams.get('use_bp16', False) and hparams.get('use_tpu', False):\n images = tf.cast(images, dtype=tf.bfloat16)\n end_points = {}\n filter_scaling_rate = 2\n # Find where to place the reduction cells or stride normal cells\n reduction_indices = network_utils.calc_reduction_layers(\n hparams.num_cells, hparams.num_reduction_layers)\n stem_cell = reduction_cell\n\n if hparams.stem_type == 'imagenet':\n net, cell_outputs = _imagenet_stem(images, hparams, stem_cell,\n filter_scaling_rate)\n else:\n net, cell_outputs = _basic_stem(images, hparams)\n\n # Setup for building in the auxiliary head.\n aux_head_cell_idxes = []\n if len(reduction_indices) >= 2:\n aux_head_cell_idxes.append(reduction_indices[1] - 1)\n\n # Run the cells\n filter_scaling = 1.0\n # true_cell_num accounts for the stem cells\n true_cell_num = hparams.num_stem_cells\n for cell_num in range(hparams.num_cells):\n tf.logging.info('Current cell num: {}'.format(true_cell_num))\n\n if cell_num in reduction_indices:\n filter_scaling *= filter_scaling_rate\n net = reduction_cell(\n net,\n scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),\n filter_scaling=filter_scaling,\n stride=2,\n prev_layer=cell_outputs[-2],\n cell_num=true_cell_num)\n cell_outputs.append(net)\n tf.logging.info('Reduction cell shape at layer {}: {}'.format(\n true_cell_num, net.shape))\n true_cell_num += 1\n net = normal_cell(\n net,\n scope='cell_{}'.format(cell_num),\n filter_scaling=filter_scaling,\n stride=1,\n prev_layer=cell_outputs[-2],\n cell_num=true_cell_num)\n if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and\n num_classes and is_training):\n aux_net = tf.nn.relu(net)\n _build_aux_head(aux_net, end_points, num_classes, hparams,\n 
scope='aux_{}'.format(cell_num))\n cell_outputs.append(net)\n tf.logging.info('Normal net shape at layer {}: {}'.format(\n true_cell_num, net.shape))\n true_cell_num += 1\n\n # Final softmax layer\n with tf.variable_scope('final_layer',\n custom_getter=network_utils.bp16_getter):\n net = tf.nn.relu(net)\n net = network_utils.global_avg_pool(net)\n end_points['global_pool'] = net\n if not num_classes:\n return net, end_points\n net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')\n logits = slim.fully_connected(net, num_classes)\n logits = tf.cast(logits, tf.float32)\n predictions = tf.nn.softmax(logits, name='predictions')\n end_points['logits'] = logits\n end_points['predictions'] = predictions\n return logits, end_points\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Creates a RevNet with the bottleneck residual function.\n\nImplements the following equations described in the RevNet paper:\ny1 = x1 + f(x2)\ny2 = x2 + g(y1)\n\nHowever, in practice, the authors use the following equations to downsample\ntensors inside a RevNet block:\n\ny1 = h(x1) + f(x2)\ny2 = h(x2) + g(y1)\n\nIn this case, h is the downsampling function used to change number of channels.\n\nThese modified equations are evident in the authors' code online:\nhttps://github.com/renmengye/revnet-public\n\nFor reference, the original paper can be found here:\nhttps://arxiv.org/pdf/1707.04585.pdf\n\"\"\"\n\nimport functools\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\n\nimport tensorflow as tf\n\n\ndef wrapped_partial(fn, *args, **kwargs):\n partial = functools.partial(fn, *args, **kwargs)\n wrapped = functools.update_wrapper(partial, fn)\n return wrapped\n\n\nconv_initializer = tf.contrib.layers.variance_scaling_initializer(\n factor=2.0, mode='FAN_OUT')\n\nCONFIG = {'2d': {'conv': wrapped_partial(\n tf.layers.conv2d, kernel_initializer=conv_initializer),\n 'max_pool': tf.layers.max_pooling2d,\n 'avg_pool': tf.layers.average_pooling2d,\n 'split_axis': 3,\n 'reduction_dimensions': [1, 2]\n },\n '3d': {'conv': wrapped_partial(\n tf.layers.conv3d, kernel_initializer=conv_initializer),\n 'max_pool': tf.layers.max_pooling3d,\n 'avg_pool': tf.layers.average_pooling2d,\n 'split_axis': 4,\n 'reduction_dimensions': [1, 2, 3]\n }\n }\n\n\ndef f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1,\n training=True, bottleneck=True, padding='SAME'):\n \"\"\"Applies residual function for RevNet.\n\n Args:\n x: input tensor\n depth1: Number of output channels for the first and second conv layers.\n depth2: Number of output channels for the third conv layer.\n dim: '2d' if 2-dimensional, '3d' if 3-dimensional.\n first_batch_norm: Whether to keep the first batch norm layer or not.\n Typically used in the first RevNet block.\n stride: Stride for the first conv filter. Note that this particular\n RevNet architecture only varies the stride for the first conv\n filter. 
The stride for the second conv filter is always set to 1.\n training: True for train phase, False for eval phase.\n bottleneck: If true, apply bottleneck 1x1 down/up sampling.\n padding: Padding for each conv layer.\n\n Returns:\n Output tensor after applying residual function for RevNet.\n \"\"\"\n conv = CONFIG[dim]['conv']\n with tf.variable_scope('f', reuse=tf.AUTO_REUSE):\n if first_batch_norm:\n net = tf.layers.batch_normalization(x, training=training)\n net = tf.nn.relu(net)\n else:\n net = x\n\n if bottleneck:\n net = conv(net, depth1, 1, strides=stride,\n padding=padding, activation=None)\n\n net = tf.layers.batch_normalization(net, training=training)\n net = tf.nn.relu(net)\n net = conv(net, depth1, 3, strides=1,\n padding=padding, activation=None)\n\n net = tf.layers.batch_normalization(net, training=training)\n net = tf.nn.relu(net)\n net = conv(net, depth2, 1, strides=1,\n padding=padding, activation=None)\n else:\n net = conv(net, depth2, 3, strides=stride,\n padding=padding, activation=None)\n net = tf.layers.batch_normalization(x, training=training)\n net = tf.nn.relu(net)\n net = conv(net, depth2, 3, strides=stride,\n padding=padding, activation=None)\n\n return net\n\n\ndef downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):\n \"\"\"Downsamples 'x' by `stride` using a 1x1 convolution filter.\n\n Args:\n x: input tensor of size [N, H, W, C]\n output_channels: Desired number of output channels.\n dim: '2d' if 2-dimensional, '3d' if 3-dimensional.\n stride: What stride to use. Usually 1 or 2.\n scope: Optional variable scope.\n\n Returns:\n A downsampled tensor of size [N, H/2, W/2, output_channels] if stride\n is 2, else returns a tensor of size [N, H, W, output_channels] if\n stride is 1.\n \"\"\"\n conv = CONFIG[dim]['conv']\n with tf.variable_scope(scope):\n x = conv(x, output_channels, 1, strides=stride, padding='SAME',\n activation=None)\n return x\n\n\ndef downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'):\n \"\"\"Downsamples 'x' by `stride` using average pooling.\n\n Args:\n x: input tensor of size [N, H, W, C]\n output_channels: Desired number of output channels.\n dim: '2d' if 2-dimensional, '3d' if 3-dimensional.\n stride: What stride to use. 
Usually 1 or 2.\n scope: Optional variable scope.\n\n Returns:\n A downsampled tensor of size [N, H/2, W/2, output_channels] if stride\n is 2, else returns a tensor of size [N, H, W, output_channels] if\n stride is 1.\n \"\"\"\n with tf.variable_scope(scope):\n if stride > 1:\n avg_pool = CONFIG[dim]['avg_pool']\n x = avg_pool(x,\n pool_size=(stride, stride),\n strides=(stride, stride),\n padding='VALID')\n\n input_channels = tf.shape(x)[3]\n diff = output_channels - input_channels\n x = tf.pad(\n x, [[0, 0], [0, 0], [0, 0],\n [diff // 2, diff // 2]])\n return x\n\n\ndef init(images, num_channels, dim='2d', stride=2,\n kernel_size=7, maxpool=True, training=True, scope='init'):\n \"\"\"Standard ResNet initial block used as first RevNet block.\n\n Args:\n images: [N, H, W, 3] tensor of input images to the model.\n num_channels: Output depth of convolutional layer in initial block.\n dim: '2d' if 2-dimensional, '3d' if 3-dimensional.\n stride: stride for the convolution and pool layer.\n kernel_size: Size of the initial convolution filter\n maxpool: If true, apply a maxpool after the convolution\n training: True for train phase, False for eval phase.\n scope: Optional scope for the init block.\n\n Returns:\n Two [N, H, W, C] output activations from input images.\n \"\"\"\n conv = CONFIG[dim]['conv']\n pool = CONFIG[dim]['max_pool']\n with tf.variable_scope(scope):\n net = conv(images, num_channels, kernel_size, strides=stride,\n padding='SAME', activation=None)\n net = tf.layers.batch_normalization(net, training=training)\n net = tf.nn.relu(net)\n if maxpool:\n net = pool(net, pool_size=3, strides=stride)\n x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis'])\n return x1, x2\n\n\ndef unit(x1, x2, block_num, depth, num_layers, dim='2d',\n bottleneck=True, first_batch_norm=True, stride=1, training=True):\n \"\"\"Implements bottleneck RevNet unit from authors' RevNet architecture.\n\n Args:\n x1: [N, H, W, C] tensor of network activations.\n x2: [N, H, W, C] tensor of network activations.\n block_num: integer ID of block\n depth: First depth in bottleneck residual unit.\n num_layers: Number of layers in the RevNet block.\n dim: '2d' if 2-dimensional, '3d' if 3-dimensional.\n bottleneck: Should a bottleneck layer be used.\n first_batch_norm: Whether to keep the first batch norm layer or not.\n Typically used in the first RevNet block.\n stride: Stride for the residual function.\n training: True for train phase, False for eval phase.\n\n Returns:\n Two [N, H, W, C] output activation tensors.\n \"\"\"\n scope_name = 'unit_%d' % block_num\n if bottleneck:\n depth1 = depth\n depth2 = depth * 4\n else:\n depth1 = depth2 = depth\n\n residual = wrapped_partial(f,\n depth1=depth1, depth2=depth2, dim=dim,\n training=training, bottleneck=bottleneck)\n\n with tf.variable_scope(scope_name):\n downsample = downsample_bottleneck if bottleneck else downsample_residual\n # Manual implementation of downsampling\n with tf.variable_scope('downsampling'):\n with tf.variable_scope('x1'):\n hx1 = downsample(x1, depth2, dim=dim, stride=stride)\n fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm)\n x1 = hx1 + fx2\n with tf.variable_scope('x2'):\n hx2 = downsample(x2, depth2, dim=dim, stride=stride)\n fx1 = residual(x1)\n x2 = hx2 + fx1\n\n # Full block using memory-efficient rev_block implementation.\n with tf.variable_scope('full_block'):\n x1, x2 = tf.contrib.layers.rev_block(x1, x2,\n residual,\n residual,\n num_layers=num_layers)\n return x1, x2\n\n\ndef final_block(x1, x2, dim='2d', 
training=True, scope='final_block'):\n \"\"\"Converts activations from last RevNet block to pre-logits.\n\n Args:\n x1: [NxHxWxC] tensor of network activations.\n x2: [NxHxWxC] tensor of network activations.\n dim: '2d' if 2-dimensional, '3d' if 3-dimensional.\n training: True for train phase, False for eval phase.\n scope: Optional variable scope for the final block.\n\n Returns:\n [N, hidden_dim] pre-logits tensor from activations x1 and x2.\n \"\"\"\n\n # Final batch norm and relu\n with tf.variable_scope(scope):\n y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])\n y = tf.layers.batch_normalization(y, training=training)\n y = tf.nn.relu(y)\n\n # Global average pooling\n net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],\n name='final_pool', keep_dims=True)\n\n return net\n\n\ndef revnet(inputs, hparams, reuse=None):\n \"\"\"Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.\n\n Args:\n inputs: [NxHxWx3] tensor of input images to the model.\n hparams: HParams object that contains the following parameters,\n in addition to the parameters contained in the basic_params1() object in\n the common_hparams module:\n num_channels_first - A Python list where each element represents the\n depth of the first and third convolutional layers in the bottleneck\n residual unit for a given block.\n num_channels_second - A Python list where each element represents the\n depth of the second convolutional layer in the bottleneck residual\n unit for a given block.\n num_layers_per_block - A Python list containing the number of RevNet\n layers for each block.\n first_batch_norm - A Python list containing booleans representing the\n presence of a batch norm layer at the beginning of a given block.\n strides - A Python list containing integers representing the stride of\n the residual function for each block.\n num_channels_init_block - An integer representing the number of channels\n for the convolutional layer in the initial block.\n dimension - A string (either \"2d\" or \"3d\") that decides if the RevNet is\n 2-dimensional or 3-dimensional.\n reuse: Whether to reuse the default variable scope.\n\n Returns:\n [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.\n \"\"\"\n training = hparams.mode == tf.estimator.ModeKeys.TRAIN\n with tf.variable_scope('RevNet', reuse=reuse):\n x1, x2 = init(inputs,\n num_channels=hparams.num_channels_init_block,\n dim=hparams.dim,\n kernel_size=hparams.init_kernel_size,\n maxpool=hparams.init_maxpool,\n stride=hparams.init_stride,\n training=training)\n for block_num in range(len(hparams.num_layers_per_block)):\n block = {'depth': hparams.num_channels[block_num],\n 'num_layers': hparams.num_layers_per_block[block_num],\n 'first_batch_norm': hparams.first_batch_norm[block_num],\n 'stride': hparams.strides[block_num],\n 'bottleneck': hparams.bottleneck}\n x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training,\n **block)\n pre_logits = final_block(x1, x2, dim=hparams.dim, training=training)\n return pre_logits\n\n\[email protected]_model\nclass Revnet(t2t_model.T2TModel):\n\n def body(self, features):\n return revnet(features['inputs'], self.hparams)\n\n\ndef revnet_base():\n \"\"\"Default hparams for Revnet.\"\"\"\n hparams = common_hparams.basic_params1()\n hparams.add_hparam('num_channels', [64, 128, 256, 416])\n hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1])\n hparams.add_hparam('bottleneck', True)\n hparams.add_hparam('first_batch_norm', [False, True, True, True])\n 
hparams.add_hparam('init_stride', 2)\n hparams.add_hparam('init_kernel_size', 7)\n hparams.add_hparam('init_maxpool', True)\n hparams.add_hparam('strides', [1, 2, 2, 2])\n hparams.add_hparam('num_channels_init_block', 64)\n hparams.add_hparam('dim', '2d')\n\n # Variable init\n hparams.initializer = 'normal_unit_scaling'\n hparams.initializer_gain = 2.\n\n # Optimization\n hparams.optimizer = 'Momentum'\n hparams.optimizer_momentum_momentum = 0.9\n hparams.optimizer_momentum_nesterov = True\n hparams.weight_decay = 1e-4\n hparams.clip_grad_norm = 0.0\n # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)\n hparams.learning_rate = 0.4\n hparams.learning_rate_decay_scheme = 'cosine'\n # For image_imagenet224, 120k training steps, which effectively makes this a\n # cosine decay (i.e. no cycles).\n hparams.learning_rate_cosine_cycle_steps = 120000\n\n # Can run with a batch size of 128 with Problem ImageImagenet224\n hparams.batch_size = 128\n return hparams\n\n\[email protected]_hparams\ndef revnet_104():\n return revnet_base()\n\n\ndef revnet_cifar_base():\n \"\"\"Tiny hparams suitable for CIFAR/etc.\"\"\"\n hparams = revnet_base()\n hparams.num_channels_init_block = 32\n hparams.first_batch_norm = [False, True, True]\n hparams.init_stride = 1\n hparams.init_kernel_size = 3\n hparams.init_maxpool = False\n hparams.strides = [1, 2, 2]\n hparams.batch_size = 128\n hparams.weight_decay = 1e-4\n\n hparams.learning_rate = 0.1\n hparams.learning_rate_cosine_cycle_steps = 5000\n return hparams\n\n\[email protected]_hparams\ndef revnet_38_cifar():\n hparams = revnet_cifar_base()\n hparams.bottleneck = False\n hparams.num_channels = [16, 32, 56]\n hparams.num_layers_per_block = [2, 2, 2]\n hparams.initializer = 'normal_unit_scaling'\n hparams.initializer_gain = 1.5\n return hparams\n\n\[email protected]_hparams\ndef revnet_110_cifar():\n \"\"\"Tiny hparams suitable for CIFAR/etc.\"\"\"\n hparams = revnet_cifar_base()\n hparams.bottleneck = False\n hparams.num_channels = [16, 32, 64]\n hparams.num_layers_per_block = [8, 8, 8]\n return hparams\n\n\[email protected]_hparams\ndef revnet_164_cifar():\n \"\"\"Tiny hparams suitable for CIFAR/etc.\"\"\"\n hparams = revnet_cifar_base()\n hparams.bottleneck = True\n hparams.num_channels = [16, 32, 64]\n hparams.num_layers_per_block = [8, 8, 8]\n return hparams\n\n\[email protected]_ranged_hparams\ndef revnet_range(rhp):\n \"\"\"Hyperparameters for tuning revnet.\"\"\"\n rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE)\n rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE)\n rhp.set_discrete('num_channels_init_block', [64, 128])\n return rhp\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for common problem functionalities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized # for assertLen\nimport numpy as np\n\nfrom tensor2tensor.data_generators import algorithmic\nfrom tensor2tensor.data_generators import problem as problem_module\nfrom tensor2tensor.data_generators import problem_hparams\nfrom tensor2tensor.layers import modalities\n\nimport tensorflow as tf\n\n\ndef assert_tensors_equal(sess, t1, t2, n):\n \"\"\"Compute tensors `n` times and ensure that they are equal.\"\"\"\n\n for _ in range(n):\n\n v1, v2 = sess.run([t1, t2])\n\n if v1.shape != v2.shape:\n return False\n\n if not np.all(v1 == v2):\n return False\n\n return True\n\n\nclass ProblemTest(parameterized.TestCase, tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n algorithmic.TinyAlgo.setup_for_test()\n\n def testNoShuffleDeterministic(self):\n problem = algorithmic.TinyAlgo()\n dataset = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,\n data_dir=algorithmic.TinyAlgo.data_dir,\n shuffle_files=False)\n\n tensor1 = dataset.make_one_shot_iterator().get_next()[\"targets\"]\n tensor2 = dataset.make_one_shot_iterator().get_next()[\"targets\"]\n\n with tf.Session() as sess:\n self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))\n\n def testNoShufflePreprocess(self):\n\n problem = algorithmic.TinyAlgo()\n dataset1 = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,\n data_dir=algorithmic.TinyAlgo.data_dir,\n shuffle_files=False, preprocess=False)\n dataset2 = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,\n data_dir=algorithmic.TinyAlgo.data_dir,\n shuffle_files=False, preprocess=True)\n\n tensor1 = dataset1.make_one_shot_iterator().get_next()[\"targets\"]\n tensor2 = dataset2.make_one_shot_iterator().get_next()[\"targets\"]\n\n with tf.Session() as sess:\n self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testProblemHparamsModality(self):\n problem = problem_hparams.TestProblem(input_vocab_size=2,\n target_vocab_size=3)\n p_hparams = problem.get_hparams()\n self.assertIsInstance(p_hparams.modality[\"inputs\"],\n modalities.SymbolModality)\n self.assertIsInstance(p_hparams.modality[\"targets\"],\n modalities.SymbolModality)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testProblemHparamsModalityObj(self):\n class ModalityObjProblem(problem_module.Problem):\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.SymbolModality,\n \"targets\": modalities.SymbolModality}\n hp.vocab_size = {\"inputs\": 2,\n \"targets\": 3}\n\n problem = ModalityObjProblem(False, False)\n p_hparams = problem.get_hparams()\n self.assertIsInstance(p_hparams.modality[\"inputs\"],\n modalities.SymbolModality)\n 
self.assertIsInstance(p_hparams.modality[\"targets\"],\n modalities.SymbolModality)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testProblemHparamsInputOnlyModality(self):\n class InputOnlyProblem(problem_module.Problem):\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.SymbolModality}\n hp.vocab_size = {\"inputs\": 2}\n\n problem = InputOnlyProblem(False, False)\n p_hparams = problem.get_hparams()\n self.assertIsInstance(p_hparams.modality[\"inputs\"],\n modalities.SymbolModality)\n self.assertLen(p_hparams.modality, 1)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testProblemHparamsTargetOnlyModality(self):\n class TargetOnlyProblem(problem_module.Problem):\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"targets\": modalities.SymbolModality}\n hp.vocab_size = {\"targets\": 3}\n\n problem = TargetOnlyProblem(False, False)\n p_hparams = problem.get_hparams()\n self.assertIsInstance(p_hparams.modality[\"targets\"],\n modalities.SymbolModality)\n self.assertLen(p_hparams.modality, 1)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport contextlib\nimport itertools\nimport os\n\nimport numpy as np\nimport torch\n\nfrom . import FairseqDataset\nimport fairseq.data.batch_C\nimport sys\n\n\ndef infer_language_pair(path):\n \"\"\"Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx\"\"\"\n src, dst = None, None\n for filename in os.listdir(path):\n parts = filename.split('.')\n if len(parts) >= 3 and len(parts[1].split('-')) == 2:\n return parts[1].split('-')\n return src, dst\n\n\nclass ShardedIterator(object):\n \"\"\"A sharded wrapper around an iterable (padded to length).\"\"\"\n\n def __init__(self, iterable, num_shards, shard_id, fill_value=None):\n if shard_id < 0 or shard_id >= num_shards:\n raise ValueError('shard_id must be between 0 and num_shards')\n\n self._sharded_len = len(iterable) // num_shards\n if len(iterable) % num_shards > 0:\n self._sharded_len += 1\n\n self.itr = itertools.zip_longest(\n range(self._sharded_len),\n itertools.islice(iterable, shard_id, len(iterable), num_shards),\n fillvalue=fill_value,\n )\n\n def __len__(self):\n return self._sharded_len\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.itr)[1]\n\n\nclass CountingIterator(object):\n \"\"\"Wrapper around an iterable that maintains the iteration count.\"\"\"\n\n def __init__(self, iterable):\n self.iterable = iterable\n self.count = 0\n self.itr = iter(self)\n\n def __len__(self):\n return len(self.iterable)\n\n def __iter__(self):\n for x in self.iterable:\n self.count += 1\n yield x\n\n def __next__(self):\n return next(self.itr)\n\n def has_next(self):\n return self.count < len(self)\n\n def skip(self, num_to_skip):\n next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)\n return self\n\n\ndef collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False, pad_sequence=1):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n #size = max(v.size(0) for v in values)\n orig_size = max(v.size(0) for v in values)\n size = 0\n if pad_sequence > 1 :\n size = orig_size // pad_sequence * pad_sequence\n if orig_size % pad_sequence > 0 :\n size += pad_sequence\n else :\n size = orig_size\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\n\nclass EpochBatchIterator(object):\n \"\"\"Iterate over a FairseqDataset and yield batches bucketed by size.\n\n Batches may contain sequences of different lengths. 
This iterator can be\n reused across multiple epochs with the next_epoch_itr() method.\n\n Args:\n dataset: a FairseqDataset\n max_tokens: max number of tokens in each batch\n max_sentences: max number of sentences in each batch\n max_positions: max sentence length supported by the model\n ignore_invalid_inputs: don't raise Exception for sentences that are too long\n required_batch_size_multiple: require batch size to be a multiple of N\n seed: seed for random number generator for reproducibility\n num_shards: shard the data iterator into N shards\n shard_id: which shard of the data iterator to return\n \"\"\"\n\n def __init__(\n self, dataset, max_tokens=None, max_sentences=None, max_positions=None,\n ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1,\n num_shards=1, shard_id=0, epoch=0\n ):\n assert isinstance(dataset, FairseqDataset)\n self.dataset = dataset\n self.max_tokens = max_tokens if max_tokens is not None else float('Inf')\n self.max_sentences = max_sentences if max_sentences is not None else float('Inf')\n self.max_positions = max_positions\n self.ignore_invalid_inputs = ignore_invalid_inputs\n self.bsz_mult = required_batch_size_multiple\n self.seed = seed\n self.num_shards = num_shards\n self.shard_id = shard_id\n \n self.epoch = epoch\n self._cur_epoch_itr = None\n self._next_epoch_itr = None\n\n with numpy_seed(self.seed):\n import time\n start = time.time()\n indices = self.dataset.ordered_indices(self.seed, self.epoch)\n#need integer, rather than float('Inf') values\n max_sentences = max_sentences if max_sentences is not None else sys.maxsize\n max_positions_num = 1024\n max_tokens = max_tokens if max_tokens is not None else sys.maxsize\n batches = fairseq.data.batch_C.make_batches(self.dataset.src_sizes, self.dataset.tgt_sizes, indices, max_tokens, max_sentences, self.bsz_mult, max_positions_num)\n self.frozen_batches = tuple(batches) \n# self.frozen_batches = tuple(self._batch_generator())\n print(\"generated batches in \", time.time() - start, \"s\")\n\n def __len__(self):\n return len(self.frozen_batches)\n\n def next_epoch_itr(self, shuffle=True):\n \"\"\"Shuffle batches and return a new iterator over the dataset.\"\"\"\n if self._next_epoch_itr is not None:\n self._cur_epoch_itr = self._next_epoch_itr\n self._next_epoch_itr = None\n else:\n self.epoch += 1\n self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)\n return self._cur_epoch_itr\n\n def end_of_epoch(self):\n return not self._cur_epoch_itr.has_next()\n\n @property\n def iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0\n\n def state_dict(self):\n return {\n 'epoch': self.epoch,\n 'iterations_in_epoch': self.iterations_in_epoch,\n }\n\n def load_state_dict(self, state_dict):\n self.epoch = state_dict['epoch']\n itr_pos = state_dict.get('iterations_in_epoch', 0)\n if itr_pos > 0:\n # fast-forward epoch iterator\n itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))\n if itr_pos < len(itr):\n self._next_epoch_itr = itr.skip(itr_pos)\n\n def _get_iterator_for_epoch(self, epoch, shuffle):\n if shuffle:\n # set seed based on the seed and epoch number so that we get\n # reproducible results when resuming from checkpoints\n with numpy_seed(self.seed + epoch):\n batches = list(self.frozen_batches) # copy\n np.random.shuffle(batches)\n else:\n batches = self.frozen_batches\n return 
CountingIterator(torch.utils.data.DataLoader(\n self.dataset,\n collate_fn=self.dataset.collater,\n num_workers = 1,\n batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),\n ))\n\n def _batch_generator(self):\n batch = []\n\n def is_batch_full(num_tokens):\n if len(batch) == 0:\n return False\n if len(batch) == self.max_sentences:\n return True\n if num_tokens > self.max_tokens:\n return True\n return False\n\n sample_len = 0\n sample_lens = []\n ignored = []\n for idx in self.dataset.ordered_indices(self.seed, self.epoch):\n if not self.dataset.valid_size(idx, self.max_positions):\n if self.ignore_invalid_inputs:\n ignored.append(idx)\n continue\n raise Exception((\n 'Size of sample #{} is invalid, max_positions={}, skip this '\n 'example with --skip-invalid-size-inputs-valid-test'\n ).format(idx, self.max_positions))\n\n sample_lens.append(self.dataset.num_tokens(idx))\n sample_len = max(sample_len, sample_lens[-1])\n num_tokens = (len(batch) + 1) * sample_len\n if is_batch_full(num_tokens):\n mod_len = max(\n self.bsz_mult * (len(batch) // self.bsz_mult),\n len(batch) % self.bsz_mult,\n )\n yield batch[:mod_len]\n batch = batch[mod_len:]\n sample_lens = sample_lens[mod_len:]\n sample_len = max(sample_lens) if len(sample_lens) > 0 else 0\n\n batch.append(idx)\n\n if len(batch) > 0:\n yield batch\n\n if len(ignored) > 0:\n print((\n '| WARNING: {} samples have invalid sizes and will be skipped, '\n 'max_positions={}, first few sample ids={}'\n ).format(len(ignored), self.max_positions, ignored[:10]))\n\n\[email protected]\ndef numpy_seed(seed):\n \"\"\"Context manager which seeds the NumPy PRNG with the specified seed and\n restores the state afterward\"\"\"\n if seed is None:\n yield\n return\n state = np.random.get_state()\n np.random.seed(seed)\n try:\n yield\n finally:\n np.random.set_state(state)\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Play with a world model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport os\n\nfrom gym.core import Env\nfrom gym.spaces import Box\nfrom gym.spaces import Discrete\nfrom gym.utils import play\n\nimport numpy as np\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nfrom tensor2tensor.data_generators import gym_env\nfrom tensor2tensor.models.research.rl import get_policy\nfrom tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv\nfrom tensor2tensor.rl.trainer_model_based import FLAGS\nfrom tensor2tensor.rl.trainer_model_based import setup_directories\nfrom tensor2tensor.rl.trainer_model_based import temporary_flags\n\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nimport tensorflow as tf\n\n\n_font = None\nFONT_SIZE = 20\n\n\ndef _get_font():\n global _font\n if _font is None:\n font_paths = []\n for path in font_paths:\n try:\n _font = ImageFont.truetype(path, FONT_SIZE)\n return _font\n except: # pylint: disable=bare-except\n pass\n\n\ndef _assert_image(img):\n if isinstance(img, np.ndarray):\n img = Image.fromarray(np.ndarray.astype(img, np.uint8))\n return img\n\n\ndef write_on_image(img, text=\"\", position=(0, 0), color=(255, 255, 255)):\n img = _assert_image(img)\n if not text:\n return img\n draw = ImageDraw.Draw(img)\n font = _get_font()\n draw.text(position, text, color, font=font)\n return img\n\n\ndef concatenate_images(imgs, axis=1):\n imgs = [_assert_image(img) for img in imgs]\n imgs_np = [np.array(img) for img in imgs]\n concatenated_im_np = np.concatenate(imgs_np, axis=axis)\n return _assert_image(concatenated_im_np)\n\n\nclass DebugBatchEnv(Env):\n \"\"\"Debugging Environment.\"\"\"\n INFO_PANE_WIDTH = 250\n\n def __init__(self, hparams, sess=None):\n self.action_space = Discrete(6)\n self.observation_space = Box(\n low=0, high=255, shape=(210, 160+DebugBatchEnv.INFO_PANE_WIDTH, 3),\n dtype=np.uint8)\n self._tmp = 1\n self.res = None\n self.sess = sess if sess is not None else tf.Session()\n self._prepare_networks(hparams, self.sess)\n\n def _prepare_networks(self, hparams, sess):\n self.action = tf.placeholder(shape=(1,), dtype=tf.int32)\n batch_env = SimulatedBatchEnv(hparams.environment_spec, hparams.num_agents)\n self.reward, self.done = batch_env.simulate(self.action)\n self.observation = batch_env.observ\n self.reset_op = batch_env.reset(tf.constant([0], dtype=tf.int32))\n\n environment_wrappers = hparams.environment_spec.wrappers\n wrappers = copy.copy(environment_wrappers) if environment_wrappers else []\n\n to_initialize = [batch_env]\n for w in wrappers:\n batch_env = w[0](batch_env, **w[1])\n to_initialize.append(batch_env)\n\n def initialization_lambda():\n for batch_env in to_initialize:\n batch_env.initialize(sess)\n\n self.initialize = initialization_lambda\n\n 
obs_copy = batch_env.observ + 0\n\n actor_critic = get_policy(tf.expand_dims(obs_copy, 0), hparams)\n self.policy_probs = actor_critic.policy.probs[0, 0, :]\n self.value = actor_critic.value[0, :]\n\n def render(self, mode=\"human\"):\n raise NotImplementedError()\n\n def _fake_reset(self):\n self._tmp = 0\n observ = np.ones(shape=(210, 160, 3), dtype=np.uint8) * 10 * self._tmp\n observ[0, 0, 0] = 0\n observ[0, 0, 1] = 255\n self.res = (observ, 0, False, [0.1, 0.5, 0.5], 1.1)\n\n def _reset_env(self):\n observ = self.sess.run(self.reset_op)[0, ...]\n observ[0, 0, 0] = 0\n observ[0, 0, 1] = 255\n # TODO(pmilos): put correct numbers\n self.res = (observ, 0, False, [0.1, 0.5, 0.5], 1.1)\n\n def reset(self):\n self._reset_env()\n observ = self._augment_observation()\n return observ\n\n def _step_fake(self, action):\n observ = np.ones(shape=(210, 160, 3), dtype=np.uint8)*10*self._tmp\n observ[0, 0, 0] = 0\n observ[0, 0, 1] = 255\n\n self._tmp += 1\n if self._tmp > 20:\n self._tmp = 0\n\n rew = 1\n done = False\n probs = np.ones(shape=(6,), dtype=np.float32)/6\n vf = 0.0\n\n return observ, rew, done, probs, vf\n\n def _step_env(self, action):\n observ, rew, done, probs, vf = self.sess.\\\n run([self.observation, self.reward, self.done, self.policy_probs,\n self.value],\n feed_dict={self.action: [action]})\n\n return observ[0, ...], rew[0, ...], done[0, ...], probs, vf\n\n def _augment_observation(self):\n observ, rew, _, probs, vf = self.res\n info_pane = np.zeros(shape=(210, DebugBatchEnv.INFO_PANE_WIDTH, 3),\n dtype=np.uint8)\n probs_str = \"\"\n for p in probs:\n probs_str += \"%.2f\" % p + \", \"\n\n probs_str = probs_str[:-2]\n\n action = np.argmax(probs)\n info_str = \" Policy:{}\\n Action:{}\\n Value function:{}\\n Reward:{}\".format(\n probs_str, action, vf, rew)\n print(\"Info str:{}\".format(info_str))\n # info_pane = write_on_image(info_pane, info_str)\n\n augmented_observ = concatenate_images([observ, info_pane])\n augmented_observ = np.array(augmented_observ)\n return augmented_observ\n\n def step(self, action):\n # Special codes\n if action == 100:\n # skip action\n _, rew, done, _, _ = self.res\n observ = self._augment_observation()\n return observ, rew, done, {}\n\n if action == 101:\n # reset\n self.reset()\n _, rew, done, _, _ = self.res\n observ = self._augment_observation()\n return observ, rew, done, {}\n\n if action == 102:\n # play\n raise NotImplementedError()\n\n # standard codes\n observ, rew, done, probs, vf = self._step_env(action)\n self.res = (observ, rew, done, probs, vf)\n\n observ = self._augment_observation()\n return observ, rew, done, {\"probs\": probs, \"vf\": vf}\n\n\ndef main(_):\n hparams = registry.hparams(FLAGS.loop_hparams_set)\n hparams.parse(FLAGS.loop_hparams)\n output_dir = FLAGS.output_dir\n\n subdirectories = [\"data\", \"tmp\", \"world_model\", \"ppo\"]\n using_autoencoder = hparams.autoencoder_train_steps > 0\n if using_autoencoder:\n subdirectories.append(\"autoencoder\")\n directories = setup_directories(output_dir, subdirectories)\n\n if hparams.game in gym_env.ATARI_GAMES:\n game_with_mode = hparams.game + \"_deterministic-v4\"\n else:\n game_with_mode = hparams.game\n\n if using_autoencoder:\n simulated_problem_name = (\n \"gym_simulated_discrete_problem_with_agent_on_%s_autoencoded\"\n % game_with_mode)\n else:\n simulated_problem_name = (\"gym_simulated_discrete_problem_with_agent_on_%s\"\n % game_with_mode)\n if simulated_problem_name not in registry.list_problems():\n tf.logging.info(\"Game Problem %s not found; dynamically 
registering\",\n simulated_problem_name)\n gym_env.register_game(hparams.game, game_mode=\"Deterministic-v4\")\n\n epoch = hparams.epochs-1\n epoch_data_dir = os.path.join(directories[\"data\"], str(epoch))\n ppo_model_dir = directories[\"ppo\"]\n\n world_model_dir = directories[\"world_model\"]\n\n gym_problem = registry.problem(simulated_problem_name)\n\n model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)\n environment_spec = copy.copy(gym_problem.environment_spec)\n environment_spec.simulation_random_starts = hparams.simulation_random_starts\n\n batch_env_hparams = trainer_lib.create_hparams(hparams.ppo_params)\n batch_env_hparams.add_hparam(\"model_hparams\", model_hparams)\n batch_env_hparams.add_hparam(\"environment_spec\", environment_spec)\n batch_env_hparams.num_agents = 1\n\n with temporary_flags({\n \"problem\": simulated_problem_name,\n \"model\": hparams.generative_model,\n \"hparams_set\": hparams.generative_model_params,\n \"output_dir\": world_model_dir,\n \"data_dir\": epoch_data_dir,\n }):\n sess = tf.Session()\n env = DebugBatchEnv(batch_env_hparams, sess)\n sess.run(tf.global_variables_initializer())\n env.initialize()\n\n env_model_loader = tf.train.Saver(\n tf.global_variables(\"next_frame*\"))\n trainer_lib.restore_checkpoint(world_model_dir, env_model_loader, sess,\n must_restore=True)\n\n model_saver = tf.train.Saver(\n tf.global_variables(\".*network_parameters.*\"))\n trainer_lib.restore_checkpoint(ppo_model_dir, model_saver, sess)\n\n key_mapping = gym_problem.env.env.get_keys_to_action()\n # map special codes\n key_mapping[()] = 100\n key_mapping[(ord(\"r\"),)] = 101\n key_mapping[(ord(\"p\"),)] = 102\n\n play.play(env, zoom=2, fps=10, keys_to_action=key_mapping)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utilities for CNN benchmarks.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef tensorflow_version_tuple():\n v = tf.__version__\n major, minor, patch = v.split('.')\n return (int(major), int(minor), patch)\n\n\ndef tensorflow_version():\n vt = tensorflow_version_tuple()\n return vt[0] * 1000 + vt[1]\n\n\ndef log_fn(log):\n print(log)\n\n\ndef roll_numpy_batches(array, batch_size, shift_ratio):\n \"\"\"Moves a proportion of batches from start to the end of the array.\n\n This function moves a proportion of batches, specified by `shift_ratio`, from\n the starts of the array to the end. The number of batches moved is rounded\n down to the nearest integer. For example,\n\n ```\n roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]\n ```\n\n Args:\n array: A Numpy array whose first dimension is the batch dimension.\n batch_size: The batch size.\n shift_ratio: Proportion of batches to move from the start of the array to\n the end of the array.\n Returns:\n A new Numpy array, with a proportion of the batches at the start of `array`\n moved to the end.\n \"\"\"\n num_items = array.shape[0]\n assert num_items % batch_size == 0\n num_batches = num_items // batch_size\n starting_batch = int(num_batches * shift_ratio)\n starting_item = starting_batch * batch_size\n return np.roll(array, -starting_item, axis=0)\n\n\n# For Python 2.7 compatibility, we do not use threading.Barrier.\nclass Barrier(object):\n \"\"\"Implements a lightweight Barrier.\n\n Useful for synchronizing a fixed number of threads at known synchronization\n points. Threads block on 'wait()' and simultaneously return once they have\n all made that call.\n\n # Implementation adopted from boost/thread/barrier.hpp\n \"\"\"\n\n def __init__(self, parties):\n \"\"\"Create a barrier, initialised to 'parties' threads.\"\"\"\n self.cond = threading.Condition(threading.Lock())\n self.parties = parties\n # Indicates the number of waiting parties.\n self.waiting = 0\n # generation is needed to deal with spurious wakeups. 
If self.cond.wait()\n # wakes up for other reasons, generation will force it go back to wait().\n self.generation = 0\n self.broken = False\n\n def wait(self):\n \"\"\"Wait for the barrier.\"\"\"\n with self.cond:\n # Check if the barrier has been disabled or not.\n if self.broken:\n return\n gen = self.generation\n self.waiting += 1\n if self.waiting == self.parties:\n self.waiting = 0\n self.generation += 1\n self.cond.notify_all()\n # loop because of spurious wakeups\n while gen == self.generation:\n self.cond.wait()\n\n # TODO(huangyp): Remove this method once we find a way to know which step\n # is the last barrier.\n def abort(self):\n \"\"\"Clear existing barrier and disable this barrier.\"\"\"\n with self.cond:\n if self.waiting > 0:\n self.generation += 1\n self.cond.notify_all()\n self.broken = True\n\n\nclass ImageProducer(object):\n \"\"\"An image producer that puts images into a staging area periodically.\n\n This class is useful for periodically running a set of ops, `put_ops` on a\n different thread every `batch_group_size` steps.\n\n The notify_image_consumption() method is used to increment an internal counter\n so that every `batch_group_size` times it is called, `put_ops` is executed. A\n barrier is placed so that notify_image_consumption() will block until\n the previous call to `put_ops` has been executed.\n\n The start() method is used to start the thread that runs `put_ops`.\n\n The done() method waits until the last put_ops is executed and stops the\n thread.\n\n The purpose of this class is to fill an image input pipeline every\n `batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images\n to the input pipeline when run, and that every step, 1 batch of images is\n consumed. Then, by calling notify_image_consumption() every step, images are\n supplied to the input pipeline at the same amount they are consumed.\n\n Example usage:\n ```\n put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea\n get_op = ... # Dequeues 1 batch, and does some operations on it\n batch_group_size = 4\n with tf.Session() as sess:\n image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)\n image_producer.start()\n for _ in range(100):\n sess.run(get_op)\n image_producer.notify_image_consumption()\n ```\n \"\"\"\n\n def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):\n self.sess = sess\n self.num_gets = 0\n self.put_ops = put_ops\n self.batch_group_size = batch_group_size\n self.done_event = threading.Event()\n if (use_python32_barrier and\n sys.version_info[0] == 3 and sys.version_info[1] >= 2):\n self.put_barrier = threading.Barrier(2)\n else:\n self.put_barrier = Barrier(2)\n\n def _should_put(self):\n return (self.num_gets + 1) % self.batch_group_size == 0\n\n def done(self):\n \"\"\"Stop the image producer.\"\"\"\n self.done_event.set()\n self.put_barrier.abort()\n self.thread.join()\n\n def start(self):\n \"\"\"Start the image producer.\"\"\"\n self.sess.run([self.put_ops])\n self.thread = threading.Thread(target=self._loop_producer)\n # Set daemon to true to allow Ctrl + C to terminate all threads.\n self.thread.daemon = True\n self.thread.start()\n\n def notify_image_consumption(self):\n \"\"\"Increment the counter of image_producer by 1.\n\n This should only be called by the main thread that consumes images and runs\n the model computation. One batch of images should be consumed between\n calling start() and the first call to this method. 
Then, one batch of images\n should be consumed between any two successive calls to this method.\n \"\"\"\n if self._should_put():\n self.put_barrier.wait()\n self.num_gets += 1\n\n def _loop_producer(self):\n while not self.done_event.isSet():\n self.sess.run([self.put_ops])\n self.put_barrier.wait()\n\n\nclass BaseClusterManager(object):\n \"\"\"The manager for the cluster of servers running the benchmark.\"\"\"\n\n def __init__(self, params):\n worker_hosts = params.worker_hosts.split(',')\n ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []\n cluster = {'worker': worker_hosts}\n if ps_hosts:\n cluster['ps'] = ps_hosts\n self._cluster_spec = tf.train.ClusterSpec(cluster)\n\n def get_target(self):\n \"\"\"Returns a target to be passed to tf.Session().\"\"\"\n raise NotImplementedError('get_target must be implemented by subclass')\n\n def join_server(self):\n raise NotImplementedError('join must be implemented by subclass')\n\n def get_cluster_spec(self):\n return self._cluster_spec\n\n def num_workers(self):\n return len(self._cluster_spec.job_tasks('worker'))\n\n def num_ps(self):\n if 'ps' in self._cluster_spec.jobs:\n return len(self._cluster_spec.job_tasks('ps'))\n else:\n return 0\n\n\nclass GrpcClusterManager(BaseClusterManager):\n \"\"\"A cluster manager for a cluster networked with gRPC.\"\"\"\n\n def __init__(self, params, config_proto):\n super(GrpcClusterManager, self).__init__(params)\n if params.job_name == 'controller':\n self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]\n else:\n self._server = tf.train.Server(self._cluster_spec,\n job_name=params.job_name,\n task_index=params.task_index,\n config=config_proto,\n protocol=params.server_protocol)\n self._target = self._server.target\n\n def get_target(self):\n return self._target\n\n def join_server(self):\n return self._server.join()\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Computes the reward prediction confusion matrix given checkpoints and data.\n\n Usage:\n reward_confusion \\\n --problem=\"gym_pong_deterministic-v4_random\" \\\n --model=\"next_frame_sv2p\" \\\n --hparams_set=\"next_frame_sv2p\" \\\n --output_dir=$CHECKPOINT_DIRECTORY \\\n --data_dir=$DATA_DIRECTORY \\\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom tensor2tensor.bin.t2t_decoder import create_hparams\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\n\nimport tensorflow as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n\ndef print_confusion_matrix(title, cm):\n print(\"=\" * 30)\n print(title)\n print(\"=\" * 30)\n print(cm)\n print(\"=\" * 30)\n print()\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n # Create hparams\n hparams = create_hparams()\n hparams.force_full_predict = True\n batch_size = hparams.batch_size\n\n # Iterating over dev/test partition of the data.\n # Change the data partition if necessary.\n dataset = registry.problem(FLAGS.problem).dataset(\n tf.estimator.ModeKeys.PREDICT,\n shuffle_files=False,\n hparams=hparams)\n\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n data = dataset.make_one_shot_iterator().get_next()\n input_data = dict((k, data[k]) for k in data.keys() if k.startswith(\"input\"))\n\n # Creat model\n model_cls = registry.model(FLAGS.model)\n model = model_cls(hparams, tf.estimator.ModeKeys.PREDICT)\n prediction_ops = model.infer(input_data)\n\n # Confusion Matrix\n nr = hparams.problem.num_rewards\n cm_per_frame = np.zeros((nr, nr), dtype=np.uint64)\n cm_next_frame = np.zeros((nr, nr), dtype=np.uint64)\n\n saver = tf.train.Saver()\n with tf.train.SingularMonitoredSession() as sess:\n # Load latest checkpoint\n ckpt = tf.train.get_checkpoint_state(FLAGS.output_dir).model_checkpoint_path\n saver.restore(sess.raw_session(), ckpt)\n\n counter = 0\n while not sess.should_stop():\n counter += 1\n if counter % 1 == 0:\n print(counter)\n\n # Predict next frames\n rew_pd, rew_gt = sess.run(\n [prediction_ops[\"target_reward\"], data[\"target_reward\"]])\n\n for i in range(batch_size):\n cm_next_frame[rew_gt[i, 0, 0], rew_pd[i, 0, 0]] += 1\n for gt, pd in zip(rew_gt[i], rew_pd[i]):\n cm_per_frame[gt, pd] += 1\n\n print_confusion_matrix(\"Per-frame Confusion Matrix\", cm_per_frame)\n print_confusion_matrix(\"Next-frame Confusion Matrix\", cm_next_frame)\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.assign_add",
"tensorflow.test.main",
"tensorflow.train.create_global_step",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.summary.scalar",
"tensorflow.test.get_temp_dir"
],
[
"tensorflow.cond",
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.summary.scalar",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.get_collection",
"tensorflow.cast",
"tensorflow.identity",
"tensorflow.squeeze",
"tensorflow.train.exponential_decay",
"tensorflow.train.piecewise_constant",
"tensorflow.contrib.layers.flatten",
"tensorflow.logging.info",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.profiler.ProfileOptionBuilder.float_operation",
"tensorflow.get_default_graph",
"tensorflow.add_n"
],
[
"tensorflow.nn.relu",
"tensorflow.layers.batch_normalization",
"tensorflow.concat",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.split",
"tensorflow.contrib.layers.rev_block"
],
[
"numpy.all",
"tensorflow.contrib.eager.run_test_in_graph_and_eager_modes",
"tensorflow.test.main",
"tensorflow.Session"
],
[
"numpy.random.get_state",
"numpy.random.shuffle",
"numpy.random.set_state",
"numpy.random.seed"
],
[
"tensorflow.constant",
"tensorflow.global_variables",
"tensorflow.placeholder",
"tensorflow.expand_dims",
"numpy.ones",
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"numpy.argmax",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"numpy.ndarray.astype",
"numpy.array",
"numpy.zeros",
"tensorflow.app.run"
],
[
"tensorflow.train.ClusterSpec",
"tensorflow.train.Server",
"numpy.roll"
],
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.SingularMonitoredSession",
"tensorflow.app.run",
"tensorflow.logging.set_verbosity",
"tensorflow.train.Saver",
"numpy.zeros",
"tensorflow.contrib.data.batch_and_drop_remainder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
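The cnn_util.py entry in the row above defines a lightweight Barrier (a fixed number of parties block in wait() and are all released once the last one arrives) alongside ImageProducer, whose docstring refers to the module as cnn_util. A minimal usage sketch of that Barrier, assuming the utility file is importable as cnn_util as that docstring implies, might look like this:

```
# Hedged sketch: exercises the Barrier class shown above with two threads.
# Assumes the benchmark utility module is importable as cnn_util.
import threading

from cnn_util import Barrier

barrier = Barrier(2)  # two parties must reach wait() before either returns


def worker(name):
    print("%s: before barrier" % name)
    barrier.wait()  # blocks until both threads have called wait()
    print("%s: after barrier" % name)


threads = [threading.Thread(target=worker, args=("t%d" % i,)) for i in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```

Calling abort() from another thread releases any waiters and disables the barrier, which is how ImageProducer.done() unblocks its producer thread before joining it.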
stillmatic/PyTorch-BigGraph | [
"d7d6576281faa54ec5850e204ffc07b1268fdb04"
] | [
"torchbiggraph/train_cpu.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE.txt file in the root directory of this source tree.\n\nimport logging\nimport math\nimport time\nfrom collections import defaultdict\nfrom functools import partial\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple\n\nimport torch\nimport torch.distributed as td\nfrom torch.optim import Optimizer\nfrom torchbiggraph.async_adagrad import AsyncAdagrad\nfrom torchbiggraph.batching import AbstractBatchProcessor, call, process_in_batches\nfrom torchbiggraph.bucket_scheduling import (\n BucketStats,\n DistributedBucketScheduler,\n LockServer,\n SingleMachineBucketScheduler,\n)\nfrom torchbiggraph.checkpoint_manager import (\n CheckpointManager,\n ConfigMetadataProvider,\n MetadataProvider,\n PartitionClient,\n)\nfrom torchbiggraph.config import ConfigSchema\nfrom torchbiggraph.distributed import ProcessRanks, init_process_group, start_server\nfrom torchbiggraph.edgelist import EdgeList\nfrom torchbiggraph.eval import RankingEvaluator\nfrom torchbiggraph.graph_storages import EDGE_STORAGES, ENTITY_STORAGES\nfrom torchbiggraph.losses import LOSS_FUNCTIONS, AbstractLossFunction\nfrom torchbiggraph.model import MultiRelationEmbedder, make_model\nfrom torchbiggraph.parameter_sharing import ParameterServer, ParameterSharer\nfrom torchbiggraph.row_adagrad import RowAdagrad\nfrom torchbiggraph.stats import Stats, StatsHandler\nfrom torchbiggraph.types import (\n SINGLE_TRAINER,\n UNPARTITIONED,\n Bucket,\n EntityName,\n FloatTensorType,\n ModuleStateDict,\n Partition,\n Rank,\n)\nfrom torchbiggraph.util import (\n BucketLogger,\n DummyOptimizer,\n EmbeddingHolder,\n allocate_shared_tensor,\n create_pool,\n fast_approx_rand,\n get_async_result,\n get_num_workers,\n hide_distributed_logging,\n round_up_to_nearest_multiple,\n split_almost_equally,\n tag_logs_with_process_name,\n)\n\n\nlogger = logging.getLogger(\"torchbiggraph\")\ndist_logger = logging.LoggerAdapter(logger, {\"distributed\": True})\n\n\nclass Trainer(AbstractBatchProcessor):\n def __init__(\n self,\n model_optimizer: Optimizer,\n loss_fn: AbstractLossFunction,\n relation_weights: List[float],\n ) -> None:\n super().__init__(loss_fn, relation_weights)\n self.model_optimizer = model_optimizer\n self.unpartitioned_optimizers: Dict[EntityName, Optimizer] = {}\n self.partitioned_optimizers: Dict[Tuple[EntityName, Partition], Optimizer] = {}\n\n def _process_one_batch(\n self, model: MultiRelationEmbedder, batch_edges: EdgeList\n ) -> Stats:\n model.zero_grad()\n\n scores, reg = model(batch_edges)\n\n loss = self.calc_loss(scores, batch_edges)\n\n stats = Stats(\n loss=float(loss),\n reg=float(reg) if reg is not None else 0.0,\n violators_lhs=int((scores.lhs_neg > scores.lhs_pos.unsqueeze(1)).sum()),\n violators_rhs=int((scores.rhs_neg > scores.rhs_pos.unsqueeze(1)).sum()),\n count=len(batch_edges),\n )\n if reg is not None:\n (loss + reg).backward()\n else:\n loss.backward()\n self.model_optimizer.step(closure=None)\n for optimizer in self.unpartitioned_optimizers.values():\n optimizer.step(closure=None)\n for optimizer in self.partitioned_optimizers.values():\n optimizer.step(closure=None)\n\n return stats\n\n\nclass IterationManager(MetadataProvider):\n def __init__(\n self,\n num_epochs: int,\n edge_paths: List[str],\n num_edge_chunks: int,\n *,\n iteration_idx: int = 0,\n ) -> None:\n self.num_epochs = num_epochs\n 
self.edge_paths = edge_paths\n self.num_edge_chunks = num_edge_chunks\n self.iteration_idx = iteration_idx\n\n @property\n def epoch_idx(self) -> int:\n return self.iteration_idx // self.num_edge_chunks // self.num_edge_paths\n\n @property\n def num_edge_paths(self) -> int:\n return len(self.edge_paths)\n\n @property\n def edge_path_idx(self) -> int:\n return self.iteration_idx // self.num_edge_chunks % self.num_edge_paths\n\n @property\n def edge_path(self) -> str:\n return self.edge_paths[self.edge_path_idx]\n\n @property\n def edge_chunk_idx(self) -> int:\n return self.iteration_idx % self.num_edge_chunks\n\n def __iter__(self) -> Iterable[Tuple[int, int, int]]:\n while self.epoch_idx < self.num_epochs:\n yield self.epoch_idx, self.edge_path_idx, self.edge_chunk_idx\n self.iteration_idx += 1\n\n def get_checkpoint_metadata(self) -> Dict[str, Any]:\n return {\n \"iteration/num_epochs\": self.num_epochs,\n \"iteration/epoch_idx\": self.epoch_idx,\n \"iteration/num_edge_paths\": self.num_edge_paths,\n \"iteration/edge_path_idx\": self.edge_path_idx,\n \"iteration/edge_path\": self.edge_path,\n \"iteration/num_edge_chunks\": self.num_edge_chunks,\n \"iteration/edge_chunk_idx\": self.edge_chunk_idx,\n }\n\n def __add__(self, delta: int) -> \"IterationManager\":\n return IterationManager(\n self.num_epochs,\n self.edge_paths,\n self.num_edge_chunks,\n iteration_idx=self.iteration_idx + delta,\n )\n\n\ndef should_preserve_old_checkpoint(\n iteration_manager: IterationManager, interval: Optional[int]\n) -> bool:\n \"\"\"Whether the checkpoint consumed by the current iteration should be kept\n\n Given the period, in number of epochs, at which to snapshot checkpoints,\n determinen whether the checkpoint that is used as input by the current\n iteration (as determined by the given manager) should be preserved rather\n than getting cleaned up.\n \"\"\"\n if interval is None:\n return False\n is_checkpoint_epoch = iteration_manager.epoch_idx % interval == 0\n is_first_edge_path = iteration_manager.edge_path_idx == 0\n is_first_edge_chunk = iteration_manager.edge_chunk_idx == 0\n return is_checkpoint_epoch and is_first_edge_path and is_first_edge_chunk\n\n\ndef get_num_edge_chunks(config: ConfigSchema) -> int:\n if config.num_edge_chunks is not None:\n return config.num_edge_chunks\n\n max_edges_per_bucket = 0\n # We should check all edge paths, all lhs partitions and all rhs partitions,\n # but the combinatorial explosion could lead to thousands of checks. 
Let's\n # assume that edges are uniformly distributed among buckets (this is not\n # exactly the case, as it's the entities that are uniformly distributed\n # among the partitions, and edge assignments to buckets are a function of\n # that, thus, for example, very high degree entities could skew this), and\n # use the size of bucket (0, 0) as an estimate of the average bucket size.\n # We still do it for all edge paths as there could be semantic differences\n # between them which lead to different sizes.\n for edge_path in config.edge_paths:\n edge_storage = EDGE_STORAGES.make_instance(edge_path)\n max_edges_per_bucket = max(\n max_edges_per_bucket,\n edge_storage.get_number_of_edges(UNPARTITIONED, UNPARTITIONED),\n )\n return max(1, math.ceil(max_edges_per_bucket / config.max_edges_per_chunk))\n\n\ndef make_optimizer(\n config: ConfigSchema, params: Iterable[torch.nn.Parameter], is_emb: bool\n) -> Optimizer:\n params = list(params)\n if len(params) == 0:\n optimizer = DummyOptimizer()\n elif is_emb:\n optimizer = RowAdagrad(params, lr=config.lr)\n else:\n if config.relation_lr is not None:\n lr = config.relation_lr\n else:\n lr = config.lr\n optimizer = AsyncAdagrad(params, lr=lr)\n optimizer.share_memory()\n return optimizer\n\n\nNOOP_STATS_HANDLER = StatsHandler()\n\n\nclass TrainingCoordinator:\n def __init__( # noqa\n self,\n config: ConfigSchema,\n model: Optional[MultiRelationEmbedder] = None,\n trainer: Optional[AbstractBatchProcessor] = None,\n evaluator: Optional[AbstractBatchProcessor] = None,\n rank: Rank = SINGLE_TRAINER,\n subprocess_init: Optional[Callable[[], None]] = None,\n stats_handler: StatsHandler = NOOP_STATS_HANDLER,\n ):\n \"\"\"Each epoch/pass, for each partition pair, loads in embeddings and edgelist\n from disk, runs HOGWILD training on them, and writes partitions back to disk.\n \"\"\"\n tag_logs_with_process_name(f\"Trainer-{rank}\")\n self.config = config\n if config.verbose > 0:\n import pprint\n\n pprint.PrettyPrinter().pprint(config.to_dict())\n\n logger.info(\"Loading entity counts...\")\n entity_storage = ENTITY_STORAGES.make_instance(config.entity_path)\n entity_counts: Dict[str, List[int]] = {}\n for entity, econf in config.entities.items():\n entity_counts[entity] = []\n for part in range(econf.num_partitions):\n entity_counts[entity].append(entity_storage.load_count(entity, part))\n\n # Figure out how many lhs and rhs partitions we need\n holder = self.holder = EmbeddingHolder(config)\n\n logger.debug(\n f\"nparts {holder.nparts_lhs} {holder.nparts_rhs} \"\n f\"types {holder.lhs_partitioned_types} {holder.rhs_partitioned_types}\"\n )\n\n # We know ahead of time that we wil need 1-2 storages for each embedding type,\n # as well as the max size of this storage (num_entities x D).\n # We allocate these storages n advance in `embedding_storage_freelist`.\n # When we need storage for an entity type, we pop it from this free list,\n # and then add it back when we 'delete' the embedding table.\n embedding_storage_freelist: Dict[\n EntityName, Set[torch.FloatStorage]\n ] = defaultdict(set)\n for entity_type, counts in entity_counts.items():\n max_count = max(counts)\n num_sides = (\n (1 if entity_type in holder.lhs_partitioned_types else 0)\n + (1 if entity_type in holder.rhs_partitioned_types else 0)\n + (\n 1\n if entity_type\n in (holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types)\n else 0\n )\n )\n for _ in range(num_sides):\n embedding_storage_freelist[entity_type].add(\n allocate_shared_tensor(\n (max_count, 
config.entity_dimension(entity_type)),\n dtype=torch.float,\n ).storage()\n )\n\n # create the handlers, threads, etc. for distributed training\n if config.num_machines > 1 or config.num_partition_servers > 0:\n if not 0 <= rank < config.num_machines:\n raise RuntimeError(\"Invalid rank for trainer\")\n if not td.is_available():\n raise RuntimeError(\n \"The installed PyTorch version doesn't provide \"\n \"distributed training capabilities.\"\n )\n ranks = ProcessRanks.from_num_invocations(\n config.num_machines, config.num_partition_servers\n )\n\n num_ps_groups = config.num_groups_for_partition_server\n groups: List[List[int]] = [ranks.trainers] # barrier group\n groups += [\n ranks.trainers + ranks.partition_servers\n ] * num_ps_groups # ps groups\n group_idxs_for_partition_servers = range(1, len(groups))\n\n if rank == SINGLE_TRAINER:\n logger.info(\"Setup lock server...\")\n start_server(\n LockServer(\n num_clients=len(ranks.trainers),\n nparts_lhs=holder.nparts_lhs,\n nparts_rhs=holder.nparts_rhs,\n entities_lhs=holder.lhs_partitioned_types,\n entities_rhs=holder.rhs_partitioned_types,\n entity_counts=entity_counts,\n init_tree=config.distributed_tree_init_order,\n stats_handler=stats_handler,\n ),\n process_name=\"LockServer\",\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n server_rank=ranks.lock_server,\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n self.bucket_scheduler = DistributedBucketScheduler(\n server_rank=ranks.lock_server, client_rank=ranks.trainers[rank]\n )\n\n logger.info(\"Setup param server...\")\n start_server(\n ParameterServer(num_clients=len(ranks.trainers)),\n process_name=f\"ParamS-{rank}\",\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n server_rank=ranks.parameter_servers[rank],\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n parameter_sharer = ParameterSharer(\n process_name=f\"ParamC-{rank}\",\n client_rank=ranks.parameter_clients[rank],\n all_server_ranks=ranks.parameter_servers,\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n if config.num_partition_servers == -1:\n start_server(\n ParameterServer(\n num_clients=len(ranks.trainers),\n group_idxs=group_idxs_for_partition_servers,\n log_stats=True,\n ),\n process_name=f\"PartS-{rank}\",\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n server_rank=ranks.partition_servers[rank],\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n groups = init_process_group(\n rank=ranks.trainers[rank],\n world_size=ranks.world_size,\n init_method=config.distributed_init_method,\n groups=groups,\n )\n trainer_group, *groups_for_partition_servers = groups\n self.barrier_group = trainer_group\n\n if len(ranks.partition_servers) > 0:\n partition_client = PartitionClient(\n ranks.partition_servers,\n groups=groups_for_partition_servers,\n log_stats=True,\n )\n else:\n partition_client = None\n else:\n self.barrier_group = None\n self.bucket_scheduler = SingleMachineBucketScheduler(\n holder.nparts_lhs, holder.nparts_rhs, config.bucket_order, stats_handler\n )\n parameter_sharer = None\n partition_client = None\n hide_distributed_logging()\n\n # fork early for HOGWILD threads\n logger.info(\"Creating workers...\")\n self.num_workers = get_num_workers(config.workers)\n self.pool = create_pool(\n self.num_workers,\n subprocess_name=f\"TWorker-{rank}\",\n subprocess_init=subprocess_init,\n )\n\n checkpoint_manager = 
CheckpointManager(\n config.checkpoint_path,\n rank=rank,\n num_machines=config.num_machines,\n partition_client=partition_client,\n subprocess_name=f\"BackgRW-{rank}\",\n subprocess_init=subprocess_init,\n )\n self.checkpoint_manager = checkpoint_manager\n checkpoint_manager.register_metadata_provider(ConfigMetadataProvider(config))\n if rank == 0:\n checkpoint_manager.write_config(config)\n\n num_edge_chunks = get_num_edge_chunks(config)\n\n self.iteration_manager = IterationManager(\n config.num_epochs,\n config.edge_paths,\n num_edge_chunks,\n iteration_idx=checkpoint_manager.checkpoint_version,\n )\n checkpoint_manager.register_metadata_provider(self.iteration_manager)\n\n logger.info(\"Initializing global model...\")\n if model is None:\n model = make_model(config)\n model.share_memory()\n loss_fn = LOSS_FUNCTIONS.get_class(config.loss_fn)(margin=config.margin)\n relation_weights = [relation.weight for relation in config.relations]\n if trainer is None:\n trainer = Trainer(\n model_optimizer=make_optimizer(config, model.parameters(), False),\n loss_fn=loss_fn,\n relation_weights=relation_weights,\n )\n if evaluator is None:\n eval_overrides = {}\n if config.eval_num_batch_negs is not None:\n eval_overrides[\"num_batch_negs\"] = config.eval_num_batch_negs\n if config.eval_num_uniform_negs is not None:\n eval_overrides[\"num_uniform_negs\"] = config.eval_num_uniform_negs\n\n evaluator = RankingEvaluator(\n loss_fn=loss_fn,\n relation_weights=relation_weights,\n overrides=eval_overrides,\n )\n\n if config.init_path is not None:\n self.loadpath_manager = CheckpointManager(config.init_path)\n else:\n self.loadpath_manager = None\n\n # load model from checkpoint or loadpath, if available\n state_dict, optim_state = checkpoint_manager.maybe_read_model()\n if state_dict is None and self.loadpath_manager is not None:\n state_dict, optim_state = self.loadpath_manager.maybe_read_model()\n if state_dict is not None:\n model.load_state_dict(state_dict, strict=False)\n if optim_state is not None:\n trainer.model_optimizer.load_state_dict(optim_state)\n\n logger.debug(\"Loading unpartitioned entities...\")\n for entity in holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types:\n count = entity_counts[entity][0]\n s = embedding_storage_freelist[entity].pop()\n dimension = config.entity_dimension(entity)\n embs = torch.FloatTensor(s).view(-1, dimension)[:count]\n embs, optimizer = self._load_embeddings(entity, UNPARTITIONED, out=embs)\n holder.unpartitioned_embeddings[entity] = embs\n trainer.unpartitioned_optimizers[entity] = optimizer\n\n # start communicating shared parameters with the parameter server\n if parameter_sharer is not None:\n shared_parameters: Set[int] = set()\n for name, param in model.named_parameters():\n if id(param) in shared_parameters:\n continue\n shared_parameters.add(id(param))\n key = f\"model.{name}\"\n logger.info(\n f\"Adding {key} ({param.numel()} params) to parameter server\"\n )\n parameter_sharer.set_param(key, param.data)\n for entity, embs in holder.unpartitioned_embeddings.items():\n key = f\"entity.{entity}\"\n logger.info(f\"Adding {key} ({embs.numel()} params) to parameter server\")\n parameter_sharer.set_param(key, embs.data)\n\n # store everything in self\n self.model = model\n self.trainer = trainer\n self.evaluator = evaluator\n self.rank = rank\n self.entity_counts = entity_counts\n self.embedding_storage_freelist = embedding_storage_freelist\n self.stats_handler = stats_handler\n\n self.strict = False\n\n def train(self) -> None:\n\n holder = 
self.holder\n config = self.config\n iteration_manager = self.iteration_manager\n\n total_buckets = holder.nparts_lhs * holder.nparts_rhs\n\n # yield stats from checkpoint, to reconstruct\n # saved part of the learning curve\n if self.rank == SINGLE_TRAINER:\n for stats_dict in self.checkpoint_manager.maybe_read_stats():\n index: int = stats_dict[\"index\"]\n stats: Optional[Stats] = None\n if \"stats\" in stats_dict:\n stats: Stats = Stats.from_dict(stats_dict[\"stats\"])\n eval_stats_before: Optional[Stats] = None\n if \"eval_stats_before\" in stats_dict:\n eval_stats_before = Stats.from_dict(stats_dict[\"eval_stats_before\"])\n eval_stats_after: Optional[Stats] = None\n if \"eval_stats_after\" in stats_dict:\n eval_stats_after = Stats.from_dict(stats_dict[\"eval_stats_after\"])\n eval_stats_chunk_avg: Optional[Stats] = None\n if \"eval_stats_chunk_avg\" in stats_dict:\n eval_stats_chunk_avg = Stats.from_dict(\n stats_dict[\"eval_stats_chunk_avg\"]\n )\n self.stats_handler.on_stats(\n index,\n eval_stats_before,\n stats,\n eval_stats_after,\n eval_stats_chunk_avg,\n )\n\n for epoch_idx, edge_path_idx, edge_chunk_idx in iteration_manager:\n logger.info(\n f\"Starting epoch {epoch_idx + 1} / {iteration_manager.num_epochs}, \"\n f\"edge path {edge_path_idx + 1} / {iteration_manager.num_edge_paths}, \"\n f\"edge chunk {edge_chunk_idx + 1} / {iteration_manager.num_edge_chunks}\"\n )\n edge_storage = EDGE_STORAGES.make_instance(iteration_manager.edge_path)\n logger.info(f\"Edge path: {iteration_manager.edge_path}\")\n\n self._barrier()\n dist_logger.info(\"Lock client new epoch...\")\n self.bucket_scheduler.new_pass(\n is_first=iteration_manager.iteration_idx == 0\n )\n self._barrier()\n\n remaining = total_buckets\n cur_b: Optional[Bucket] = None\n cur_stats: Optional[BucketStats] = None\n while remaining > 0:\n old_b: Optional[Bucket] = cur_b\n old_stats: Optional[BucketStats] = cur_stats\n cur_b, remaining = self.bucket_scheduler.acquire_bucket()\n logger.info(f\"still in queue: {remaining}\")\n if cur_b is None:\n cur_stats = None\n if old_b is not None:\n # if you couldn't get a new pair, release the lock\n # to prevent a deadlock!\n tic = time.perf_counter()\n release_bytes = self._swap_partitioned_embeddings(\n old_b, None, old_stats\n )\n release_time = time.perf_counter() - tic\n logger.info(\n f\"Swapping old embeddings to release lock. 
io: {release_time:.2f} s for {release_bytes:,} bytes \"\n f\"( {release_bytes / release_time / 1e6:.2f} MB/sec )\"\n )\n time.sleep(1) # don't hammer td\n continue\n\n tic = time.perf_counter()\n self.cur_b = cur_b\n bucket_logger = BucketLogger(logger, bucket=cur_b)\n self.bucket_logger = bucket_logger\n\n io_bytes = self._swap_partitioned_embeddings(old_b, cur_b, old_stats)\n self.model.set_all_embeddings(holder, cur_b)\n\n current_index = (\n iteration_manager.iteration_idx + 1\n ) * total_buckets - remaining\n\n bucket_logger.debug(\"Loading edges\")\n edges = edge_storage.load_chunk_of_edges(\n cur_b.lhs,\n cur_b.rhs,\n edge_chunk_idx,\n iteration_manager.num_edge_chunks,\n shared=True,\n )\n num_edges = len(edges)\n\n # this might be off in the case of tensorlist or extra edge fields\n io_bytes += edges.lhs.tensor.numel() * edges.lhs.tensor.element_size()\n io_bytes += edges.rhs.tensor.numel() * edges.rhs.tensor.element_size()\n io_bytes += edges.rel.numel() * edges.rel.element_size()\n io_time = time.perf_counter() - tic\n tic = time.perf_counter()\n bucket_logger.debug(\"Shuffling edges\")\n # Fix a seed to get the same permutation every time; have it\n # depend on all and only what affects the set of edges.\n\n # Note: for the sake of efficiency, we sample eval edge idxs\n # from the edge set *with replacement*, meaning that there may\n # be duplicates of the same edge in the eval set. When we swap\n # edges into the eval set, if there are duplicates then all\n # but one will be clobbered. These collisions are unlikely\n # if eval_fraction is small.\n #\n # Importantly, this eval sampling strategy is theoretically\n # sound:\n # * Training and eval sets are (exactly) disjoint\n # * Eval set may have (rare) duplicates, but they are\n # uniformly sampled so it's still an unbiased estimator\n # of the out-of-sample statistics\n num_eval_edges = int(num_edges * config.eval_fraction)\n num_train_edges = num_edges - num_eval_edges\n if num_eval_edges > 0:\n g = torch.Generator()\n g.manual_seed(\n hash((edge_path_idx, edge_chunk_idx, cur_b.lhs, cur_b.rhs))\n )\n eval_edge_idxs = torch.randint(\n num_edges, (num_eval_edges,), dtype=torch.long, generator=g\n )\n else:\n eval_edge_idxs = None\n\n # HOGWILD evaluation before training\n eval_stats_before = self._coordinate_eval(edges, eval_edge_idxs)\n if eval_stats_before is not None:\n bucket_logger.info(f\"Stats before training: {eval_stats_before}\")\n eval_time = time.perf_counter() - tic\n tic = time.perf_counter()\n\n # HOGWILD training\n bucket_logger.debug(\"Waiting for workers to perform training\")\n stats = self._coordinate_train(edges, eval_edge_idxs, epoch_idx)\n if stats is not None:\n bucket_logger.info(f\"Training stats: {stats}\")\n train_time = time.perf_counter() - tic\n tic = time.perf_counter()\n\n # HOGWILD evaluation after training\n eval_stats_after = self._coordinate_eval(edges, eval_edge_idxs)\n if eval_stats_after is not None:\n bucket_logger.info(f\"Stats after training: {eval_stats_after}\")\n\n eval_time += time.perf_counter() - tic\n\n bucket_logger.info(\n f\"bucket {total_buckets - remaining} / {total_buckets} : \"\n f\"Trained {num_train_edges} edges in {train_time:.2f} s \"\n f\"( {num_train_edges / train_time / 1e6:.2g} M/sec ); \"\n f\"Eval 2*{num_eval_edges} edges in {eval_time:.2f} s \"\n f\"( {2 * num_eval_edges / eval_time / 1e6:.2g} M/sec ); \"\n f\"io: {io_time:.2f} s for {io_bytes:,} bytes ( {io_bytes / io_time / 1e6:.2f} MB/sec )\"\n )\n\n self.model.clear_all_embeddings()\n\n cur_stats = 
BucketStats(\n lhs_partition=cur_b.lhs,\n rhs_partition=cur_b.rhs,\n index=current_index,\n train=stats,\n eval_before=eval_stats_before,\n eval_after=eval_stats_after,\n )\n\n # release the final bucket\n self._swap_partitioned_embeddings(cur_b, None, cur_stats)\n\n # Distributed Processing: all machines can leave the barrier now.\n self._barrier()\n\n current_index = (iteration_manager.iteration_idx + 1) * total_buckets - 1\n\n self._maybe_write_checkpoint(\n epoch_idx, edge_path_idx, edge_chunk_idx, current_index\n )\n\n # now we're sure that all partition files exist,\n # so be strict about loading them\n self.strict = True\n\n def close(self):\n # cleanup\n self.pool.close()\n self.pool.join()\n\n self._barrier()\n\n self.checkpoint_manager.close()\n if self.loadpath_manager is not None:\n self.loadpath_manager.close()\n\n # FIXME join distributed workers (not really necessary)\n\n logger.info(\"Exiting\")\n\n ###########################################################################\n # private functions\n ###########################################################################\n\n def _barrier(self) -> None:\n if self.barrier_group is not None:\n td.barrier(group=self.barrier_group)\n\n def _load_embeddings(\n self,\n entity: EntityName,\n part: Partition,\n out: FloatTensorType,\n strict: bool = False,\n force_dirty: bool = False,\n ) -> Tuple[torch.nn.Parameter, Optimizer]:\n if strict:\n embs, optim_state = self.checkpoint_manager.read(\n entity, part, out=out, force_dirty=force_dirty\n )\n else:\n # Strict is only false during the first iteration, because in that\n # case the checkpoint may not contain any data (unless a previous\n # run was resumed) so we fall back on initial values.\n embs, optim_state = self.checkpoint_manager.maybe_read(\n entity, part, out=out, force_dirty=force_dirty\n )\n if embs is None and self.loadpath_manager is not None:\n embs, optim_state = self.loadpath_manager.maybe_read(\n entity, part, out=out\n )\n if embs is None:\n embs = out\n fast_approx_rand(embs)\n embs.mul_(self.config.init_scale)\n optim_state = None\n embs = torch.nn.Parameter(embs)\n optimizer = make_optimizer(self.config, [embs], True)\n if optim_state is not None:\n optimizer.load_state_dict(optim_state)\n return embs, optimizer\n\n def _swap_partitioned_embeddings(\n self,\n old_b: Optional[Bucket],\n new_b: Optional[Bucket],\n old_stats: Optional[BucketStats],\n ) -> int:\n io_bytes = 0\n logger.info(f\"Swapping partitioned embeddings {old_b} {new_b}\")\n\n holder = self.holder\n old_parts: Set[Tuple[EntityName, Partition]] = set()\n if old_b is not None:\n old_parts.update((e, old_b.lhs) for e in holder.lhs_partitioned_types)\n old_parts.update((e, old_b.rhs) for e in holder.rhs_partitioned_types)\n new_parts: Set[Tuple[EntityName, Partition]] = set()\n if new_b is not None:\n new_parts.update((e, new_b.lhs) for e in holder.lhs_partitioned_types)\n new_parts.update((e, new_b.rhs) for e in holder.rhs_partitioned_types)\n\n assert old_parts == holder.partitioned_embeddings.keys()\n\n if old_b is not None:\n if old_stats is None:\n raise TypeError(\"Got old bucket but not its stats\")\n logger.info(\"Saving partitioned embeddings to checkpoint\")\n for entity, part in old_parts - new_parts:\n logger.debug(f\"Saving ({entity} {part})\")\n embs = holder.partitioned_embeddings.pop((entity, part))\n optimizer = self.trainer.partitioned_optimizers.pop((entity, part))\n self.checkpoint_manager.write(\n entity, part, embs.detach(), optimizer.state_dict()\n )\n 
self.embedding_storage_freelist[entity].add(embs.storage())\n io_bytes += embs.numel() * embs.element_size() # ignore optim state\n # these variables are holding large objects; let them be freed\n del embs\n del optimizer\n\n self.bucket_scheduler.release_bucket(old_b, old_stats)\n\n if new_b is not None:\n logger.info(\"Loading partitioned embeddings from checkpoint\")\n for entity, part in new_parts - old_parts:\n logger.debug(f\"Loading ({entity} {part})\")\n force_dirty = self.bucket_scheduler.check_and_set_dirty(entity, part)\n count = self.entity_counts[entity][part]\n s = self.embedding_storage_freelist[entity].pop()\n dimension = self.config.entity_dimension(entity)\n embs = torch.FloatTensor(s).view(-1, dimension)[:count]\n embs, optimizer = self._load_embeddings(\n entity, part, out=embs, strict=self.strict, force_dirty=force_dirty\n )\n holder.partitioned_embeddings[entity, part] = embs\n self.trainer.partitioned_optimizers[entity, part] = optimizer\n io_bytes += embs.numel() * embs.element_size() # ignore optim state\n\n assert new_parts == holder.partitioned_embeddings.keys()\n\n return io_bytes\n\n def _coordinate_train(self, edges, eval_edge_idxs, epoch_idx) -> Stats:\n assert self.config.num_gpus == 0, \"GPU training not supported\"\n\n if eval_edge_idxs is not None:\n num_train_edges = len(edges) - len(eval_edge_idxs)\n train_edge_idxs = torch.arange(len(edges))\n train_edge_idxs[eval_edge_idxs] = torch.arange(num_train_edges, len(edges))\n train_edge_idxs = train_edge_idxs[:num_train_edges]\n edge_perm = train_edge_idxs[torch.randperm(num_train_edges)]\n else:\n edge_perm = torch.randperm(len(edges))\n\n future_all_stats = self.pool.map_async(\n call,\n [\n partial(\n process_in_batches,\n batch_size=self.config.batch_size,\n model=self.model,\n batch_processor=self.trainer,\n edges=edges,\n indices=edge_perm[s],\n # FIXME should we only delay if iteration_idx == 0?\n delay=self.config.hogwild_delay\n if epoch_idx == 0 and self.rank > 0\n else 0,\n )\n for rank, s in enumerate(\n split_almost_equally(edge_perm.size(0), num_parts=self.num_workers)\n )\n ],\n )\n all_stats = get_async_result(future_all_stats, self.pool)\n return Stats.sum(all_stats).average()\n\n def _coordinate_eval(self, edges, eval_edge_idxs) -> Optional[Stats]:\n eval_batch_size = round_up_to_nearest_multiple(\n self.config.batch_size, self.config.eval_num_batch_negs\n )\n if eval_edge_idxs is not None:\n self.bucket_logger.debug(\"Waiting for workers to perform evaluation\")\n future_all_eval_stats = self.pool.map_async(\n call,\n [\n partial(\n process_in_batches,\n batch_size=eval_batch_size,\n model=self.model,\n batch_processor=self.evaluator,\n edges=edges,\n indices=eval_edge_idxs[s],\n )\n for s in split_almost_equally(\n eval_edge_idxs.size(0), num_parts=self.num_workers\n )\n ],\n )\n all_eval_stats = get_async_result(future_all_eval_stats, self.pool)\n return Stats.sum(all_eval_stats).average()\n else:\n return None\n\n def _maybe_write_checkpoint(\n self,\n epoch_idx: int,\n edge_path_idx: int,\n edge_chunk_idx: int,\n current_index: int,\n ) -> None:\n\n config = self.config\n\n # Preserving a checkpoint requires two steps:\n # - create a snapshot (w/ symlinks) after it's first written;\n # - don't delete it once the following one is written.\n # These two happen in two successive iterations of the main loop: the\n # one just before and the one just after the epoch boundary.\n preserve_old_checkpoint = should_preserve_old_checkpoint(\n self.iteration_manager, 
config.checkpoint_preservation_interval\n )\n preserve_new_checkpoint = should_preserve_old_checkpoint(\n self.iteration_manager + 1, config.checkpoint_preservation_interval\n )\n\n # Write metadata: for multiple machines, write from rank-0\n logger.info(\n f\"Finished epoch {epoch_idx + 1} / {self.iteration_manager.num_epochs}, \"\n f\"edge path {edge_path_idx + 1} / {self.iteration_manager.num_edge_paths}, \"\n f\"edge chunk {edge_chunk_idx + 1} / \"\n f\"{self.iteration_manager.num_edge_chunks}\"\n )\n if self.rank == 0:\n for entity, embs in self.holder.unpartitioned_embeddings.items():\n logger.info(f\"Writing {entity} embeddings\")\n optimizer = self.trainer.unpartitioned_optimizers[entity]\n self.checkpoint_manager.write(\n entity,\n UNPARTITIONED,\n embs.detach(),\n optimizer.state_dict(),\n unpartitioned=True,\n )\n\n logger.info(\"Writing the metadata\")\n state_dict: ModuleStateDict = self.model.state_dict()\n self.checkpoint_manager.write_model(\n state_dict, self.trainer.model_optimizer.state_dict()\n )\n\n logger.info(\"Writing the training stats\")\n all_stats_dicts: List[Dict[str, Any]] = []\n bucket_eval_stats_list = []\n chunk_stats_dict = {\n \"epoch_idx\": epoch_idx,\n \"edge_path_idx\": edge_path_idx,\n \"edge_chunk_idx\": edge_chunk_idx,\n }\n for stats in self.bucket_scheduler.get_stats_for_pass():\n stats_dict = {\n \"lhs_partition\": stats.lhs_partition,\n \"rhs_partition\": stats.rhs_partition,\n \"index\": stats.index,\n \"stats\": stats.train.to_dict(),\n }\n if stats.eval_before is not None:\n stats_dict[\"eval_stats_before\"] = stats.eval_before.to_dict()\n bucket_eval_stats_list.append(stats.eval_before)\n\n if stats.eval_after is not None:\n stats_dict[\"eval_stats_after\"] = stats.eval_after.to_dict()\n\n stats_dict.update(chunk_stats_dict)\n all_stats_dicts.append(stats_dict)\n\n if len(bucket_eval_stats_list) != 0:\n eval_stats_chunk_avg = Stats.average_list(bucket_eval_stats_list)\n self.stats_handler.on_stats(\n index=current_index, eval_stats_chunk_avg=eval_stats_chunk_avg\n )\n chunk_stats_dict[\"index\"] = current_index\n chunk_stats_dict[\n \"eval_stats_chunk_avg\"\n ] = eval_stats_chunk_avg.to_dict()\n all_stats_dicts.append(chunk_stats_dict)\n\n self.checkpoint_manager.append_stats(all_stats_dicts)\n\n logger.info(\"Writing the checkpoint\")\n self.checkpoint_manager.write_new_version(\n config, self.entity_counts, self.embedding_storage_freelist\n )\n\n dist_logger.info(\n \"Waiting for other workers to write their parts of the checkpoint\"\n )\n self._barrier()\n dist_logger.info(\"All parts of the checkpoint have been written\")\n\n logger.info(\"Switching to the new checkpoint version\")\n self.checkpoint_manager.switch_to_new_version()\n\n dist_logger.info(\n \"Waiting for other workers to switch to the new checkpoint version\"\n )\n self._barrier()\n dist_logger.info(\"All workers have switched to the new checkpoint version\")\n\n # After all the machines have finished committing\n # checkpoints, we either remove the old checkpoints\n # or we preserve it\n if preserve_new_checkpoint:\n # Add 1 so the index is a multiple of the interval, it looks nicer.\n self.checkpoint_manager.preserve_current_version(config, epoch_idx + 1)\n if not preserve_old_checkpoint:\n self.checkpoint_manager.remove_old_version(config)\n"
] | [
[
"torch.Generator",
"torch.nn.Parameter",
"torch.randint",
"torch.randperm",
"torch.distributed.barrier",
"torch.distributed.is_available",
"torch.FloatTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
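The IterationManager in train_cpu.py above folds (epoch, edge path, edge chunk) into a single iteration_idx, with the chunk index varying fastest, then the edge-path index, then the epoch. A standalone sketch of that index arithmetic (a hypothetical helper mirroring the class's properties, not part of the PyTorch-BigGraph API):

```
# Sketch of IterationManager's index arithmetic: the edge-chunk index varies
# fastest, then the edge-path index, then the epoch index.
def split_iteration_idx(iteration_idx, num_edge_paths, num_edge_chunks):
    epoch_idx = iteration_idx // num_edge_chunks // num_edge_paths
    edge_path_idx = iteration_idx // num_edge_chunks % num_edge_paths
    edge_chunk_idx = iteration_idx % num_edge_chunks
    return epoch_idx, edge_path_idx, edge_chunk_idx


# With 2 edge paths and 3 chunks, indices 0..5 cover epoch 0 in the order
# (0,0,0) (0,0,1) (0,0,2) (0,1,0) (0,1,1) (0,1,2); index 6 starts epoch 1.
for i in range(7):
    print(i, split_iteration_idx(i, num_edge_paths=2, num_edge_chunks=3))
```

Resuming from a checkpoint works by seeding iteration_idx with checkpoint_manager.checkpoint_version, so the same arithmetic recovers the epoch, edge path and chunk at which the previous run stopped.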
EricRemmerswaal/tensorflow | [
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca"
] | [
"tensorflow/python/ops/ragged/ragged_getitem_test.py",
"tensorflow/python/framework/sparse_tensor_test.py",
"tensorflow/python/training/tracking/resource.py",
"tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver.py",
"tensorflow/python/tpu/tpu_embedding_v2.py",
"tensorflow/python/framework/op_def_library_test.py",
"tensorflow/python/distribute/multi_process_runner.py",
"tensorflow/python/autograph/converters/slices.py",
"tensorflow/lite/schema/upgrade_schema.py",
"tensorflow/lite/testing/op_tests/embedding_lookup.py",
"tensorflow/python/debug/lib/source_remote_test.py",
"tensorflow/python/ops/image_grad.py",
"tensorflow/lite/testing/op_tests/unfused_gru.py",
"tensorflow/python/util/lock_util_test.py",
"tensorflow/python/kernel_tests/strings_ops/string_upper_op_test.py",
"tensorflow/python/ops/distributions/special_math.py",
"tensorflow/lite/python/test_util.py",
"tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py",
"tensorflow/python/keras/optimizer_v1.py",
"tensorflow/python/data/experimental/kernel_tests/assert_next_test.py",
"tensorflow/python/ops/control_flow_util_v2.py",
"tensorflow/python/kernel_tests/data_structures/priority_queue_test.py",
"tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/control_flow_duplicate_v1.py",
"tensorflow/compiler/tests/random_ops_test.py",
"tensorflow/python/kernel_tests/strings_ops/regex_replace_op_test.py",
"tensorflow/python/data/kernel_tests/dataset_test.py",
"tensorflow/lite/testing/op_tests/conv_activation.py",
"tensorflow/python/keras/layers/simplernn_test.py",
"tensorflow/python/client/session_clusterspec_prop_test.py",
"tensorflow/python/kernel_tests/sparse_ops/sparse_xent_op_test_base.py",
"tensorflow/python/data/experimental/ops/take_while_ops.py",
"tensorflow/python/autograph/utils/tensors.py",
"tensorflow/python/keras/engine/keras_tensor_test.py",
"tensorflow/python/keras/distribute/optimizer_combinations.py",
"tensorflow/python/eager/profiler_client_test.py",
"tensorflow/python/compiler/tensorrt/test/binary_tensor_weight_broadcast_test.py",
"tensorflow/python/ops/ragged/ragged_resize_image_op_test.py",
"tensorflow/compiler/tests/adadelta_test.py",
"tensorflow/python/debug/lib/debug_events_writer_test.py",
"tensorflow/python/keras/legacy_tf_layers/pooling.py",
"tensorflow/lite/testing/op_tests/sparse_to_dense.py",
"tensorflow/python/kernel_tests/distributions/special_math_test.py",
"tensorflow/python/eager/device_placement_test.py",
"tensorflow/python/autograph/pyct/origin_info.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for third_party.tensorflow.python.ops.ragged_tensor.\"\"\"\n\nimport re\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor\n\nfrom tensorflow.python.platform import googletest\n\n\nclass _SliceBuilder:\n \"\"\"Helper to construct arguments for __getitem__.\n\n Usage: _SliceBuilder()[<expr>] slice_spec Python generates for <expr>.\n \"\"\"\n\n def __getitem__(self, slice_spec):\n return slice_spec\n\n\nSLICE_BUILDER = _SliceBuilder()\n\n\ndef _make_tensor_slice_spec(slice_spec, use_constant=True):\n \"\"\"Wraps all integers in an extended slice spec w/ a tensor.\n\n This function is used to help test slicing when the slice spec contains\n tensors, rather than integers.\n\n Args:\n slice_spec: The extended slice spec.\n use_constant: If true, then wrap each integer with a tf.constant. 
If false,\n then wrap each integer with a tf.placeholder.\n\n Returns:\n A copy of slice_spec, but with each integer i replaced with tf.constant(i).\n \"\"\"\n\n def make_piece_scalar(piece):\n if isinstance(piece, int):\n scalar = constant_op.constant(piece)\n if use_constant:\n return scalar\n else:\n return array_ops.placeholder_with_default(scalar, [])\n elif isinstance(piece, slice):\n return slice(\n make_piece_scalar(piece.start), make_piece_scalar(piece.stop),\n make_piece_scalar(piece.step))\n else:\n return piece\n\n if isinstance(slice_spec, tuple):\n return tuple(make_piece_scalar(piece) for piece in slice_spec)\n else:\n return make_piece_scalar(slice_spec)\n\n\n# Example 2D ragged tensor value with one ragged dimension and with scalar\n# values, expressed as nested python lists and as splits+values.\nEXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],\n [b'g']]\nEXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]\nEXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n\n# Example 4D ragged tensor value, with two ragged dimensions and with values\n# whose shape is [2], expressed as nested python lists and as splits+values.\nEXAMPLE_RAGGED_TENSOR_4D = [\n [ # rt[0]\n [[1, 2], [3, 4], [5, 6]], # rt[0][0]\n [[7, 8], [9, 10], [11, 12]]], # rt[0][1]\n [], # rt[1]\n [ # rt[2]\n [[13, 14], [15, 16], [17, 18]]], # rt[2][0]\n [ # rt[3]\n [[19, 20]]] # rt[3][0]\n] # pyformat: disable\nEXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]\nEXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]\nEXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],\n [11, 12], [13, 14], [15, 16], [17, 18],\n [19, 20]]\n\n# Example 3D ragged tensor with uniform_row_lengths.\nEXAMPLE_RAGGED_TENSOR_3D = [[[1, 2, 3], [4], [5, 6]], [[], [7, 8, 9], []]]\nEXAMPLE_RAGGED_TENSOR_3D_ROWLEN = 3\nEXAMPLE_RAGGED_TENSOR_3D_SPLITS = [0, 3, 4, 6, 6, 9, 9]\nEXAMPLE_RAGGED_TENSOR_3D_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedGetItemTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n longMessage = True # Property in unittest.Testcase. 
pylint: disable=invalid-name\n\n #=============================================================================\n # RaggedTensor.__getitem__\n #=============================================================================\n\n def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):\n \"\"\"Helper function for testing RaggedTensor.__getitem__.\n\n Checks that calling `rt.__getitem__(slice_spec) returns the expected value.\n Checks three different configurations for each slice spec:\n\n * Call __getitem__ with the slice spec as-is (with int values)\n * Call __getitem__ with int values in the slice spec wrapped in\n `tf.constant()`.\n * Call __getitem__ with int values in the slice spec wrapped in\n `tf.compat.v1.placeholder()` (so value is not known at graph\n construction time).\n\n Args:\n rt: The RaggedTensor to test.\n slice_spec: The slice spec.\n expected: The expected value of rt.__getitem__(slice_spec), as a python\n list; or an exception class.\n expected_shape: The expected shape for `rt.__getitem__(slice_spec)`.\n \"\"\"\n tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)\n tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)\n value1 = rt.__getitem__(slice_spec)\n value2 = rt.__getitem__(tensor_slice_spec1)\n value3 = rt.__getitem__(tensor_slice_spec2)\n self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))\n self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))\n self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))\n if expected_shape is not None:\n value1.shape.assert_is_compatible_with(expected_shape)\n value2.shape.assert_is_compatible_with(expected_shape)\n value3.shape.assert_is_compatible_with(expected_shape)\n\n def _TestGetItemException(self, rt, slice_spec, expected, message):\n \"\"\"Helper function for testing RaggedTensor.__getitem__ exceptions.\"\"\"\n tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)\n with self.assertRaisesRegex(expected, message):\n self.evaluate(rt.__getitem__(slice_spec))\n with self.assertRaisesRegex(expected, message):\n self.evaluate(rt.__getitem__(tensor_slice_spec))\n\n @parameterized.parameters(\n # Tests for rt[i]\n (SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),\n (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),\n (SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),\n (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),\n (SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),\n (SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),\n\n # Tests for rt[i:]\n (SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),\n (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),\n (SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),\n (SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),\n (SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),\n (SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),\n\n # Tests for rt[:j]\n (SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),\n (SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),\n (SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),\n (SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),\n (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),\n (SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),\n\n # Tests for rt[i:j]\n (SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),\n (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),\n (SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),\n (SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),\n (SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),\n (SLICE_BUILDER[1:-1], 
EXAMPLE_RAGGED_TENSOR_2D[1:-1]),\n\n # Tests for rt[i, j]\n (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),\n (SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),\n (SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),\n (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),\n (SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),\n (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),\n\n # Empty slice spec.\n ([], EXAMPLE_RAGGED_TENSOR_2D),\n\n # Test for ellipsis\n (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),\n (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),\n (SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),\n (SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),\n (SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),\n (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),\n\n # Test for array_ops.newaxis\n (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, array_ops.newaxis],\n [[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n\n # Slicing inner ragged dimensions.\n (SLICE_BUILDER[-1:,\n 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),\n (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n\n # Strided slices\n (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_2D[::2]),\n (SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_2D[::-1]),\n (SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_2D[::-2]),\n (SLICE_BUILDER[::-3], EXAMPLE_RAGGED_TENSOR_2D[::-3]),\n (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, ::-1], [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, ::-2], [row[::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, ::-3], [row[::-3] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, 2::-1],\n [row[2::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, -1::-1],\n [row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[..., -1::-1],\n [row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, 2::-2],\n [row[2::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[::-1, ::-1],\n [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D[::-1]]),\n ) # pyformat: disable\n def testWithRaggedRank1(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n # Ragged tensor\n rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,\n EXAMPLE_RAGGED_TENSOR_2D_SPLITS)\n\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)\n self._TestGetItem(rt, slice_spec, expected)\n\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters([(start, stop)\n for start in [-2, -1, None, 0, 1, 2]\n for stop in [-2, -1, None, 0, 1, 2]])\n def testWithStridedSlices(self, start, stop):\n test_value = [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10], [], [9],\n [1, 2, 3, 4, 5, 6, 7, 8]]\n rt = ragged_factory_ops.constant(test_value)\n for step in [-3, -2, -1, 1, 2, 3]:\n # Slice outer dimension\n self.assertAllEqual(rt[start:stop:step], test_value[start:stop:step],\n 'slice=%s:%s:%s' % (start, stop, step))\n # Slice inner dimension\n self.assertAllEqual(rt[:, start:stop:step],\n [row[start:stop:step] for row in test_value],\n 'slice=%s:%s:%s' % (start, stop, step))\n\n # pylint: disable=invalid-slice-index\n @parameterized.parameters(\n # Tests for out-of-bound errors\n (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[-6], (IndexError, 
ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[0, 2], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n (SLICE_BUILDER[3, 0], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n\n # Indexing into an inner ragged dimension\n (SLICE_BUILDER[:, 3], ValueError,\n 'Cannot index into an inner ragged dimension'),\n (SLICE_BUILDER[:1, 3], ValueError,\n 'Cannot index into an inner ragged dimension'),\n (SLICE_BUILDER[..., 3], ValueError,\n 'Cannot index into an inner ragged dimension'),\n\n # Tests for type errors\n (SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),\n (SLICE_BUILDER[1:3:0.5], TypeError, re.escape(\n array_ops._SLICE_TYPE_ERROR)),\n (SLICE_BUILDER[:, 1:3:0.5], TypeError,\n 'slice strides must be integers or None'),\n (SLICE_BUILDER[:, 0.5:1.5], TypeError,\n 'slice offsets must be integers or None'),\n (SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),\n (SLICE_BUILDER[:, 'foo':'foo'], TypeError,\n 'slice offsets must be integers or None'),\n\n # Tests for other errors\n (SLICE_BUILDER[..., 0, 0,\n 0], IndexError, 'Too many indices for RaggedTensor'),\n )\n def testErrorsWithRaggedRank1(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n # Ragged tensor\n rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,\n EXAMPLE_RAGGED_TENSOR_2D_SPLITS)\n\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n @parameterized.parameters(\n # Tests for rt[index, index, ...]\n (SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),\n (SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),\n (SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),\n (SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),\n (SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),\n (SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),\n (SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),\n\n # Tests for rt[index, slice, ...]\n (SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),\n (SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),\n (SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),\n (SLICE_BUILDER[1, :, :, 1], []),\n (SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),\n (SLICE_BUILDER[3, :, :, 1], [[20]]),\n\n # Tests for rt[slice, slice, ...]\n (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),\n (SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],\n [[20]]]),\n (SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),\n (SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),\n\n # Test for ellipsis\n (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),\n (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),\n (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),\n (SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],\n [[19]]]),\n (SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),\n (SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),\n\n # Test for array_ops.newaxis\n (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, array_ops.newaxis],\n [[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n\n # Empty slice spec.\n ([], EXAMPLE_RAGGED_TENSOR_4D),\n\n # Slicing inner ragged dimensions.\n (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, :-1],\n [[v[:-1] for v in 
row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, 1:2],\n [[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[1:, 1:3, 1:2],\n [[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),\n\n # Strided slices\n (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),\n (SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_4D[::-1]),\n (SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_4D[::-2]),\n (SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),\n (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, ::2],\n [[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, 1::2],\n [[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, ::-1],\n [[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, ::-2],\n [[v[::-2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[..., ::-1, :],\n [[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[..., ::-1], [[[v[::-1] for v in col] for col in row]\n for row in EXAMPLE_RAGGED_TENSOR_4D]),\n ) # pyformat: disable\n def testWithRaggedRank2(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_nested_row_splits(\n EXAMPLE_RAGGED_TENSOR_4D_VALUES,\n [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)\n self._TestGetItem(rt, slice_spec, expected)\n\n @parameterized.parameters(\n # Test for errors in unsupported cases\n (SLICE_BUILDER[:, 0], ValueError,\n 'Cannot index into an inner ragged dimension.'),\n (SLICE_BUILDER[:, :, 0], ValueError,\n 'Cannot index into an inner ragged dimension.'),\n\n # Test for out-of-bounds errors.\n (SLICE_BUILDER[1, 0], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n (SLICE_BUILDER[0, 0, 3],\n (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[0, 5], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n )\n def testErrorsWithRaggedRank2(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_nested_row_splits(\n EXAMPLE_RAGGED_TENSOR_4D_VALUES,\n [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n @parameterized.parameters(\n (SLICE_BUILDER[:], []),\n (SLICE_BUILDER[2:], []),\n (SLICE_BUILDER[:-3], []),\n )\n def testWithEmptyTensor(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_row_splits([], [0])\n self._TestGetItem(rt, slice_spec, expected)\n\n @parameterized.parameters(\n (SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n )\n def testErrorsWithEmptyTensor(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_row_splits([], [0])\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n 
@parameterized.parameters(\n (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),\n (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),\n (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),\n (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),\n (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),\n (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),\n (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),\n )\n def testWithPlaceholderShapes(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n # Intentionally use an unknown shape for `splits`, to force the code path\n # that deals with having nrows unknown at graph construction time.\n splits = constant_op.constant(\n EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)\n splits = array_ops.placeholder_with_default(splits, None)\n rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)\n self._TestGetItem(rt, slice_spec, expected)\n\n @parameterized.parameters(\n (SLICE_BUILDER[..., 2], ValueError,\n 'Ellipsis not supported for unknown shape RaggedTensors'),)\n def testErrorsWithPlaceholderShapes(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n if not context.executing_eagerly():\n # Intentionally use an unknown shape for `values`.\n values = array_ops.placeholder_with_default([0], None)\n rt = RaggedTensor.from_row_splits(values, [0, 1])\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n def testNewAxis(self):\n # rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []]\n splits1 = [0, 3, 3]\n splits2 = [0, 2, 2, 3]\n values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])\n rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])\n rt_newaxis0 = rt[array_ops.newaxis]\n rt_newaxis1 = rt[:, array_ops.newaxis]\n rt_newaxis2 = rt[:, :, array_ops.newaxis]\n rt_newaxis3 = rt[:, :, :, array_ops.newaxis]\n rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]\n\n self.assertAllEqual(\n rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])\n self.assertAllEqual(\n rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])\n self.assertAllEqual(\n rt_newaxis1,\n [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])\n self.assertAllEqual(\n rt_newaxis2,\n [[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])\n self.assertAllEqual(\n rt_newaxis3,\n [[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])\n self.assertAllEqual(\n rt_newaxis4,\n [[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])\n\n self.assertEqual(rt.ragged_rank, 2)\n self.assertEqual(rt_newaxis0.ragged_rank, 3)\n self.assertEqual(rt_newaxis1.ragged_rank, 3)\n self.assertEqual(rt_newaxis2.ragged_rank, 3)\n self.assertEqual(rt_newaxis3.ragged_rank, 2)\n self.assertEqual(rt_newaxis4.ragged_rank, 2)\n\n self.assertEqual(rt_newaxis0.shape.as_list(), [1, 2, None, None, 2])\n self.assertEqual(rt_newaxis1.shape.as_list(), [2, 1, None, None, 2])\n self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, 1, None, 2])\n self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])\n self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])\n\n @parameterized.parameters(\n # EXAMPLE_RAGGED_TENSOR_3D.shape = [2, 3, None]\n\n # Indexing into uniform_row_splits dimension:\n (SLICE_BUILDER[:, 1], [r[1] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D],\n 
[2, None]),\n (SLICE_BUILDER[:, -2], [r[-2] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[:, -3], [r[-3] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[1:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],\n [1, None]),\n (SLICE_BUILDER[:, 1, 1:], [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[1:, 1, 1:],\n [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],\n [1, None]),\n\n # Slicing uniform_row_splits dimension:\n (SLICE_BUILDER[:, 2:], [r[2:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 1, None]),\n (SLICE_BUILDER[:, -2:], [r[-2:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 2, None]),\n (SLICE_BUILDER[:, :, 1:],\n [[c[1:] for c in r] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 3, None]),\n (SLICE_BUILDER[:, 5:], [r[5:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 0, None]),\n\n # Slicing uniform_row_splits dimension with a non-default step size:\n (SLICE_BUILDER[:, ::2], [r[::2] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 2, None]),\n (SLICE_BUILDER[:, ::-1], [r[::-1] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 3, None]),\n ) # pyformat: disable\n def testWithUniformRowLength(self, slice_spec, expected, expected_shape):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_uniform_row_length(\n RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,\n EXAMPLE_RAGGED_TENSOR_3D_SPLITS),\n EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)\n self.assertIsNot(rt.uniform_row_length, None)\n self._TestGetItem(rt, slice_spec, expected, expected_shape)\n\n # If the result is 3D, then check that it still has a uniform row length:\n actual = rt.__getitem__(slice_spec) # pylint: disable=assignment-from-no-return\n if actual.shape.rank == 3:\n self.assertIsNot(actual.uniform_row_length, None)\n self.assertAllEqual(actual.uniform_row_length, expected_shape[1])\n\n @parameterized.parameters(\n (SLICE_BUILDER[:, 3], errors.InvalidArgumentError, 'out of bounds'),\n (SLICE_BUILDER[:, -4], errors.InvalidArgumentError, 'out of bounds'),\n (SLICE_BUILDER[:, 10], errors.InvalidArgumentError, 'out of bounds'),\n (SLICE_BUILDER[:, -10], errors.InvalidArgumentError, 'out of bounds'),\n )\n def testErrorsWithUniformRowLength(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_uniform_row_length(\n RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,\n EXAMPLE_RAGGED_TENSOR_3D_SPLITS),\n EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
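Taken together, the parameterized cases in the test file above exercise `RaggedTensor.__getitem__` for outer-dimension indexing, inner-dimension slicing, strided slices, and the error raised when indexing into an inner ragged dimension. A minimal standalone sketch of those behaviors, assuming a TensorFlow 2.x install (the sample values mirror `EXAMPLE_RAGGED_TENSOR_2D`, but this snippet is illustrative and not part of the test file):

```python
import tensorflow as tf

rt = tf.ragged.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']])

print(rt[1])       # index the outer dim -> tf.Tensor([b'c' b'd' b'e'], ...)
print(rt[1:3])     # slice the outer dim -> RaggedTensor with rows 1 and 2
print(rt[:, 1:])   # slice the inner ragged dim of every row
print(rt[::-1])    # strided slice of the outer dim

# Indexing (rather than slicing) an inner ragged dimension is rejected,
# because rows have different lengths:
try:
    rt[:, 1]
except ValueError as e:
    print(e)       # "Cannot index into an inner ragged dimension"
```

Slicing an inner ragged dimension is allowed because it is applied per row, whereas indexing would require every row to be at least that long, which is why the test expects a `ValueError` for that case.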
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.python.framework.sparse_tensor.\"\"\"\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import googletest\n\n\nclass SparseTensorTest(test_util.TensorFlowTestCase):\n\n def testPythonConstruction(self):\n indices = [[1, 2], [2, 0], [3, 4]]\n values = [b\"a\", b\"b\", b\"c\"]\n shape = [4, 5]\n sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)\n for sp in [\n sparse_tensor.SparseTensor(indices, values, shape),\n sparse_tensor.SparseTensor.from_value(sp_value),\n sparse_tensor.SparseTensor.from_value(\n sparse_tensor.SparseTensor(indices, values, shape))]:\n self.assertEqual(sp.indices.dtype, dtypes.int64)\n self.assertEqual(sp.values.dtype, dtypes.string)\n self.assertEqual(sp.dense_shape.dtype, dtypes.int64)\n self.assertEqual(sp.get_shape(), (4, 5))\n\n value = self.evaluate(sp)\n self.assertAllEqual(indices, value.indices)\n self.assertAllEqual(values, value.values)\n self.assertAllEqual(shape, value.dense_shape)\n sp_value = self.evaluate(sp)\n self.assertAllEqual(sp_value.indices, value.indices)\n self.assertAllEqual(sp_value.values, value.values)\n self.assertAllEqual(sp_value.dense_shape, value.dense_shape)\n\n def testShape(self):\n\n @def_function.function\n def test_fn(tensor):\n tensor = sparse_ops.sparse_transpose(tensor)\n self.assertEqual(tensor.shape.rank, 2)\n return tensor\n\n tensor = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])\n test_fn(tensor)\n\n def testIsSparse(self):\n self.assertFalse(sparse_tensor.is_sparse(3))\n self.assertFalse(sparse_tensor.is_sparse(\"foo\"))\n self.assertFalse(sparse_tensor.is_sparse(np.array(3)))\n self.assertTrue(\n sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))\n self.assertTrue(\n sparse_tensor.is_sparse(\n sparse_tensor.SparseTensorValue([[0]], [0], [1])))\n\n def testConsumers(self):\n with context.graph_mode():\n sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])\n w = ops.convert_to_tensor(np.ones([4, 1], np.float32))\n out = sparse_ops.sparse_tensor_dense_matmul(sp, w)\n self.assertEqual(len(sp.consumers()), 1)\n self.assertEqual(sp.consumers()[0], out.op)\n\n dense = 
sparse_ops.sparse_tensor_to_dense(sp)\n self.assertEqual(len(sp.consumers()), 2)\n self.assertIn(dense.op, sp.consumers())\n self.assertIn(out.op, sp.consumers())\n\n def testWithValues(self):\n source = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])\n new_tensor = source.with_values([5.0, 1.0])\n self.assertAllEqual(new_tensor.indices, source.indices)\n self.assertAllEqual(new_tensor.values, [5.0, 1.0])\n self.assertAllEqual(new_tensor.dense_shape, source.dense_shape)\n\n # ensure new value's shape is checked\n with self.assertRaises((errors.InvalidArgumentError, ValueError)):\n source.with_values([[5.0, 1.0]])\n\n\nclass ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):\n\n def test_convert_dense(self):\n value = [42, 43]\n from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(\n value)\n self.assertAllEqual(value, self.evaluate(from_value))\n\n def test_convert_sparse(self):\n indices = [[0, 1], [1, 0]]\n values = [42, 43]\n shape = [2, 2]\n sparse_tensor_value = sparse_tensor.SparseTensorValue(\n indices, values, shape)\n st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)\n from_value = self.evaluate(\n sparse_tensor.convert_to_tensor_or_sparse_tensor(sparse_tensor_value))\n from_tensor = self.evaluate(\n sparse_tensor.convert_to_tensor_or_sparse_tensor(st))\n for convertee in [from_value, from_tensor]:\n self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)\n self.assertAllEqual(sparse_tensor_value.values, convertee.values)\n self.assertAllEqual(\n sparse_tensor_value.dense_shape, convertee.dense_shape)\n\n\nclass SparseTensorShapeTest(test_util.TensorFlowTestCase):\n\n def test_simple(self):\n indices = [[0, 2]]\n values = [1]\n dense_shape = [5, 5]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n\n self.assertIsInstance(sp.shape, tensor_shape.TensorShape)\n self.assertIsInstance(sp.dense_shape, ops.Tensor)\n self.assertEqual(sp.shape.as_list(), [5, 5])\n\n def test_unknown_shape(self):\n\n @def_function.function\n def my_func(dense_shape):\n indices = [[0, 2]]\n values = [1]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, None])\n return sp\n\n my_func.get_concrete_function(\n dense_shape=tensor_spec.TensorSpec(\n dtype=dtypes.int64, shape=[2,]))\n\n def test_partial_shape(self):\n\n @def_function.function\n def my_func(x):\n indices = [[0, 2]]\n values = [1]\n y = ops.convert_to_tensor(3, dtype=dtypes.int64)\n dense_shape = [x, y]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, 3])\n return sp\n\n my_func.get_concrete_function(\n x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[]))\n\n def test_neg_shape(self):\n indices = [[0, 2]]\n values = [1]\n dense_shape = [-1, 5]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, 5])\n\n def test_unknown_tensor_shape(self):\n\n @def_function.function\n def my_func(x):\n indices = [[0, 0]]\n values = [1]\n dense_shape = array_ops.shape(x)\n dense_shape = math_ops.cast(dense_shape, dtypes.int64)\n\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, None])\n return sp\n\n my_func.get_concrete_function(\n x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None, None]))\n\n def test_unknown_rank(self):\n\n @def_function.function\n def my_func(dense_shape):\n indices = [[0, 0]]\n values = [1]\n sp 
= sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.rank, None)\n return sp\n\n my_func.get_concrete_function(\n dense_shape=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None]))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SparseTensorSpecTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertAllTensorsEqual(self, list1, list2):\n self.assertLen(list1, len(list2))\n for (t1, t2) in zip(list1, list2):\n self.assertAllEqual(t1, t2)\n\n def testConstruction(self):\n spec1 = sparse_tensor.SparseTensorSpec()\n self.assertEqual(spec1.shape.rank, None)\n self.assertEqual(spec1.dtype, dtypes.float32)\n\n spec2 = sparse_tensor.SparseTensorSpec([None, None], dtypes.string)\n self.assertEqual(spec2.shape.as_list(), [None, None])\n self.assertEqual(spec2.dtype, dtypes.string)\n\n def testValueType(self):\n spec1 = sparse_tensor.SparseTensorSpec()\n self.assertEqual(spec1.value_type, sparse_tensor.SparseTensor)\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec(),\n (tensor_shape.TensorShape(None), dtypes.float32)),\n (sparse_tensor.SparseTensorSpec(shape=[5, None, None]),\n (tensor_shape.TensorShape([5, None, None]), dtypes.float32)),\n (sparse_tensor.SparseTensorSpec(dtype=dtypes.int32),\n (tensor_shape.TensorShape(None), dtypes.int32)),\n ]) # pyformat: disable\n def testSerialize(self, st_spec, expected):\n serialization = st_spec._serialize()\n # TensorShape has an unconventional definition of equality, so we can't use\n # assertEqual directly here. But repr() is deterministic and lossless for\n # the expected values, so we can use that instead.\n self.assertEqual(repr(serialization), repr(expected))\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec(dtype=dtypes.string), [\n tensor_spec.TensorSpec([None, None], dtypes.int64),\n tensor_spec.TensorSpec([None], dtypes.string),\n tensor_spec.TensorSpec([None], dtypes.int64)\n ]),\n (sparse_tensor.SparseTensorSpec(shape=[5, None, None]), [\n tensor_spec.TensorSpec([None, 3], dtypes.int64),\n tensor_spec.TensorSpec([None], dtypes.float32),\n tensor_spec.TensorSpec([3], dtypes.int64)\n ]),\n ])\n def testComponentSpecs(self, st_spec, expected):\n self.assertEqual(st_spec._component_specs, expected)\n\n @parameterized.parameters([\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec(),\n \"indices\": [[0, 1], [10, 8]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 100]\n },\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec([100, None, None]),\n \"indices\": [[0, 1, 3], [10, 8, 2]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 20, 20]\n },\n ])\n def testToFromComponents(self, st_spec, indices, values, dense_shape):\n st = sparse_tensor.SparseTensor(indices, values, dense_shape)\n actual_components = st_spec._to_components(st)\n self.assertAllTensorsEqual(actual_components,\n [indices, values, dense_shape])\n st_reconstructed = st_spec._from_components(actual_components)\n self.assertAllEqual(st.indices, st_reconstructed.indices)\n self.assertAllEqual(st.values, st_reconstructed.values)\n self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)\n\n @test_util.run_v1_only(\"SparseTensorValue is deprecated in v2\")\n def testFromNumpyComponents(self):\n indices = np.array([[0], [8]])\n values = np.array([1.0, 9.0])\n dense_shape = np.array([100])\n spec = sparse_tensor.SparseTensorSpec()\n st = spec._from_components([indices, values, dense_shape])\n self.assertIsInstance(st, sparse_tensor.SparseTensorValue)\n self.assertAllEqual(st.indices, 
indices)\n self.assertAllEqual(st.values, values)\n self.assertAllEqual(st.dense_shape, dense_shape)\n\n @parameterized.parameters([\n sparse_tensor.SparseTensorSpec(dtype=dtypes.string),\n sparse_tensor.SparseTensorSpec(shape=[5, None, None]),\n ])\n def testFlatTensorSpecs(self, st_spec):\n self.assertEqual(st_spec._flat_tensor_specs,\n [tensor_spec.TensorSpec(None, dtypes.variant)])\n\n @parameterized.parameters([\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec(),\n \"indices\": [[0, 1], [10, 8]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 100]\n },\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec([100, None, None]),\n \"indices\": [[0, 1, 3], [10, 8, 2]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 20, 20]\n },\n ])\n def testToFromTensorList(self, st_spec, indices, values, dense_shape):\n st = sparse_tensor.SparseTensor(indices, values, dense_shape)\n tensor_list = st_spec._to_tensor_list(st)\n st_reconstructed = st_spec._from_tensor_list(tensor_list)\n self.assertAllEqual(st.indices, st_reconstructed.indices)\n self.assertAllEqual(st.values, st_reconstructed.values)\n self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec([2, None], dtypes.float32), 32,\n sparse_tensor.SparseTensorSpec([32, 2, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([4, None], dtypes.float32), None,\n sparse_tensor.SparseTensorSpec([None, 4, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([2], dtypes.float32), 32,\n sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32)),\n ])\n def testBatch(self, spec, batch_size, expected):\n self.assertEqual(spec._batch(batch_size), expected)\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec([32, None, None], dtypes.float32),\n sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([None, None, None], dtypes.float32),\n sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32),\n sparse_tensor.SparseTensorSpec([2], dtypes.float32)),\n ])\n def testUnbatch(self, spec, expected):\n self.assertEqual(spec._unbatch(), expected)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
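The sparse-tensor tests above revolve around construction, static shape inference, and `with_values`. A minimal sketch of the same public behaviors, assuming TensorFlow 2.x (illustrative only, not part of the test file):

```python
import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[1, 2], [2, 0], [3, 4]],
                            values=['a', 'b', 'c'],
                            dense_shape=[4, 5])
print(sp.shape)                                  # (4, 5) -- static TensorShape
print(tf.sparse.to_dense(sp, default_value=''))  # densify for inspection

# with_values keeps indices and dense_shape but swaps in new values.
sp2 = sp.with_values(['x', 'y', 'z'])
print(sp2.values)                                # [b'x' b'y' b'z']
```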
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Definitions for resource-type trackable object classes.\"\"\"\n\nimport contextlib\nimport copy\nimport weakref\n\nimport six\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n# global _RESOURCE_TRACKER_STACK\n_RESOURCE_TRACKER_STACK = []\n\n\nclass ResourceTracker(object):\n \"\"\"An object that tracks a list of resources.\"\"\"\n\n __slots__ = [\"_resources\"]\n\n def __init__(self):\n self._resources = []\n\n @property\n def resources(self):\n return self._resources\n\n def add_resource(self, resource):\n self._resources.append(resource)\n\n\n@tf_contextlib.contextmanager\ndef resource_tracker_scope(resource_tracker):\n \"\"\"A context to manage resource trackers.\n\n Use this in order to collect up all resources created within a block of code.\n Example usage:\n\n ```python\n resource_tracker = ResourceTracker()\n with resource_tracker_scope(resource_tracker):\n resource = TrackableResource()\n\n assert resource_tracker.resources == [resource]\n\n Args:\n resource_tracker: The passed in ResourceTracker object\n\n Yields:\n A scope in which the resource_tracker is active.\n \"\"\"\n global _RESOURCE_TRACKER_STACK\n old = list(_RESOURCE_TRACKER_STACK)\n _RESOURCE_TRACKER_STACK.append(resource_tracker)\n try:\n yield\n finally:\n _RESOURCE_TRACKER_STACK = old\n\n\ndef _make_getter(captured_getter, captured_previous):\n \"\"\"To avoid capturing loop variables.\"\"\"\n\n def getter(*args, **kwargs):\n return captured_getter(captured_previous, *args, **kwargs)\n\n return getter\n\n\nclass _ResourceMetaclass(type):\n \"\"\"Metaclass for CapturableResource.\"\"\"\n\n def __call__(cls, *args, **kwargs):\n\n def default_resource_creator(next_creator, *a, **kw):\n assert next_creator is None\n obj = cls.__new__(cls, *a, **kw)\n obj.__init__(*a, **kw)\n return obj\n\n previous_getter = lambda *a, **kw: default_resource_creator(None, *a, **kw)\n resource_creator_stack = ops.get_default_graph()._resource_creator_stack\n for getter in resource_creator_stack[cls._resource_type()]:\n previous_getter = _make_getter(getter, previous_getter)\n\n return previous_getter(*args, **kwargs)\n\n\nclass CapturableResource(six.with_metaclass(_ResourceMetaclass,\n base.Trackable)):\n \"\"\"Holds a Tensor which a tf.function can capture.\n\n `CapturableResource`s are discovered by traversing the graph of object\n attributes, e.g. during `tf.saved_model.save`. 
They are excluded from the\n scope-based tracking of `TrackableResource`; generally things that require\n initialization should inherit from `TrackableResource` instead of\n `CapturableResource` directly.\n \"\"\"\n\n def __init__(self, device=\"\"):\n \"\"\"Initialize the `CapturableResource`.\n\n Args:\n device: A string indicating a required placement for this resource,\n e.g. \"CPU\" if this resource must be created on a CPU device. A blank\n device allows the user to place resource creation, so generally this\n should be blank unless the resource only makes sense on one device.\n \"\"\"\n self._resource_handle_value = None\n self._resource_device = device\n self._self_destruction_context = (\n context.eager_mode if context.executing_eagerly()\n else ops.get_default_graph().as_default)\n\n @classmethod\n def _resource_type(cls):\n return cls.__name__\n\n @property\n def _destruction_context(self):\n return getattr(self, \"_self_destruction_context\",\n # no-op context\n contextlib.suppress)\n\n @_destruction_context.setter\n def _destruction_context(self, destruction_context):\n self._self_destruction_context = destruction_context\n\n def _create_resource(self):\n \"\"\"A function that creates a resource handle.\"\"\"\n raise NotImplementedError(\"TrackableResource._create_resource not \"\n \"implemented.\")\n\n @property\n def _resource_handle(self):\n return self._resource_handle_value\n\n @_resource_handle.setter\n def _resource_handle(self, value):\n if isinstance(value, (ops.Tensor, ops.EagerTensor)):\n value._parent_trackable = weakref.ref(self) # pylint: disable=protected-access\n self._resource_handle_value = value\n\n def _initialize(self):\n \"\"\"A function that initializes the resource. Optional.\"\"\"\n pass\n\n def _destroy_resource(self):\n \"\"\"A function that destroys the resource. Optional.\"\"\"\n pass\n\n @property\n def resource_handle(self):\n \"\"\"Returns the resource handle associated with this Resource.\"\"\"\n if self._resource_handle is None:\n with ops.device(self._resource_device):\n self._resource_handle = self._create_resource()\n return self._resource_handle\n\n def _map_resources(self, _):\n \"\"\"For implementing `Trackable`.\"\"\"\n new_obj = copy.copy(self)\n # pylint: disable=protected-access\n with ops.device(self._resource_device):\n new_resource = new_obj._create_resource()\n new_obj._resource_handle = new_resource\n # pylint: enable=protected-access\n obj_map = {self: new_obj}\n resource_map = {self.resource_handle: new_resource}\n return obj_map, resource_map\n\n def _trackable_children(self, save_type, **kwargs):\n children = super()._trackable_children(save_type, **kwargs)\n if save_type == \"savedmodel\":\n @def_function.function(input_signature=[], autograph=False)\n def _creator():\n resource = self._create_resource()\n return resource\n\n @def_function.function(input_signature=[], autograph=False)\n def _initializer():\n self._initialize()\n return 1 # Dummy return\n\n @def_function.function(input_signature=[], autograph=False)\n def _destroyer():\n self._destroy_resource()\n return 1 # Dummy return\n\n children.update({\n \"_create_resource\": _creator,\n \"_initialize\": _initializer,\n \"_destroy_resource\": _destroyer,\n })\n return children\n\n def __del__(self):\n try:\n # Outer race condition: on program exit, the destruction context may be\n # deleted before this __del__ is called. 
At this point we can safely\n # exit without calling _destroy_resource() and let Python handle things.\n with self._destruction_context():\n # Inner race condition: possible between this and `ScopedTFFunction`\n # whereby if an entire garbage collection chain containing both\n # objects is moved to unreachable during the same garbage collection\n # cycle, the __del__ for `ScopedTFFunction` can be collected before\n # this method is called. In that case, we can't do much but\n # continue.\n self._destroy_resource()\n except Exception: # pylint: disable=broad-except\n # Silence all error logs that occur when attempting to destroy this\n # resource.\n pass\n\n\n@tf_export(\"saved_model.experimental.TrackableResource\")\nclass TrackableResource(CapturableResource):\n \"\"\"Holds a Tensor which a tf.function can capture.\n\n A TrackableResource is most useful for stateful Tensors that require\n initialization, such as `tf.lookup.StaticHashTable`. `TrackableResource`s\n are discovered by traversing the graph of object attributes, e.g. during\n `tf.saved_model.save`.\n\n A TrackableResource has three methods to override:\n\n * `_create_resource` should create the resource tensor handle.\n * `_initialize` should initialize the resource held at `self.resource_handle`.\n * `_destroy_resource` is called upon a `TrackableResource`'s destruction\n and should decrement the resource's ref count. For most resources, this\n should be done with a call to `tf.raw_ops.DestroyResourceOp`.\n\n Example usage:\n\n >>> class DemoResource(tf.saved_model.experimental.TrackableResource):\n ... def __init__(self):\n ... super().__init__()\n ... self._initialize()\n ... def _create_resource(self):\n ... return tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[2])\n ... def _initialize(self):\n ... tf.raw_ops.AssignVariableOp(\n ... resource=self.resource_handle, value=tf.ones([2]))\n ... def _destroy_resource(self):\n ... tf.raw_ops.DestroyResourceOp(resource=self.resource_handle)\n >>> class DemoModule(tf.Module):\n ... def __init__(self):\n ... self.resource = DemoResource()\n ... def increment(self, tensor):\n ... return tensor + tf.raw_ops.ReadVariableOp(\n ... resource=self.resource.resource_handle, dtype=tf.float32)\n >>> demo = DemoModule()\n >>> demo.increment([5, 1])\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 2.], dtype=float32)>\n \"\"\"\n\n def __init__(self, device=\"\"):\n \"\"\"Initialize the `TrackableResource`.\n\n Args:\n device: A string indicating a required placement for this resource,\n e.g. \"CPU\" if this resource must be created on a CPU device. 
A blank\n device allows the user to place resource creation, so generally this\n should be blank unless the resource only makes sense on one device.\n \"\"\"\n global _RESOURCE_TRACKER_STACK\n for resource_tracker in _RESOURCE_TRACKER_STACK:\n resource_tracker.add_resource(self)\n super(TrackableResource, self).__init__(device=device)\n\n\n# TODO(b/124205571,b/124092991): Solve destruction of resources.\nclass RestoredResource(TrackableResource):\n \"\"\"Restored SavedResource.\"\"\"\n\n def __init__(self, device=\"\"):\n super(RestoredResource, self).__init__(device=device)\n\n @classmethod\n def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs):\n obj = cls(device=object_proto.resource.device)\n resource_creator = dependencies.get(\"_create_resource\")\n if resource_creator is not None:\n obj._create_resource = resource_creator # pylint: disable=protected-access\n return obj\n\n def _add_trackable_child(self, name, value):\n setattr(self, name, value)\n if (isinstance(value, base.Trackable) and\n not isinstance(value, def_function.Function)):\n self._track_trackable(value, name)\n",
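As the docstrings above note, `tf.lookup.StaticHashTable` is a concrete `TrackableResource`, so attaching one to a `tf.Module` is enough for `tf.saved_model.save` to discover it via attribute traversal and serialize its `_create_resource` / `_initialize` / `_destroy_resource` functions. A minimal sketch, assuming TensorFlow 2.x (the module name and save path are illustrative, not part of this source file):

```python
import tensorflow as tf

class Lookup(tf.Module):
    def __init__(self):
        # StaticHashTable is a TrackableResource; tracking it on the module
        # is all that is needed for SavedModel export.
        self.table = tf.lookup.StaticHashTable(
            tf.lookup.KeyValueTensorInitializer(['a', 'b'], [1, 2]),
            default_value=-1)

    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def __call__(self, keys):
        return self.table.lookup(keys)

module = Lookup()
print(module(tf.constant(['a', 'z'])))          # [ 1 -1]
tf.saved_model.save(module, '/tmp/lookup_module')  # path is just an example
```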
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of Cluster Resolvers for Kubernetes.\"\"\"\n\nfrom tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('distribute.cluster_resolver.KubernetesClusterResolver')\nclass KubernetesClusterResolver(ClusterResolver):\n \"\"\"ClusterResolver for Kubernetes.\n\n This is an implementation of cluster resolvers for Kubernetes. When given the\n the Kubernetes namespace and label selector for pods, we will retrieve the\n pod IP addresses of all running pods matching the selector, and return a\n ClusterSpec based on that information.\n\n Note: it cannot retrieve `task_type`, `task_id` or `rpc_layer`. To use it\n with some distribution strategies like\n `tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to\n specify `task_type` and `task_id` by setting these attributes.\n\n Usage example with tf.distribute.Strategy:\n\n ```Python\n # On worker 0\n cluster_resolver = KubernetesClusterResolver(\n {\"worker\": [\"job-name=worker-cluster-a\", \"job-name=worker-cluster-b\"]})\n cluster_resolver.task_type = \"worker\"\n cluster_resolver.task_id = 0\n strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n cluster_resolver=cluster_resolver)\n\n # On worker 1\n cluster_resolver = KubernetesClusterResolver(\n {\"worker\": [\"job-name=worker-cluster-a\", \"job-name=worker-cluster-b\"]})\n cluster_resolver.task_type = \"worker\"\n cluster_resolver.task_id = 1\n strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n cluster_resolver=cluster_resolver)\n ```\n \"\"\"\n\n def __init__(self,\n job_to_label_mapping=None,\n tf_server_port=8470,\n rpc_layer='grpc',\n override_client=None):\n \"\"\"Initializes a new KubernetesClusterResolver.\n\n This initializes a new Kubernetes ClusterResolver. The ClusterResolver\n will attempt to talk to the Kubernetes master to retrieve all the instances\n of pods matching a label selector.\n\n Args:\n job_to_label_mapping: A mapping of TensorFlow jobs to label selectors.\n This allows users to specify many TensorFlow jobs in one Cluster\n Resolver, and each job can have pods belong with different label\n selectors. For example, a sample mapping might be\n ```\n {'worker': ['job-name=worker-cluster-a', 'job-name=worker-cluster-b'],\n 'ps': ['job-name=ps-1', 'job-name=ps-2']}\n ```\n tf_server_port: The port the TensorFlow server is listening on.\n rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate\n between tasks in Kubernetes. 
Defaults to 'grpc'.\n override_client: The Kubernetes client (usually automatically retrieved\n using `from kubernetes import client as k8sclient`). If you pass this\n in, you are responsible for setting Kubernetes credentials manually.\n\n Raises:\n ImportError: If the Kubernetes Python client is not installed and no\n `override_client` is passed in.\n RuntimeError: If autoresolve_task is not a boolean or a callable.\n \"\"\"\n try:\n from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top\n\n k8sconfig.load_kube_config()\n except ImportError:\n if not override_client:\n raise ImportError('The Kubernetes Python client must be installed '\n 'before using the Kubernetes Cluster Resolver. '\n 'To install the Kubernetes Python client, run '\n '`pip install kubernetes` on your command line.')\n\n if not job_to_label_mapping:\n job_to_label_mapping = {'worker': ['job-name=tensorflow']}\n\n self._job_to_label_mapping = job_to_label_mapping\n self._tf_server_port = tf_server_port\n self._override_client = override_client\n\n self.task_type = None\n self.task_id = None\n self.rpc_layer = rpc_layer\n\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Returns the master address to use when creating a session.\n\n You must have set the task_type and task_id object properties before\n calling this function, or pass in the `task_type` and `task_id`\n parameters when using this function. If you do both, the function parameters\n will override the object properties.\n\n Note: this is only useful for TensorFlow 1.x.\n\n Args:\n task_type: (Optional) The type of the TensorFlow task of the master.\n task_id: (Optional) The index of the TensorFlow task of the master.\n rpc_layer: (Optional) The RPC protocol for the given cluster.\n\n Returns:\n The name or URL of the session master.\n \"\"\"\n task_type = task_type if task_type is not None else self.task_type\n task_id = task_id if task_id is not None else self.task_id\n\n if task_type is not None and task_id is not None:\n return format_master_url(\n self.cluster_spec().task_address(task_type, task_id),\n rpc_layer or self.rpc_layer)\n\n return ''\n\n def cluster_spec(self):\n \"\"\"Returns a ClusterSpec object based on the latest info from Kubernetes.\n\n We retrieve the information from the Kubernetes master every time this\n method is called.\n\n Returns:\n A ClusterSpec containing host information returned from Kubernetes.\n\n Raises:\n RuntimeError: If any of the pods returned by the master is not in the\n `Running` phase.\n \"\"\"\n if self._override_client:\n client = self._override_client\n else:\n from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top\n from kubernetes import client as k8sclient # pylint: disable=g-import-not-at-top\n\n k8sconfig.load_kube_config()\n client = k8sclient.CoreV1Api()\n\n cluster_map = {}\n\n for tf_job in self._job_to_label_mapping:\n all_pods = []\n for selector in self._job_to_label_mapping[tf_job]:\n ret = client.list_pod_for_all_namespaces(label_selector=selector)\n selected_pods = []\n\n # Sort the list by the name to make sure it doesn't change call to call.\n for pod in sorted(ret.items, key=lambda x: x.metadata.name):\n if pod.status.phase == 'Running':\n selected_pods.append(\n '%s:%s' % (pod.status.host_ip, self._tf_server_port))\n else:\n raise RuntimeError('Pod \"%s\" is not running; phase: \"%s\"' %\n (pod.metadata.name, pod.status.phase))\n all_pods.extend(selected_pods)\n cluster_map[tf_job] = all_pods\n\n return 
server_lib.ClusterSpec(cluster_map)\n",
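Because `cluster_spec()` only calls `list_pod_for_all_namespaces` on the client, the `override_client` hook makes the resolver easy to exercise without a live cluster. The stub below is a hypothetical illustration, not a supported client; it also assumes either that the `kubernetes` package is not installed or that a valid kubeconfig exists, since the constructor still attempts `load_kube_config()` when the package imports successfully:

```python
import types
import tensorflow as tf

def _pod(name, ip, phase='Running'):
    # Mimics just the fields the resolver reads from each pod.
    return types.SimpleNamespace(
        metadata=types.SimpleNamespace(name=name),
        status=types.SimpleNamespace(phase=phase, host_ip=ip))

class StubClient:
    def list_pod_for_all_namespaces(self, label_selector):
        return types.SimpleNamespace(
            items=[_pod('worker-0', '10.0.0.1'), _pod('worker-1', '10.0.0.2')])

resolver = tf.distribute.cluster_resolver.KubernetesClusterResolver(
    job_to_label_mapping={'worker': ['job-name=tensorflow']},
    override_client=StubClient())
print(resolver.cluster_spec().as_dict())
# {'worker': ['10.0.0.1:8470', '10.0.0.2:8470']}  (default tf_server_port=8470)
```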
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Mid level API for TPU Embeddings.\"\"\"\n\nimport functools\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Text, Tuple, Union\n\nfrom absl import logging\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework.tensor_shape import TensorShape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.tpu import tpu\nfrom tensorflow.python.tpu import tpu_embedding_v2_utils\nfrom tensorflow.python.tpu.ops import tpu_ops\nfrom tensorflow.python.training.saving import saveable_hook\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.types import core\nfrom tensorflow.python.types import internal as internal_types\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_HOOK_KEY = \"TPUEmbedding_saveable\"\n_NAME_KEY = \"_tpu_embedding_layer\"\n\n\nclass TPUShardedVariable(sharded_variable.ShardedVariableMixin):\n \"\"\"A ShardedVariable class for TPU.\"\"\"\n\n @property\n def _in_graph_mode(self):\n return self.variables[0]._in_graph_mode # pylint: disable=protected-access\n\n\ndef _add_key_attr(op, name):\n op._set_attr(_NAME_KEY, attr_value_pb2.AttrValue(s=compat.as_bytes(name))) # pylint: disable=protected-access\n\n\n@tf_export(\"tpu.experimental.embedding.TPUEmbedding\")\nclass TPUEmbedding(tracking.AutoTrackable):\n \"\"\"The TPUEmbedding mid level API.\n\n NOTE: When instantiated under a TPUStrategy, this class can only be created\n once per call to `tf.tpu.experimental.initialize_tpu_system`. 
If you wish to\n re-initialize the embedding engine you must re-initialize the tpu as well.\n Doing this will clear any variables from TPU, so ensure you have checkpointed\n before you do this. If a further instances of the class are needed,\n set the `initialize_tpu_embedding` argument to `False`.\n\n This class can be used to support training large embeddings on TPU. When\n creating an instance of this class, you must specify the complete set of\n tables and features you expect to lookup in those tables. See the\n documentation of `tf.tpu.experimental.embedding.TableConfig` and\n `tf.tpu.experimental.embedding.FeatureConfig` for more details on the complete\n set of options. We will cover the basic usage here.\n\n NOTE: multiple `FeatureConfig` objects can use the same `TableConfig` object,\n allowing different features to share the same table:\n\n ```python\n table_config_one = tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=...,\n dim=...)\n table_config_two = tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=...,\n dim=...)\n feature_config = {\n 'feature_one': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_one),\n 'feature_two': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_one),\n 'feature_three': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_two)}\n ```\n\n There are two modes under which the `TPUEmbedding` class can used. This\n depends on if the class was created under a `TPUStrategy` scope or not.\n\n Under `TPUStrategy`, we allow access to the method `enqueue`, `dequeue` and\n `apply_gradients`. We will show examples below of how to use these to train\n and evaluate your model. Under CPU, we only access to the `embedding_tables`\n property which allow access to the embedding tables so that you can use them\n to run model evaluation/prediction on CPU.\n\n First lets look at the `TPUStrategy` mode. Initial setup looks like:\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n ```\n\n When creating a distributed dataset that is to be passed to the enqueue\n operation a special input option must be specified:\n\n ```python\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n ```\n\n Different feature inputs can have different shapes. For dense and sparse\n tensor, rank 2 and above is supported. For ragged tensor, although only rank 2\n is supported, you can specify the output shape to be rank 2 and above. The\n output shape specified in the FeatureConfig has the first priority. The input\n shape passed in build method has second priority and the input shapes\n auto detected from input feature has the lowest priority. The latter two will\n be converted to output shapes by omitting the last dimension. If the lower\n priority one has output shapes which don't match the former one. A ValueError\n will be raised. Only when the former one has undefined output shapes, the\n latter one can override.\n\n NOTE: All batches passed to the layer can have different input shapes. But\n these input shapes need to match with the output shapes set by either\n `FeatureConfig` or build method except for ragged tensor. 
Only 2D\n ragged tensor with output shape set to higher dimensions is allowed as\n long as the total number of elements matches. All subsequent calls must have\n the same input shapes. In the event that the input shapes cannot be\n automatically determined by the enqueue method, you must call\n the build method with the input shapes or provide output shapes in the\n `FeatureConfig` to initialize the layer.\n\n To use this API on TPU you should use a custom training loop. Below is an\n example of a training and evaluation step:\n\n ```python\n @tf.function\n def training_step(dataset_iterator, num_steps):\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n model_output = model(activations)\n loss = ... # some function of labels and model_output\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n # Insert your model gradient and optimizer application here\n\n for _ in tf.range(num_steps):\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features, ))\n\n @tf.function\n def evalution_step(dataset_iterator, num_steps):\n def tpu_step(tpu_features):\n activations = embedding.dequeue()\n model_output = model(activations)\n # Insert your evaluation code here.\n\n for _ in tf.range(num_steps):\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=False)\n strategy.run(tpu_step, args=(tpu_features, ))\n ```\n\n NOTE: The calls to `enqueue` have `training` set to `True` when\n `embedding.apply_gradients` is used and set to `False` when\n `embedding.apply_gradients` is not present in the function. If you don't\n follow this pattern you may cause an error to be raised or the tpu may\n deadlock.\n\n In the above examples, we assume that the user has a dataset which returns\n a tuple where the first element of the tuple matches the structure of what\n was passed as the `feature_config` argument to the object initializer. Also we\n utilize `tf.range` to get a `tf.while_loop` in order to increase performance.\n\n When checkpointing your model, you should include your\n `tf.tpu.experimental.embedding.TPUEmbedding` object in the checkpoint. It is a\n trackable object and saving it will save the embedding tables and their\n optimizer slot variables:\n\n ```python\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.save(...)\n ```\n\n On CPU, only the `embedding_table` property is usable. 
This will allow you to\n restore a checkpoint to the object and have access to the table variables:\n\n ```python\n model = model_fn(...)\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.restore(...)\n\n tables = embedding.embedding_tables\n ```\n\n You can now use table in functions like `tf.nn.embedding_lookup` to perform\n your embedding lookup and pass to your model.\n\n \"\"\"\n\n def __init__(\n self,\n feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic\n optimizer: Optional[tpu_embedding_v2_utils._Optimizer], # pylint:disable=protected-access\n pipeline_execution_with_tensor_core: bool = False):\n \"\"\"Creates the TPUEmbedding mid level API object.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=tf.tpu.experimental.embedding.FeatureConfig(\n table=tf.tpu.experimental.embedding.TableConfig(\n dim=...,\n vocabulary_size=...)))\n ```\n\n Args:\n feature_config: A nested structure of\n `tf.tpu.experimental.embedding.FeatureConfig` configs.\n optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,\n `tf.tpu.experimental.embedding.Adagrad` or\n `tf.tpu.experimental.embedding.Adam`. When not created under\n TPUStrategy may be set to None to avoid the creation of the optimizer\n slot variables, useful for optimizing memory consumption when exporting\n the model for serving where slot variables aren't needed.\n pipeline_execution_with_tensor_core: If True, the TPU embedding\n computations will overlap with the TensorCore computations (and hence\n will be one step old). Set to True for improved performance.\n\n Raises:\n ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD,\n Adam or Adagrad) or None when created under a TPUStrategy.\n \"\"\"\n self._strategy = distribution_strategy_context.get_strategy()\n self._using_tpu = isinstance(self._strategy, (tpu_strategy.TPUStrategy,\n tpu_strategy.TPUStrategyV2))\n self._pipeline_execution_with_tensor_core = (\n pipeline_execution_with_tensor_core)\n\n self._feature_config = feature_config\n self._output_shapes = []\n for feature in nest.flatten(feature_config):\n self._output_shapes.append(feature.output_shape)\n\n # The TPU embedding ops are slightly inconsistent with how they refer to\n # tables:\n # * The enqueue op takes a parallel list of tensors for input, one of those\n # is the table id for the feature which matches the integer index of the\n # table in the proto created by _create_config_proto().\n # * The recv_tpu_embedding_activations op emits lookups per table in the\n # order from the config proto.\n # * The send_tpu_embedding_gradients expects input tensors to be per table\n # in the same order as the config proto.\n # * Per optimizer load and retrieve ops are specified per table and take the\n # table name rather than the table id.\n # Thus we must fix a common order to tables and ensure they have unique\n # names.\n\n # Set table order here to the order of the first occurence of the table in a\n # feature provided by the user. 
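    # For example (illustrative only): flattening a feature config of
    # {'a': FeatureConfig(table=t1), 'b': FeatureConfig(table=t2),
    #  'c': FeatureConfig(table=t1)} yields the table order [t1, t2]; the
    # second occurrence of t1 is skipped by the dedup loop below.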
The order of this struct must be fixed\n # to provide the user with deterministic behavior over multiple\n # instantiations.\n self._table_config = []\n for feature in nest.flatten(feature_config):\n if feature.table not in self._table_config:\n self._table_config.append(feature.table)\n\n # Ensure tables have unique names. Also error check the optimizer as we\n # specifically don't do that in the TableConfig class to allow high level\n # APIs that are built on this to use strings/other classes to represent\n # optimizers (before they are passed to this class).\n table_names = []\n for i, table in enumerate(self._table_config):\n if table.optimizer is None:\n # TODO(bfontain) Should we allow some sort of optimizer merging here?\n table.optimizer = optimizer\n if ((table.optimizer is not None or self._using_tpu) and\n not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)): # pylint: disable=protected-access\n raise ValueError(\"{} is an unsupported optimizer class. Please pass an \"\n \"instance of one of the optimizer classes under \"\n \"tf.tpu.experimental.embedding.\".format(\n type(table.optimizer)))\n if table.name is None:\n table.name = \"table_{}\".format(i)\n if table.name in table_names:\n raise ValueError(\"Tables must have a unique name. \"\n f\"Multiple tables with name {table.name} found.\")\n table_names.append(table.name)\n\n if self._using_tpu:\n # Extract a list of callable learning rates also in fixed order. Each\n # table in the confix proto will get a index into this list and we will\n # pass this list in the same order after evaluation to the\n # send_tpu_embedding_gradients op.\n self._dynamic_learning_rates = list({\n table.optimizer.learning_rate for table in self._table_config if\n callable(table.optimizer.learning_rate)})\n\n # We need to list of host devices for the load/retrieve operations.\n self._hosts = get_list_of_hosts(self._strategy)\n\n self._built = False\n self._verify_output_shapes_on_enqueue = True\n\n def build(self, per_replica_input_shapes=None, per_replica_batch_size=None): # pylint:disable=g-bare-generic\n \"\"\"Create the underlying variables and initializes the TPU for embeddings.\n\n This method creates the underlying variables (including slot variables). If\n created under a TPUStrategy, this will also initialize the TPU for\n embeddings.\n\n This function will automatically get called by enqueue, which will try to\n determine your output shapes. If this fails, you must manually\n call this method before you call enqueue.\n\n Args:\n per_replica_input_shapes: A nested structure of The per replica input\n shapes that matches the structure of the feature config. The input\n shapes should be the same as the input shape of the feature (except for\n ragged tensor) Note that it is fixed and the same per replica input\n shapes must be used for both training and evaluation. If you want to\n calculate this from the global input shapes, you can use\n `num_replicas_in_sync` property of your strategy object. May be set to\n None if not created under a TPUStrategy.\n per_replica_batch_size: (Deprecated) The per replica batch size that you\n intend to use. Note that is fixed and the same batch size must be used\n for both training and evaluation. If you want to calculate this from the\n global batch size, you can use `num_replicas_in_sync` property of your\n strategy object. 
May be set to None if not created under a TPUStrategy.\n\n Raises:\n ValueError: If per_replica_input_shapes is inconsistent with the output\n shapes stored in the feature config or the output shapes get from the\n input shapes are not fully defined.\n RuntimeError: If tpu embedding is already initialized on TPU.\n \"\"\"\n if self._built:\n return\n\n if self._using_tpu:\n # If the tpu embedding is already initialized on TPU, raise runtime error.\n # Below logic is not added in `initialize_system_for_tpu_embedding`\n # because doing exception control flow in graph mode is difficult.\n if tpu_ops.is_tpu_embedding_initialized():\n raise RuntimeError(\n \"TPU is already initialized for embeddings. This may be caused by \"\n \"using multiple TPUEmbedding instances in a TPU scope which is \"\n \"unsupported\")\n self._get_and_update_output_shapes_from_input(per_replica_input_shapes,\n per_replica_batch_size)\n\n self._config_proto = self._create_config_proto()\n\n logging.info(\"Initializing TPU Embedding engine.\")\n tpu_embedding_v2_utils.log_tpu_embedding_configuration(self._config_proto)\n\n @def_function.function\n def load_config():\n tpu.initialize_system_for_tpu_embedding(self._config_proto)\n\n load_config()\n logging.info(\"Done initializing TPU Embedding engine.\")\n\n # Create and load variables and slot variables into the TPU.\n # Note that this is a dict of dicts. Keys to the first dict are table names.\n # We would prefer to use TableConfigs, but then these variables won't be\n # properly tracked by the tracking API.\n self._variables = self._create_variables_and_slots()\n\n self._built = True\n\n # This is internally conditioned self._built and self._using_tpu\n self._load_variables()\n\n def _maybe_build(self,\n output_shapes: Optional[Union[List[int], Iterable]] = None): # pylint:disable=g-bare-generic\n if not self._built:\n # This can be called while tracing a function, so we wrap the\n # initialization code with init_scope so it runs eagerly, this means that\n # it will not be included the function graph generated by tracing so that\n # we can be sure that we only initialize the TPU for embeddings exactly\n # once.\n with ops.init_scope():\n self.build(output_shapes)\n\n def _get_and_update_output_shapes_from_input(\n self,\n per_replica_input_shapes: Optional[List[TensorShape]] = None,\n per_replica_batch_size: Optional[int] = None):\n \"\"\"Get and update the per replica output shapes from the input.\"\"\"\n per_replica_output_shapes = None\n if per_replica_batch_size and per_replica_input_shapes is None:\n logging.warning(\n \"per_replica_batch_size argument will be deprecated, please specify \"\n \"all the input shapes using per_replica_input_shapes argument.\")\n per_replica_output_shapes = self._get_output_shapes_from_batch_size(\n per_replica_batch_size)\n\n # Update the input shapes if provided.\n if per_replica_input_shapes is not None:\n if isinstance(per_replica_input_shapes, int):\n logging.warning(\n \"Passing batch size to per_replica_input_shapes argument will be\"\n \" deprecated, please specify all the input shapes using\"\n \" per_replica_input_shapes argument.\")\n per_replica_output_shapes = self._get_output_shapes_from_batch_size(\n per_replica_input_shapes)\n else:\n nest.assert_same_structure(\n nest.flatten(per_replica_input_shapes),\n nest.flatten(self._feature_config))\n\n # Convert the nested structure to list.\n per_replica_input_shapes = nest.flatten(per_replica_input_shapes)\n\n per_replica_output_shapes = 
self._get_output_shapes_from_input_shapes(\n per_replica_input_shapes)\n\n if per_replica_output_shapes is not None:\n\n # Check the output shapes with existing output shapes setting.\n self._check_output_shapes(per_replica_output_shapes)\n\n # Update the output shapes with existing output shapes setting.\n # This is necessary Because the output shapes might be missing from\n # the feature config, the usr can set it:\n # 1. calling the build method\n # 2. output shapes auto detected when calling the dequeue method for\n # for the first time. The dequeue method will call build method\n # with the output shapes.\n # Either these two situations will lead to an update to the existing\n # output shapes.\n self._update_output_shapes(per_replica_output_shapes)\n\n # Check if the output shapes are fully defined. This is required in order\n # to set them in the feature descriptor field of the tpu embedding config\n # proto.\n self._check_output_shapes_fully_defined()\n\n def _get_output_shapes_from_input_shapes(\n self, input_shapes: List[TensorShape]) -> List[TensorShape]:\n \"\"\"Get output shapes from the flattened input shapes list.\"\"\"\n output_shapes = []\n for input_shape, feature in zip(input_shapes,\n nest.flatten(self._feature_config)):\n if input_shape.rank is None or input_shape.rank < 1:\n raise ValueError(\n \"Received input tensor of shape {}. Rank must be 1 and above\"\n .format(input_shape))\n # Update the input shape with the max sequence length. Only update when\n # 1. Input feature is 2D ragged or sparse tensor.\n # 2. Output shape is not set in the feature config and the max sequence\n # length is set.\n if (len(input_shape) == 2 and input_shape[-1] != 1 and\n not feature.output_shape and feature.max_sequence_length > 0):\n input_shape_list = input_shape.as_list()\n input_shape_list.insert(\n len(input_shape_list) - 1, feature.max_sequence_length)\n input_shape = TensorShape(input_shape_list)\n if input_shape.rank == 1:\n output_shapes.append(input_shape)\n else:\n output_shapes.append(input_shape[:-1])\n return output_shapes\n\n @property\n def embedding_tables(\n self\n ) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n \"\"\"Returns a dict of embedding tables, keyed by `TableConfig`.\n\n This property only works when the `TPUEmbedding` object is created under a\n non-TPU strategy. This is intended to be used to for CPU based lookup when\n creating a serving checkpoint.\n\n Returns:\n A dict of embedding tables, keyed by `TableConfig`.\n\n Raises:\n RuntimeError: If object was created under a `TPUStrategy`.\n \"\"\"\n # We don't support returning tables on TPU due to their sharded nature and\n # the fact that when using a TPUStrategy:\n # 1. Variables are stale and are only updated when a checkpoint is made.\n # 2. Updating the variables won't affect the actual tables on the TPU.\n if self._using_tpu:\n if save_context.in_save_context():\n return {table: self._variables[table.name][\"parameters\"].variables[0]\n for table in self._table_config}\n raise RuntimeError(\"Unable to retrieve embedding tables when using a TPU \"\n \"strategy. If you need access, save your model, \"\n \"create this object under a CPU strategy and restore.\")\n\n self._maybe_build(None)\n\n # Only return the tables and not the slot variables. 
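    # (Illustrative note: the returned dict, keyed by TableConfig, is the
    # format expected by the `tables` argument of
    # tf.tpu.experimental.embedding.serving_embedding_lookup for CPU lookups.)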
On CPU this are honest\n # tf.Variables.\n return {table: self._variables[table.name][\"parameters\"]\n for table in self._table_config}\n\n def _create_config_proto(\n self\n ) -> tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration:\n \"\"\"Creates the TPUEmbeddingConfiguration proto.\n\n This proto is used to initialize the TPU embedding engine.\n\n Returns:\n A TPUEmbeddingConfiguration proto.\n \"\"\"\n\n config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()\n\n # Map each callable dynamic learning rate to its in index in the list.\n # The learning rate index is the index of the dynamic learning rate for this\n # table (if it exists) in the list we created at initialization. We don't\n # simply create one learning rate index per table as this has extremely bad\n # performance characteristics. The more separate optimization configurations\n # we have, the worse the performance will be.\n learning_rate_index = {r: i for i, r in enumerate(\n self._dynamic_learning_rates)}\n\n for table in self._table_config:\n table_descriptor = config_proto.table_descriptor.add()\n table_descriptor.name = table.name\n\n # For small tables, we pad to the number of hosts so that at least one\n # id will be assigned to each host.\n table_descriptor.vocabulary_size = max(table.vocabulary_size,\n self._strategy.extended.num_hosts)\n table_descriptor.dimension = table.dim\n\n parameters = table_descriptor.optimization_parameters\n\n # We handle the learning rate separately here and don't allow the\n # optimization class to handle this, as it doesn't know about dynamic\n # rates.\n if callable(table.optimizer.learning_rate):\n parameters.learning_rate.dynamic.tag = (\n learning_rate_index[table.optimizer.learning_rate])\n else:\n parameters.learning_rate.constant = table.optimizer.learning_rate\n\n # Use optimizer to handle the rest of the parameters.\n table.optimizer._set_optimization_parameters(parameters) # pylint: disable=protected-access\n\n table_to_id = {table: i for i, table in enumerate(self._table_config)}\n\n # Set feature descriptor field in the config proto.\n for feature, output_shape in zip(\n nest.flatten(self._feature_config), self._output_shapes):\n feature_descriptor = config_proto.feature_descriptor.add()\n\n if feature.name:\n feature_descriptor.name = feature.name\n\n feature_descriptor.table_id = table_to_id[feature.table]\n # The input shape of the feature is the actual shape of the input tensor\n # except the last dimension because the last dimension will always be\n # reduced.\n feature_descriptor.input_shape.extend(output_shape.as_list())\n\n # Always set mode to training, we override the mode during enqueue.\n config_proto.mode = (\n tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING)\n\n config_proto.num_hosts = self._strategy.extended.num_hosts\n config_proto.num_tensor_cores = self._strategy.num_replicas_in_sync\n\n # TODO(bfontain): Allow users to pick MOD for the host sharding.\n config_proto.sharding_strategy = (\n tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT)\n config_proto.pipeline_execution_with_tensor_core = (\n self._pipeline_execution_with_tensor_core)\n\n return config_proto\n\n def apply_gradients(self, gradients, name: Optional[Text] = None):\n \"\"\"Applies the gradient update to the embedding tables.\n\n If a gradient of `None` is passed in any position of the nested structure,\n then an gradient update with a zero gradient is applied for that feature.\n For optimizers like SGD or Adagrad, this is 
the same as applying no update\n at all. For lazy Adam and other sparsely applied optimizers with decay,\n ensure you understand the effect of applying a zero gradient.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features, ))\n\n training_step()\n ```\n\n Args:\n gradients: A nested structure of gradients, with structure matching the\n `feature_config` passed to this object.\n name: A name for the underlying op.\n\n Raises:\n RuntimeError: If called when object wasn't created under a `TPUStrategy`\n or if not built (either by manually calling build or calling enqueue).\n ValueError: If a non-`tf.Tensor` non-`None` gradient is passed in, or a\n `tf.Tensor` of the incorrect shape is passed in. Also if\n the size of any sequence in `gradients` does not match corresponding\n sequence in `feature_config`.\n TypeError: If the type of any sequence in `gradients` does not match\n corresponding sequence in `feature_config`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"apply_gradients is not valid when TPUEmbedding \"\n \"object is not created under a TPUStrategy.\")\n\n if not self._built:\n raise RuntimeError(\"apply_gradients called on unbuilt TPUEmbedding \"\n \"object. Please either call enqueue first or manually \"\n \"call the build method.\")\n\n nest.assert_same_structure(self._feature_config, gradients)\n updated_gradients = []\n for (path, gradient), feature, output_shape in zip(\n nest.flatten_with_joined_string_paths(gradients),\n nest.flatten(self._feature_config), self._output_shapes):\n full_output_shape = list(output_shape) + [feature.table.dim]\n if gradient is not None and not isinstance(gradient, ops.Tensor):\n raise ValueError(\n f\"found non-tensor type: {type(gradient)} at path {path}.\")\n if gradient is not None:\n if gradient.shape != full_output_shape:\n raise ValueError(\"Found gradient of shape {} at path {}. Expected \"\n \"shape {}.\".format(gradient.shape, path,\n full_output_shape))\n else:\n # No gradient for this feature, since we must give a gradient for all\n # features, pass in a zero tensor here. Note that this is not correct\n # for all optimizers.\n logging.warning(\n \"No gradient passed for feature %s, sending zero \"\n \"gradient. 
This may not be correct behavior for certain \"\n \"optimizers like Adam.\", path)\n gradient = array_ops.zeros(full_output_shape, dtype=dtypes.float32)\n # Some gradients can be passed with op which shape is not correctly set.\n # This ensures that the shape of the gradient is correctly set.\n updated_gradients.append(\n array_ops.reshape(gradient, shape=gradient.shape))\n op = tpu_ops.send_tpu_embedding_gradients(\n inputs=updated_gradients,\n learning_rates=[\n math_ops.cast(fn(), dtype=dtypes.float32)\n for fn in self._dynamic_learning_rates\n ],\n config=self._config_proto.SerializeToString())\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(op, name)\n\n def dequeue(self, name: Optional[Text] = None):\n \"\"\"Get the embedding results.\n\n Returns a nested structure of `tf.Tensor` objects, matching the structure of\n the `feature_config` argument to the `TPUEmbedding` class. The output shape\n of the tensors is `(*output_shape, dim)`, `dim` is the dimension of the\n corresponding `TableConfig`. For output_shape, there are three places where\n it can be set.\n 1. FeatureConfig provided in the __init__ function.\n 2. Per_replica_output_shapes by directly calling the build method\n after initializing the tpu embedding class.\n 3. Auto detected from the shapes of the input feature.\n The priority of these places is the exact same order.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features, ))\n\n training_step()\n ```\n\n Args:\n name: A name for the underlying op.\n\n Returns:\n A nested structure of tensors, with the same structure as `feature_config`\n passed to this instance of the `TPUEmbedding` object.\n\n Raises:\n RuntimeError: If called when object wasn't created under a `TPUStrategy`\n or if not built (either by manually calling build or calling enqueue).\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"dequeue is not valid when TPUEmbedding object is not \"\n \"created under a TPUStrategy.\")\n\n if not self._built:\n raise RuntimeError(\"dequeue called on unbuilt TPUEmbedding object. 
\"\n \"Please either call enqueue first or manually call \"\n \"the build method.\")\n\n # The activations returned by this op are per feature.\n activations = tpu_ops.recv_tpu_embedding_activations(\n num_outputs=len(self._config_proto.feature_descriptor),\n config=self._config_proto.SerializeToString())\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(activations[0].op, name)\n\n # Pack the list back into the same nested structure as the features.\n return nest.pack_sequence_as(self._feature_config, activations)\n\n def _create_variables_and_slots(\n self\n ) -> Dict[Text, Dict[Text, tf_variables.Variable]]:\n \"\"\"Create variables for TPU embeddings.\n\n Note under TPUStrategy this will ensure that all creations happen within a\n variable creation scope of the sharded variable creator.\n\n Returns:\n A dict of dicts. The outer dict is keyed by the table names and the inner\n dicts are keyed by 'parameters' and the slot variable names.\n \"\"\"\n\n def create_variables(table):\n \"\"\"Create all variables.\"\"\"\n variable_shape = (table.vocabulary_size, table.dim)\n\n def getter(name, shape, dtype, initializer, trainable):\n del shape\n # _add_variable_with_custom_getter clears the shape sometimes, so we\n # take the global shape from outside the getter.\n initial_value = functools.partial(initializer, variable_shape,\n dtype=dtype)\n return tf_variables.Variable(\n name=name,\n initial_value=initial_value,\n shape=variable_shape,\n dtype=dtype,\n trainable=trainable)\n\n def variable_creator(name, initializer, trainable=True):\n # use add_variable_with_custom_getter here so that we take advantage of\n # the checkpoint loading to allow restore before the variables get\n # created which avoids double initialization.\n return self._add_variable_with_custom_getter(\n name=name,\n initializer=initializer,\n shape=variable_shape,\n dtype=dtypes.float32,\n getter=getter,\n trainable=trainable)\n\n parameters = variable_creator(table.name, table.initializer,\n trainable=not self._using_tpu)\n\n def slot_creator(name, initializer):\n return variable_creator(table.name + \"/\" + name,\n initializer,\n False)\n\n if table.optimizer is not None:\n slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access\n else:\n slot_vars = {}\n slot_vars[\"parameters\"] = parameters\n return slot_vars\n\n # Store tables based on name rather than TableConfig as we can't track\n # through dicts with non-string keys, i.e. 
we won't be able to save.\n variables = {}\n for table in self._table_config:\n if not self._using_tpu:\n variables[table.name] = create_variables(table)\n else:\n with variable_scope.variable_creator_scope(\n make_sharded_variable_creator(self._hosts)):\n variables[table.name] = create_variables(table)\n\n return variables\n\n def _load_variables(self):\n # Only load the variables if we are:\n # 1) Using TPU\n # 2) Variables are created\n # 3) Not in save context (except if running eagerly)\n if self._using_tpu and self._built and not (\n not context.executing_eagerly() and save_context.in_save_context()):\n _load_variables_impl(self._config_proto.SerializeToString(),\n self._hosts,\n self._variables,\n self._table_config)\n\n def _retrieve_variables(self):\n # Only retrieve the variables if we are:\n # 1) Using TPU\n # 2) Variables are created\n # 3) Not in save context (except if running eagerly)\n if self._using_tpu and self._built and not (\n not context.executing_eagerly() and save_context.in_save_context()):\n _retrieve_variables_impl(self._config_proto.SerializeToString(),\n self._hosts,\n self._variables,\n self._table_config)\n\n def _gather_saveables_for_checkpoint(\n self\n ) -> Dict[Text, Callable[[Text], \"TPUEmbeddingSaveable\"]]:\n \"\"\"Overrides default Trackable implementation to add load/retrieve hook.\"\"\"\n # This saveable should be here in both TPU and CPU checkpoints, so when on\n # CPU, we add the hook with no functions.\n # TODO(bfontain): Update restore logic in saver so that these hooks are\n # always executed. Once that is done, we can output an empty list when on\n # CPU.\n\n def factory(name=_HOOK_KEY):\n return TPUEmbeddingSaveable(name, self._load_variables,\n self._retrieve_variables)\n return {_HOOK_KEY: factory}\n\n # Some helper functions for the below enqueue function.\n def _add_data_for_tensor(self, tensor, weight, indices, values, weights,\n int_zeros, float_zeros, path):\n if weight is not None:\n raise ValueError(\n \"Weight specified for dense input {}, which is not allowed. 
\"\n \"Weight will always be 1 in this case.\".format(path))\n # For tensors, there are no indices and no weights.\n indices.append(int_zeros)\n values.append(math_ops.cast(array_ops.reshape(tensor, [-1]), dtypes.int64))\n weights.append(float_zeros)\n\n def _add_data_for_sparse_tensor(self, tensor, weight, indices, values,\n weights, int_zeros, float_zeros, path,\n feature):\n sample_indices = math_ops.cast(tensor.indices, dtypes.int32)\n if tensor.shape.rank == 2:\n if not feature.output_shape and feature.max_sequence_length > 0:\n # Add one dimension to the last axis.\n sample_indices = array_ops.pad(\n sample_indices, paddings=[[0, 0], [0, 1]])\n indices.append(sample_indices)\n values.append(math_ops.cast(tensor.values, dtypes.int64))\n # If we have weights they must be a SparseTensor.\n if weight is not None:\n if not isinstance(weight, sparse_tensor.SparseTensor):\n raise ValueError(\"Weight for {} is type {} which does not match \"\n \"type input which is SparseTensor.\".format(\n path, type(weight)))\n weights.append(math_ops.cast(weight.values, dtypes.float32))\n else:\n weights.append(float_zeros)\n\n def _add_data_for_ragged_tensor(self, tensor, weight, row_splits, values,\n weights, int_zeros, float_zeros, path,\n feature):\n row_splits.append(math_ops.cast(tensor.row_splits, dtypes.int32))\n values.append(math_ops.cast(tensor.values, dtypes.int64))\n # If we have weights they must be a RaggedTensor.\n if weight is not None:\n if not isinstance(weight, ragged_tensor.RaggedTensor):\n raise ValueError(\"Weight for {} is type {} which does not match \"\n \"type input which is RaggedTensor.\".format(\n path, type(weight)))\n weights.append(math_ops.cast(weight.values, dtypes.float32))\n else:\n weights.append(float_zeros)\n\n def _generate_enqueue_op(\n self,\n flat_inputs: List[internal_types.NativeObject],\n flat_weights: List[Optional[internal_types.NativeObject]],\n flat_features: List[tpu_embedding_v2_utils.FeatureConfig],\n device_ordinal: int,\n mode_override: Text\n ) -> ops.Operation:\n \"\"\"Outputs a the enqueue op given the inputs and weights.\n\n Args:\n flat_inputs: A list of input tensors.\n flat_weights: A list of input weights (or None) of the same length as\n flat_inputs.\n flat_features: A list of FeatureConfigs of the same length as flat_inputs.\n device_ordinal: The device to create the enqueue op for.\n mode_override: A tensor containing the string \"train\" or \"inference\".\n\n Returns:\n The enqueue op.\n \"\"\"\n # Combiners are per table, list in the same order as the table order.\n combiners = [table.combiner for table in self._table_config]\n\n # These parallel arrays will be the inputs to the enqueue op.\n # sample_indices for sparse, row_splits for ragged.\n indices_or_row_splits = []\n values = []\n weights = []\n\n # We have to supply a empty/zero tensor in a list position where we don't\n # have data (e.g. indices for standard Tensor input, weight when no weight\n # is specified). We create one op here per call, so that we reduce the\n # graph size.\n int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)\n float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)\n\n # In the following loop we insert casts so that everything is either int32\n # or float32. This is because op inputs which are lists of tensors must be\n # of the same type within the list. 
Moreover the CPU implementations of\n # these ops cast to these types anyway, so we don't lose any data by casting\n # early.\n for inp, weight, (path, feature) in zip(\n flat_inputs, flat_weights, flat_features):\n if isinstance(inp, ops.Tensor):\n self._add_data_for_tensor(inp, weight, indices_or_row_splits, values,\n weights, int_zeros, float_zeros, path)\n elif isinstance(inp, sparse_tensor.SparseTensor):\n self._add_data_for_sparse_tensor(inp, weight, indices_or_row_splits,\n values, weights, int_zeros,\n float_zeros, path, feature)\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n self._add_data_for_ragged_tensor(inp, weight, indices_or_row_splits,\n values, weights, int_zeros,\n float_zeros, path, feature)\n else:\n raise ValueError(\"Input {} is of unknown type {}. Please only pass \"\n \"Tensor, SparseTensor or RaggedTensor as input to \"\n \"enqueue.\".format(path, type(inp)))\n\n return tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch(\n sample_indices_or_row_splits=indices_or_row_splits,\n embedding_indices=values,\n aggregation_weights=weights,\n mode_override=mode_override,\n device_ordinal=device_ordinal,\n combiners=combiners)\n\n def _raise_error_for_incorrect_control_flow_context(self):\n \"\"\"Raises an error if we are not in the TPUReplicateContext.\"\"\"\n # Do not allow any XLA control flow (i.e. control flow in between a\n # TPUStrategy's run call and the call to this function), as we can't\n # extract the enqueue from the head when in XLA control flow.\n graph = ops.get_default_graph()\n in_tpu_ctx = False\n while graph is not None:\n ctx = graph._get_control_flow_context() # pylint: disable=protected-access\n while ctx is not None:\n if isinstance(ctx, tpu.TPUReplicateContext):\n in_tpu_ctx = True\n break\n ctx = ctx.outer_context\n if in_tpu_ctx:\n break\n graph = getattr(graph, \"outer_graph\", None)\n if graph != ops.get_default_graph() and in_tpu_ctx:\n raise RuntimeError(\n \"Current graph {} does not match graph which contains \"\n \"TPUReplicateContext {}. This is most likely due to the fact that \"\n \"enqueueing embedding data is called inside control flow or a \"\n \"nested function inside `strategy.run`. This is not supported \"\n \"because outside compilation fails to extract the enqueue ops as \"\n \"head of computation.\".format(ops.get_default_graph(), graph))\n return in_tpu_ctx\n\n def _raise_error_for_non_direct_inputs(self, features):\n \"\"\"Checks all tensors in features to see if they are a direct input.\"\"\"\n\n # expand_composites here is important: as composite tensors pass through\n # tpu.replicate, they get 'flattened' into their component tensors and then\n # repacked before being passed to the tpu function. In means that it is the\n # component tensors which are produced by an op with the\n # \"_tpu_input_identity\" attribute.\n for path, input_tensor in nest.flatten_with_joined_string_paths(\n features, expand_composites=True):\n if input_tensor.op.type == \"Placeholder\":\n continue\n try:\n is_input = input_tensor.op.get_attr(\"_tpu_input_identity\")\n except ValueError:\n is_input = False\n if not is_input:\n raise ValueError(\n \"Received input tensor {} which is the output of op {} (type {}) \"\n \"which does not have the `_tpu_input_identity` attr. Please \"\n \"ensure that the inputs to this layer are taken directly from \"\n \"the arguments of the function called by \"\n \"strategy.run. 
Two possible causes are: dynamic batch size \"\n \"support or you are using a keras layer and are not passing \"\n \"tensors which match the dtype of the `tf.keras.Input`s.\"\n \"If you are triggering dynamic batch size support, you can \"\n \"disable it by passing tf.distribute.RunOptions(\"\n \"experimental_enable_dynamic_batch_size=False) to the options \"\n \"argument of strategy.run().\".format(path,\n input_tensor.op.name,\n input_tensor.op.type))\n\n def _raise_error_for_inputs_not_on_cpu(self, flat_inputs, flat_paths):\n \"\"\"Checks all tensors in features to see are placed on the CPU.\"\"\"\n\n def check_device(path, device_string):\n spec = tf_device.DeviceSpec.from_string(device_string)\n if spec.device_type == \"TPU\":\n raise ValueError(\n \"Received input tensor {} which is on a TPU input device {}. Input \"\n \"tensors for TPU embeddings must be placed on the CPU. Please \"\n \"ensure that your dataset is prefetching tensors to the host by \"\n \"setting the 'experimental_fetch_to_device' option of the \"\n \"dataset distribution function. See the documentation of the \"\n \"enqueue method for an example.\".format(path, device_string))\n\n # expand_composites here is important, we need to check the device of each\n # underlying tensor.\n for input_tensor, input_path in zip(flat_inputs, flat_paths):\n if nest.is_nested_or_composite(input_tensor):\n input_tensors = nest.flatten(input_tensor, expand_composites=True)\n else:\n input_tensors = [input_tensor]\n for t in input_tensors:\n if (t.op.type == \"Identity\" and\n t.op.inputs[0].op.type == \"TPUReplicatedInput\"):\n for tensor in t.op.inputs[0].op.inputs:\n check_device(input_path, tensor.device)\n else:\n check_device(input_path, t.device)\n\n def enqueue(\n self,\n features,\n weights=None,\n training: bool = True,\n name: Optional[Text] = None,\n device: Optional[Text] = None):\n \"\"\"Enqueues id tensors for embedding lookup.\n\n This function enqueues a structure of features to be looked up in the\n embedding tables. We expect that the input shapes of each of the tensors in\n features matches the output shapes set via FeatureConfig or build method\n (if any). the output shapes will be auto detected based on the input shapes\n with the max_sequence_length or output shape setting in the FeatureConfig.\n Note that the output shapes is based on per replica batch size.\n If your input dataset is batched to the global batch size and you use\n `tf.distribute.TPUStrategy`'s `experimental_distribute_dataset`\n or if you use `distribute_datasets_from_function` and batch\n to the per core batch size computed by the context passed to your input\n function, the output shapes should match automatically.\n\n The auto detected the output shapes:\n 1. For dense tensor, if rank 2 or above, make sure the tensor has last\n dimension as 1. The output shape will be the input shape excluding\n the last dimension.\n 2. For sparse tensor, make sure the tensor has rank 2 and above.\n a. If feature config has max_sequence_length equals 0 or output shape\n set (the max_sequence_length setting will be ignored), the\n output shape will be the input shape excluding the last dimension.\n b. Otherwize if the tensor is rank 2, the output shape will be input\n shape with last dimension set as max_sequence_length. If the\n tensor is above rank 2, the output shape will be the input shape\n excluding the last dimension and the last dimension of the output\n shape will be set to max_sequence_length.\n 3. 
For ragged tensor, make sure the tensor has rank 2.\n a. If feature config has max_sequence_length equals 0 or output shape\n set (the max_sequence_length setting will be ignored), the\n output shape will be the input shape excluding the last dimension.\n b. Otherwise, the output shape will be the input shape excluding the\n last dimension and the last dimension of the output shape will be\n set to max_sequence_length.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features,))\n\n training_step()\n ```\n\n NOTE: You should specify `training=True` when using\n `embedding.apply_gradients` as above and `training=False` when not using\n `embedding.apply_gradients` (e.g. for frozen embeddings or when doing\n evaluation).\n\n For finer grained control, in the above example the line\n\n ```\n embedding.enqueue(embedding_features, training=True)\n ```\n\n may be replaced with\n\n ```\n per_core_embedding_features = self.strategy.experimental_local_results(\n embedding_features)\n\n def per_core_enqueue(ctx):\n core_id = ctx.replica_id_in_sync_group\n device = strategy.extended.worker_devices[core_id]\n embedding.enqueue(per_core_embedding_features[core_id],\n device=device)\n\n strategy.experimental_distribute_values_from_function(\n per_core_queue_inputs)\n ```\n\n Args:\n features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\n will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\n or `tf.RaggedTensor` is supported per call.\n weights: If not `None`, a nested structure of `tf.Tensor`s,\n `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\n that the tensors should be of float type (and they will be downcast to\n `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\n same for the parallel entries from `features` and similarly for\n `tf.RaggedTensor`s we assume the row_splits are the same.\n training: Defaults to `True`. If `False`, enqueue the batch as inference\n batch (forward pass only). Do not call `apply_gradients` when this is\n `False` as this may lead to a deadlock.\n name: A name for the underlying op.\n device: The device name (e.g. '/task:0/device:TPU:2') where this batch\n should be enqueued. This should be set if and only if features is not a\n `tf.distribute.DistributedValues` and enqueue is not being called\n inside a TPU context (e.g. inside `TPUStrategy.run`).\n\n Raises:\n ValueError: When called inside a strategy.run call and input is not\n directly taken from the args of the `strategy.run` call. Also if\n the size of any sequence in `features` does not match corresponding\n sequence in `feature_config`. 
Similarly for `weights`, if not `None`.\n If input shapes of features is unequal or different from a previous\n call.\n RuntimeError: When called inside a strategy.run call and inside XLA\n control flow. If batch_size is not able to be determined and build was\n not called.\n TypeError: If the type of any sequence in `features` does not match\n corresponding sequence in `feature_config`. Similarly for `weights`, if\n not `None`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"enqueue is not valid when TPUEmbedding object is not \"\n \"created under a TPUStrategy.\")\n\n in_tpu_context = self._raise_error_for_incorrect_control_flow_context()\n\n nest.assert_same_structure(self._feature_config, features)\n\n if not self._verify_output_shapes_on_enqueue:\n if not self._output_shapes or not self._built:\n raise ValueError(\n \"Configured not to check output shapes on each enqueue() call; please \"\n \"ensure build() was called with output shapes to initialize \"\n \"the TPU for embeddings.\")\n else:\n input_shapes = self._get_input_shapes(features, in_tpu_context)\n\n self._maybe_build(input_shapes)\n # If is already built, we still need to check if the output shapes matches\n # with the previous ones.\n self._check_output_shapes(\n self._get_output_shapes_from_input_shapes(input_shapes))\n\n flat_inputs = nest.flatten(features)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(self._feature_config, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(self._feature_config)\n flat_paths, _ = zip(*flat_features)\n\n self._raise_error_for_inputs_not_on_cpu(flat_inputs, flat_paths)\n # If we are in a tpu_context, automatically apply outside compilation.\n if in_tpu_context:\n self._raise_error_for_non_direct_inputs(features)\n\n def generate_enqueue_ops():\n \"\"\"Generate enqueue ops for outside compilation.\"\"\"\n # Note that we put array_ops.where_v2 rather than a python if so that\n # the op is explicitly create and the constant ops are both in the graph\n # even though we don't expect training to be a tensor (and thus generate\n # control flow automatically). 
This need to make it easier to re-write\n # the graph later if we need to fix which mode needs to be used.\n mode_override = array_ops.where_v2(training,\n constant_op.constant(\"train\"),\n constant_op.constant(\"inference\"))\n # Device ordinal is -1 here, a later rewrite will fix this once the op\n # is expanded by outside compilation.\n enqueue_op = self._generate_enqueue_op(\n flat_inputs, flat_weights, flat_features, device_ordinal=-1,\n mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n\n # Ensure that this op has outbound control flow, otherwise it won't be\n # executed.\n ops.get_default_graph().control_outputs.append(enqueue_op)\n\n tpu.outside_compilation(generate_enqueue_ops)\n\n elif device is None:\n mode_override = \"train\" if training else \"inference\"\n # We generate enqueue ops per device, so we need to gather the all\n # features for a single device in to a dict.\n # We rely here on the fact that the devices in the PerReplica value occur\n # in the same (standard) order as self._strategy.extended.worker_devices.\n enqueue_ops = []\n for replica_id in range(self._strategy.num_replicas_in_sync):\n replica_inputs = distribute_utils.select_replica(replica_id,\n flat_inputs)\n replica_weights = distribute_utils.select_replica(replica_id,\n flat_weights)\n tpu_device = self._strategy.extended.worker_devices[replica_id]\n # TPU devices string are like /job:worker/replica:0/task:0/device:TPU:0\n # the device ordinal is the last number\n device_ordinal = (\n tf_device.DeviceSpec.from_string(tpu_device).device_index)\n\n with ops.device(device_util.get_host_for_device(tpu_device)):\n enqueue_op = self._generate_enqueue_op(\n replica_inputs, replica_weights, flat_features,\n device_ordinal=device_ordinal, mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n enqueue_ops.append(enqueue_op)\n ops.get_default_graph().control_outputs.extend(enqueue_ops)\n else:\n mode_override = \"train\" if training else \"inference\"\n device_spec = tf_device.DeviceSpec.from_string(device)\n if device_spec.device_type != \"TPU\":\n raise ValueError(\n \"Non-TPU device {} passed to enqueue.\".format(device))\n\n with ops.device(device_util.get_host_for_device(device)):\n enqueue_op = self._generate_enqueue_op(\n flat_inputs, flat_weights, flat_features,\n device_ordinal=device_spec.device_index,\n mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n ops.get_default_graph().control_outputs.append(enqueue_op)\n\n def _get_input_shapes(self, tensors,\n in_tpu_context: bool) -> List[TensorShape]:\n \"\"\"Get the input shapes from the input tensor.\"\"\"\n input_shapes = []\n for (path, maybe_tensor), feature in zip(\n nest.flatten_with_joined_string_paths(tensors),\n nest.flatten(self._feature_config)):\n if not in_tpu_context:\n tensor = distribute_utils.select_replica(0, maybe_tensor)\n else:\n tensor = maybe_tensor\n\n if isinstance(tensor, ops.Tensor):\n input_shapes.append(\n self._get_input_shape_for_tensor(tensor, feature, path))\n elif isinstance(tensor, sparse_tensor.SparseTensor):\n input_shapes.append(\n self._get_input_shape_for_sparse_tensor(tensor, feature, path))\n elif isinstance(tensor, ragged_tensor.RaggedTensor):\n input_shapes.append(\n self._get_input_shape_for_ragged_tensor(tensor, feature, path))\n return input_shapes\n\n def _get_input_shape_for_tensor(self, tensor, 
feature, path) -> TensorShape:\n \"\"\"Get the input shape for the dense tensor.\"\"\"\n shape = tensor.shape.as_list()\n if len(shape) < 1:\n raise ValueError(\"Only rank 1 and above dense tensor is supported,\"\n \" find rank {} sparse tensor for input {}\".format(\n len(shape), path))\n if len(shape) > 1 and shape[-1] != 1:\n raise ValueError(\n \"Rank 2 or above dense tensor should have last dimension as 1 \"\n \"as the last dimension will always be reduced. \"\n \"Instead got dense tensor as shape {}\".format(shape))\n return TensorShape(shape)\n\n def _get_input_shape_for_sparse_tensor(self, tensor, feature,\n path) -> TensorShape:\n \"\"\"Get the input shape for the sparse tensor.\"\"\"\n shape = tensor.shape.as_list()\n # Only 2 and above rank sparse tensor is supported.\n if len(shape) < 2:\n raise ValueError(\"Only rank 2 and above sparse tensor is supported,\"\n \" find rank {} sparse tensor for input {}\".format(\n len(shape), path))\n if not feature.output_shape and feature.max_sequence_length > 0:\n # If the max_sequence_length is set and the output shape for FeatureConfig\n # is not set, we modify the shape of the input feature. Only rank 2\n # feature output shape is modified\n if len(shape) == 2:\n # If the sparse tensor is 2D and max_sequence_length is set,\n # we need to add one dimension to the input feature.\n shape.insert(len(shape) - 1, feature.max_sequence_length)\n\n return TensorShape(shape)\n\n def _get_input_shape_for_ragged_tensor(self, tensor, feature,\n path) -> TensorShape:\n \"\"\"Get the input shape for the ragged tensor.\"\"\"\n shape = tensor.shape.as_list()\n # Only rank 2 ragged tensor is supported.\n if len(shape) != 2:\n raise ValueError(\"Only rank 2 ragged tensor is supported,\"\n \" find rank {} ragged tensor for input {}\".format(\n len(shape), path))\n if not feature.output_shape and feature.max_sequence_length > 0:\n # If the max_sequence_length is set and the output shape for FeatureConfig\n # is not set, add the sequence length as second last dimension of\n # the ragged tensor.\n shape.insert(len(shape) - 1, feature.max_sequence_length)\n\n return TensorShape(shape)\n\n def _update_output_shapes(self, incoming_output_shapes: List[TensorShape]):\n \"\"\"Update the existing output shapes based on the new output shapes.\n\n The existing output shapes always have higher piority than the new incoming\n output shapes.\n Args:\n incoming_output_shapes: nested structure of TensorShape to override the\n existing output shapes.\n \"\"\"\n nest.assert_same_structure(self._output_shapes, incoming_output_shapes)\n updated_output_shapes = []\n for old_output_shape, incoming_output_shape in zip(self._output_shapes,\n incoming_output_shapes):\n if old_output_shape:\n updated_output_shapes.append(old_output_shape)\n else:\n updated_output_shapes.append(incoming_output_shape)\n self._output_shapes = updated_output_shapes\n\n def _check_output_shapes(self, incoming_output_shapes: List[TensorShape]):\n \"\"\"Check the incoming output shapes against the output shapes stored.\"\"\"\n # The incoming output shape should have the same structure with the existing\n # output shapes.\n nest.assert_same_structure(self._output_shapes, incoming_output_shapes)\n\n for (path, _), old_output_shape, incoming_output_shape in zip(\n nest.flatten_with_joined_string_paths(self._feature_config),\n self._output_shapes, incoming_output_shapes):\n # First check if both shapes are not None.\n if old_output_shape and incoming_output_shape:\n # We skip the check when the incoming output 
shape is rank 1 or 2 and\n # rank of the old output shape is larger. This can happen for\n # (sequence) ragged tensor, we push the check down to the enqueue op.\n if (len(incoming_output_shape) == 1 or len(incoming_output_shape)\n == 2) and len(old_output_shape) > len(incoming_output_shape):\n continue\n if len(old_output_shape) != len(\n incoming_output_shape) or not self._is_tensor_shape_match(\n old_output_shape, incoming_output_shape):\n raise ValueError(\n f\"Inconsistent shape founded for input feature {path}, \"\n f\"Output shape is set to be {old_output_shape}, \"\n f\"But got incoming output shape {incoming_output_shape}\")\n\n def _check_output_shapes_fully_defined(self):\n \"\"\"Check if the output shape is fully defined.\"\"\"\n for (path, _), output_shape in zip(\n nest.flatten_with_joined_string_paths(self._feature_config),\n self._output_shapes):\n if not output_shape.is_fully_defined():\n raise ValueError(\n f\"Input Feature {path} has output shape set as \"\n f\"{output_shape} which is not fully defined. \"\n \"Please specify the fully defined shape in either FeatureConfig \"\n \"or for the build method.\")\n\n def _is_tensor_shape_match(self, shape_a: TensorShape,\n shape_b: TensorShape) -> bool:\n \"\"\"Check if shape b matches with shape a.\"\"\"\n for s_a, s_b in zip(shape_a.as_list(), shape_b.as_list()):\n if s_a and s_b and s_a != s_b:\n return False\n return True\n\n def _get_output_shapes_from_batch_size(self, per_replica_batch_size):\n \"\"\"Get the output shapes from the batch size.\"\"\"\n output_shapes = []\n for feature in nest.flatten(self._feature_config):\n if not feature.output_shape and feature.max_sequence_length > 0:\n output_shapes.append(\n TensorShape([per_replica_batch_size, feature.max_sequence_length]))\n else:\n output_shapes.append(TensorShape(per_replica_batch_size))\n return output_shapes\n\n\n@def_function.function\ndef _load_variables_impl(\n config: Text,\n hosts: List[Tuple[int, Text]],\n variables: Dict[Text, Dict[Text, tf_variables.Variable]],\n table_config: tpu_embedding_v2_utils.TableConfig):\n \"\"\"Load embedding tables to onto TPU for each table and host.\n\n Args:\n config: A serialized TPUEmbeddingConfiguration proto.\n hosts: A list of CPU devices, on per host.\n variables: A dictionary of dictionaries of TPUShardedVariables. First key is\n the table name, second key is 'parameters' or the optimizer slot name.\n table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.\n \"\"\"\n def select_fn(host_id):\n\n def select_or_zeros(x):\n if host_id >= len(x.variables):\n # In the edge case where we have more hosts than variables, due to using\n # a small number of rows, we load zeros for the later hosts. 
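        # (Illustrative: with 4 hosts but a table sharded into only 3
        # variables, host 3 takes this branch.)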
We copy\n # the shape of the first host's variables, which we assume is defined\n # because TableConfig guarantees at least one row.\n return array_ops.zeros_like(x.variables[0])\n return x.variables[host_id]\n\n return select_or_zeros\n\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n host_variables = nest.map_structure(select_fn(host_id), variables)\n for table in table_config:\n table.optimizer._load()( # pylint: disable=protected-access\n table_name=table.name,\n num_shards=len(hosts),\n shard_id=host_id,\n config=config,\n **host_variables[table.name])\n # Ensure that only the first table/first host gets a config so that we\n # don't bloat graph by attaching this large string to each op.\n # We have num tables * num hosts of these so for models with a large\n # number of tables training on a large slice, this can be an issue.\n config = None\n\n\n@def_function.function\ndef _retrieve_variables_impl(\n config: Text,\n hosts: List[Tuple[int, Text]],\n variables: Dict[Text, Dict[Text, tf_variables.Variable]],\n table_config: tpu_embedding_v2_utils.TableConfig):\n \"\"\"Retrieve embedding tables from TPU to host memory.\n\n Args:\n config: A serialized TPUEmbeddingConfiguration proto.\n hosts: A list of all the host CPU devices.\n variables: A dictionary of dictionaries of TPUShardedVariables. First key is\n the table name, second key is 'parameters' or the optimizer slot name.\n table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.\n \"\"\"\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n for table in table_config:\n retrieved = table.optimizer._retrieve()( # pylint: disable=protected-access\n table_name=table.name,\n num_shards=len(hosts),\n shard_id=host_id,\n config=config)\n # When there are no slot variables (e.g with SGD) this returns a\n # single tensor rather than a tuple. 
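        # (For example, SGD has no slot variables so only the parameters come
        # back, while optimizers such as Adam also return their slot tensors;
        # the exact tuple contents depend on the optimizer's slot names.)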
In this case we put the tensor in\n # a list to make the following code easier to write.\n if not isinstance(retrieved, tuple):\n retrieved = (retrieved,)\n\n for i, slot in enumerate([\"parameters\"] +\n table.optimizer._slot_names()): # pylint: disable=protected-access\n # We must assign the CPU variables the values of tensors that were\n # returned from the TPU.\n sharded_var = variables[table.name][slot]\n if host_id < len(sharded_var.variables):\n # In the edge case where we have more hosts than variables, due to\n # using a small number of rows, we skip the later hosts.\n sharded_var.variables[host_id].assign(retrieved[i])\n # Ensure that only the first table/first host gets a config so that we\n # don't bloat graph by attaching this large string to each op.\n # We have num tables * num hosts of these so for models with a large\n # number of tables training on a large slice, this can be an issue.\n config = None\n\n\nclass TPUEmbeddingSaveable(saveable_hook.SaveableHook):\n \"\"\"Save/Restore hook to Retrieve/Load TPUEmbedding variables.\"\"\"\n\n def __init__(\n self,\n name: Text,\n load: Callable[[], Any],\n retrieve: Callable[[], Any]):\n self._load = load\n self._retrieve = retrieve\n super(TPUEmbeddingSaveable, self).__init__(name=name)\n\n def before_save(self):\n if self._retrieve is not None:\n self._retrieve()\n\n def after_restore(self):\n if self._load is not None:\n self._load()\n\n\ndef _ragged_embedding_lookup_with_reduce(\n table: tf_variables.Variable,\n ragged: ragged_tensor.RaggedTensor,\n weights: ragged_tensor.RaggedTensor,\n combiner: Text) -> core.Tensor:\n \"\"\"Compute a ragged lookup followed by a reduce on axis 1.\n\n Args:\n table: The embedding table.\n ragged: A RaggedTensor of ids to look up.\n weights: A RaggedTensor of weights (or None).\n combiner: One of \"mean\", \"sum\", \"sqrtn\".\n\n Returns:\n A Tensor.\n \"\"\"\n if weights is None:\n weights = array_ops.ones_like(ragged, dtype=table.dtype)\n weights = array_ops.expand_dims(weights, axis=2)\n ragged_result = embedding_ops.embedding_lookup_ragged(table, ragged)\n ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)\n if combiner == \"mean\":\n ragged_result = math_ops.div_no_nan(ragged_result,\n math_ops.reduce_sum(weights, axis=1))\n elif combiner == \"sqrtn\":\n ragged_result = math_ops.div_no_nan(\n ragged_result,\n math_ops.sqrt(math_ops.reduce_sum(weights * weights, axis=1)))\n return ragged_result\n\n\n@tf_export(\"tpu.experimental.embedding.serving_embedding_lookup\")\ndef cpu_embedding_lookup(inputs, weights, tables, feature_config):\n \"\"\"Apply standard lookup ops with `tf.tpu.experimental.embedding` configs.\n\n This function is a utility which allows using the\n `tf.tpu.experimental.embedding` config objects with standard lookup functions.\n This can be used when exporting a model which uses\n `tf.tpu.experimental.embedding.TPUEmbedding` for serving on CPU. 
In particular\n `tf.tpu.experimental.embedding.TPUEmbedding` only supports lookups on TPUs and\n should not be part of your serving graph.\n\n Note that TPU specific options (such as `max_sequence_length`) in the\n configuration objects will be ignored.\n\n In the following example we take a trained model (see the documentation for\n `tf.tpu.experimental.embedding.TPUEmbedding` for the context) and create a\n saved model with a serving function that will perform the embedding lookup and\n pass the results to your model:\n\n ```python\n model = model_fn(...)\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n batch_size=1024,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.restore(...)\n\n @tf.function(input_signature=[{'feature_one': tf.TensorSpec(...),\n 'feature_two': tf.TensorSpec(...),\n 'feature_three': tf.TensorSpec(...)}])\n def serve_tensors(embedding_features):\n embedded_features = tf.tpu.experimental.embedding.serving_embedding_lookup(\n embedding_features, None, embedding.embedding_tables,\n feature_config)\n return model(embedded_features)\n\n model.embedding_api = embedding\n tf.saved_model.save(model,\n export_dir=...,\n signatures={'serving_default': serve_tensors})\n\n ```\n\n NOTE: Its important to assign the embedding api object to a member of your\n model as `tf.saved_model.save` only supports saving variables one `Trackable`\n object. Since the model's weights are in `model` and the embedding table are\n managed by `embedding`, we assign `embedding` to and attribute of `model` so\n that tf.saved_model.save can find the embedding variables.\n\n NOTE: The same `serve_tensors` function and `tf.saved_model.save` call will\n work directly from training.\n\n Args:\n inputs: a nested structure of Tensors, SparseTensors or RaggedTensors.\n weights: a nested structure of Tensors, SparseTensors or RaggedTensors or\n None for no weights. 
If not None, structure must match that of inputs, but\n entries are allowed to be None.\n tables: a dict of mapping TableConfig objects to Variables.\n feature_config: a nested structure of FeatureConfig objects with the same\n structure as inputs.\n\n Returns:\n A nested structure of Tensors with the same structure as inputs.\n \"\"\"\n\n nest.assert_same_structure(inputs, feature_config)\n\n flat_inputs = nest.flatten(inputs)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(inputs, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(feature_config)\n\n outputs = []\n for inp, weight, (path, feature) in zip(\n flat_inputs, flat_weights, flat_features):\n table = tables[feature.table]\n\n if weight is not None:\n if isinstance(inp, ops.Tensor):\n raise ValueError(\n \"Weight specified for {}, but input is dense.\".format(path))\n elif type(weight) is not type(inp):\n raise ValueError(\n \"Weight for {} is of type {} but it does not match type of the \"\n \"input which is {}.\".format(path, type(weight), type(inp)))\n elif feature.max_sequence_length > 0:\n raise ValueError(\"Weight specified for {}, but this is a sequence \"\n \"feature.\".format(path))\n\n if isinstance(inp, ops.Tensor):\n if feature.max_sequence_length > 0:\n raise ValueError(\"Feature {} is a sequence feature but a dense tensor \"\n \"was passed.\".format(path))\n outputs.append(embedding_ops.embedding_lookup_v2(table, inp))\n\n elif isinstance(inp, sparse_tensor.SparseTensor):\n if not feature.output_shape and feature.max_sequence_length > 0:\n batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)\n sparse_shape = array_ops.stack(\n [batch_size, feature.max_sequence_length], axis=0)\n # TPU Embedding truncates sequences to max_sequence_length, and if we\n # don't truncate, scatter_nd will error out if the index was out of\n # bounds.\n truncated_inp = sparse_ops.sparse_slice(inp, start=[0, 0],\n size=sparse_shape)\n\n dense_output_shape = array_ops.stack(\n [batch_size, feature.max_sequence_length, feature.table.dim],\n axis=0)\n outputs.append(\n array_ops.scatter_nd(\n truncated_inp.indices,\n array_ops.gather(table.read_value(), truncated_inp.values),\n dense_output_shape))\n else:\n inp_rank = inp.dense_shape.get_shape()[0]\n if (not feature.validate_weights_and_indices and\n inp_rank is not None and inp_rank <= 2):\n outputs.append(\n embedding_ops.embedding_lookup_sparse_v2(\n table,\n inp,\n sp_weights=weight,\n combiner=feature.table.combiner))\n else:\n outputs.append(\n embedding_ops.safe_embedding_lookup_sparse_v2(\n table,\n inp,\n sparse_weights=weight,\n combiner=feature.table.combiner))\n\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n if inp.shape.rank != 2:\n raise ValueError(\n \"Only rank 2 ragged tensor is supported, but got rank {}\".format(\n inp.shape.rank))\n batch_size = inp.shape[0]\n if feature.output_shape:\n output_batch_size = math_ops.reduce_prod(feature.output_shape)\n # If the output batch size matches the data batch size, treat it as\n # normal ragged input.\n if output_batch_size == batch_size:\n ragged_output = _ragged_embedding_lookup_with_reduce(\n table, inp, weight, feature.table.combiner)\n ragged_output = array_ops.reshape(\n ragged_output, shape=feature.output_shape + [feature.table.dim])\n # If the data batch size is a factor of the output batch size, the\n # divide result will be the sequence length. 
Ignore the weights and\n # combiner.\n elif output_batch_size > batch_size and output_batch_size % batch_size == 0:\n ragged_output = embedding_ops.embedding_lookup_v2(table, inp)\n # Pad or truncate in the sequence dimension\n ragged_output = ragged_output.to_tensor(shape=[\n batch_size, output_batch_size // batch_size, feature.table.dim\n ])\n # Reshape to desire output shape.\n ragged_output = array_ops.reshape(\n ragged_output, feature.output_shape + [feature.table.dim])\n else:\n raise ValueError(\n \"Output shape set in the FeatureConfig should be the factor of \"\n \"the input data batch size. But instead got output shape {}, \"\n \"input data batch size {}\".format(feature.output_shape,\n batch_size))\n else:\n if feature.max_sequence_length > 0:\n output_shape = [\n batch_size, feature.max_sequence_length, feature.table.dim\n ]\n ragged_lookup = embedding_ops.embedding_lookup_v2(table, inp)\n # Unlike scatter_nd, RaggedTensor.to_tensor truncates to the given\n # shape.\n ragged_output = ragged_lookup.to_tensor(shape=output_shape)\n else:\n ragged_output = _ragged_embedding_lookup_with_reduce(\n table, inp, weight, feature.table.combiner)\n outputs.append(ragged_output)\n else:\n raise ValueError(\"Input {} is type {}. Tensor, SparseTensor or \"\n \"RaggedTensor expected.\".format(path, type(inp)))\n return nest.pack_sequence_as(feature_config, outputs)\n\n\ndef get_list_of_hosts(strategy: tpu_strategy.TPUStrategy) -> List[Text]:\n \"\"\"Returns a sorted list of CPU devices for the remote jobs.\n\n Args:\n strategy: A TPUStrategy object.\n\n Returns:\n A sort list of device strings.\n \"\"\"\n list_of_hosts = []\n # Assume this is sorted by task\n for tpu_device in strategy.extended.worker_devices:\n host = device_util.get_host_for_device(tpu_device)\n if host not in list_of_hosts:\n list_of_hosts.append(host)\n assert len(list_of_hosts) == strategy.extended.num_hosts\n return list_of_hosts\n\n\ndef extract_variable_info(\n kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]:\n \"\"\"Extracts the variable creation attributes from the kwargs.\n\n Args:\n kwargs: a dict of keyword arguments that were passed to a variable creator\n scope.\n\n Returns:\n A tuple of variable name, shape, dtype, initialization function.\n \"\"\"\n if (isinstance(kwargs[\"initial_value\"], functools.partial) and (\n \"shape\" in kwargs[\"initial_value\"].keywords or\n kwargs[\"initial_value\"].args)):\n # Sometimes shape is passed positionally, sometimes it's passed as a kwarg.\n if \"shape\" in kwargs[\"initial_value\"].keywords:\n shape = kwargs[\"initial_value\"].keywords[\"shape\"]\n else:\n shape = kwargs[\"initial_value\"].args[0]\n return (kwargs[\"name\"], shape,\n kwargs[\"initial_value\"].keywords.get(\"dtype\", kwargs[\"dtype\"]),\n kwargs[\"initial_value\"].func)\n elif \"shape\" not in kwargs or kwargs[\"shape\"] is None or not callable(\n kwargs[\"initial_value\"]):\n raise ValueError(\n \"Unable to extract initializer function and shape from {}. Please \"\n \"either pass a function that expects a shape and dtype as the \"\n \"initial value for your variable or functools.partial object with \"\n \"the shape and dtype kwargs set. 
This is needed so that we can \"\n \"initialize the shards of the ShardedVariable locally.\".format(\n kwargs[\"initial_value\"]))\n else:\n return (kwargs[\"name\"], kwargs[\"shape\"], kwargs[\"dtype\"],\n kwargs[\"initial_value\"])\n\n\ndef make_sharded_variable_creator(\n hosts: List[Text]) -> Callable[..., TPUShardedVariable]:\n \"\"\"Makes a sharded variable creator given a list of hosts.\n\n Args:\n hosts: a list of tensorflow devices on which to shard the tensors.\n\n Returns:\n A variable creator function.\n \"\"\"\n\n def sharded_variable_creator(\n next_creator: Callable[..., tf_variables.Variable], *args, **kwargs):\n \"\"\"The sharded variable creator.\"\"\"\n kwargs[\"skip_mirrored_creator\"] = True\n\n num_hosts = len(hosts)\n name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs)\n initial_value = kwargs[\"initial_value\"]\n rows = shape[0]\n cols = shape[1]\n partial_partition = rows % num_hosts\n full_rows_per_host = rows // num_hosts\n # We partition as if we were using MOD sharding: at least\n # `full_rows_per_host` rows to `num_hosts` hosts, where the first\n # `partial_partition` hosts get an additional row when the number of rows\n # is not cleanly divisible. Note that `full_rows_per_host` may be zero.\n partitions = (\n [full_rows_per_host + 1] * partial_partition\n + [full_rows_per_host] * (num_hosts - partial_partition))\n variables = []\n sharding_aware = \"shard_info\" in tf_inspect.getargspec(initial_value).args\n\n # Keep track of offset for sharding aware initializers.\n offset = 0\n kwargs[\"dtype\"] = dtype\n for i, p in enumerate(partitions):\n if p == 0:\n # Skip variable creation for empty partitions, resulting from the edge\n # case of 'rows < num_hosts'. This is safe because both load/restore\n # can handle the missing values.\n continue\n with ops.device(hosts[i]):\n kwargs[\"name\"] = \"{}_{}\".format(name, i)\n kwargs[\"shape\"] = (p, cols)\n if sharding_aware:\n shard_info = base.ShardInfo(kwargs[\"shape\"], (offset, 0))\n kwargs[\"initial_value\"] = functools.partial(\n initial_value, shard_info=shard_info)\n offset += p\n else:\n kwargs[\"initial_value\"] = functools.partial(\n unwrapped_initial_value, kwargs[\"shape\"], dtype=dtype)\n variables.append(next_creator(*args, **kwargs))\n return TPUShardedVariable(variables, name=name)\n return sharded_variable_creator\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.python.ops.op_def_library.\"\"\"\n\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.python.eager import function as eager_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import op_def_library\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.util import compat\n\n\nclass OpDefLibraryTest(test_util.TensorFlowTestCase):\n\n def Tensor(self, t, name=\"in\"):\n return op_def_library.apply_op(\"OutT\", T=t, name=name)\n\n def testNoRegisteredOpFails(self):\n with self.assertRaises(RuntimeError) as cm:\n op_def_library.apply_op(\"unknown\")\n self.assertEqual(str(cm.exception), \"Unrecognized Op name unknown\")\n\n def testSimple(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Simple\", a=3)\n self.assertEqual(dtypes.float32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'Simple' op: 'Simple' input: 'Simple/a'\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Simple\", a=4)\n self.assertProtoEquals(\"\"\"\n name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Simple\", a=5, name=\"named\")\n self.assertProtoEquals(\"\"\"\n name: 'named' op: 'Simple' input: 'named/a'\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\n \"Simple\", a=[[1, 2, 3], [4, 5, 6]], name=\"two_d\")\n self.assertProtoEquals(\"\"\"\n name: 'two_d' op: 'Simple' input: 'two_d/a'\n \"\"\", out.op.node_def)\n\n def testSimpleFailures(self):\n with ops.Graph().as_default():\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a=\"Bad string\")\n self.assertIn(\n \"Expected int32 passed to parameter 'a' of op 'Simple', \"\n \"got 'Bad string' of type 'str' instead.\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a=self.Tensor(dtypes.string))\n self.assertIn(\n \"Input 'a' of 'Simple' Op has type string \"\n \"that does not match expected type of int32.\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a=6, extra=\"bogus\")\n self.assertIn(\"Simple got unexpected keyword arguments: extra\",\n str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"Simple\", a=6, extra1=\"bogus\", extra2=\"also_bogus\")\n self.assertIn(\n \"Simple got unexpected keyword arguments: extra1, \"\n \"extra2\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n 
op_def_library.apply_op(\"Simple\")\n self.assertIn(\"No argument for input a\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", wrong=7)\n self.assertIn(\"No argument for input a\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a={\"label\": 1})\n self.assertIn(\n \"Expected int32 passed to parameter 'a' of op 'Simple', \"\n \"got {'label': 1} of type 'dict' instead.\", str(cm.exception))\n\n def testReservedInput(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"ReservedInput\", input_=7, name=\"x\")\n self.assertProtoEquals(\"\"\"\n name: 'x' op: 'ReservedInput' input: 'x/input'\n \"\"\", op.node_def)\n\n def testPolymorphic(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Polymorphic\", a=7, name=\"p\")\n self.assertEqual(dtypes.int32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'Polymorphic' input: 'p/a'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Polymorphic\", a=\"s\", name=\"q\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'Polymorphic' input: 'q/a'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Polymorphic\", a=[\"s\", \"t\", \"u\"], name=\"r\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'Polymorphic' input: 'r/a'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Polymorphic\", a=\"s\", T=dtypes.string)\n self.assertEqual(\n str(cm.exception),\n \"Should not specify value for inferred attr 'T' for \"\n \"Polymorphic.\")\n\n def testPolymorphicOut(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"PolymorphicOut\", T=dtypes.int32, name=\"p\")\n self.assertEqual(dtypes.int32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'PolymorphicOut'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"PolymorphicOut\", T=dtypes.bool, name=\"q\")\n self.assertEqual(dtypes.bool, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'PolymorphicOut'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"PolymorphicOut\")\n self.assertEqual(\n str(cm.exception), \"No argument found for attr T for PolymorphicOut\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"PolymorphicOut\", T=None)\n self.assertEqual(str(cm.exception),\n \"Expected DataType for argument 'T' not None.\")\n\n def testPolymorphicDefaultOut(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"PolymorphicDefaultOut\", T=None, name=\"p\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'PolymorphicDefaultOut'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\n \"PolymorphicDefaultOut\", T=dtypes.bool, name=\"q\")\n self.assertEqual(dtypes.bool, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'PolymorphicDefaultOut'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n def testBinary(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Binary\", a=8, b=9, name=\"b\")\n 
self.assertEqual(dtypes.int32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Binary\", a=\"left\", b=\"right\", name=\"c\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\"Binary\", a=\"left\", b=12)\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\n \"Binary\", a=self.Tensor(dtypes.string), b=self.Tensor(dtypes.int32))\n\n def testRestrict(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Restrict\", a=\"foo\", name=\"g\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'g' op: 'Restrict' input: 'g/a'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Restrict\", a=True, name=\"h\")\n self.assertEqual(dtypes.bool, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'h' op: 'Restrict' input: 'h/a'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Restrict\", a=17)\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 'a' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testTypeList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"TypeList\", a=[\"foo\"], name=\"z\")\n self.assertProtoEquals(\"\"\"\n name: 'z' op: 'TypeList' input: 'z/a_0'\n attr { key: 'T' value { list { type: DT_STRING } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"TypeList\", a=[True, 12], name=\"y\")\n self.assertProtoEquals(\"\"\"\n name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'\n attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"TypeList\", a=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeList\", a=17)\n self.assertStartsWith(str(cm.exception),\n \"Expected list for 'a' \"\n \"argument to 'TypeList' Op, not \")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeList\", a=[self.Tensor(dtypes.int32), None])\n self.assertStartsWith(str(cm.exception),\n \"Tensors in list passed to 'a' of 'TypeList' Op \"\n \"have types [int32, <NOT CONVERTIBLE TO TENSOR>]\")\n\n def testTypeListTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"TypeListTwice\", a=[\"foo\", True], b=[\"bar\", False], name=\"z\")\n self.assertProtoEquals(\"\"\"\n name: 'z' op: 'TypeListTwice'\n input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'\n attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"TypeListTwice\", a=[], b=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeListTwice\", a=[\"foo\", True], b=[\"bar\", 6])\n self.assertEqual(str(cm.exception),\n \"Input 'b' of 'TypeListTwice' Op has type list of \"\n 
\"string, int32 that does not match type list \"\n \"string, bool of argument 'a'.\")\n\n def testOutTypeList(self):\n with ops.Graph().as_default():\n out, = op_def_library.apply_op(\n \"OutTypeList\", T=[dtypes.float32], name=\"x\")\n self.assertEqual(dtypes.float32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'x' op: 'OutTypeList'\n attr { key: 'T' value { list { type: DT_FLOAT } } }\n \"\"\", out.op.node_def)\n\n out1, out2 = op_def_library.apply_op(\n \"OutTypeList\", T=[dtypes.int32, dtypes.bool], name=\"w\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'w' op: 'OutTypeList'\n attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }\n \"\"\", out1.op.node_def)\n\n out = op_def_library.apply_op(\"OutTypeList\", T=[], name=\"empty\")\n self.assertEqual([], out)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"OutTypeList\", T=dtypes.int32)\n self.assertEqual(\n str(cm.exception), \"Expected list for attr T, obtained \"\n \"DType instead.\")\n\n def testTypeListRestrict(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"TypeListRestrict\", a=[\"foo\", False], name=\"v\")\n self.assertProtoEquals(\"\"\"\n name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'\n attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeListRestrict\", a=[True, 12])\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 'a' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testOutTypeListRestrict(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\n \"OutTypeListRestrict\", t=[dtypes.bool, dtypes.string], name=\"u\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.string, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'OutTypeListRestrict'\n attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }\n \"\"\", out1.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"OutTypeListRestrict\", t=[dtypes.string, dtypes.int32])\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 't' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testAttr(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"Attr\", a=12, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"Attr\", a=tensor_shape.Dimension(13), name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\", a=\"bad\")\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'a' not 'bad'.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\", a=[12])\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'a' not [12].\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\", a=None)\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'a' not None.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\")\n self.assertEqual(\n str(cm.exception), \"No argument found for attr a for \"\n \"Attr\")\n\n def testAttrFloat(self):\n 
with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrFloat\", a=1.2, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrFloat\", a=12, name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrFloat\", a=\"bad\")\n self.assertEqual(str(cm.exception),\n \"Expected float for argument 'a' not 'bad'.\")\n\n def testAttrFunc(self):\n with ops.Graph().as_default():\n @function.Defun(dtypes.float32, func_name=\"MyFn\")\n def fn(x):\n return 2 + x\n\n op = op_def_library.apply_op(\"FuncAttr\", f=fn, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'FuncAttr' attr { key: 'f'\n value { func { name: 'MyFn' } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"FuncAttr\", f=3)\n self.assertEqual(str(cm.exception),\n \"Don't know how to convert 3 to a func for argument f\")\n\n def testAttrFuncWithFuncWithAttrs(self):\n with ops.Graph().as_default():\n @eager_function.defun_with_attributes(\n input_signature=(tensor_spec.TensorSpec(None, dtypes.float32),),\n autograph=False,\n attributes={\"_dummy_attr\": 15})\n def fn(x):\n return 2 + x\n\n concrete_fn = fn.get_concrete_function()\n\n op = op_def_library.apply_op(\"FuncAttr\", f=concrete_fn, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'FuncAttr'\n attr {\n key: 'f'\n value {\n func {\n name: '%s'\n attr { key: \"_dummy_attr\" value { i: 15 } }\n }\n }\n }\n \"\"\" % compat.as_str(concrete_fn.name), op.node_def)\n\n def testAttrFuncList(self):\n with ops.Graph().as_default():\n @function.Defun(dtypes.float32, func_name=\"MyFn\")\n def fn1(x):\n return 2 + x\n @function.Defun(dtypes.int32, dtypes.float32, func_name=\"MyFn2\")\n def fn2(x, y):\n return 2 + x, y * 3\n @function.Defun(dtypes.int32, func_name=\"MyFn3\")\n def fn3(y):\n return 2 + y\n\n op = op_def_library.apply_op(\"FuncListAttr\", f=[fn1, fn2, fn3], name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'FuncListAttr'\n attr { key: 'f' value { list { func { name: 'MyFn' }\n func { name: 'MyFn2' }\n func { name: 'MyFn3' } } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"FuncListAttr\", f=[fn1, 3, fn2])\n self.assertEqual(str(cm.exception),\n \"Don't know how to convert 3 to a func for argument f\")\n\n def testAttrBool(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrBool\", a=True, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrBool\", a=False, name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBool\", a=0)\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not 0.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBool\", a=1)\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not 1.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBool\", a=[])\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not [].\")\n\n def testAttrBoolList(self):\n with 
ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrBoolList\", a=[True, False, True], name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'AttrBoolList'\n attr { key: 'a' value { list { b: true b: false b:true } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrBoolList\", a=[], name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBoolList\", a=[0])\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not 0.\")\n\n def testAttrMin(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrMin\", a=12, name=\"s\")\n self.assertProtoEquals(\"\"\"\n name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"AttrMin\", a=2)\n self.assertEqual(str(cm.exception),\n \"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.\")\n\n def testAttrListMin(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrListMin\", a=[1, 2], name=\"r\")\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'AttrListMin'\n attr { key: 'a' value { list { i: 1 i: 2 } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"AttrListMin\", a=[17])\n self.assertEqual(str(cm.exception),\n \"Attr 'a' of 'AttrListMin' Op \"\n \"passed list of length 1 less than minimum 2.\")\n\n def testAttrEnum(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrEnum\", a=\"oranges\", name=\"e\")\n self.assertProtoEquals(\"\"\"\n name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"AttrEnum\", a=\"invalid\")\n self.assertEqual(str(cm.exception),\n 'Attr \\'a\\' of \\'AttrEnum\\' Op '\n 'passed string \\'invalid\\' not in: '\n '\"apples\", \"oranges\".')\n\n def testAttrEnumList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrEnumList\", a=[\"oranges\", \"apples\"], name=\"f\")\n self.assertProtoEquals(\"\"\"\n name: 'f' op: 'AttrEnumList'\n attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\n \"AttrEnumList\", a=[\"apples\", \"invalid\", \"oranges\"])\n self.assertEqual(str(cm.exception),\n 'Attr \\'a\\' of \\'AttrEnumList\\' Op '\n 'passed string \\'invalid\\' not '\n 'in: \"apples\", \"oranges\".')\n\n def testAttrShape(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrShape\", a=[5], name=\"s1\")\n self.assertProtoEquals(\"\"\"\n name: 's1' op: 'AttrShape'\n attr { key: 'a' value { shape { dim { size: 5 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrShape\", a=(4, 3, 2), name=\"s2\")\n self.assertProtoEquals(\"\"\"\n name: 's2' op: 'AttrShape'\n attr { key: 'a' value {\n shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"AttrShape\", a=tensor_shape.TensorShape([3, 2]), name=\"s3\")\n self.assertProtoEquals(\"\"\"\n name: 's3' op: 'AttrShape'\n attr { key: 'a' value {\n shape { dim { size: 3 } dim { size: 2 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrShape\", a=[], name=\"s4\")\n self.assertProtoEquals(\"\"\"\n name: 's4' op: 'AttrShape' 
attr { key: 'a' value { shape { } } }\n \"\"\", op.node_def)\n\n shape = tensor_shape_pb2.TensorShapeProto()\n shape.dim.add().size = 6\n shape.dim.add().size = 3\n op = op_def_library.apply_op(\"AttrShape\", a=shape, name=\"s5\")\n self.assertProtoEquals(\"\"\"\n name: 's5' op: 'AttrShape'\n attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }\n \"\"\", op.node_def)\n\n # TODO(josh11b): Re-enable this test once we stop promoting scalars to\n # shapes.\n # with self.assertRaises(TypeError) as cm:\n # op_def_library.apply_op(\"AttrShape\", a=5)\n # self.assertEqual(str(cm.exception),\n # \"Don't know how to convert 5 to a TensorShapeProto for\"\n # \" argument 'a'\")\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\"AttrShape\", a=\"ABC\")\n\n def testAttrShapeList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrShapeList\", a=[[3, 2], [6, 5, 4]], name=\"sl\")\n self.assertProtoEquals(\"\"\"\n name: 'sl' op: 'AttrShapeList'\n attr { key: 'a' value { list {\n shape { dim { size: 3 } dim { size: 2 } }\n shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrShapeList\", a=[], name=\"esl\")\n self.assertProtoEquals(\"\"\"\n name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testAttrPartialShape(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrPartialShape\", a=[5], name=\"s1\")\n self.assertProtoEquals(\"\"\"\n name: 's1' op: 'AttrPartialShape'\n attr { key: 'a' value { shape { dim { size: 5 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"AttrPartialShape\", a=(4, None, 2), name=\"s2\")\n self.assertProtoEquals(\"\"\"\n name: 's2' op: 'AttrPartialShape'\n attr { key: 'a' value {\n shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"AttrPartialShape\", a=tensor_shape.TensorShape([3, None]), name=\"s3\")\n self.assertProtoEquals(\"\"\"\n name: 's3' op: 'AttrPartialShape'\n attr { key: 'a' value {\n shape { dim { size: 3 } dim { size: -1 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrPartialShape\", a=[], name=\"s4\")\n self.assertProtoEquals(\"\"\"\n name: 's4' op: 'AttrPartialShape'\n attr { key: 'a' value { shape { } } }\n \"\"\", op.node_def)\n\n shape = tensor_shape_pb2.TensorShapeProto()\n shape.dim.add().size = -1\n shape.dim.add().size = 3\n op = op_def_library.apply_op(\"AttrPartialShape\", a=shape, name=\"s5\")\n self.assertProtoEquals(\"\"\"\n name: 's5' op: 'AttrPartialShape'\n attr { key: 'a' value {\n shape { dim { size: -1 } dim { size: 3 } } } }\n \"\"\", op.node_def)\n\n # TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.\n # with self.assertRaises(TypeError) as cm:\n # op_def_library.apply_op(\"AttrPartialShape\", a=5)\n # self.assertEqual(str(cm.exception),\n # \"Don't know how to convert 5 to a TensorShapeProto for\"\n # \" argument 'a'\")\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\"AttrPartialShape\", a=\"ABC\")\n\n def testAttrPartialShapeList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrPartialShapeList\", a=[[3, 2], [6, None, 4]], name=\"sl\")\n self.assertProtoEquals(\"\"\"\n name: 'sl' op: 'AttrPartialShapeList'\n attr { key: 'a' value { list {\n shape { dim { size: 3 } dim { size: 2 } }\n shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }\n \"\"\", 
op.node_def)\n\n op = op_def_library.apply_op(\"AttrPartialShapeList\", a=[], name=\"esl\")\n self.assertProtoEquals(\"\"\"\n name: 'esl' op: 'AttrPartialShapeList' attr {\n key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testAttrDefault(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrDefault\", a=None, name=\"d\")\n self.assertProtoEquals(\"\"\"\n name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrDefault\", a=\"kiwi\", name=\"c\")\n self.assertProtoEquals(\"\"\"\n name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }\n \"\"\", op.node_def)\n\n def testAttrListDefault(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrListDefault\", a=None, name=\"b\")\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'AttrListDefault'\n attr { key: 'a' value { list { i: 5 i: 15 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrListDefault\", a=[3], name=\"a\")\n self.assertProtoEquals(\"\"\"\n name: 'a' op: 'AttrListDefault'\n attr { key: 'a' value { list { i: 3 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrListDefault\", a=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'AttrListDefault'\n attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testAttrEmptyListDefault(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrEmptyListDefault\", a=None, name=\"b\")\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'AttrEmptyListDefault'\n attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrEmptyListDefault\", a=[3], name=\"a\")\n self.assertProtoEquals(\"\"\"\n name: 'a' op: 'AttrEmptyListDefault'\n attr { key: 'a' value { list { f: 3 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrEmptyListDefault\", a=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'AttrEmptyListDefault'\n attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testReservedAttr(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"ReservedAttr\", range_=7, name=\"x\")\n self.assertProtoEquals(\"\"\"\n name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }\n \"\"\", op.node_def)\n\n def testDefaultAttrType(self):\n with ops.Graph().as_default():\n # Give an input whose type has no obvious output type.\n op = op_def_library.apply_op(\"AttrTypeDefault\", a=[], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'AttrTypeDefault' input: 'n/a'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", op.node_def)\n\n # Give an input whose type can be inferred as different\n # than the default.\n op = op_def_library.apply_op(\"AttrTypeDefault\", a=[1.0], name=\"f\")\n self.assertProtoEquals(\"\"\"\n name: 'f' op: 'AttrTypeDefault' input: 'f/a'\n attr { key: 'T' value { type: DT_FLOAT } }\n \"\"\", op.node_def)\n\n def testDefaultListAttrType(self):\n with ops.Graph().as_default():\n # Give an input whose type can be inferred as different\n # than the default.\n op = op_def_library.apply_op(\n \"AttrListTypeDefault\", a=[1.0], b=[2.0], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 1 } }\n \"\"\", op.node_def)\n\n def testNIntsIn(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"NIntsIn\", a=[1, 2], 
name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"NIntsIn\", a=[5, 4, 3, 2, 1], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NIntsIn'\n input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'\n attr { key: 'N' value { i: 5 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=[\"foo\", \"bar\"])\n self.assertEqual(\n str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op have types \"\n \"[string, string] that do not match expected type int32.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NIntsIn\",\n a=[self.Tensor(dtypes.string),\n self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op have \"\n \"types [string, string] that do not match expected type \"\n \"int32.\")\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=[99])\n self.assertEqual(str(cm.exception),\n \"List argument 'a' to 'NIntsIn' Op \"\n \"with length 1 shorter than \"\n \"minimum length 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=[38, \"bar\"])\n self.assertEqual(\n str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op have types \"\n \"[int32, string] that do not match expected type int32.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NIntsIn\",\n a=[self.Tensor(dtypes.int32),\n self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op \"\n \"have types [int32, string] that do not match expected \"\n \"type int32.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=17)\n self.assertStartsWith(str(cm.exception),\n \"Expected list for 'a' argument \"\n \"to 'NIntsIn' Op, not \")\n\n def testNPolymorphicIn(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"NPolymorphicIn\", a=[1, 2], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicIn\", a=[5, 4, 3, 2, 1], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NPolymorphicIn'\n input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 5 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"NPolymorphicIn\", a=[\"foo\", \"bar\"], name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicIn\",\n a=[1, self.Tensor(dtypes.float32, name=\"x\")],\n name=\"q\")\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicIn\",\n a=[\n self.Tensor(dtypes.float32, name=\"y\"),\n self.Tensor(dtypes.float32_ref, name=\"z\")\n ],\n name=\"r\")\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'NPolymorphicIn' 
input: 'y' input: 'z'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=[99])\n self.assertEqual(str(cm.exception),\n \"List argument 'a' to 'NPolymorphicIn' Op with length 1 \"\n \"shorter than minimum length 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=[38, \"bar\"])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [int32, string] that don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NPolymorphicIn\", a=[38, self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [int32, string] that don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=[38, None])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that \"\n \"don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NPolymorphicIn\", a=[\"abcd\", self.Tensor(dtypes.int32)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [string, int32] that don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=17)\n self.assertStartsWith(str(cm.exception),\n \"Expected list for 'a' argument \"\n \"to 'NPolymorphicIn' Op, not \")\n\n def testNPolymorphicRestrictIn(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"NPolymorphicRestrictIn\", a=[\"foo\", \"bar\"], name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicRestrictIn\", a=[False, True, False], name=\"b\")\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'NPolymorphicRestrictIn'\n input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicRestrictIn\", a=[1, 2])\n self.assertEqual(\n str(cm.exception),\n \"Value passed to parameter 'a' has DataType int32 not in \"\n \"list of allowed values: string, bool\")\n\n def testNInTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"NInTwice\", a=[1, 2], b=[\"one\", \"two\"], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NInTwice'\n input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"NInTwice\", a=[], b=[], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NInTwice\", a=[1, 2, 3], b=[\"too short\"])\n self.assertEqual(str(cm.exception),\n \"List argument 'b' to 'NInTwice' Op \"\n \"with length 1 must match \"\n \"length 3 of argument 'a'.\")\n\n def testNInPolymorphicTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n 
\"NInPolymorphicTwice\", a=[1, 2], b=[3, 4], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NInPolymorphicTwice'\n input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NInPolymorphicTwice\", a=[1, 2, 3], b=[5])\n self.assertEqual(str(cm.exception),\n \"List argument 'b' to 'NInPolymorphicTwice' Op \"\n \"with length 1 \"\n \"must match length 3 of argument 'a'.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NInPolymorphicTwice\", a=[1, 2], b=[\"one\", \"two\"])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'b' of 'NInPolymorphicTwice' \"\n \"Op have types [string, string] that do not match type \"\n \"int32 inferred from earlier arguments.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NInPolymorphicTwice\",\n a=[self.Tensor(dtypes.int32)],\n b=[self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'b' of \"\n \"'NInPolymorphicTwice' Op have types [string] that do \"\n \"not match type int32 inferred from earlier arguments.\")\n\n def testNInTwoTypeVariables(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"NInTwoTypeVariables\", a=[1, 2], b=[True, False], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NInTwoTypeVariables'\n input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'\n attr { key: 'S' value { type: DT_INT32 } }\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NInTwoTypeVariables\", a=[1, 2], b=[3, 4], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NInTwoTypeVariables'\n input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'\n attr { key: 'S' value { type: DT_INT32 } }\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NInTwoTypeVariables\",\n a=[self.Tensor(dtypes.int32, name=\"q\")],\n b=[self.Tensor(dtypes.string, name=\"r\")],\n name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'\n attr { key: 'S' value { type: DT_INT32 } }\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 1 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NInTwoTypeVariables\", a=[1, 2, 3], b=[\"5\"])\n self.assertEqual(str(cm.exception),\n \"List argument 'b' to 'NInTwoTypeVariables' Op \"\n \"with length 1 \"\n \"must match length 3 of argument 'a'.\")\n\n def testInPolymorphicTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[8], b=[3, 4, 5], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'InPolymorphicTwice'\n input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 1 } }\n attr { key: 'M' value { i: 3 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"InPolymorphicTwice\", a=[8], b=[], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 1 } }\n attr { key: 'M' value { i: 0 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n 
\"InPolymorphicTwice\", a=[], b=[3, 4], name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'InPolymorphicTwice' input: 'p/b_0' input: 'p/b_1'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 0 } }\n attr { key: 'M' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[], b=[3.0, 4.0], name=\"q\")\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'InPolymorphicTwice' input: 'q/b_0' input: 'q/b_1'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 0 } }\n attr { key: 'M' value { i: 2 } }\n \"\"\", op.node_def)\n\n # Empty input lists: assume default type for T.\n op = op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[], b=[], name=\"r\")\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'InPolymorphicTwice'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 0 } }\n attr { key: 'M' value { i: 0 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[1, 2], b=[\"one\", \"two\"])\n self.assertEqual(\n str(cm.exception),\n \"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op \"\n \"have types [string, string] that do not match type int32 \"\n \"inferred from earlier arguments.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"InPolymorphicTwice\",\n a=[self.Tensor(dtypes.int32)],\n b=[self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'b' of 'InPolymorphicTwice' \"\n \"Op have types [string] that do not match type int32 \"\n \"inferred from earlier arguments.\")\n\n def testNIntsOut(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\"NIntsOut\", N=2, name=\"n\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3, out4, out5 = op_def_library.apply_op(\n \"NIntsOut\", N=5, name=\"o\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertEqual(dtypes.int32, out3.dtype)\n self.assertEqual(dtypes.int32, out4.dtype)\n self.assertEqual(dtypes.int32, out5.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }\n \"\"\", out5.op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NIntsOut\", N=1)\n self.assertEqual(\n str(cm.exception),\n \"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsOut\", N=[3])\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'N' not [3].\")\n\n def testNIntsOutDefault(self):\n with ops.Graph().as_default():\n out1, out2, out3 = op_def_library.apply_op(\n \"NIntsOutDefault\", N=None, name=\"z\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertEqual(dtypes.int32, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n out1, out2 = op_def_library.apply_op(\"NIntsOutDefault\", N=2, name=\"y\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }\n \"\"\", out2.op.node_def)\n\n def 
testNPolymorphicOut(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\n \"NPolymorphicOut\", N=2, T=dtypes.int32, name=\"n\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NPolymorphicOut'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicOut\", T=dtypes.string, N=3, name=\"o\")\n self.assertEqual(dtypes.string, out1.dtype)\n self.assertEqual(dtypes.string, out2.dtype)\n self.assertEqual(dtypes.string, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NPolymorphicOut'\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out3.op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NPolymorphicOut\", N=1, T=dtypes.string)\n self.assertEqual(str(cm.exception),\n \"Attr 'N' of 'NPolymorphicOut' Op \"\n \"passed 1 less than minimum 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicOut\", N=3, T=[dtypes.string])\n self.assertEqual(\n str(cm.exception),\n \"Expected DataType for argument 'T' not [tf.string].\")\n\n def testNPolymorphicOutDefault(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=None, T=None, name=\"r\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=3, T=None, name=\"s\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertEqual(dtypes.bool, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 's' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n out1, out2 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=None, T=dtypes.int32, name=\"t\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=3, T=dtypes.int32, name=\"u\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertEqual(dtypes.int32, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n def testNPolymorphicRestrictOut(self):\n with ops.Graph().as_default():\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicRestrictOut\", N=3, T=dtypes.bool, name=\"u\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertEqual(dtypes.bool, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'NPolymorphicRestrictOut'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicRestrictOut\", N=2, 
T=dtypes.int32)\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 'T' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testRef(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"RefOut\", T=dtypes.bool, name=\"o\")\n self.assertEqual(dtypes.bool_ref, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'RefOut'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n op = op_def_library.apply_op(\"RefIn\", a=out, name=\"i\")\n self.assertProtoEquals(\"\"\"\n name: 'i' op: 'RefIn' input: 'o'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: \"_class\" value { list { s: \"loc:@o\" } } }\n \"\"\", op.node_def)\n\n # Can pass ref to non-ref input.\n out = op_def_library.apply_op(\"RefOut\", T=dtypes.int32, name=\"r\")\n out = op_def_library.apply_op(\"Simple\", a=out, name=\"s\")\n self.assertProtoEquals(\"\"\"\n name: 's' op: 'Simple' input: 'r'\n \"\"\", out.op.node_def)\n\n # Can't pass non-ref to ref input.\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"RefIn\", a=2)\n self.assertEqual(\n str(cm.exception),\n \"'RefIn' Op requires that input 'a' be a mutable tensor \" +\n \"(e.g.: a tf.Variable)\")\n\n input_a = op_def_library.apply_op(\"RefOut\", T=dtypes.int32, name=\"t\")\n input_b = op_def_library.apply_op(\"RefOut\", T=dtypes.int32, name=\"u\")\n op = op_def_library.apply_op(\"TwoRefsIn\", a=input_a, b=input_b, name=\"v\")\n # NOTE(mrry): The order of colocation constraints is an implementation\n # detail.\n self.assertProtoEquals(\"\"\"\n name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: \"_class\" value { list { s: \"loc:@t\" s: \"loc:@u\" } } }\n \"\"\", op.node_def)\n\n def testSpecifyDevice(self):\n graph = ops.Graph()\n with graph.as_default():\n with graph.device(\"/job:ADevice\"):\n op_def_library.apply_op(\"Simple\", a=3)\n # We look at the whole graph here to make sure the Const op is also given\n # the specified device.\n graph_def = graph.as_graph_def()\n self.assertEqual(len(graph_def.node), 2)\n for node in graph_def.node:\n self.assertDeviceEqual(node.device, \"/job:ADevice\")\n\n def testStructuredOutputSingleList(self):\n with ops.Graph().as_default():\n for n_a in [0, 1, 3]:\n a = op_def_library.apply_op(\"SimpleStruct\", n_a=n_a)\n self.assertIsInstance(a, list)\n self.assertEqual(n_a, len(a))\n\n def testStructuredOutputListAndSingle(self):\n with ops.Graph().as_default():\n for n_a in [0, 1, 3]:\n a, b = op_def_library.apply_op(\"MixedStruct\", n_a=n_a)\n self.assertIsInstance(a, list)\n self.assertEqual(n_a, len(a))\n self.assertTrue(all(x.dtype == dtypes.int32 for x in a))\n self.assertIsInstance(b, ops.Tensor)\n self.assertEqual(dtypes.float32, b.dtype)\n\n def testStructuredOutputMultipleLists(self):\n with ops.Graph().as_default():\n for n_a in [0, 1, 3]:\n for n_b in [0, 1, 3]:\n for t_c in [[],\n [dtypes.int32],\n [dtypes.int32, dtypes.float32]]:\n a, b, c = op_def_library.apply_op(\n \"ComplexStruct\", n_a=n_a, n_b=n_b, t_c=t_c)\n\n self.assertEqual(n_a, len(a))\n self.assertTrue(all(x.dtype == dtypes.int32 for x in a))\n self.assertEqual(n_b, len(b))\n self.assertTrue(all(x.dtype == dtypes.int64 for x in b))\n self.assertEqual(t_c, [x.dtype for x in c])\n\n\nclass OpDefLibraryGraphTest(test_util.TensorFlowTestCase):\n\n def testNoGraph(self):\n out = op_def_library.apply_op(\"Simple\", a=3)\n self.assertEqual(out.graph, ops.get_default_graph())\n\n def 
testDefaultGraph(self):\n graph = ops.Graph()\n with graph.as_default():\n out = op_def_library.apply_op(\"Simple\", a=3)\n self.assertEqual(out.graph, graph)\n\n def testDifferentGraphFails(self):\n with ops.Graph().as_default():\n a = op_def_library.apply_op(\"Simple\", a=3)\n with ops.Graph().as_default():\n b = op_def_library.apply_op(\"Simple\", a=4)\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"Binary\", a=a, b=b)\n self.assertIn(\"must be from the same graph\", str(cm.exception))\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
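The assertions above compare `node_def` protos produced by the internal `op_def_library.apply_op` helper. A minimal sketch of the same kind of NodeDef inspection, assuming only the public `tf.Graph` / `as_graph_def()` APIs (an illustration, not part of the test file):

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  tf.constant(3, name="a")  # adds one Const node to `g`

# Walk the serialized GraphDef and print each NodeDef's name, op and attr keys;
# this is the information assertProtoEquals checks against in the tests above.
for node in g.as_graph_def().node:
  print(node.name, node.op, sorted(node.attr.keys()))
```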
"# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Multi-process runner for testing purpose.\"\"\"\n\nimport collections\nimport contextlib\nimport json\nimport os\nimport signal\nimport sys\nimport threading\nimport time\nimport unittest\nimport weakref\n\nfrom absl import logging\nimport six\nfrom six.moves import queue as Queue\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.compat import v2_compat\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import multi_process_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.util.tf_export import tf_export\n\nmultiprocessing = multi_process_lib.multiprocessing\n\n# pylint: disable=g-import-not-at-top\ntry:\n # `faulthandler` is not available in py2.\n import faulthandler\nexcept ImportError:\n faulthandler = None\n\n# TODO(b/150264776): Remove after resolving CI issue.\ntry:\n import dill\nexcept ImportError:\n dill = None\n\n# TODO(b/150264776): Remove after resolving CI issue.\ntry:\n import tblib.pickling_support\n # For pickling traceback objects.\n tblib.pickling_support.install()\nexcept ImportError:\n pass\n\n\n# _ProcessStatusInfo contains process status information. 
When is_successful\n# attribute is True, the subprocess has ended successfully, or if False, the\n# exception stack trace info is stored in exc_info to pass on to parent process\n# to be re-raised.\n_ProcessStatusInfo = collections.namedtuple(\n '_ProcessStatusInfo',\n ['task_type', 'task_id', 'is_successful', 'exc_info', 'return_value'])\n\n# Information returned from a successful MultiProcessRunner run.\nMultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',\n ['return_value', 'stdout'])\n\n# visible_gpus: If not None, CUDA_VISIBLE_DEVICES is set to visible_gpus.\nTestEnvironment = collections.namedtuple('TestEnvironment', [\n 'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',\n 'v2_enabled', 'executing_eagerly', 'visible_gpus'\n])\n\n# Resources for communication between worker processes and the main process.\n#\n# `process_status_queue` is used by `multi_process_runner` internally for\n# communication from subprocesses to the parent process for whether it's been\n# successful, and if not what the error stack trace is.\n# `parent_to_sub_queue` is used for communications from parent to subprocess.\n# Currently this is only used to terminate subprocesses.\n# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.\n# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent\n# process.\n# `barrier` is a barrier for the party of all subprocesses.\nResources = collections.namedtuple('Resources', [\n 'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'\n])\n\n# Default time out sec is selected so that it's handled before the default\n# \"medium\" timeout of the test runs.\n_DEFAULT_TIMEOUT_SEC = 200\n\n# The timeout in seconds to wait to force kill a child process. When a child\n# process times out we first try to SIGTERM it so that it has a chance to dump\n# stacktraces. However dumping stacktrace can take a long time.\n_FORCE_KILL_WAIT_SEC = 30\n\n\nclass MultiProcessRunner(object):\n \"\"\"A utility class to start multiple processes to simulate a cluster.\n\n We need to use multiple processes to simulate a cluster in TF 2.0 tests\n because TF 2.0 has some process-global data structures that have to be\n separated by processes. We also need child processes to test out our fault\n tolerance because shutting down a standard TensorFlow server within its\n process is not supported.\n\n Note: the main test program that uses this runner class must run main program\n via `test_main` defined in this file. Using this runner in non-test binaries\n is not supported yet.\n\n This class is not thread-safe. Child processes will inherit TF2 behavior flag.\n \"\"\"\n\n def __init__(self,\n fn,\n cluster_spec,\n rpc_layer=None,\n max_run_time=None,\n grpc_fail_fast=None,\n stream_output=True,\n return_output=False,\n use_dill_for_args=True,\n daemon=False,\n dependence_on_chief=True,\n auto_restart=False,\n share_gpu=True,\n args=None,\n kwargs=None):\n \"\"\"Instantiation of a `MultiProcessRunner`.\n\n Args:\n fn: Function to be run on child processes. This will be run on processes\n for all task types.\n cluster_spec: Dict for cluster spec. The utility function\n `tf.__internal__.distribute.multi_process_runner.create_cluster_spec`\n can be conveniently used to create such dict. 
The following is an\n example of cluster with three workers and two ps's.\n {\"worker\": [\"worker0.example.com:2222\",\n \"worker1.example.com:2222\",\n \"worker2.example.com:2222\"],\n \"ps\": [\"ps0.example.com:2222\",\n \"ps1.example.com:2222\"]}\n rpc_layer: RPC layer to use. Default value is 'grpc'.\n max_run_time: `None` or integer. If not `None`, child processes are forced\n to exit at approximately this many seconds after this utility is called.\n We achieve this through `signal.alarm()` api. Note that this is best\n effort at Python level since Python signal handler does not get executed\n when it runs lower level C/C++ code. So it can be delayed for\n arbitrarily long time. If any of the child process is still running when\n `max_run_time` is up, they will be force-terminated and an\n `UnexpectedSubprocessExitError` may be raised. If `None`, child\n processes are not forced to exit.\n grpc_fail_fast: Whether GRPC connection between processes should fail\n without retrying. Defaults to None, in which case the environment\n variable is not explicitly set.\n stream_output: True if the output/error from the subprocesses should be\n streamed to be printed in parent process' log. Defaults to True.\n return_output: If True, the output/error from the subprocesses should be\n collected to be attached to the resulting namedtuple returned from\n `join()`. The list of output can be retrieved via `stdout` attribute.\n Defaults to False.\n use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill\n can pickle more objects, but doesn't work with types in\n `multiprocessing` library like `Mutex`.\n daemon: Whether to start processes as daemons.\n dependence_on_chief: Whether to terminates the cluster if the chief exits.\n If auto_restart is True, it only terminates the cluster if the chief\n exits with a zero exit code.\n auto_restart: Whether to automatically restart processes that exit with\n non-zero exit code.\n share_gpu: Whether to share GPUs among workers. If False, each worker is\n assigned different GPUs in a roundrobin fashion. This should be True\n whenever possible for better test execution coverage; some situations\n that need it to be False are tests that runs NCCL.\n args: Positional arguments to be sent to `fn` run on subprocesses.\n kwargs: Keyword arguments to be sent to `fn` run on subprocesses.\n\n Raises:\n RuntimeError: if `multi_process_runner.test_main()` is not called.\n ValueError: if there are more than one chief in the `cluster_spec`.\n SkipTest: if thread sanitizer is enabled (which is incompatible with MPR).\n \"\"\"\n if test_util.is_tsan_enabled():\n raise unittest.SkipTest(\n 'ThreadSanitizer is not compatible with MultiProcessRunner.')\n\n assert cluster_spec is not None\n if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:\n raise ValueError('If chief exists in the cluster, there must be at most '\n 'one chief. 
Current `cluster_spec` has {} chiefs.'\n .format(len(cluster_spec['chief'])))\n _check_initialization()\n if not callable(fn):\n raise ValueError('fn is not a callable')\n\n self._fn = fn\n self._cluster_spec = cluster_spec\n self._rpc_layer = rpc_layer or 'grpc'\n self._max_run_time = max_run_time\n self._grpc_fail_fast = grpc_fail_fast\n self._stream_output = stream_output\n # TODO(rchao): Revisit return_output argument to consider other solution.\n self._return_output = return_output\n self._dependence_on_chief = dependence_on_chief\n self._use_dill_for_args = use_dill_for_args\n self._daemon = daemon\n self._auto_restart = auto_restart\n self._args = args or ()\n self._kwargs = kwargs or {}\n\n self._share_gpu = share_gpu\n self._total_gpu = len(context.context().list_physical_devices('GPU'))\n\n # Child processes should have the same v2 and eager behavior.\n self._v2_enabled = tf2.enabled()\n self._executing_eagerly = context.executing_eagerly()\n\n self._joined = False\n self._process_lock = threading.Lock()\n # Guarded by self._process_lock.\n self._processes = {}\n # Record which processes are terminated. Due to a bug in Python<3.7,\n # terminated processes return 255 exit code, which should cause an exception\n # in join().\n # https://bugs.python.org/issue30589\n # Guarded by self._process_lock.\n self._terminated = set()\n self._reading_threads = []\n\n self._manager = manager()\n self._process_status_queue = self._manager.Queue()\n self._parent_to_sub_queue = self._manager.Queue()\n parties = sum(len(addresses) for addresses in self._cluster_spec.values())\n self._barrier = self._manager.Barrier(parties)\n\n # We use a queue to collect outputs from worker processes since it's thread\n # safe.\n self._streaming_queue = self._manager.Queue()\n\n self._watchdog_thread = None\n\n def set_args(self, args=None, kwargs=None):\n self._args = args or self._args\n self._kwargs = kwargs or self._kwargs\n\n def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):\n \"\"\"Function to continuously read lines from subprocesses.\"\"\"\n with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:\n for line in reader:\n task_string = '[{}-{}]:'.format(task_type, task_id)\n formatted_line = '{} {}'.format(task_string.ljust(14), line)\n if self._stream_output:\n # TODO(rchao): Use a lock here to ensure the printed lines are not\n # broken.\n print(formatted_line, end='', flush=True)\n if self._return_output:\n self._streaming_queue.put(formatted_line)\n\n def _start_subprocess_and_reading_thread(self,\n task_type,\n task_id,\n cluster_spec=None,\n fn=None,\n args=None,\n kwargs=None):\n \"\"\"Start a subprocess and a thread the reads lines from the subprocess.\"\"\"\n\n if dill is None:\n raise unittest.SkipTest(\n 'TODO(b/150264776): Resolve dependency issue in CI')\n\n cluster_spec = cluster_spec or self._cluster_spec\n visible_gpus = None\n if not self._share_gpu and self._total_gpu > 0:\n # Assign GPUs in a roundrobin fashion.\n id_in_cluster = multi_worker_util.id_in_cluster(cluster_spec, task_type,\n task_id)\n worker_count = multi_worker_util.worker_count(cluster_spec, task_type)\n visible_gpus = list(range(id_in_cluster, self._total_gpu, worker_count))\n\n test_env = TestEnvironment(\n task_type=task_type,\n task_id=task_id,\n cluster_spec=cluster_spec,\n rpc_layer=self._rpc_layer,\n grpc_fail_fast=self._grpc_fail_fast,\n v2_enabled=self._v2_enabled,\n executing_eagerly=self._executing_eagerly,\n visible_gpus=visible_gpus,\n )\n pipe_r, pipe_w = 
multiprocessing.Pipe(duplex=False)\n resources = Resources(\n process_status_queue=self._process_status_queue,\n parent_to_sub_queue=self._parent_to_sub_queue,\n streaming_pipe_w=pipe_w,\n barrier=self._barrier,\n )\n if fn is None:\n fn, args, kwargs = self._fn, self._args, self._kwargs\n # Always use dill to pickle fn so that we support more callable\n # types, e.g. lambda.\n fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL)\n if self._use_dill_for_args:\n args = dill.dumps(args, dill.HIGHEST_PROTOCOL)\n kwargs = dill.dumps(kwargs, dill.HIGHEST_PROTOCOL)\n\n p = _Process(\n test_env=test_env,\n target=_ProcFunc(),\n args=(resources, test_env, fn, args, kwargs, self._use_dill_for_args),\n daemon=self._daemon)\n p.start()\n self._processes[(task_type, task_id)] = p\n self._terminated.discard((task_type, task_id))\n\n # For each subprocess, we dedicate a thread continuously reading lines\n # from them.\n thread = threading.Thread( # pylint: disable=unexpected-keyword-arg\n target=self._continuously_readline_from_sub,\n args=(pipe_r, task_type, task_id))\n thread.start()\n self._reading_threads.append(thread)\n\n if self._watchdog_thread is None or not self._watchdog_thread.is_alive():\n self._watchdog_thread = threading.Thread(target=self._process_watchdog)\n self._watchdog_thread.start()\n\n def start(self):\n \"\"\"Starts processes, one for each task in `cluster_spec`.\n\n Note that this is best effort by the applicable multiprocessing library,\n and it may take up to seconds for a subprocess to be successfully started.\n \"\"\"\n with self._process_lock:\n if self._processes:\n raise ValueError('MultiProcessRunner already started.')\n if self._joined:\n raise ValueError('cannot start new processes after'\n 'MultiProcessRunner.join() is called')\n\n for task_type, addresses in self._cluster_spec.items():\n for task_id, _ in enumerate(addresses):\n self._start_subprocess_and_reading_thread(task_type, task_id)\n\n # TODO(rchao): Remove the need of using SIGALRM if possible. At this time,\n # without this the tests become very flaky.\n if self._max_run_time is not None:\n\n def handler(signum, frame):\n del signum, frame\n self.terminate_all()\n\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(self._max_run_time)\n\n def start_in_process_as(self, as_task_type, as_task_id):\n \"\"\"Start the processes, with the specified task run in main process.\n\n This is similar to `start()` except that the task with task_type\n `as_task_type` and task_id `as_task_id` is run in the main process.\n This method is particularly useful when debugging tool such as `pdb` is\n needed in some specific task. 
Note that since this method is blocking until\n that specific task exits, additional actions would need a thread to be\n called:\n\n ```python\n def fn():\n # user code to be run\n import pdb; pdb.set_trace()\n\n def follow_ups():\n time.sleep(5)\n mpr.start_single_process(\n task_type='evaluator',\n task_id=0)\n\n mpr = multi_process_runner.MultiProcessRunner(\n fn,\n multi_worker_test_base.create_cluster_spec(\n has_chief=True, num_workers=1))\n threading.Thread(target=follow_ups).start()\n mpr.start_in_process_as(as_task_type='chief', as_task_id=0)\n mpr.join()\n ```\n\n Note that if `return_output=True`, the logs/stdout by task\n run by the main process is not available in result.stdout.\n\n Args:\n as_task_type: The task type to be run in the main process.\n as_task_id: The task id to be run in the main process.\n \"\"\"\n if self._processes:\n raise ValueError('MultiProcessRunner already started.')\n with self._process_lock:\n if self._joined:\n raise ValueError('cannot start new processes after'\n 'MultiProcessRunner.join() is called')\n for task_type, addresses in self._cluster_spec.items():\n for task_id, _ in enumerate(addresses):\n if not (task_type == as_task_type and task_id == as_task_id):\n self._start_subprocess_and_reading_thread(task_type, task_id)\n\n _set_tf_config(as_task_type, as_task_id, self._cluster_spec,\n self._rpc_layer)\n self._fn(*self._args, **self._kwargs)\n\n def start_single_process(self,\n task_type,\n task_id,\n cluster_spec=None,\n fn=None,\n args=None,\n kwargs=None):\n \"\"\"Starts a single process.\n\n This starts a process in the cluster with the task type, task id, and the\n process function (`fn`). If process function is `None`, the function\n provided at `__init__` will be used. If `cluster_spec` is `None`, the\n cluster spec provided at `__init__` will be used.\n\n TODO(rchao): It is meant that all subprocesses will be updated with the new\n cluster spec, but this has yet to be implemented. At this time only the\n newly started subprocess picks up this updated cluster spec.\n\n Args:\n task_type: The task type.\n task_id: The task id.\n cluster_spec: The cluster spec to be used on the newly started\n process. If `None`, the cluster spec provided at `__init__` will be\n used.\n fn: The process function to be run on the newly started\n process. If specified, specify `args` and `kwargs` as well. If `None`,\n the function provided at `__init__` will be used.\n args: Optional positional arguments to be supplied in `fn`.\n kwargs: Optional keyword arguments to be supplied in `fn`.\n \"\"\"\n with self._process_lock:\n if self._joined:\n raise ValueError('cannot start new processes after'\n 'MultiProcessRunner.join() is called')\n self._start_subprocess_and_reading_thread(\n task_type,\n task_id,\n cluster_spec=cluster_spec,\n fn=fn,\n args=args or (),\n kwargs=kwargs or {})\n\n def _queue_to_list(self, queue_to_convert):\n \"\"\"Convert `queue.Queue` to `list`.\"\"\"\n list_to_return = []\n # Calling `queue.empty()` is not reliable.\n while True:\n try:\n list_to_return.append(queue_to_convert.get(block=False))\n except Queue.Empty:\n break\n return list_to_return\n\n def _get_process_statuses(self):\n # One worker may have multiple statuses. 
We only keep the last one.\n statuses = {}\n for status in self._queue_to_list(self._process_status_queue):\n statuses[(status.task_type, status.task_id)] = status\n return statuses\n\n def get_process_id(self, task_type, task_id):\n \"\"\"Returns the subprocess id given the task type and task id.\"\"\"\n with self._process_lock:\n p = self._processes.get((task_type, task_id), None)\n return p.pid if p else None\n\n def get_process_exit_code(self, task_type, task_id):\n \"\"\"Returns the subprocess exit code given the task type and task id.\n\n Args:\n task_type: The task type.\n task_id: The task id.\n\n Returns:\n The subprocess exit code; `None` if the subprocess has not exited yet.\n\n Raises:\n KeyError: If the corresponding subprocess is not found with `task_type`\n and `task_id`.\n \"\"\"\n with self._process_lock:\n p = self._processes[(task_type, task_id)]\n return p.exitcode if p else None\n\n def process_exists(self, task_type, task_id):\n \"\"\"Returns whether the subprocess still exists given the task type and id.\n\n Args:\n task_type: The task type.\n task_id: The task id.\n\n Returns:\n Boolean; whether the subprocess still exists. If the subprocess has\n exited, this returns False.\n \"\"\"\n return self.get_process_exit_code(task_type, task_id) is None\n\n def _process_watchdog(self):\n \"\"\"Simulates a cluster management system.\n\n - If auto_restart is True, it restarts processes that exit with a non-zero\n exit code. Note that when join() times out it overrides auto_restart to\n False.\n - If dependence_on_chief is True, it terminates all processes once the chief\n exits. If auto_restart is also True, it only terminates all processes if\n the chief exit with a zero exit code, otherwise it restarts the chief.\n\n This runs in self._watchdog_thread.\n \"\"\"\n while True:\n time.sleep(1)\n with self._process_lock:\n chief = self._processes.get(('chief', 0), None)\n # Terminate the cluster when _dependence_on_chief is True if either:\n # - chief has exited with zero exit code.\n # - chief has exited with non-zero exit code and self._auto_restart is\n # False.\n if chief and self._dependence_on_chief and chief.exitcode is not None:\n if chief.exitcode == 0 or (not self._auto_restart):\n for p in self._processes.values():\n # Give other processes a chance to exit on their own.\n p.join(timeout=3)\n self._terminate_all()\n for p in self._processes.values():\n p.join()\n return\n\n # Auto restart failed processes if self._auto_restart is True.\n if self._auto_restart:\n has_failure = False\n for (task_type, task_id), p in self._processes.items():\n if p.exitcode is not None and p.exitcode != 0:\n has_failure = True\n logging.info('Restarting failed %s-%d', task_type, task_id)\n self._start_subprocess_and_reading_thread(task_type, task_id)\n if has_failure:\n continue\n\n # Exit the thread if all processes have exited at this point.\n if all(p.exitcode is not None for p in self._processes.values()):\n return\n\n def _reraise_if_subprocess_error(self, process_statuses):\n for process_status in process_statuses.values():\n assert isinstance(process_status, _ProcessStatusInfo)\n if not process_status.is_successful:\n process_status.exc_info[1].mpr_result = self._get_mpr_result(\n process_statuses)\n six.reraise(*process_status.exc_info)\n\n def join(self, timeout=_DEFAULT_TIMEOUT_SEC):\n \"\"\"Joins all the processes with timeout.\n\n If any of the subprocesses does not exit approximately after `timeout`\n seconds has passed after `join` call, this raises a\n 
`SubprocessTimeoutError`.\n\n Note: At timeout, it uses SIGTERM to terminate the subprocesses, in order to\n log the stack traces of the subprocesses when they exit. However, this\n results in timeout when the test runs with tsan (thread sanitizer); if tsan\n is being run on the test targets that rely on timeout to assert information,\n `MultiProcessRunner.terminate_all()` must be called after `join()`, before\n the test exits, so the subprocesses are terminated with SIGKILL, and data\n race is removed.\n\n Args:\n timeout: optional integer or `None`. If provided as an integer, and not\n all processes report status within roughly `timeout` seconds, a\n `SubprocessTimeoutError` exception will be raised. If `None`, `join` never\n times out.\n\n Returns:\n A `MultiProcessRunnerResult` object, which has two attributes,\n `return_value` and `stdout`. `return_value` always contains a list of\n return values from the subprocesses, although the order is not meaningful.\n If `return_output` argument is True at `__init__`, `stdout` is available\n that contains a list of all messages from subprocesses' stdout and stderr.\n\n Raises:\n SubprocessTimeoutError: if not all processes report status approximately\n within `timeout` seconds. When this is raised, a\n `MultiProcessRunnerResult` object can be retrieved by\n `SubprocessTimeoutError`'s mpr_result attribute, which has the same\n structure as above 'Returns' section describes.\n UnexpectedSubprocessExitError: If any of the subprocesses did not exit\n properly (for example, they exit on SIGTERM or SIGKILL signal). When\n this is raised, a `MultiProcessRunnerResult` object can be retrieved by\n `UnexpectedSubprocessExitError`'s mpr_result attribute, which has the\n same structure as above 'Returns' section describes. If `max_run_time`\n is not `None`, it is expected that some subprocesses may be\n force-killed when `max_run_time` is up, and this is raised in those\n cases.\n Exception: if there is an Exception propagated from any subprocess. When\n this is raised, a `MultiProcessRunnerResult` object can be retrieved by\n `UnexpectedSubprocessExitError`'s mpr_result attribute, which has the\n same structure as above 'Returns' section describes.\n \"\"\"\n if timeout and not isinstance(timeout, int):\n raise ValueError('`timeout` must be an integer or `None`.')\n with self._process_lock:\n if self._joined:\n raise ValueError(\"MultiProcessRunner can't be joined twice.\")\n self._joined = True\n\n self._watchdog_thread.join(timeout)\n if self._watchdog_thread.is_alive():\n # Timeout. Force termination to dump worker processes stack trace.\n with self._process_lock:\n self._auto_restart = False\n logging.error('Timeout when joining for child processes. Terminating...')\n self.terminate_all(sig=signal.SIGTERM)\n # Wait for the processes to terminate by themselves first, so they have a\n # chance to dump stacktraces. After _FORCE_KILL_WAIT_SEC, we SIGKILL them.\n self._watchdog_thread.join(_FORCE_KILL_WAIT_SEC)\n if self._watchdog_thread.is_alive():\n logging.error('Timeout when waiting for child processes to '\n 'print stacktrace. Sending SIGKILL...')\n self.terminate_all()\n self._watchdog_thread.join()\n process_statuses = self._get_process_statuses()\n self._reraise_if_subprocess_error(process_statuses)\n raise SubprocessTimeoutError(\n 'One or more subprocesses timed out, where timeout was set to {}s. 
'\n 'Please change the `timeout` argument for '\n '`MultiProcessRunner.join()` or `multi_process_runner.run()` '\n 'if it should be adjusted.'.format(timeout),\n self._get_mpr_result(process_statuses))\n\n for (task_type, task_id), p in self._processes.items():\n logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)\n\n process_statuses = self._get_process_statuses()\n self._reraise_if_subprocess_error(process_statuses)\n\n # Checking all the processes that are expected to exit properly.\n for (task_type, task_id), p in self._processes.items():\n # Successfully exiting process has exit code 0. We ignore processes that\n # are terminated.\n assert p.exitcode is not None\n if (p.exitcode > 0 and (task_type, task_id) not in self._terminated):\n raise UnexpectedSubprocessExitError(\n 'Subprocess %s-%d exited with exit code %s. See logs for details.'\n % (task_type, task_id, p.exitcode),\n self._get_mpr_result(process_statuses))\n\n logging.info('Joining log reading threads.')\n for thread in self._reading_threads:\n thread.join()\n logging.info('Joined log reading threads.')\n\n # Clear the alarm.\n signal.alarm(0)\n\n return self._get_mpr_result(process_statuses)\n\n def _get_mpr_result(self, process_statuses):\n stdout = self._queue_to_list(self._streaming_queue)\n return_values = []\n for process_status in process_statuses.values():\n if process_status.return_value is not None:\n return_values.append(process_status.return_value)\n return MultiProcessRunnerResult(stdout=stdout, return_value=return_values)\n\n def terminate(self, task_type, task_id):\n \"\"\"Terminates the process with `task_type` and `task_id`.\n\n If auto_retart=True, the terminated task will be restarted unless the chief\n has already exited with zero exit code.\n\n Args:\n task_type: the task type.\n task_id: the task id.\n\n \"\"\"\n with self._process_lock:\n p = self._processes.get((task_type, task_id), None)\n if p is None:\n raise ValueError('{}-{} does not exist'.format(task_type, task_id))\n self._terminated.add((task_type, task_id))\n # TODO(crccw): change to use Process.terminate() as well.\n self._parent_to_sub_queue.put('terminate {} {}'.format(\n task_type, task_id))\n p.join()\n\n def _terminate_all(self, sig=None):\n \"\"\"Terminates all subprocesses.\n\n The caller is required to hold self._process_lock.\n\n Args:\n sig: the signal used to terminate the process. The default is SIGKILL.\n \"\"\"\n\n # Use SIGKILL as default. In systems where that's unavailable such as\n # windows, use SIGTERM.\n sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)\n for (task_type, task_id), p in self._processes.items():\n if p.exitcode is not None:\n logging.info('%s-%d has already exited. 
Not terminating.', task_type,\n task_id)\n continue\n try:\n os.kill(p.pid, sig)\n self._terminated.add((task_type, task_id))\n logging.info('%s-%d terminated with signal %r.', task_type, task_id,\n sig)\n except ProcessLookupError:\n logging.info('Attempting to kill %s-%d but it does not exist.',\n task_type, task_id)\n\n def terminate_all(self, sig=None):\n \"\"\"Terminates all subprocesses.\"\"\"\n with self._process_lock:\n self._terminate_all(sig)\n\n\nclass _Process(multi_process_lib.Process):\n \"\"\"A modified `multiprocessing.Process` that can set up environment variables.\"\"\"\n\n # TODO(crccw): consider moving other logics in _ProcFunc to _Process.\n\n def __init__(self, test_env, **kwargs):\n super(_Process, self).__init__(**kwargs)\n self._test_env = test_env\n self._actual_run = getattr(self, 'run')\n self.run = self._run_with_setenv\n\n def _run_with_setenv(self):\n # We need to set environment variables before doing anything because\n # setenv() is not thread-safe.\n test_env = self._test_env\n if test_env.grpc_fail_fast is not None:\n os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)\n if test_env.visible_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(\n [str(i) for i in test_env.visible_gpus])\n _set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec,\n test_env.rpc_layer)\n return self._actual_run()\n\n\nclass _ProcFunc(object):\n \"\"\"Represents a callable to run in a subprocess.\"\"\"\n\n @contextlib.contextmanager\n def _runtime_mode(self, executing_eagerly):\n if executing_eagerly:\n with context.eager_mode():\n yield\n else:\n with context.graph_mode():\n yield\n\n def _message_checking_func(self, task_type, task_id):\n \"\"\"A function that regularly checks messages from parent process.\"\"\"\n # TODO(rchao): Remove this once parent uses SIGKILL to terminate subprocess.\n while True:\n try:\n message = self._resources.parent_to_sub_queue.get(block=False)\n\n # Currently the only possible message is termination.\n if not message.startswith('terminate'):\n raise ValueError('Unrecognized message: {}'.format(message))\n\n if message == 'terminate {} {}'.format(task_type, task_id):\n break\n else:\n # If the message is not targeting this process, put it back to the\n # queue.\n self._resources.parent_to_sub_queue.put(message)\n time.sleep(1)\n except Queue.Empty:\n time.sleep(0.1)\n self._resources.process_status_queue.put(\n _ProcessStatusInfo(\n task_type=task_type,\n task_id=task_id,\n is_successful=True,\n exc_info=None,\n return_value=None))\n # `os._exit(1)` is used to more reliably terminate a subprocess.\n os._exit(1) # pylint: disable=protected-access\n\n def _close_streaming(self):\n \"\"\"Close stdout, stderr and streaming pipe.\n\n We need to explicitly close them since Tensorflow may take a while to exit,\n so that the reading threads in the main process can exit more quickly.\n \"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout.close()\n sys.stderr.close()\n self._resources.streaming_pipe_w.close()\n\n def __call__(self, resources, test_env, fn, args, kwargs, use_dill_for_args):\n \"\"\"The wrapper function that actually gets run in child process(es).\"\"\"\n\n global _barrier\n\n self._resources = resources\n _barrier = self._resources.barrier\n fn = dill.loads(fn)\n if use_dill_for_args:\n args = dill.loads(args)\n kwargs = dill.loads(kwargs)\n\n if faulthandler is not None:\n faulthandler.enable()\n faulthandler.register(signal.SIGTERM, chain=True)\n\n # All logging should go to stderr to be streamed to 
the main process.\n logging.set_stderrthreshold(logging.DEBUG)\n\n # Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so\n # print() and logging.*() write directly to `streaming_pipe_w`.\n # Unfortunately since we cannot prepend task_type and task_id information to\n # the streamed logs we will need a thread per subprocess to distinguish\n # where the piece of message is from.\n os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())\n os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())\n\n pid = os.getpid()\n logging.info('Subprocess with PID %d (%s, %d) is now being started.', pid,\n test_env.task_type, test_env.task_id)\n logging.info('TF_CONFIG: %r', os.environ['TF_CONFIG'])\n\n # The thread will be dedicated to checking messages from the parent process.\n threading.Thread( # pylint: disable=unexpected-keyword-arg\n target=self._message_checking_func,\n args=(test_env.task_type, test_env.task_id),\n daemon=True).start()\n\n if test_env.v2_enabled:\n v2_compat.enable_v2_behavior()\n\n with self._runtime_mode(test_env.executing_eagerly):\n info = _run_contained(test_env.task_type, test_env.task_id, fn, args,\n kwargs)\n self._resources.process_status_queue.put(info)\n\n # Re-raise the exception in addition to reporting it to the parent\n # process, so that even if `--test_timeout` flag is set and the\n # error doesn't make it to be shown in parent process before bazel's\n # timeout, the log would still show what happens in this subprocess,\n # instead of silently suppressing the error due to early bazel\n # timeout. Raising an error in the subprocess produces stack trace in\n # the log, but the program continues running.\n if not info.is_successful:\n six.reraise(*info.exc_info)\n\n self._close_streaming()\n\n # Exit with code 0 as it's considered successful exit at this point.\n sys.exit(0)\n\n\n# Active MultiProcessPoolRunner. We need to shut them down when the program\n# exits, and this is by setting the `tearDownModule` of the module containing\n# `__main__`. Note this it set in both the parent process and the subprocesses.\n_active_pool_runners = weakref.WeakSet()\n\n\ndef _shutdown_all_pool_runners():\n for pool in _active_pool_runners:\n pool.shutdown()\n\n\ndef is_oss():\n \"\"\"Returns whether the test is run under OSS.\"\"\"\n return len(sys.argv) >= 1 and 'bazel' in sys.argv[0]\n\n\nclass MultiProcessPoolRunner(object):\n \"\"\"A utility class to start a process pool to simulate a cluster.\n\n It's similar to MultiProcessRunner, but uses a pool of processes to avoid the\n expensive initialization cost of Tensorflow.\n \"\"\"\n\n def __init__(self, cluster_spec, initializer=None, share_gpu=True):\n \"\"\"Creates a multi-process pool runner.\n\n Args:\n cluster_spec: Dict for cluster spec. The following is an example of\n cluster with three workers.\n {\"worker\": [\"worker0.example.com:2222\",\n \"worker1.example.com:2222\",\n \"worker2.example.com:2222\"]}\n initializer: a callable to called at the startup of worker processes.\n share_gpu: Whether to share GPUs among workers. 
If False, each worker is\n assigned different GPUs in a roundrobin fashion.\n\n Raises:\n RuntimeError: if `multi_process_runner.test_main()` is not called.\n ValueError: if there are more than one chief in the `cluster_spec`.\n \"\"\"\n _active_pool_runners.add(self)\n self._cluster_spec = cluster_spec\n self._initializer = initializer\n self._share_gpu = share_gpu\n self._conn = {}\n self._runner = None\n\n def __del__(self):\n self.shutdown()\n\n def shutdown(self):\n \"\"\"Shuts down the worker pool.\"\"\"\n for conn in self._conn.values():\n conn.close()\n self._conn = {}\n if self._runner is not None:\n try:\n self._runner.join()\n except Exception as e: # pylint: disable=broad-except\n logging.error(\n 'Ignoring exception when shutting down MultiProcessPoolRunner: %s',\n e)\n self._runner = None\n\n def _start(self):\n \"\"\"Starts the worker pool.\"\"\"\n # We need different arguments for different processes so we're passing a\n # no-op fn here and use start_single_process instead.\n\n if dill is None:\n raise unittest.SkipTest(\n 'TODO(b/150264776): Resolve dependency issue in CI')\n\n self._runner = MultiProcessRunner(\n fn=lambda: None,\n cluster_spec=self._cluster_spec,\n use_dill_for_args=False,\n share_gpu=self._share_gpu)\n if self._initializer:\n initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)\n else:\n initializer = None\n for task_type, addresses in self._cluster_spec.items():\n for task_id, _ in enumerate(addresses):\n conn1, conn2 = multiprocessing.Pipe(duplex=True)\n self._conn[(task_type, task_id)] = conn1\n self._runner.start_single_process(\n task_type,\n task_id,\n fn=_pool_runner_worker,\n args=(task_type, task_id, initializer, conn2))\n\n def run(self, fn, args=None, kwargs=None):\n \"\"\"Runs `fn` with `args` and `kwargs` on all jobs.\n\n Args:\n fn: The function to be run.\n args: Optional positional arguments to be supplied in `fn`.\n kwargs: Optional keyword arguments to be supplied in `fn`.\n\n Returns:\n A list of return values.\n \"\"\"\n _check_initialization()\n # TODO(b/150264776): skip in OSS until it's implemented.\n multi_process_lib.Process()\n if self._runner is None:\n self._start()\n\n fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL)\n for conn in self._conn.values():\n conn.send((fn, args or [], kwargs or {}))\n\n process_statuses = []\n for (task_type, task_id), conn in self._conn.items():\n logging.info('Waiting for the result from %s-%d', task_type, task_id)\n try:\n process_statuses.append(conn.recv())\n except EOFError:\n # This shouldn't happen due to exceptions in fn. This usually\n # means bugs in the runner.\n self.shutdown()\n raise RuntimeError('Unexpected EOF. Worker process may have died. 
'\n 'Please report a bug')\n\n return_values = []\n for process_status in process_statuses:\n assert isinstance(process_status, _ProcessStatusInfo)\n if not process_status.is_successful:\n six.reraise(*process_status.exc_info)\n if process_status.return_value is not None:\n return_values.append(process_status.return_value)\n\n return return_values\n\n\ndef _pool_runner_worker(task_type, task_id, initializer, conn):\n \"\"\"Function that runs on the workers in a pool.\n\n It listens for callables to run and returns the result until `conn` is closed.\n It captures the exceptions during executing the callable and return it through\n `conn`.\n\n Args:\n task_type: the task type.\n task_id: the task index.\n initializer: a callable to execute during startup.\n conn: a multiprocessing.Connection object to listen for tasks and send\n results.\n \"\"\"\n if initializer:\n initializer = dill.loads(initializer)\n initializer()\n while True:\n try:\n fn, args, kwargs = conn.recv()\n except EOFError:\n break\n fn = dill.loads(fn)\n info = _run_contained(task_type, task_id, fn, args, kwargs)\n sys.stdout.flush()\n sys.stderr.flush()\n conn.send(info)\n\n\ndef _run_contained(task_type, task_id, fn, args, kwargs):\n \"\"\"Runs `fn` with `args` and `kwargs`.\n\n The function returns _ProcessStatusInfo which captures the return value and\n the exception.\n\n Args:\n task_type: the task type.\n task_id: the task index.\n fn: the function to be run.\n args: optional positional arguments to be supplied in `fn`.\n kwargs: optional keyword arguments to be supplied in `fn`.\n\n Returns:\n a _ProcessStatusInfo.\n\n \"\"\"\n is_successful = False\n return_value = None\n exc_info = None\n try:\n return_value = fn(*args, **kwargs)\n is_successful = True\n return _ProcessStatusInfo(\n task_type=task_type,\n task_id=task_id,\n is_successful=is_successful,\n exc_info=exc_info,\n return_value=return_value)\n\n # If `fn` ends up exiting with `sys.exit()`, the `SystemExit` is not\n # handled here.\n except Exception: # pylint: disable=broad-except\n exc_info = sys.exc_info()\n return _ProcessStatusInfo(\n task_type=task_type,\n task_id=task_id,\n is_successful=is_successful,\n exc_info=exc_info,\n return_value=return_value)\n\n\n@tf_export('__internal__.distribute.multi_process_runner'\n '.SubprocessTimeoutError',\n v1=[])\nclass SubprocessTimeoutError(RuntimeError):\n \"\"\"An error that indicates there is at least one subprocess timing out.\n\n When this is raised, a namedtuple object representing the multi-process run\n result can be retrieved by\n `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError`'s\n `mpr_result` attribute. See\n `tf.__internal__.distribute.multi_process_runner.run` for more information.\n \"\"\"\n\n def __init__(self, msg, mpr_result):\n super(SubprocessTimeoutError, self).__init__(msg)\n self.mpr_result = mpr_result\n\n\n@tf_export('__internal__.distribute.multi_process_runner'\n '.UnexpectedSubprocessExitError',\n v1=[])\nclass UnexpectedSubprocessExitError(RuntimeError):\n \"\"\"An error indicating there is at least one subprocess with unexpected exit.\n\n When this is raised, a namedtuple object representing the multi-process run\n result can be retrieved by\n `tf.__internal__.distribute.multi_process_runner\n .UnexpectedSubprocessExitError`'s\n `mpr_result` attribute. 
See\n `tf.__internal__.distribute.multi_process_runner.run` for more information.\n \"\"\"\n\n def __init__(self, msg, mpr_result):\n super(UnexpectedSubprocessExitError, self).__init__(msg)\n self.mpr_result = mpr_result\n\n\n@tf_export(\n '__internal__.distribute.multi_process_runner.NotInitializedError', v1=[])\nclass NotInitializedError(RuntimeError):\n \"\"\"An error indicating `multi_process_runner.run` is used without init.\n\n When this is raised, user is supposed to call\n `tf.__internal__.distribute.multi_process_runner.test_main()` within\n `if __name__ == '__main__':` block to properly initialize\n `multi_process_runner.run`.\n \"\"\"\n pass\n\n\ndef _check_initialization():\n if not multi_process_lib.initialized():\n raise NotInitializedError(\n '`multi_process_runner` is not initialized. '\n 'Please call `tf.__internal__.distribute.multi_process_runner.'\n 'test_main()` within `if __name__ == \\'__main__\\':` block '\n 'in your python module to properly initialize '\n '`multi_process_runner`.')\n\n\ndef _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None):\n \"\"\"Set TF_CONFIG environment variable.\"\"\"\n tf_config_dict = {\n 'cluster': cluster_spec,\n 'task': {\n 'type': task_type,\n 'index': task_id,\n },\n }\n if rpc_layer is not None:\n tf_config_dict['rpc_layer'] = rpc_layer\n os.environ['TF_CONFIG'] = json.dumps(tf_config_dict)\n\n\n@tf_export('__internal__.distribute.multi_process_runner.run', v1=[])\ndef run(fn,\n cluster_spec,\n rpc_layer=None,\n max_run_time=None,\n return_output=False,\n timeout=_DEFAULT_TIMEOUT_SEC,\n args=None,\n kwargs=None):\n \"\"\"Run `fn` in multiple processes according to `cluster_spec`.\n\n Given a callable `fn`, `tf.__internal__.distribute.multi_process_runner.run`\n launches multiple processes, each of which runs `fn`. These processes are\n referred to as \"subprocesses\" or \"child processes\". Each of those subprocesses\n will have their `TF_CONFIG` environment variable set, according to\n `cluster_spec` and their task types. The stdout of the subprocesses are\n streamed to the main process' and thus available in logs (if `stream_output`\n is True), with [type-id] prefix.\n\n `tf.__internal__.distribute.multi_process_runner.run` will block until all\n subprocesses have successfully exited, and return a namedtuple object that\n represents the run result. This object has a `return_value` attribute, which\n is a list that contains subprocesses `fn`'s return values, for those\n subprocesses that successfully returned from `fn`. The order of `return_value`\n list is not meaningful. If an optional arg `return_output` (default to False)\n is set to True, the namedtuple object will have an additional attribute\n `stdout`, which is a list containing the stdout of the subprocesses. If any\n subprocess' `fn` ends up raising an error, that error will be reraised from\n `tf.__internal__.distribute.multi_process_runner.run`, and the aforementioned\n namedtuple object will be available through the exception's\n `mpr_result` attribute.\n\n This utility is used for simulating running TensorFlow programs across\n multiple task types, and each of the task type may contain more than one task\n (except for \"chief\" where more than one task is prohibited). 
Test coverage of\n multi-worker training is the main application of this utility, where code\n written for multi-worker training can be realistically covered in unit tests.\n\n Any test module that uses\n `tf.__internal__.distribute.multi_process_runner.run()` must call\n `tf.__internal__.distribute.multi_process_runner.test_main()` instead of\n regular `test.main()` inside `if __name__ == '__main__':` block for proper\n initialization.\n\n Args:\n fn: Function to be run on child processes. This will be run on processes for\n all task types.\n cluster_spec: Dict for cluster spec. The utility function\n `tf.__internal__.distribute.multi_process_runner.create_cluster_spec` can\n be conveniently used to create such dict. The following is an example of\n cluster with three workers and two ps's.\n {\"worker\": [\"worker0.example.com:2222\",\n \"worker1.example.com:2222\",\n \"worker2.example.com:2222\"],\n \"ps\": [\"ps0.example.com:2222\",\n \"ps1.example.com:2222\"]}\n rpc_layer: RPC layer to use. Default value is 'grpc'.\n max_run_time: `None` or integer. If not `None`, child processes are forced\n to exit at approximately this many seconds after this utility is called.\n We achieve this through `signal.alarm()` api. Note that this is best\n effort at Python level since Python signal handler does not get executed\n when it runs lower level C/C++ code. So it can be delayed for arbitrarily\n long time. If any of the child process is still running when\n `max_run_time` is up, they will be force-terminated and an\n `tf.__internal__.distribute.multi_process_runner\n .UnexpectedSubprocessExitError`\n may be raised. If `None`, child processes are not forced to exit.\n return_output: If True, the output/error from the subprocesses should be\n collected to be attached to the resulting namedtuple returned from this\n utility. The list of output can be retrieved via `stdout` attribute.\n Defaults to False.\n timeout: optional integer or `None`. If provided as an integer, and not all\n processes report status within roughly `timeout` seconds, a\n `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError`\n exception will be raised. If `None`,\n `tf.__internal__.distribute.multi_process_runner.run` never times out.\n Defaults to the constant `_DEFAULT_TIMEOUT_SEC` defined in\n `multi_process_runner` module.\n args: Positional arguments to be sent to `fn` run on subprocesses.\n kwargs: Keyword arguments to be sent to `fn` run on subprocesses.\n\n Returns:\n A namedtuple object, which has two attributes,\n `return_value` and `stdout`. `return_value` always contains a list of\n returnvalues from the subprocesses, although the order is not meaningful.\n If `return_output` argument is True, `stdout` is available that contains a\n list of all messages from subprocesses' stdout and stderr, and the order\n is mostly chronological.\n\n Raises:\n RuntimeError: if\n `tf.__internal__.distribute.multi_process_runner.test_main()` is\n not called in test's `if __name__ == '__main__':` block.\n ValueError: if there are more than one chief in the `cluster_spec`.\n tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError: if\n not all processes report status approximately\n within `timeout` seconds. 
When this is raised, a\n namedtuple object can be retrieved by\n `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError`'s\n `mpr_result` attribute, which has the same\n structure as above 'Returns' section describes.\n tf.__internal__.distribute.multi_process_runner\n .UnexpectedSubprocessExitError:\n If any of the subprocesses did not exit\n properly (for example, they exit on SIGTERM or SIGKILL signal). When\n this is raised, a namedtuple object can be retrieved by\n `tf.__internal__.distribute.multi_process_runner\n .UnexpectedSubprocessExitError`'s\n `mpr_result` attribute, which has the\n same structure as above 'Returns' section describes. If `max_run_time`\n is not `None`, it is expected that some subprocesses may be\n force-killed when `max_run_time` is up, and this is raised in those\n cases.\n Exception: if there is an Exception propagated from any subprocess. When\n this is raised, a namedtuple object can be retrieved by\n `tf.__internal__.distribute.multi_process_runner\n .UnexpectedSubprocessExitError`\n `mpr_result` attribute, which has the\n same structure as above 'Returns' section describes.\n\n Examples:\n\n ```python\n class SimpleMultiProcessTest(tf.test.TestCase):\n\n def test_simple_printing_and_return(self):\n\n def fn():\n resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()\n\n # This will print \"[chief-0]: Task type: chief , task id: 0\"\n # for chief, for example.\n logging.info('Task type: %s, task id: %d',\n resolver.task_type, resolver.task_id)\n\n return resolver.task_type\n\n result = tf.__internal__.distribute.multi_process_runner.run(\n fn=fn,\n cluster_spec=(\n tf.__internal__\n .distribute.multi_process_runner.create_cluster_spec(\n has_chief=True, num_workers=2)))\n assert sorted(result.return_value) == ['chief', 'worker', 'worker']\n\n def test_error_from_fn(self):\n\n def fn():\n resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()\n raise ValueError('Task type {}, task id {} is errors out'.format(\n resolver.task_type, resolver.task_id))\n\n with self.assertRaisesRegexp(ValueError,\n 'Task type worker, task id 0 is errors out'):\n cluster_spec = (\n tf.__internal__.distribute.multi_process_runner.create_cluster_spec(\n num_workers=1))\n tf.__internal__.distribute.multi_process_runner.run(\n fn=fn, cluster_spec=cluster_spec)\n\n\n if __name__ == '__main__':\n tf.__internal__.distribute.multi_process_runner.test_main()\n ```\n \"\"\"\n runner = MultiProcessRunner(\n fn,\n cluster_spec,\n rpc_layer,\n max_run_time=max_run_time,\n return_output=return_output,\n args=args,\n kwargs=kwargs)\n runner.start()\n return runner.join(timeout)\n\n\n# This is set by MultiProcessRunner in worker processes.\n_barrier = None\n\n\n@tf_export('__internal__.distribute.multi_process_runner.get_barrier', v1=[])\ndef get_barrier():\n \"\"\"Returns a `multiprocessing.Barrier` for `multi_process_runner.run`.\n\n `tf.__internal__.distribute.multi_process_runner.get_barrier()` returns\n a `multiprocessing.Barrier` object which can be used within `fn` of\n `tf.__internal__.distribute.multi_process_runner` to wait with\n `barrier.wait()` call until all other tasks have also reached the\n `barrier.wait()` call, before they can proceed individually.\n\n Note that all tasks (subprocesses) have to reach `barrier.wait()` call to\n proceed. 
Currently it is not supported to block on only a subset of tasks\n in the cluster.\n\n Example:\n ```python\n\n def fn():\n some_work_to_be_done_by_all_tasks()\n\n tf.__internal__.distribute.multi_process_runner.get_barrier().wait()\n\n # The barrier guarantees that at this point, all tasks have finished\n # `some_work_to_be_done_by_all_tasks()`\n some_other_work_to_be_done_by_all_tasks()\n\n result = tf.__internal__.distribute.multi_process_runner.run(\n fn=fn,\n cluster_spec=(\n tf.__internal__\n .distribute.multi_process_runner.create_cluster_spec(\n num_workers=2)))\n ```\n\n\n Returns:\n A `multiprocessing.Barrier` for `multi_process_runner.run`.\n \"\"\"\n if _barrier is None:\n raise ValueError(\n 'barrier is not defined. It is likely because you are calling '\n 'get_barrier() in the main process. get_barrier() can only be called '\n 'in the subprocesses.'\n )\n return _barrier\n\n\n_manager = None\n_manager_lock = threading.Lock()\n\n\ndef manager():\n \"\"\"Returns the multiprocessing manager object for concurrency tools.\n\n The manager object is useful as it controls a server process that holds\n the python objects that can be shared across processes. This can be used\n for parent-subprocess communication:\n\n ```python\n manager = multi_process_runner.manager()\n some_event_happening_in_subprocess = manager.Event()\n mpr = multi_process_runner.MultiProcessRunner(fn, cluster_spec,\n args=(some_event_happening_in_subprocess,))\n mpr.start()\n some_event_happening_in_subprocess.wait()\n # Do something that only should after some event happens in subprocess.\n ```\n\n Note that the user of multi_process_runner should not create additional\n `multiprocessing.Manager()` objects; doing so can result in segfault in\n some cases.\n\n This method should only be called after multi_process_runner.test_main() is\n called.\n \"\"\"\n _check_initialization()\n global _manager\n with _manager_lock:\n if _manager is None:\n _manager = multiprocessing.Manager()\n return _manager\n\n\n@tf_export('__internal__.distribute.multi_process_runner.test_main', v1=[])\ndef test_main():\n \"\"\"Main function to be called within `__main__` of a test file.\n\n Any test module that uses\n `tf.__internal__.distribute.multi_process_runner.run()`\n must call this instead of regular `test.main()` inside\n `if __name__ == '__main__':` block, or an error will be raised when\n `tf.__internal__.distribute.multi_process_runner.run()` is used. This method\n takes\n care of needed initialization for launching multiple subprocesses.\n\n Example:\n ```python\n class MyTestClass(tf.test.TestCase):\n def testSomething(self):\n # Testing code making use of\n # `tf.__internal__.distribute.multi_process_runner.run()`.\n\n if __name__ == '__main__':\n tf.__internal__.distribute.multi_process_runner.test_main()\n ```\n \"\"\"\n # Inject tearDownModule() to shut down all pool runners. Active pool runners\n # will block the program from exiting. This is necessary for global pool\n # runners. We tried atexit in the past, and it doesn't work in some\n # deployment.\n old_tear_down_module = getattr(sys.modules['__main__'], 'tearDownModule',\n None)\n\n def tear_down_module():\n _shutdown_all_pool_runners()\n if old_tear_down_module is not None:\n old_tear_down_module()\n\n setattr(sys.modules['__main__'], 'tearDownModule', tear_down_module)\n multi_process_lib.test_main()\n",
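A minimal sketch of driving the `MultiProcessRunner` class documented above (start the cluster, then join for a `MultiProcessRunnerResult`), assuming the `multi_worker_test_base.create_cluster_spec` helper referenced in its docstrings; this is an illustration only, not an excerpt from the module:

```python
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.platform import test


def fn():
  # Runs once per task ('chief' / 'worker'), each in its own subprocess.
  return 'done'


class MprUsageTest(test.TestCase):

  def test_start_and_join(self):
    mpr = multi_process_runner.MultiProcessRunner(
        fn,
        multi_worker_test_base.create_cluster_spec(
            has_chief=True, num_workers=2))
    mpr.start()
    result = mpr.join()  # MultiProcessRunnerResult(return_value=..., stdout=...)
    self.assertCountEqual(result.return_value, ['done', 'done', 'done'])


if __name__ == '__main__':
  # Used instead of test.main() so subprocess launching is initialized.
  multi_process_runner.test_main()
```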
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converter for slice operations.\"\"\"\n\nimport gast\n\nfrom tensorflow.python.autograph.core import converter\nfrom tensorflow.python.autograph.lang import directives\nfrom tensorflow.python.autograph.pyct import templates\n\n\nclass SliceTransformer(converter.Base):\n \"\"\"Converts slicing operations to their TF counterpart.\n\n Currently, relying on the default slice operator that Tensor uses is\n insufficient, because TensorArray and tensor lists use dedicated index read\n and write functions.\n \"\"\"\n\n def _process_single_assignment(self, target, value):\n if not isinstance(target, gast.Subscript):\n return None\n s = target.slice\n if isinstance(s, (gast.Tuple, gast.Slice)):\n return None\n\n template = \"\"\"\n target = ag__.set_item(target, key, item)\n \"\"\"\n return templates.replace(\n template, target=target.value, key=target.slice, item=value)\n\n def visit_Assign(self, node):\n node = self.generic_visit(node)\n # TODO(mdan): Support unpackings and multiple assignments.\n if len(node.targets) != 1:\n raise NotImplementedError('multiple assignment')\n replacement = self._process_single_assignment(node.targets[0], node.value)\n if replacement is not None:\n return replacement\n return node\n\n def visit_Subscript(self, node):\n node = self.generic_visit(node)\n s = node.slice\n if isinstance(s, (gast.Tuple, gast.Slice)):\n return node\n\n if not isinstance(node.ctx, gast.Load):\n # Index writes are handled at a higher level, one at which the rvalue is\n # also available.\n return node\n\n dtype = self.get_definition_directive(\n node.value,\n directives.set_element_type,\n 'dtype',\n default=templates.replace_as_expression('None'))\n\n template = \"\"\"\n ag__.get_item(\n target,\n key,\n opts=ag__.GetItemOpts(element_dtype=dtype))\n \"\"\"\n return templates.replace_as_expression(\n template, target=node.value, key=s, dtype=dtype)\n\n\ndef transform(node, ctx):\n return SliceTransformer(ctx).visit(node)\n",
"# ==============================================================================\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Upgrade script to move from pre-release schema to new schema.\n\nUsage examples:\n\nbazel run tensorflow/lite/schema/upgrade_schema -- in.json out.json\nbazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.bin\nbazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.json\nbazel run tensorflow/lite/schema/upgrade_schema -- in.json out.bin\nbazel run tensorflow/lite/schema/upgrade_schema -- in.tflite out.tflite\n\"\"\"\nimport argparse\nimport contextlib\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import resource_loader\n\nparser = argparse.ArgumentParser(\n description=\"Script to move TFLite models from pre-release schema to \"\n \"new schema.\")\nparser.add_argument(\n \"input\",\n type=str,\n help=\"Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.\")\nparser.add_argument(\n \"output\",\n type=str,\n help=\"Output json or bin TensorFlow lite model compliant with \"\n \"the new schema. Extension must be `.json`, `.bin` or `.tflite`.\")\n\n\n# RAII Temporary Directory, because flatc doesn't allow direct use of tempfiles.\[email protected]\ndef TemporaryDirectoryResource():\n temporary = tempfile.mkdtemp()\n try:\n yield temporary\n finally:\n shutil.rmtree(temporary)\n\n\nclass Converter(object):\n \"\"\"Converts TensorFlow flatbuffer models from old to new version of schema.\n\n This can convert between any version to the latest version. 
It uses\n an incremental upgrade strategy to go from version to version.\n\n Usage:\n converter = Converter()\n converter.Convert(\"a.tflite\", \"a.json\")\n converter.Convert(\"b.json\", \"b.tflite\")\n \"\"\"\n\n def __init__(self):\n # TODO(aselle): make this work in the open source version with better\n # path.\n paths_to_try = [\n \"../../../../flatbuffers/flatc\", # not bazel\n \"../../../../external/flatbuffers/flatc\" # bazel\n ]\n for p in paths_to_try:\n self._flatc_path = resource_loader.get_path_to_datafile(p)\n if os.path.exists(self._flatc_path): break\n\n def FindSchema(base_name):\n return resource_loader.get_path_to_datafile(\"%s\" % base_name)\n\n # Supported schemas for upgrade.\n self._schemas = [\n (0, FindSchema(\"schema_v0.fbs\"), True, self._Upgrade0To1),\n (1, FindSchema(\"schema_v1.fbs\"), True, self._Upgrade1To2),\n (2, FindSchema(\"schema_v2.fbs\"), True, self._Upgrade2To3),\n (3, FindSchema(\"schema_v3.fbs\"), False, None) # Non-callable by design.\n ]\n # Ensure schemas are sorted, and extract latest version and upgrade\n # dispatch function table.\n self._schemas.sort()\n self._new_version, self._new_schema = self._schemas[-1][:2]\n self._upgrade_dispatch = {\n version: dispatch\n for version, unused1, unused2, dispatch in self._schemas}\n\n def _Read(self, input_file, schema, raw_binary=False):\n \"\"\"Read a tflite model assuming the given flatbuffer schema.\n\n If `input_file` is in bin, then we must use flatc to convert the schema\n from binary to json.\n\n Args:\n input_file: a binary (flatbuffer) or json file to read from. Extension\n must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or\n FlatBuffer JSON.\n schema: which schema to use for reading\n raw_binary: whether to assume raw_binary (versions previous to v3)\n that lacked file_identifier require this.\n\n Raises:\n RuntimeError: 1. When flatc cannot be invoked.\n 2. When json file does not exists.\n ValueError: When the extension is not json or bin.\n\n Returns:\n A dictionary representing the read tflite model.\n \"\"\"\n raw_binary = [\"--raw-binary\"] if raw_binary else []\n with TemporaryDirectoryResource() as tempdir:\n basename = os.path.basename(input_file)\n basename_no_extension, extension = os.path.splitext(basename)\n if extension in [\".bin\", \".tflite\"]:\n # Convert to json using flatc\n returncode = subprocess.call([\n self._flatc_path,\n \"-t\",\n \"--strict-json\",\n \"--defaults-json\",\n ] + raw_binary + [\"-o\", tempdir, schema, \"--\", input_file])\n if returncode != 0:\n raise RuntimeError(\"flatc failed to convert from binary to json.\")\n json_file = os.path.join(tempdir, basename_no_extension + \".json\")\n if not os.path.exists(json_file):\n raise RuntimeError(\"Could not find %r\" % json_file)\n elif extension == \".json\":\n json_file = input_file\n else:\n raise ValueError(\"Invalid extension on input file %r\" % input_file)\n return json.load(open(json_file))\n\n def _Write(self, data, output_file):\n \"\"\"Output a json or bin version of the flatbuffer model.\n\n Args:\n data: Dict representing the TensorFlow Lite model to write.\n output_file: filename to write the converted flatbuffer to. 
(json,\n tflite, or bin extension is required).\n Raises:\n ValueError: When the extension is not json or bin\n RuntimeError: When flatc fails to convert json data to binary.\n \"\"\"\n _, extension = os.path.splitext(output_file)\n with TemporaryDirectoryResource() as tempdir:\n if extension == \".json\":\n json.dump(data, open(output_file, \"w\"), sort_keys=True, indent=2)\n elif extension in [\".tflite\", \".bin\"]:\n input_json = os.path.join(tempdir, \"temp.json\")\n with open(input_json, \"w\") as fp:\n json.dump(data, fp, sort_keys=True, indent=2)\n returncode = subprocess.call([\n self._flatc_path, \"-b\", \"--defaults-json\", \"--strict-json\", \"-o\",\n tempdir, self._new_schema, input_json\n ])\n if returncode != 0:\n raise RuntimeError(\"flatc failed to convert upgraded json to binary.\")\n\n shutil.copy(os.path.join(tempdir, \"temp.tflite\"), output_file)\n else:\n raise ValueError(\"Invalid extension on output file %r\" % output_file)\n\n def _Upgrade0To1(self, data):\n \"\"\"Upgrade data from Version 0 to Version 1.\n\n Changes: Added subgraphs (which contains a subset of formally global\n entries).\n\n Args:\n data: Dictionary representing the TensorFlow lite data to be upgraded.\n This will be modified in-place to be an upgraded version.\n \"\"\"\n subgraph = {}\n for key_to_promote in [\"tensors\", \"operators\", \"inputs\", \"outputs\"]:\n subgraph[key_to_promote] = data[key_to_promote]\n del data[key_to_promote]\n data[\"subgraphs\"] = [subgraph]\n\n def _Upgrade1To2(self, data):\n \"\"\"Upgrade data from Version 1 to Version 2.\n\n Changes: Rename operators to Conform to NN API.\n\n Args:\n data: Dictionary representing the TensorFlow lite data to be upgraded.\n This will be modified in-place to be an upgraded version.\n Raises:\n ValueError: Throws when model builtins are numeric rather than symbols.\n \"\"\"\n\n def RemapOperator(opcode_name):\n \"\"\"Go from old schema op name to new schema op name.\n\n Args:\n opcode_name: String representing the ops (see :schema.fbs).\n Returns:\n Converted opcode_name from V1 to V2.\n \"\"\"\n old_name_to_new_name = {\n \"CONVOLUTION\": \"CONV_2D\",\n \"DEPTHWISE_CONVOLUTION\": \"DEPTHWISE_CONV_2D\",\n \"AVERAGE_POOL\": \"AVERAGE_POOL_2D\",\n \"MAX_POOL\": \"MAX_POOL_2D\",\n \"L2_POOL\": \"L2_POOL_2D\",\n \"SIGMOID\": \"LOGISTIC\",\n \"L2NORM\": \"L2_NORMALIZATION\",\n \"LOCAL_RESPONSE_NORM\": \"LOCAL_RESPONSE_NORMALIZATION\",\n \"Basic_RNN\": \"RNN\",\n }\n\n return (old_name_to_new_name[opcode_name]\n if opcode_name in old_name_to_new_name else opcode_name)\n\n def RemapOperatorType(operator_type):\n \"\"\"Remap operator structs from old names to new names.\n\n Args:\n operator_type: String representing the builtin operator data type\n string.\n (see :schema.fbs).\n Raises:\n ValueError: When the model has consistency problems.\n Returns:\n Upgraded builtin operator data type as a string.\n \"\"\"\n old_to_new = {\n \"PoolOptions\": \"Pool2DOptions\",\n \"DepthwiseConvolutionOptions\": \"DepthwiseConv2DOptions\",\n \"ConvolutionOptions\": \"Conv2DOptions\",\n \"LocalResponseNormOptions\": \"LocalResponseNormalizationOptions\",\n \"BasicRNNOptions\": \"RNNOptions\",\n }\n return (old_to_new[operator_type]\n if operator_type in old_to_new else operator_type)\n\n for subgraph in data[\"subgraphs\"]:\n for ops in subgraph[\"operators\"]:\n ops[\"builtin_options_type\"] = RemapOperatorType(\n ops[\"builtin_options_type\"])\n\n # Upgrade the operator codes\n for operator_code in data[\"operator_codes\"]:\n # Check if builtin_code is 
the appropriate string type\n # use type(\"\") instead of str or unicode. for py2and3\n if not isinstance(operator_code[\"builtin_code\"], type(u\"\")):\n raise ValueError(\"builtin_code %r is non-string. this usually means \"\n \"your model has consistency problems.\" %\n (operator_code[\"builtin_code\"]))\n operator_code[\"builtin_code\"] = (RemapOperator(\n operator_code[\"builtin_code\"]))\n\n def _Upgrade2To3(self, data):\n \"\"\"Upgrade data from Version 2 to Version 3.\n\n Changed actual read-only tensor data to be in a buffers table instead\n of inline with the tensor.\n\n Args:\n data: Dictionary representing the TensorFlow lite data to be upgraded.\n This will be modified in-place to be an upgraded version.\n \"\"\"\n buffers = [{\"data\": []}] # Start with 1 empty buffer\n for subgraph in data[\"subgraphs\"]:\n if \"tensors\" not in subgraph:\n continue\n for tensor in subgraph[\"tensors\"]:\n if \"data_buffer\" not in tensor:\n tensor[\"buffer\"] = 0\n else:\n if tensor[\"data_buffer\"]:\n tensor[u\"buffer\"] = len(buffers)\n buffers.append({\"data\": tensor[\"data_buffer\"]})\n else:\n tensor[\"buffer\"] = 0\n del tensor[\"data_buffer\"]\n data[\"buffers\"] = buffers\n\n def _PerformUpgrade(self, data):\n \"\"\"Manipulate the `data` (parsed JSON) based on changes in format.\n\n This incrementally will upgrade from version to version within data.\n\n Args:\n data: Dictionary representing the TensorFlow data. This will be upgraded\n in place.\n \"\"\"\n while data[\"version\"] < self._new_version:\n self._upgrade_dispatch[data[\"version\"]](data)\n data[\"version\"] += 1\n\n def Convert(self, input_file, output_file):\n \"\"\"Perform schema conversion from input_file to output_file.\n\n Args:\n input_file: Filename of TensorFlow Lite data to convert from. Must\n be `.json` or `.bin` extension files for JSON or Binary forms of\n the TensorFlow FlatBuffer schema.\n output_file: Filename to write to. Extension also must be `.json`\n or `.bin`.\n\n Raises:\n RuntimeError: Generated when none of the upgrader supported schemas\n matche the `input_file` data.\n \"\"\"\n # Read data in each schema (since they are incompatible). Version is\n # always present. Use the read data that matches the version of the\n # schema.\n for version, schema, raw_binary, _ in self._schemas:\n try:\n data_candidate = self._Read(input_file, schema, raw_binary)\n except RuntimeError:\n continue # Skip and hope another schema works\n if \"version\" not in data_candidate: # Assume version 1 if not present.\n data_candidate[\"version\"] = 1\n elif data_candidate[\"version\"] == 0: # Version 0 doesn't exist in wild.\n data_candidate[\"version\"] = 1\n\n if data_candidate[\"version\"] == version:\n self._PerformUpgrade(data_candidate)\n self._Write(data_candidate, output_file)\n return\n raise RuntimeError(\"No schema that the converter understands worked with \"\n \"the data file you provided.\")\n\n\ndef main(argv):\n del argv\n Converter().Convert(FLAGS.input, FLAGS.output)\n\n\nif __name__ == \"__main__\":\n FLAGS, unparsed = parser.parse_known_args()\n tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
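For intuition, here is a dependency-free sketch of the version-0-to-1 step implemented by `_Upgrade0To1` above, applied to a toy placeholder dict rather than a real parsed model.

```python
def upgrade_0_to_1(data):
  """Promote the formerly global keys into a single subgraph, in place."""
  subgraph = {}
  for key in ["tensors", "operators", "inputs", "outputs"]:
    subgraph[key] = data.pop(key)
  data["subgraphs"] = [subgraph]


toy_model = {"version": 0, "tensors": [], "operators": [],
             "inputs": [0], "outputs": [1]}
upgrade_0_to_1(toy_model)
print(toy_model)
# {'version': 0,
#  'subgraphs': [{'tensors': [], 'operators': [], 'inputs': [0], 'outputs': [1]}]}
```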
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for embedding_lookup.\"\"\"\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_embedding_lookup_tests(options):\n \"\"\"Make a set of tests to do gather.\"\"\"\n\n test_parameters = [\n {\n \"params_dtype\": [tf.float32],\n \"params_shape\": [[10], [10, 10]],\n \"ids_dtype\": [tf.int32],\n \"ids_shape\": [[3], [5]],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build the gather op testing graph.\"\"\"\n params = tf.compat.v1.placeholder(\n dtype=parameters[\"params_dtype\"],\n name=\"params\",\n shape=parameters[\"params_shape\"])\n ids = tf.compat.v1.placeholder(\n dtype=parameters[\"ids_dtype\"],\n name=\"ids\",\n shape=parameters[\"ids_shape\"])\n out = tf.nn.embedding_lookup(params, ids)\n return [params, ids], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n params = create_tensor_data(parameters[\"params_dtype\"],\n parameters[\"params_shape\"])\n ids = create_tensor_data(parameters[\"ids_dtype\"], parameters[\"ids_shape\"],\n 0, parameters[\"params_shape\"][0] - 1)\n return [params, ids], sess.run(\n outputs, feed_dict=dict(zip(inputs, [params, ids])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Unit tests for source_remote.\"\"\"\n\nimport os\nimport traceback\n\nimport grpc\n\nfrom tensorflow.core.debug import debug_service_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.lib import grpc_debug_test_server\nfrom tensorflow.python.debug.lib import source_remote\nfrom tensorflow.python.debug.lib import source_utils\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import math_ops\n# Import resource_variable_ops for the variables-to-tensor implicit conversion.\nfrom tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import tf_inspect\n\n\ndef line_number_above():\n return tf_inspect.stack()[1][2] - 1\n\n\nclass SendTracebacksTest(test_util.TensorFlowTestCase):\n\n @classmethod\n def setUpClass(cls):\n test_util.TensorFlowTestCase.setUpClass()\n (cls._server_port, cls._debug_server_url, cls._server_dump_dir,\n cls._server_thread,\n cls._server) = grpc_debug_test_server.start_server_on_separate_thread(\n poll_server=True)\n cls._server_address = \"localhost:%d\" % cls._server_port\n (cls._server_port_2, cls._debug_server_url_2, cls._server_dump_dir_2,\n cls._server_thread_2,\n cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread()\n cls._server_address_2 = \"localhost:%d\" % cls._server_port_2\n cls._curr_file_path = os.path.normpath(os.path.abspath(__file__))\n\n @classmethod\n def tearDownClass(cls):\n # Stop the test server and join the thread.\n cls._server.stop_server().wait()\n cls._server_thread.join()\n cls._server_2.stop_server().wait()\n cls._server_thread_2.join()\n test_util.TensorFlowTestCase.tearDownClass()\n\n def tearDown(self):\n ops.reset_default_graph()\n self._server.clear_data()\n self._server_2.clear_data()\n super(SendTracebacksTest, self).tearDown()\n\n def _findFirstTraceInsideTensorFlowPyLibrary(self, op):\n \"\"\"Find the first trace of an op that belongs to the TF Python library.\"\"\"\n for trace in op.traceback:\n if source_utils.guess_is_tensorflow_py_library(trace.filename):\n return trace\n\n def testSendGraphTracebacksToSingleDebugServer(self):\n this_func_name = \"testSendGraphTracebacksToSingleDebugServer\"\n with session.Session() as sess:\n a = variables.Variable(21.0, name=\"a\")\n a_lineno = line_number_above()\n b = variables.Variable(2.0, name=\"b\")\n b_lineno = line_number_above()\n math_ops.add(a, b, name=\"x\")\n x_lineno = line_number_above()\n\n send_stack = traceback.extract_stack()\n send_lineno = line_number_above()\n source_remote.send_graph_tracebacks(\n self._server_address, \"dummy_run_key\", send_stack, 
sess.graph)\n\n tb = self._server.query_op_traceback(\"a\")\n self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)\n tb = self._server.query_op_traceback(\"b\")\n self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)\n tb = self._server.query_op_traceback(\"x\")\n self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)\n\n self.assertIn(\n (self._curr_file_path, send_lineno, this_func_name),\n self._server.query_origin_stack()[-1])\n\n self.assertEqual(\n \" a = variables.Variable(21.0, name=\\\"a\\\")\",\n self._server.query_source_file_line(__file__, a_lineno))\n # Files in the TensorFlow code base shouldn not have been sent.\n tf_trace = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)\n tf_trace_file_path = tf_trace.filename\n with self.assertRaises(ValueError):\n self._server.query_source_file_line(tf_trace_file_path, 0)\n self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],\n self._server.query_call_types())\n self.assertEqual([\"dummy_run_key\"], self._server.query_call_keys())\n self.assertEqual(\n [sess.graph.version], self._server.query_graph_versions())\n\n def testSendGraphTracebacksToTwoDebugServers(self):\n this_func_name = \"testSendGraphTracebacksToTwoDebugServers\"\n with session.Session() as sess:\n a = variables.Variable(21.0, name=\"two/a\")\n a_lineno = line_number_above()\n b = variables.Variable(2.0, name=\"two/b\")\n b_lineno = line_number_above()\n x = math_ops.add(a, b, name=\"two/x\")\n x_lineno = line_number_above()\n\n send_traceback = traceback.extract_stack()\n send_lineno = line_number_above()\n\n with test.mock.patch.object(\n grpc, \"insecure_channel\",\n wraps=grpc.insecure_channel) as mock_grpc_channel:\n source_remote.send_graph_tracebacks(\n [self._server_address, self._server_address_2],\n \"dummy_run_key\", send_traceback, sess.graph)\n mock_grpc_channel.assert_called_with(\n test.mock.ANY,\n options=[(\"grpc.max_receive_message_length\", -1),\n (\"grpc.max_send_message_length\", -1)])\n\n servers = [self._server, self._server_2]\n for server in servers:\n tb = server.query_op_traceback(\"two/a\")\n self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)\n tb = server.query_op_traceback(\"two/b\")\n self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)\n tb = server.query_op_traceback(\"two/x\")\n self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)\n\n self.assertIn(\n (self._curr_file_path, send_lineno, this_func_name),\n server.query_origin_stack()[-1])\n\n self.assertEqual(\n \" x = math_ops.add(a, b, name=\\\"two/x\\\")\",\n server.query_source_file_line(__file__, x_lineno))\n tf_trace = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)\n tf_trace_file_path = tf_trace.filename\n with self.assertRaises(ValueError):\n server.query_source_file_line(tf_trace_file_path, 0)\n self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],\n server.query_call_types())\n self.assertEqual([\"dummy_run_key\"], server.query_call_keys())\n self.assertEqual([sess.graph.version], server.query_graph_versions())\n\n def testSendEagerTracebacksToSingleDebugServer(self):\n this_func_name = \"testSendEagerTracebacksToSingleDebugServer\"\n send_traceback = traceback.extract_stack()\n send_lineno = line_number_above()\n source_remote.send_eager_tracebacks(self._server_address, send_traceback)\n\n self.assertEqual([debug_service_pb2.CallTraceback.EAGER_EXECUTION],\n self._server.query_call_types())\n self.assertIn((self._curr_file_path, send_lineno, 
this_func_name),\n self._server.query_origin_stack()[-1])\n\n def testGRPCServerMessageSizeLimit(self):\n \"\"\"Assert gRPC debug server is started with unlimited message size.\"\"\"\n with test.mock.patch.object(\n grpc, \"server\", wraps=grpc.server) as mock_grpc_server:\n (_, _, _, server_thread,\n server) = grpc_debug_test_server.start_server_on_separate_thread(\n poll_server=True)\n mock_grpc_server.assert_called_with(\n test.mock.ANY,\n options=[(\"grpc.max_receive_message_length\", -1),\n (\"grpc.max_send_message_length\", -1)])\n server.stop_server().wait()\n server_thread.join()\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains Gradient functions for image ops.\"\"\"\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_image_ops\nfrom tensorflow.python.ops import math_ops\n\n\[email protected](\"ResizeNearestNeighbor\")\ndef _ResizeNearestNeighborGrad(op, grad):\n \"\"\"The derivatives for nearest neighbor resizing.\n\n Args:\n op: The ResizeNearestNeighbor op.\n grad: The tensor representing the gradient w.r.t. the output.\n\n Returns:\n The gradients w.r.t. the input and the output.\n \"\"\"\n image = op.inputs[0]\n if image.get_shape()[1:3].is_fully_defined():\n image_shape = image.get_shape()[1:3]\n else:\n image_shape = array_ops.shape(image)[1:3]\n\n grads = gen_image_ops.resize_nearest_neighbor_grad(\n grad,\n image_shape,\n align_corners=op.get_attr(\"align_corners\"),\n half_pixel_centers=op.get_attr(\"half_pixel_centers\"))\n return [grads, None]\n\n\[email protected](\"ResizeBilinear\")\ndef _ResizeBilinearGrad(op, grad):\n \"\"\"The derivatives for bilinear resizing.\n\n Args:\n op: The ResizeBilinear op.\n grad: The tensor representing the gradient w.r.t. the output.\n\n Returns:\n The gradients w.r.t. the input.\n \"\"\"\n grad0 = gen_image_ops.resize_bilinear_grad(\n grad,\n op.inputs[0],\n align_corners=op.get_attr(\"align_corners\"),\n half_pixel_centers=op.get_attr(\"half_pixel_centers\"))\n return [grad0, None]\n\n\[email protected](\"ScaleAndTranslate\")\ndef _ScaleAndTranslateGrad(op, grad):\n \"\"\"The derivatives for ScaleAndTranslate transformation op.\n\n Args:\n op: The ScaleAndTranslate op.\n grad: The tensor representing the gradient w.r.t. the output.\n\n Returns:\n The gradients w.r.t. the input.\n \"\"\"\n\n grad0 = gen_image_ops.scale_and_translate_grad(\n grad,\n op.inputs[0],\n op.inputs[2],\n op.inputs[3],\n kernel_type=op.get_attr(\"kernel_type\"),\n antialias=op.get_attr(\"antialias\"))\n return [grad0, None, None, None]\n\n\[email protected](\"ResizeBicubic\")\ndef _ResizeBicubicGrad(op, grad):\n \"\"\"The derivatives for bicubic resizing.\n\n Args:\n op: The ResizeBicubic op.\n grad: The tensor representing the gradient w.r.t. the output.\n\n Returns:\n The gradients w.r.t. 
the input.\n \"\"\"\n allowed_types = [dtypes.float32, dtypes.float64]\n grad0 = None\n if op.inputs[0].dtype in allowed_types:\n grad0 = gen_image_ops.resize_bicubic_grad(\n grad,\n op.inputs[0],\n align_corners=op.get_attr(\"align_corners\"),\n half_pixel_centers=op.get_attr(\"half_pixel_centers\"))\n return [grad0, None]\n\n\[email protected](\"CropAndResize\")\ndef _CropAndResizeGrad(op, grad):\n \"\"\"The derivatives for crop_and_resize.\n\n We back-propagate to the image only when the input image tensor has floating\n point dtype but we always back-propagate to the input boxes tensor.\n\n Args:\n op: The CropAndResize op.\n grad: The tensor representing the gradient w.r.t. the output.\n\n Returns:\n The gradients w.r.t. the input image, boxes, as well as the always-None\n gradients w.r.t. box_ind and crop_size.\n \"\"\"\n image = op.inputs[0]\n if image.get_shape().is_fully_defined():\n image_shape = image.get_shape().as_list()\n else:\n image_shape = array_ops.shape(image)\n\n allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]\n if op.inputs[0].dtype in allowed_types:\n # pylint: disable=protected-access\n grad0 = gen_image_ops.crop_and_resize_grad_image(\n grad, op.inputs[1], op.inputs[2], image_shape, T=op.get_attr(\"T\"),\n method=op.get_attr(\"method\"))\n # pylint: enable=protected-access\n else:\n grad0 = None\n\n # `grad0` is the gradient to the input image pixels and it\n # has been implemented for nearest neighbor and bilinear sampling\n # respectively. `grad1` is the gradient to the input crop boxes' coordinates.\n # When using nearest neighbor sampling, the gradient to crop boxes'\n # coordinates are not well defined. In practice, we still approximate\n # grad1 using the gradient derived from bilinear sampling.\n grad1 = gen_image_ops.crop_and_resize_grad_boxes(\n grad, op.inputs[0], op.inputs[1], op.inputs[2])\n\n return [grad0, grad1, None, None]\n\n\ndef _CustomReciprocal(x):\n \"\"\"Wrapper function around `math_ops.div_no_nan()` to perform a \"safe\" reciprocal incase the input is zero. Avoids divide by zero and NaNs.\n\n Input:\n x -> input tensor to be reciprocat-ed.\n Returns:\n x_reciprocal -> reciprocal of x without NaNs.\n \"\"\"\n return math_ops.div_no_nan(1.0, x)\n\n\[email protected](\"RGBToHSV\")\ndef _RGBToHSVGrad(op, grad):\n \"\"\"The gradients for `rgb_to_hsv` operation.\n\n This function is a piecewise continuous function as defined here:\n https://en.wikipedia.org/wiki/HSL_and_HSV#From_RGB\n We perform the multivariate derivative and compute all partial derivatives\n separately before adding them in the end. 
Formulas are given before each\n partial derivative calculation.\n\n Args:\n op: The `rgb_to_hsv` `Operation` that we are differentiating.\n grad: Gradient with respect to the output of the `rgb_to_hsv` op.\n\n Returns:\n Gradients with respect to the input of `rgb_to_hsv`.\n \"\"\"\n # Input Channels\n reds = op.inputs[0][..., 0]\n greens = op.inputs[0][..., 1]\n blues = op.inputs[0][..., 2]\n # Output Channels\n saturation = op.outputs[0][..., 1]\n value = op.outputs[0][..., 2]\n\n # Mask/Indicator for max and min values of each pixel.\n # Arbitrary assignment in case of tie breakers with R>G>B.\n # Max values\n red_biggest = math_ops.cast((reds >= blues) & \\\n (reds >= greens), dtypes.float32)\n green_biggest = math_ops.cast((greens > reds) & \\\n (greens >= blues), dtypes.float32)\n blue_biggest = math_ops.cast((blues > reds) & \\\n (blues > greens), dtypes.float32)\n # Min values\n red_smallest = math_ops.cast((reds < blues) & \\\n (reds < greens), dtypes.float32)\n green_smallest = math_ops.cast((greens <= reds) & \\\n (greens < blues), dtypes.float32)\n blue_smallest = math_ops.cast((blues <= reds) & \\\n (blues <= greens), dtypes.float32)\n\n # Derivatives of R, G, B wrt Value slice\n dv_dr = red_biggest\n dv_dg = green_biggest\n dv_db = blue_biggest\n\n # Derivatives of R, G, B wrt Saturation slice\n\n # The first term in the addition is the case when the corresponding color\n # from (r,g,b) was \"MAX\"\n # -> derivative = MIN/square(MAX), MIN could be one of the other two colors\n # The second term is the case when the corresponding color from\n # (r,g,b) was \"MIN\"\n # -> derivative = -1/MAX, MAX could be one of the other two colours.\n ds_dr = math_ops.cast(reds > 0, dtypes.float32) * \\\n math_ops.add(red_biggest * \\\n math_ops.add(green_smallest * greens, blue_smallest * blues) * \\\n _CustomReciprocal(math_ops.square(reds)),\\\n red_smallest * -1 * _CustomReciprocal((green_biggest * \\\n greens) + (blue_biggest * blues)))\n ds_dg = math_ops.cast(greens > 0, dtypes.float32) * \\\n math_ops.add(green_biggest * \\\n math_ops.add(red_smallest * reds, blue_smallest * blues) * \\\n _CustomReciprocal(math_ops.square(greens)),\\\n green_smallest * -1 * _CustomReciprocal((red_biggest * \\\n reds) + (blue_biggest * blues)))\n ds_db = math_ops.cast(blues > 0, dtypes.float32) * \\\n math_ops.add(blue_biggest * \\\n math_ops.add(green_smallest * greens, red_smallest * reds) * \\\n _CustomReciprocal(math_ops.square(blues)),\\\n blue_smallest * -1 * _CustomReciprocal((green_biggest * \\\n greens) + (red_biggest * reds)))\n\n # Derivatives of R, G, B wrt Hue slice\n\n # Need to go case by case for each color.\n # for red, dh_dr -> dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5\n # dh_dr_1 ->\n # if red was MAX, then derivative = 60 * -1 * (G-B)/square(MAX-MIN) == 60 *\\\n # -1 * (greens-blues) * reciprocal(square(saturation)) * \\\n # reciprocal(square(value))\n # elif green was MAX, there are two subcases\n # ie when red was MIN and when red was NOT MIN\n # dh_dr_2 ->\n # if red was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) +\\\n # (B-R)*(-1/square(MAX-MIN) * -1)) == 60 * (blues - greens) *\\\n # reciprocal(square(reds - greens))\n # dh_dr_3 ->\n # if red was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(greens-blues)\n # elif blue was MAX, there are two subcases\n # dh_dr_4 ->\n # if red was MIN (similarly use the UV rule) -> 60 * (blues - greens) *\\\n # reciprocal(square(blues - reds))\n # dh_dr_5 ->\n # if red was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(blues-greens)\n 
dh_dr_1 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \\\n -1 * \\\n (greens - blues) * \\\n _CustomReciprocal(math_ops.square(saturation)) *\\\n _CustomReciprocal(math_ops.square(value)))\n dh_dr_2 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \\\n red_smallest * (blues - greens) * \\\n _CustomReciprocal(math_ops.square(reds - greens)))\n dh_dr_3 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \\\n blue_smallest * -1 * _CustomReciprocal(greens - blues))\n dh_dr_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \\\n red_smallest * (blues - greens) * \\\n _CustomReciprocal(math_ops.square(blues - reds)))\n dh_dr_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \\\n green_smallest * _CustomReciprocal(blues - greens))\n\n dh_dr = dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5\n # Converting from degrees to [0,1] scale as specified in\n # https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv\n dh_dr = dh_dr / 360\n\n # for green, dh_dg -> dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5\n # dh_dg_1 ->\n # if green was MAX, then derivative = 60 * -1 * (B-R)/square(MAX-MIN) == 60 *\\\n # -1 * (blues - reds) * reciprocal(square(saturation)) * \\\n # reciprocal(square(value))\n # elif red was MAX, there are two subcases ie\n # when green was MIN and when green was NOT MIN\n # dh_dg_2 ->\n # if green was MIN (use UV rule) -> 60 * ((1 * 1/(MAX-MIN)) + \\\n # (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * \\\n # ((reciprocal(reds-greens) + (greens-blues) * \\\n # reciprocal(square(reds-greens))))\n # dh_dg_3 ->\n # if green was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(reds - blues)\n # elif blue was MAX, there are two subcases\n # dh_dg_4 ->\n # if green was MIN (similarly use the UV rule) -> 60 * -1 * \\\n # (reciprocal(blues - greens) + (reds-greens)* -1 * \\\n # reciprocal(square(blues-greens)))\n # dh_dr_5 ->\n # if green was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(blues - reds)\n dh_dg_1 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \\\n -1 * (blues - reds) * \\\n _CustomReciprocal(math_ops.square(saturation))\\\n * _CustomReciprocal(math_ops.square(value)))\n dh_dg_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \\\n green_smallest * (reds - blues) * \\\n _CustomReciprocal(math_ops.square(reds - greens)))\n dh_dg_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \\\n blue_smallest * _CustomReciprocal(reds - blues))\n dh_dg_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \\\n green_smallest * (reds - blues) * \\\n _CustomReciprocal(math_ops.square(blues - greens)))\n dh_dg_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \\\n red_smallest * -1 * _CustomReciprocal(blues - reds))\n\n dh_dg = dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5\n # Converting from degrees to [0,1] scale as specified in\n # https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv\n dh_dg = dh_dg / 360\n\n # for blue, dh_db -> dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5\n # dh_db_1 ->\n # if blue was MAX, then derivative = 60 * -1 * (R-G)/square(MAX-MIN) == 60 *\\\n # -1 * reciprocal(square(saturation)) * reciprocal(square(value))\n # elif red was MAX, there are two subcases\n # ie when blue was MIN and when blue was NOT MIN\n # dh_dg_2 ->\n # if blue was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) + \\\n # (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * (greens - reds) *\\\n # 
reciprocal(square(reds - blues))\n # dh_dg_3 ->\n # if blue was NOT MIN -> 60 * -1/MAX-MIN == 60 * -1 * \\\n # reciprocal(reds - greens)\n # elif green was MAX, there are two subcases\n # dh_dg_4 ->\n # if blue was MIN (similarly use the UV rule) -> 60 * -1 * \\\n # (reciprocal(greens - blues) + (blues - reds) * -1 * \\\n # reciprocal(square(greens - blues)))\n # dh_dr_5 ->\n # if blue was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(greens - reds)\n dh_db_1 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \\\n -1 * \\\n (reds - greens) * \\\n _CustomReciprocal(math_ops.square(saturation)) * \\\n _CustomReciprocal(math_ops.square(value)))\n dh_db_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest *\\\n blue_smallest * (greens - reds) * \\\n _CustomReciprocal(math_ops.square(reds - blues)))\n dh_db_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \\\n green_smallest * -1 * _CustomReciprocal(reds - greens))\n dh_db_4 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \\\n blue_smallest * (greens - reds) * \\\n _CustomReciprocal(math_ops.square(greens - blues)))\n dh_db_5 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \\\n red_smallest * _CustomReciprocal(greens - reds))\n\n dh_db = dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5\n # Converting from degrees to [0,1] scale as specified in\n # https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv\n dh_db = dh_db / 360\n\n # Gradients wrt to inputs\n dv_drgb = array_ops.stack(\n [grad[..., 2] * dv_dr, grad[..., 2] * dv_dg, grad[..., 2] * dv_db],\n axis=-1)\n ds_drgb = array_ops.stack(\n [grad[..., 1] * ds_dr, grad[..., 1] * ds_dg, grad[..., 1] * ds_db],\n axis=-1)\n dh_drgb = array_ops.stack(\n [grad[..., 0] * dh_dr, grad[..., 0] * dh_dg, grad[..., 0] * dh_db],\n axis=-1)\n\n gradient_input = math_ops.add(math_ops.add(dv_drgb, ds_drgb), dh_drgb)\n return gradient_input\n",
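A quick eager-mode sketch of the behaviour `_CustomReciprocal` relies on, shown via the public `tf.math.divide_no_nan` API (the public counterpart of `math_ops.div_no_nan` used above):

```python
import tensorflow as tf

x = tf.constant([0.0, 0.5, 2.0])
# Returns 0 where the denominator is 0, so no inf/NaN leaks into the gradient.
print(tf.math.divide_no_nan(1.0, x).numpy())  # [0.  2.  0.5]
```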
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for unfused_gru.\"\"\"\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_unfused_gru_tests(options):\n \"\"\"Make a set of tests for unfused gru op.\"\"\"\n\n test_parameters = [{\n \"units\": [2, 5],\n \"batch_size\": [1, 2],\n \"time\": [3],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the graph for unfused_gru.\"\"\"\n inputs = [\n tf.compat.v1.placeholder(\n tf.float32, [parameters[\"batch_size\"], parameters[\"units\"]])\n for _ in range(parameters[\"time\"])\n ]\n cell_fw = tf.compat.v1.nn.rnn_cell.GRUCell(parameters[\"units\"])\n cell_bw = tf.compat.v1.nn.rnn_cell.GRUCell(parameters[\"units\"])\n outputs, _, _ = tf.compat.v1.nn.static_bidirectional_rnn(\n cell_fw, cell_bw, inputs, dtype=tf.float32)\n\n return inputs, outputs\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Build the inputs for unfused_gru.\"\"\"\n input_values = [\n create_tensor_data(tf.float32,\n [parameters[\"batch_size\"], parameters[\"units\"]])\n for _ in range(parameters[\"time\"])\n ]\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n return input_values, sess.run(\n outputs, feed_dict=dict(zip(inputs, input_values)))\n\n make_zip_of_tests(\n options,\n test_parameters,\n build_graph,\n build_inputs,\n use_frozen_graph=True)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lock_util.\"\"\"\n\nimport random\nimport time\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import lock_util\n\n\nclass GroupLockTest(test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(1, 2, 3, 5, 10)\n def testGroups(self, num_groups):\n lock = lock_util.GroupLock(num_groups)\n num_threads = 10\n finished = set()\n\n def thread_fn(thread_id):\n time.sleep(random.random() * 0.1)\n group_id = thread_id % num_groups\n with lock.group(group_id):\n time.sleep(random.random() * 0.1)\n self.assertGreater(lock._group_member_counts[group_id], 0)\n for g, c in enumerate(lock._group_member_counts):\n if g != group_id:\n self.assertEqual(0, c)\n finished.add(thread_id)\n\n threads = [\n self.checkedThread(target=thread_fn, args=(i,))\n for i in range(num_threads)\n ]\n\n for i in range(num_threads):\n threads[i].start()\n for i in range(num_threads):\n threads[i].join()\n\n self.assertEqual(set(range(num_threads)), finished)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# -*- coding: utf-8 -*-\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for string_upper_op.\"\"\"\n\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.platform import test\n\n\nclass StringUpperOpTest(test.TestCase):\n \"\"\"Test cases for tf.strings.upper.\"\"\"\n\n def test_string_upper(self):\n strings = [\"Pigs on The Wing\", \"aNimals\"]\n\n with self.cached_session():\n output = string_ops.string_upper(strings)\n output = self.evaluate(output)\n self.assertAllEqual(output, [b\"PIGS ON THE WING\", b\"ANIMALS\"])\n\n def test_string_upper_2d(self):\n strings = [[\"pigS on THE wIng\", \"aniMals\"], [\" hello \", \"\\n\\tWorld! \\r \\n\"]]\n\n with self.cached_session():\n output = string_ops.string_upper(strings)\n output = self.evaluate(output)\n self.assertAllEqual(output, [[b\"PIGS ON THE WING\", b\"ANIMALS\"],\n [b\" HELLO \", b\"\\n\\tWORLD! \\r \\n\"]])\n\n def test_string_upper_unicode(self):\n strings = [[\"óósschloë\"]]\n with self.cached_session():\n output = string_ops.string_upper(strings, encoding=\"utf-8\")\n output = self.evaluate(output)\n # output: \"ÓÓSSCHLOË\"\n self.assertAllEqual(output, [[b\"\\xc3\\x93\\xc3\\x93SSCHLO\\xc3\\x8b\"]])\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# Functions \"ndtr\" and \"ndtri\" are derived from calculations made in:\n# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n# In the following email exchange, the author gives his consent to redistribute\n# derived works under an Apache 2.0 license.\n#\n# From: Stephen Moshier <[email protected]>\n# Date: Sat, Jun 9, 2018 at 2:36 PM\n# Subject: Re: Licensing cephes under Apache (BSD-like) license.\n# To: rif <[email protected]>\n#\n#\n#\n# Hello Rif,\n#\n# Yes, Google may distribute Cephes files under the Apache 2 license.\n#\n# If clarification is needed, I do not favor BSD over other free licenses.\n# I would agree that Apache 2 seems to cover the concern you mentioned\n# about sublicensees.\n#\n# Best wishes for good luck with your projects!\n# Steve Moshier\n#\n#\n#\n# On Thu, 31 May 2018, rif wrote:\n#\n# > Hello Steve.\n# > My name is Rif. I work on machine learning software at Google.\n# >\n# > Your cephes software continues to be incredibly useful and widely used. I\n# > was wondering whether it would be permissible for us to use the Cephes code\n# > under the Apache 2.0 license, which is extremely similar in permissions to\n# > the BSD license (Wikipedia comparisons). This would be quite helpful to us\n# > in terms of avoiding multiple licenses on software.\n# >\n# > I'm sorry to bother you with this (I can imagine you're sick of hearing\n# > about this by now), but I want to be absolutely clear we're on the level and\n# > not misusing your important software. In former conversation with Eugene\n# > Brevdo ([email protected]), you wrote \"If your licensing is similar to BSD,\n# > the formal way that has been handled is simply to add a statement to the\n# > effect that you are incorporating the Cephes software by permission of the\n# > author.\" I wanted to confirm that (a) we could use the Apache license, (b)\n# > that we don't need to (and probably you don't want to) keep getting\n# > contacted about individual uses, because your intent is generally to allow\n# > this software to be reused under \"BSD-like\" license, and (c) you're OK\n# > letting incorporators decide whether a license is sufficiently BSD-like?\n# >\n# > Best,\n# >\n# > rif\n# >\n# >\n# >\n\n\"\"\"Special Math Ops.\"\"\"\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\n\n__all__ = [\n \"erfinv\",\n \"ndtr\",\n \"ndtri\",\n \"log_ndtr\",\n \"log_cdf_laplace\",\n]\n\n\n# log_ndtr uses different functions over the ranges\n# (-infty, lower](lower, upper](upper, infty)\n# Lower bound values were chosen by examining where the support of ndtr\n# appears to be zero, relative to scipy's (which is always 64bit). 
They were\n# then made more conservative just to be safe. (Conservative means use the\n# expansion more than we probably need to.) See `NdtrTest` in\n# special_math_test.py.\nLOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)\nLOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)\n\n# Upper bound values were chosen by examining for which values of 'x'\n# Log[cdf(x)] is 0, after which point we need to use the approximation\n# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly\n# conservative, meaning we use the approximation earlier than needed.\nLOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)\nLOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)\n\n\ndef ndtr(x, name=\"ndtr\"):\n \"\"\"Normal distribution function.\n\n Returns the area under the Gaussian probability density function, integrated\n from minus infinity to x:\n\n ```\n 1 / x\n ndtr(x) = ---------- | exp(-0.5 t**2) dt\n sqrt(2 pi) /-inf\n\n = 0.5 (1 + erf(x / sqrt(2)))\n = 0.5 erfc(x / sqrt(2))\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"ndtr\").\n\n Returns:\n ndtr: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n raise TypeError(\n \"x.dtype=%s is not handled, see docstring for supported types.\"\n % x.dtype)\n return _ndtr(x)\n\n\ndef _ndtr(x):\n \"\"\"Implements ndtr core logic.\"\"\"\n half_sqrt_2 = constant_op.constant(\n 0.5 * np.sqrt(2.), dtype=x.dtype, name=\"half_sqrt_2\")\n w = x * half_sqrt_2\n z = math_ops.abs(w)\n y = array_ops.where_v2(\n math_ops.less(z, half_sqrt_2), 1. + math_ops.erf(w),\n array_ops.where_v2(\n math_ops.greater(w, 0.), 2. - math_ops.erfc(z), math_ops.erfc(z)))\n return 0.5 * y\n\n\ndef ndtri(p, name=\"ndtri\"):\n \"\"\"The inverse of the CDF of the Normal distribution function.\n\n Returns x such that the area under the pdf from minus infinity to x is equal\n to p.\n\n A piece-wise rational approximation is done for the function.\n This is a port of the implementation in netlib.\n\n Args:\n p: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"ndtri\").\n\n Returns:\n x: `Tensor` with `dtype=p.dtype`.\n\n Raises:\n TypeError: if `p` is not floating-type.\n \"\"\"\n\n with ops.name_scope(name, values=[p]):\n p = ops.convert_to_tensor(p, name=\"p\")\n if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n raise TypeError(\n \"p.dtype=%s is not handled, see docstring for supported types.\"\n % p.dtype)\n return _ndtri(p)\n\n\ndef _ndtri(p):\n \"\"\"Implements ndtri core logic.\"\"\"\n\n # Constants used in piece-wise rational approximations. 
Taken from the cephes\n # library:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n\n p0 = [\n -1.23916583867381258016E0, 1.39312609387279679503E1,\n -5.66762857469070293439E1, 9.80010754185999661536E1,\n -5.99633501014107895267E1\n ]\n q0 = [\n -1.18331621121330003142E0, 1.59056225126211695515E1,\n -8.20372256168333339912E1, 2.00260212380060660359E2,\n -2.25462687854119370527E2, 8.63602421390890590575E1,\n 4.67627912898881538453E0, 1.95448858338141759834E0, 1.0\n ]\n p1 = [\n -8.57456785154685413611E-4, -3.50424626827848203418E-2,\n -1.40256079171354495875E-1, 2.18663306850790267539E0,\n 1.46849561928858024014E1, 4.40805073893200834700E1,\n 5.71628192246421288162E1, 3.15251094599893866154E1,\n 4.05544892305962419923E0\n ]\n q1 = [\n -9.33259480895457427372E-4, -3.80806407691578277194E-2,\n -1.42182922854787788574E-1, 2.50464946208309415979E0,\n 1.50425385692907503408E1, 4.13172038254672030440E1,\n 4.53907635128879210584E1, 1.57799883256466749731E1, 1.0\n ]\n p2 = [\n 6.23974539184983293730E-9, 2.65806974686737550832E-6,\n 3.01581553508235416007E-4, 1.23716634817820021358E-2,\n 2.01485389549179081538E-1, 1.33303460815807542389E0,\n 3.93881025292474443415E0, 6.91522889068984211695E0,\n 3.23774891776946035970E0\n ]\n q2 = [\n 6.79019408009981274425E-9, 2.89247864745380683936E-6,\n 3.28014464682127739104E-4, 1.34204006088543189037E-2,\n 2.16236993594496635890E-1, 1.37702099489081330271E0,\n 3.67983563856160859403E0, 6.02427039364742014255E0, 1.0\n ]\n\n def _create_polynomial(var, coeffs):\n \"\"\"Compute n_th order polynomial via Horner's method.\"\"\"\n coeffs = np.array(coeffs, var.dtype.as_numpy_dtype)\n if not coeffs.size:\n return array_ops.zeros_like(var)\n return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var\n\n maybe_complement_p = array_ops.where_v2(p > -np.expm1(-2.), 1. - p, p)\n # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs\n # later on. The result from the computation when p == 0 is not used so any\n # number that doesn't result in NaNs is fine.\n sanitized_mcp = array_ops.where_v2(\n maybe_complement_p <= 0.,\n array_ops.fill(array_ops.shape(p), np.array(0.5, p.dtype.as_numpy_dtype)),\n maybe_complement_p)\n\n # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).\n w = sanitized_mcp - 0.5\n ww = w ** 2\n x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)\n / _create_polynomial(ww, q0))\n x_for_big_p *= -np.sqrt(2. * np.pi)\n\n # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),\n # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different\n # arrays based on whether p < exp(-32).\n z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))\n first_term = z - math_ops.log(z) / z\n second_term_small_p = (\n _create_polynomial(1. / z, p2) /\n _create_polynomial(1. / z, q2) / z)\n second_term_otherwise = (\n _create_polynomial(1. / z, p1) /\n _create_polynomial(1. / z, q1) / z)\n x_for_small_p = first_term - second_term_small_p\n x_otherwise = first_term - second_term_otherwise\n\n x = array_ops.where_v2(\n sanitized_mcp > np.exp(-2.), x_for_big_p,\n array_ops.where_v2(z >= 8.0, x_for_small_p, x_otherwise))\n\n x = array_ops.where_v2(p > 1. 
- np.exp(-2.), x, -x)\n infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype)\n infinity = array_ops.fill(array_ops.shape(p), infinity_scalar)\n x_nan_replaced = array_ops.where_v2(p <= 0.0, -infinity,\n array_ops.where_v2(p >= 1.0, infinity, x))\n return x_nan_replaced\n\n\ndef log_ndtr(x, series_order=3, name=\"log_ndtr\"):\n \"\"\"Log Normal distribution function.\n\n For details of the Normal distribution function see `ndtr`.\n\n This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or\n using an asymptotic series. Specifically:\n - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on\n `log(1-x) ~= -x, x << 1`.\n - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique\n and take a log.\n - For `x <= lower_segment`, we use the series approximation of erf to compute\n the log CDF directly.\n\n The `lower_segment` is set based on the precision of the input:\n\n ```\n lower_segment = { -20, x.dtype=float64\n { -10, x.dtype=float32\n upper_segment = { 8, x.dtype=float64\n { 5, x.dtype=float32\n ```\n\n When `x < lower_segment`, the `ndtr` asymptotic series approximation is:\n\n ```\n ndtr(x) = scale * (1 + sum) + R_N\n scale = exp(-0.5 x**2) / (-x sqrt(2 pi))\n sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}\n R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})\n ```\n\n where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a\n [double-factorial](https://en.wikipedia.org/wiki/Double_factorial).\n\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n series_order: Positive Python `integer`. Maximum depth to\n evaluate the asymptotic expansion. This is the `N` above.\n name: Python string. A name for the operation (default=\"log_ndtr\").\n\n Returns:\n log_ndtr: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n TypeError: if `series_order` is a not Python `integer.`\n ValueError: if `series_order` is not in `[0, 30]`.\n \"\"\"\n if not isinstance(series_order, int):\n raise TypeError(\"series_order must be a Python integer.\")\n if series_order < 0:\n raise ValueError(\"series_order must be non-negative.\")\n if series_order > 30:\n raise ValueError(\"series_order must be <= 30.\")\n\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n\n if x.dtype.as_numpy_dtype == np.float64:\n lower_segment = LOGNDTR_FLOAT64_LOWER\n upper_segment = LOGNDTR_FLOAT64_UPPER\n elif x.dtype.as_numpy_dtype == np.float32:\n lower_segment = LOGNDTR_FLOAT32_LOWER\n upper_segment = LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError(\"x.dtype=%s is not supported.\" % x.dtype)\n\n # The basic idea here was ported from:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n # We copy the main idea, with a few changes\n # * For x >> 1, and X ~ Normal(0, 1),\n # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],\n # which extends the range of validity of this function.\n # * We use one fixed series_order for all of 'x', rather than adaptive.\n # * Our docstring properly reflects that this is an asymptotic series, not a\n # Taylor series. We also provided a correct bound on the remainder.\n # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when\n # x=0. This happens even though the branch is unchosen because when x=0\n # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan\n # regardless of whether dy is finite. 
Note that the minimum is a NOP if\n # the branch is chosen.\n return array_ops.where_v2(\n math_ops.greater(x, upper_segment),\n -_ndtr(-x), # log(1-x) ~= -x, x << 1 # pylint: disable=invalid-unary-operand-type\n array_ops.where_v2(\n math_ops.greater(x, lower_segment),\n math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),\n _log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order)))\n\n\ndef _log_ndtr_lower(x, series_order):\n \"\"\"Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.\"\"\"\n x_2 = math_ops.square(x)\n # Log of the term multiplying (1 + sum)\n log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2. * np.pi)\n return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))\n\n\ndef _log_ndtr_asymptotic_series(x, series_order):\n \"\"\"Calculates the asymptotic series used in log_ndtr.\"\"\"\n dtype = x.dtype.as_numpy_dtype\n if series_order <= 0:\n return np.array(1, dtype)\n x_2 = math_ops.square(x)\n even_sum = array_ops.zeros_like(x)\n odd_sum = array_ops.zeros_like(x)\n x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.\n for n in range(1, series_order + 1):\n y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n\n if n % 2:\n odd_sum += y\n else:\n even_sum += y\n x_2n *= x_2\n return 1. + even_sum - odd_sum\n\n\ndef erfinv(x, name=\"erfinv\"):\n \"\"\"The inverse function for erf, the error function.\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"erfinv\").\n\n Returns:\n x: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n raise TypeError(\n \"x.dtype=%s is not handled, see docstring for supported types.\"\n % x.dtype)\n return ndtri((x + 1.0) / 2.0) / np.sqrt(2)\n\n\ndef _double_factorial(n):\n \"\"\"The double factorial function for small Python integer `n`.\"\"\"\n return np.prod(np.arange(n, 1, -2))\n\n\ndef log_cdf_laplace(x, name=\"log_cdf_laplace\"):\n \"\"\"Log Laplace distribution function.\n\n This function calculates `Log[L(x)]`, where `L(x)` is the cumulative\n distribution function of the Laplace distribution, i.e.\n\n ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```\n\n For numerical accuracy, `L(x)` is computed in different ways depending on `x`,\n\n ```\n x <= 0:\n Log[L(x)] = Log[0.5] + x, which is exact\n\n 0 < x:\n Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"log_ndtr\").\n\n Returns:\n `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n \"\"\"\n\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n\n # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.\n lower_solution = -np.log(2.) + x\n\n # safe_exp_neg_x = exp{-x} for x > 0, but is\n # bounded above by 1, which avoids\n # log[1 - 1] = -inf for x = log(1/2), AND\n # exp{-x} --> inf, for x << -1\n safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))\n\n # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used\n # internally by log1p, rather than being done explicitly here.\n upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)\n\n return array_ops.where_v2(x < 0., lower_solution, upper_solution)\n",
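The asymptotic expansion used by `_log_ndtr_lower` above can be sanity-checked outside TensorFlow. Below is a minimal NumPy sketch of that series (an illustration under the stated assumptions, not the TF implementation); `scipy.stats.norm.logcdf` is used only as a reference value.

import numpy as np
from scipy.stats import norm  # reference implementation, for comparison only

def _double_factorial_np(n):
    # (2n-1)!! = (2n-1)(2n-3)...(3)(1); the empty product for n=1 gives 1.
    return np.prod(np.arange(n, 1, -2))

def log_ndtr_lower_np(x, series_order=3):
    """Asymptotic series for log(P[X < x]), X ~ N(0, 1), valid for x << -1."""
    x2 = x ** 2
    log_scale = -0.5 * x2 - np.log(-x) - 0.5 * np.log(2.0 * np.pi)
    series = np.ones_like(x)
    for n in range(1, series_order + 1):
        series = series + (-1.0) ** n * _double_factorial_np(2 * n - 1) / x2 ** n
    return log_scale + np.log(series)

x = np.array([-10.0, -15.0, -25.0])
print(log_ndtr_lower_np(x))  # agrees with the reference to many digits ...
print(norm.logcdf(x))        # ... in this far-left-tail regime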
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions used by multiple tflite test files.\"\"\"\n\nfrom tensorflow.lite.python import schema_py_generated as schema_fb\nfrom tensorflow.lite.python import schema_util\nfrom tensorflow.lite.tools import visualize\n\n\ndef get_ops_list(model_data):\n \"\"\"Returns a set of ops in the tflite model data.\"\"\"\n model = schema_fb.Model.GetRootAsModel(model_data, 0)\n op_set = set()\n\n for subgraph_idx in range(model.SubgraphsLength()):\n subgraph = model.Subgraphs(subgraph_idx)\n for op_idx in range(subgraph.OperatorsLength()):\n op = subgraph.Operators(op_idx)\n opcode = model.OperatorCodes(op.OpcodeIndex())\n builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n if builtin_code == schema_fb.BuiltinOperator.CUSTOM:\n opname = opcode.CustomCode().decode(\"utf-8\")\n op_set.add(opname)\n else:\n op_set.add(visualize.BuiltinCodeToName(builtin_code))\n return op_set\n\n\ndef get_output_shapes(model_data):\n \"\"\"Returns a list of output shapes in the tflite model data.\"\"\"\n model = schema_fb.Model.GetRootAsModel(model_data, 0)\n\n output_shapes = []\n for subgraph_idx in range(model.SubgraphsLength()):\n subgraph = model.Subgraphs(subgraph_idx)\n for output_idx in range(subgraph.OutputsLength()):\n output_tensor_idx = subgraph.Outputs(output_idx)\n output_tensor = subgraph.Tensors(output_tensor_idx)\n output_shapes.append(output_tensor.ShapeAsNumpy().tolist())\n\n return output_shapes\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Script to test TF-TRT INT8 conversion without calibration on Mnist model.\"\"\"\n\nimport numpy as np\n\nfrom tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.platform import test\n\n\nclass DynamicInputShapesTest(trt_test.TfTrtIntegrationTestBase):\n\n def GraphFn(self, x):\n conv_filter1 = constant_op.constant(\n np.ones([3, 3, 1, 8]), name=\"weights1\", dtype=dtypes.float32)\n bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)\n x = nn.conv2d(\n input=x,\n filter=conv_filter1,\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n name=\"conv\")\n x = nn.bias_add(x, bias1)\n x = nn.relu(x)\n conv_filter2 = constant_op.constant(\n np.ones([3, 3, 8, 1]), name=\"weights2\", dtype=dtypes.float32)\n bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)\n x = nn.conv2d(\n input=x,\n filter=conv_filter2,\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n name=\"conv\")\n x = nn.bias_add(x, bias2)\n return array_ops.identity(x, name=\"output\")\n\n def GetParams(self):\n # TODO(laigd): we should test the following cases:\n # - batch size is not changed, other dims are changing\n # - batch size is decreasing, other dims are identical\n # - batch size is decreasing, other dims are changing\n # - batch size is increasing, other dims are identical\n # - batch size is increasing, other dims are changing\n input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],\n [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],\n [[1, 224, 224, 1]], [[1, 128, 224, 1]]]\n expected_output_dims = input_dims\n\n return trt_test.TfTrtIntegrationTestParams(\n graph_fn=self.GraphFn,\n input_specs=[\n tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,\n \"input\")\n ],\n output_specs=[\n tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,\n \"output\")\n ],\n input_dims=input_dims,\n expected_output_dims=expected_output_dims)\n\n def setUp(self):\n super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call\n # Disable layout optimizer, since it will convert BiasAdd with NHWC\n # format to NCHW format under four dimentional input.\n self.DisableNonTrtOptimizers()\n\n def ExpectedEnginesToBuild(self, run_params):\n return [\"TRTEngineOp_0\"]\n\n def ShouldRunTest(self, run_params):\n return (run_params.dynamic_engine and not trt_test.IsQuantizationMode(\n run_params.precision_mode)), \"test dynamic engine and non-INT8\"\n\n def ExpectedAbsoluteTolerance(self, run_params):\n \"\"\"The absolute tolerance to compare floating point results.\"\"\"\n return 1.e-03 
if run_params.precision_mode == \"FP32\" else 1.e-01\n\n def ExpectedRelativeTolerance(self, run_params):\n \"\"\"The relative tolerance to compare floating point results.\"\"\"\n return 1.e-03 if run_params.precision_mode == \"FP32\" else 1.e-01\n\n\nif __name__ == \"__main__\":\n test.main()\n",
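The GraphFn above is shape-agnostic except for the single input channel, which is what makes the varying `input_dims` in GetParams valid. A standalone eager sketch of the same conv/bias/relu stack (an illustration, not part of the test harness):

import numpy as np
import tensorflow as tf

def graph_fn(x):
    f1 = tf.constant(np.ones([3, 3, 1, 8]), dtype=tf.float32)
    b1 = tf.constant(np.random.randn(8), dtype=tf.float32)
    x = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, f1, strides=1, padding="SAME"), b1))
    f2 = tf.constant(np.ones([3, 3, 8, 1]), dtype=tf.float32)
    b2 = tf.constant(np.random.randn(1), dtype=tf.float32)
    return tf.nn.bias_add(tf.nn.conv2d(x, f2, strides=1, padding="SAME"), b2)

# Both the batch and the spatial dimensions can change between calls.
for shape in ([1, 5, 5, 1], [10, 5, 5, 1], [2, 9, 9, 1]):
    out = graph_fn(tf.random.normal(shape))
    assert list(out.shape) == shape  # "SAME" padding preserves the input shape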
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n# pylint: disable=g-classes-have-attributes\n\"\"\"Legacy v1 optimizer classes.\n\nFor more examples see the base class `tf.compat.v1.keras.optimizers.Optimizer`.\n\"\"\"\n\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import nest\n\n\nclass Optimizer(object):\n \"\"\"Abstract optimizer base class.\n\n Note: this is the parent class of all optimizers, not an actual optimizer\n that can be used for training models.\n\n All Keras optimizers support the following keyword arguments:\n\n clipnorm: float >= 0. Gradients will be clipped\n when their L2 norm exceeds this value.\n clipvalue: float >= 0. Gradients will be clipped\n when their absolute value exceeds this value.\n \"\"\"\n\n def __init__(self, **kwargs):\n allowed_kwargs = {'clipnorm', 'clipvalue'}\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError('Unexpected keyword argument '\n 'passed to optimizer: ' + str(k))\n # checks that clipnorm >= 0 and clipvalue >= 0\n if kwargs[k] < 0:\n raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))\n self.__dict__.update(kwargs)\n self.updates = []\n self.weights = []\n\n # Set this to False, indicating `apply_gradients` does not take the\n # `experimental_aggregate_gradients` argument.\n _HAS_AGGREGATE_GRAD = False\n\n def _create_all_weights(self, params):\n \"\"\"Creates and sets all optimizer weights.\n\n Args:\n params: list or tuple of `Variable` objects that will be minimized\n using this optimizer.\n\n Returns:\n Specific weight values that are used in `get_updates`\n \"\"\"\n raise NotImplementedError\n\n def get_updates(self, loss, params):\n raise NotImplementedError\n\n def get_gradients(self, loss, params):\n \"\"\"Returns gradients of `loss` with respect to `params`.\n\n Args:\n loss: Loss tensor.\n params: List of variables.\n\n Returns:\n List of gradient tensors.\n\n Raises:\n ValueError: In case any gradient cannot be computed (e.g. if gradient\n function not implemented).\n \"\"\"\n grads = backend.gradients(loss, params)\n if any(g is None for g in grads):\n raise ValueError('An operation has `None` for gradient. '\n 'Please make sure that all of your ops have a '\n 'gradient defined (i.e. are differentiable). 
'\n 'Common ops without gradient: '\n 'backend.argmax, backend.round, backend.eval.')\n if hasattr(self, 'clipnorm'):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, 'clipvalue'):\n grads = [\n clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)\n for g in grads\n ]\n return grads\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Args:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number of the dimensions of the weights of the optimizer\n (i.e. it should match the output of `get_weights`).\n\n Raises:\n ValueError: in case of incompatible weight shapes.\n \"\"\"\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('Length of the specified weight list (' +\n str(len(weights)) +\n ') does not match the number of weights '\n 'of the optimizer (' + str(len(params)) + ')')\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Optimizer weight shape ' + str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current value of the weights of the optimizer.\n\n Returns:\n A list of numpy arrays.\n \"\"\"\n return backend.batch_get_value(self.weights)\n\n def get_config(self):\n config = {}\n if hasattr(self, 'clipnorm'):\n config['clipnorm'] = self.clipnorm\n if hasattr(self, 'clipvalue'):\n config['clipvalue'] = self.clipvalue\n return config\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n\nclass SGD(Optimizer):\n \"\"\"Stochastic gradient descent optimizer.\n\n Includes support for momentum,\n learning rate decay, and Nesterov momentum.\n\n Args:\n lr: float >= 0. Learning rate.\n momentum: float >= 0. Parameter that accelerates SGD in the relevant\n direction and dampens oscillations.\n decay: float >= 0. Learning rate decay over each update.\n nesterov: boolean. Whether to apply Nesterov momentum.\n \"\"\"\n\n def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):\n super(SGD, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n self.lr = backend.variable(lr, name='lr')\n self.momentum = backend.variable(momentum, name='momentum')\n self.decay = backend.variable(decay, name='decay')\n self.initial_decay = decay\n self.nesterov = nesterov\n\n def _create_all_weights(self, params):\n shapes = [backend.int_shape(p) for p in params]\n moments = [backend.zeros(shape) for shape in shapes]\n self.weights = [self.iterations] + moments\n return moments\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. 
+\n self.decay * math_ops.cast(self.iterations,\n backend.dtype(self.decay))))\n # momentum\n moments = self._create_all_weights(params)\n for p, g, m in zip(params, grads, moments):\n v = self.momentum * m - lr * g # velocity\n self.updates.append(state_ops.assign(m, v))\n\n if self.nesterov:\n new_p = p + self.momentum * v - lr * g\n else:\n new_p = p + v\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'momentum': float(backend.get_value(self.momentum)),\n 'decay': float(backend.get_value(self.decay)),\n 'nesterov': self.nesterov\n }\n base_config = super(SGD, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RMSprop(Optimizer):\n \"\"\"RMSProp optimizer.\n\n It is recommended to leave the parameters of this optimizer\n at their default values\n (except the learning rate, which can be freely tuned).\n\n Args:\n lr: float >= 0. Learning rate.\n rho: float >= 0.\n epsilon: float >= 0. Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n \"\"\"\n\n def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):\n super(RMSprop, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.lr = backend.variable(lr, name='lr')\n self.rho = backend.variable(rho, name='rho')\n self.decay = backend.variable(decay, name='decay')\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def _create_all_weights(self, params):\n accumulators = [\n backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))\n for p in params]\n self.weights = accumulators\n return accumulators\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n accumulators = self._create_all_weights(params)\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations,\n backend.dtype(self.decay))))\n\n for p, g, a in zip(params, grads, accumulators):\n # update accumulator\n new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)\n self.updates.append(state_ops.assign(a, new_a))\n new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'rho': float(backend.get_value(self.rho)),\n 'decay': float(backend.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(RMSprop, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adagrad(Optimizer):\n \"\"\"Adagrad optimizer.\n\n Adagrad is an optimizer with parameter-specific learning rates,\n which are adapted relative to how frequently a parameter gets\n updated during training. The more updates a parameter receives,\n the smaller the updates.\n\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\n # Arguments\n lr: float >= 0. 
Initial learning rate.\n epsilon: float >= 0. If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n\n # References\n - [Adaptive Subgradient Methods for Online Learning and Stochastic\n Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n \"\"\"\n\n def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):\n super(Adagrad, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.lr = backend.variable(lr, name='lr')\n self.decay = backend.variable(decay, name='decay')\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def _create_all_weights(self, params):\n shapes = [backend.int_shape(p) for p in params]\n accumulators = [backend.zeros(shape) for shape in shapes]\n self.weights = accumulators\n return accumulators\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n accumulators = self._create_all_weights(params)\n\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations,\n backend.dtype(self.decay))))\n\n for p, g, a in zip(params, grads, accumulators):\n new_a = a + math_ops.square(g) # update accumulator\n self.updates.append(state_ops.assign(a, new_a))\n new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'decay': float(backend.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(Adagrad, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adadelta(Optimizer):\n \"\"\"Adadelta optimizer.\n\n Adadelta is a more robust extension of Adagrad\n that adapts learning rates based on a moving window of gradient updates,\n instead of accumulating all past gradients. This way, Adadelta continues\n learning even when many updates have been done. Compared to Adagrad, in the\n original version of Adadelta you don't have to set an initial learning\n rate. In this version, initial learning rate and decay factor can\n be set, as in most other Keras optimizers.\n\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\n Arguments:\n lr: float >= 0. Initial learning rate, defaults to 1.\n It is recommended to leave it at the default value.\n rho: float >= 0. Adadelta decay factor, corresponding to fraction of\n gradient to keep at each time step.\n epsilon: float >= 0. Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. 
Initial learning rate decay.\n\n References:\n - [Adadelta - an adaptive learning rate\n method](http://arxiv.org/abs/1212.5701)\n \"\"\"\n\n def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):\n super(Adadelta, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.lr = backend.variable(lr, name='lr')\n self.decay = backend.variable(decay, name='decay')\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.rho = rho\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def _create_all_weights(self, params):\n shapes = [backend.int_shape(p) for p in params]\n accumulators = [backend.zeros(shape) for shape in shapes]\n delta_accumulators = [backend.zeros(shape) for shape in shapes]\n self.weights = accumulators + delta_accumulators\n return accumulators, delta_accumulators\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n accumulators, delta_accumulators = self._create_all_weights(params)\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations,\n backend.dtype(self.decay))))\n\n for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):\n # update accumulator\n new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)\n self.updates.append(state_ops.assign(a, new_a))\n\n # use the new accumulator and the *old* delta_accumulator\n update = g * backend.sqrt(d_a + self.epsilon) / backend.sqrt(\n new_a + self.epsilon)\n new_p = p - lr * update\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n\n # update delta_accumulator\n new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)\n self.updates.append(state_ops.assign(d_a, new_d_a))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'rho': self.rho,\n 'decay': float(backend.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(Adadelta, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adam(Optimizer):\n \"\"\"Adam optimizer.\n\n Default parameters follow those provided in the original paper.\n\n Args:\n lr: float >= 0. Learning rate.\n beta_1: float, 0 < beta < 1. Generally close to 1.\n beta_2: float, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n amsgrad: boolean. 
Whether to apply the AMSGrad variant of this algorithm\n from the paper \"On the Convergence of Adam and Beyond\".\n \"\"\"\n\n def __init__(self,\n lr=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=None,\n decay=0.,\n amsgrad=False,\n **kwargs):\n super(Adam, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n self.lr = backend.variable(lr, name='lr')\n self.beta_1 = backend.variable(beta_1, name='beta_1')\n self.beta_2 = backend.variable(beta_2, name='beta_2')\n self.decay = backend.variable(decay, name='decay')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n self.amsgrad = amsgrad\n\n def _create_all_weights(self, params):\n ms = [\n backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))\n for p in params]\n vs = [\n backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))\n for p in params]\n if self.amsgrad:\n vhats = [\n backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))\n for p in params]\n else:\n vhats = [backend.zeros(1) for _ in params]\n self.weights = [self.iterations] + ms + vs + vhats\n return ms, vs, vhats\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations,\n backend.dtype(self.decay))))\n\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, backend.floatx())\n lr_t = lr * (\n backend.sqrt(1. - math_ops.pow(self.beta_2, t)) /\n (1. - math_ops.pow(self.beta_1, t)))\n\n ms, vs, vhats = self._create_all_weights(params)\n for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)\n if self.amsgrad:\n vhat_t = math_ops.maximum(vhat, v_t)\n p_t = p - lr_t * m_t / (backend.sqrt(vhat_t) + self.epsilon)\n self.updates.append(state_ops.assign(vhat, vhat_t))\n else:\n p_t = p - lr_t * m_t / (backend.sqrt(v_t) + self.epsilon)\n\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(v, v_t))\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'beta_1': float(backend.get_value(self.beta_1)),\n 'beta_2': float(backend.get_value(self.beta_2)),\n 'decay': float(backend.get_value(self.decay)),\n 'epsilon': self.epsilon,\n 'amsgrad': self.amsgrad\n }\n base_config = super(Adam, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adamax(Optimizer):\n \"\"\"Adamax optimizer from Adam paper's Section 7.\n\n It is a variant of Adam based on the infinity norm.\n Default parameters follow those provided in the paper.\n\n Args:\n lr: float >= 0. Learning rate.\n beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. 
Learning rate decay over each update.\n \"\"\"\n\n def __init__(self,\n lr=0.002,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=None,\n decay=0.,\n **kwargs):\n super(Adamax, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n self.lr = backend.variable(lr, name='lr')\n self.beta_1 = backend.variable(beta_1, name='beta_1')\n self.beta_2 = backend.variable(beta_2, name='beta_2')\n self.decay = backend.variable(decay, name='decay')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def _create_all_weights(self, params):\n\n shapes = [backend.int_shape(p) for p in params]\n # zero init of 1st moment\n ms = [backend.zeros(shape) for shape in shapes]\n # zero init of exponentially weighted infinity norm\n us = [backend.zeros(shape) for shape in shapes]\n self.weights = [self.iterations] + ms + us\n return ms, us\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations,\n backend.dtype(self.decay))))\n\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, backend.floatx())\n lr_t = lr / (1. - math_ops.pow(self.beta_1, t))\n\n ms, us = self._create_all_weights(params)\n\n for p, g, m, u in zip(params, grads, ms, us):\n\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))\n p_t = p - lr_t * m_t / (u_t + self.epsilon)\n\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(u, u_t))\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'beta_1': float(backend.get_value(self.beta_1)),\n 'beta_2': float(backend.get_value(self.beta_2)),\n 'decay': float(backend.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(Adamax, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Nadam(Optimizer):\n \"\"\"Nesterov Adam optimizer.\n\n Much like Adam is essentially RMSprop with momentum,\n Nadam is Adam RMSprop with Nesterov momentum.\n\n Default parameters follow those provided in the paper.\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\n Args:\n lr: float >= 0. Learning rate.\n beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. 
Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n \"\"\"\n\n def __init__(self,\n lr=0.002,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=None,\n schedule_decay=0.004,\n **kwargs):\n super(Nadam, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n self.m_schedule = backend.variable(1., name='m_schedule')\n self.lr = backend.variable(lr, name='lr')\n self.beta_1 = backend.variable(beta_1, name='beta_1')\n self.beta_2 = backend.variable(beta_2, name='beta_2')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.epsilon = epsilon\n self.schedule_decay = schedule_decay\n\n def _create_all_weights(self, params):\n shapes = [backend.int_shape(p) for p in params]\n ms = [backend.zeros(shape) for shape in shapes]\n vs = [backend.zeros(shape) for shape in shapes]\n\n self.weights = [self.iterations, self.m_schedule] + ms + vs\n return ms, vs\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, backend.floatx())\n\n # Due to the recommendations in [2], i.e. warming momentum schedule\n momentum_cache_t = self.beta_1 * (\n 1. - 0.5 *\n (math_ops.pow(backend.cast_to_floatx(0.96), t * self.schedule_decay)))\n momentum_cache_t_1 = self.beta_1 * (\n 1. - 0.5 *\n (math_ops.pow(backend.cast_to_floatx(0.96),\n (t + 1) * self.schedule_decay)))\n m_schedule_new = self.m_schedule * momentum_cache_t\n m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1\n self.updates.append((self.m_schedule, m_schedule_new))\n\n ms, vs = self._create_all_weights(params)\n\n for p, g, m, v in zip(params, grads, ms, vs):\n # the following equations given in [1]\n g_prime = g / (1. - m_schedule_new)\n m_t = self.beta_1 * m + (1. - self.beta_1) * g\n m_t_prime = m_t / (1. - m_schedule_next)\n v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)\n v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))\n m_t_bar = (1. 
-\n momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime\n\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(v, v_t))\n\n p_t = p - self.lr * m_t_bar / (backend.sqrt(v_t_prime) + self.epsilon)\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(backend.get_value(self.lr)),\n 'beta_1': float(backend.get_value(self.beta_1)),\n 'beta_2': float(backend.get_value(self.beta_2)),\n 'epsilon': self.epsilon,\n 'schedule_decay': self.schedule_decay\n }\n base_config = super(Nadam, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass TFOptimizer(Optimizer, trackable.Trackable):\n \"\"\"Wrapper class for native TensorFlow optimizers.\"\"\"\n\n def __init__(self, optimizer, iterations=None): # pylint: disable=super-init-not-called\n self.optimizer = optimizer\n self._track_trackable(optimizer, name='optimizer')\n if iterations is None:\n with backend.name_scope(self.__class__.__name__):\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n else:\n self.iterations = iterations\n self._track_trackable(self.iterations, name='global_step')\n\n def _clip_gradients(self, grads):\n \"\"\"Clip gradients according to the clipnorm and clipvalue attributes.\"\"\"\n # TFOptimizer wrapper has no gradient clipping options.\n return grads\n\n def minimize(self, loss, var_list, grad_loss=None, tape=None):\n \"\"\"Mimics the `OptimizerV2.minimize` API.\"\"\"\n if not callable(loss) and tape is None:\n raise ValueError('`tape` is required when a `Tensor` loss is passed.')\n tape = tape if tape is not None else backprop.GradientTape()\n\n if callable(loss):\n with tape:\n if not callable(var_list):\n tape.watch(var_list)\n loss = loss()\n if callable(var_list):\n var_list = var_list()\n\n var_list = nest.flatten(var_list)\n if var_list:\n grads = tape.gradient(loss, var_list, grad_loss)\n grads_and_vars = list(zip(grads, var_list))\n self.apply_gradients(grads_and_vars)\n\n def apply_gradients(self, grads_and_vars):\n self.optimizer.apply_gradients(grads_and_vars, global_step=self.iterations)\n\n def get_grads(self, loss, params):\n return self.optimizer.compute_gradients(loss, params)\n\n def get_updates(self, loss, params):\n if distribution_strategy_context.has_strategy():\n self.updates = []\n\n if not params:\n # After the model vars have been created, the second call to get_updates\n # is called with params as an empty list. 
This ensures that we call\n # compute_gradients with params=None.\n grads = self.optimizer.compute_gradients(loss)\n else:\n grads = self.optimizer.compute_gradients(loss, params)\n global_step = training_util.get_global_step()\n opt_update = self.optimizer.apply_gradients(grads, global_step)\n else:\n if not params:\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n return self.updates\n\n # Updates list starts out empty because the iterations variable is\n # incremented in optimizer.apply_gradients()\n self.updates = []\n grads = self.optimizer.compute_gradients(loss, params)\n opt_update = self.optimizer.apply_gradients(\n grads, global_step=self.iterations)\n\n self.updates.append(opt_update)\n return self.updates\n\n @property\n def weights(self):\n raise NotImplementedError\n\n def get_config(self):\n raise NotImplementedError\n\n def from_config(self, config):\n raise NotImplementedError\n\n\n# Aliases.\n\nsgd = SGD\nrmsprop = RMSprop\nadagrad = Adagrad\nadadelta = Adadelta\nadam = Adam\nadamax = Adamax\nnadam = Nadam\n",
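The update rule implemented by `SGD.get_updates` above is compact enough to restate in plain NumPy. The following sketch (an illustration, not the Keras code path) applies the same velocity and Nesterov formulas to a toy parameter vector:

import numpy as np

def sgd_step(p, g, m, lr=0.01, momentum=0.9, nesterov=False):
    """One step of the SGD update from get_updates; returns (new_p, new_m)."""
    v = momentum * m - lr * g                     # velocity
    new_p = p + momentum * v - lr * g if nesterov else p + v
    return new_p, v

p, m = np.array([1.0, -2.0]), np.zeros(2)
g = np.array([0.5, -0.25])                        # pretend constant gradient
for _ in range(5):
    p, m = sgd_step(p, g, m, nesterov=True)
print(p)  # parameters drift opposite to the gradient, accelerated by momentum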
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.assert_next()`.\"\"\"\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.experimental.ops import testing\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import options as options_lib\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import test\n\n\nclass AssertNextTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(test_base.default_test_combinations())\n def testAssertNext(self):\n dataset = dataset_ops.Dataset.from_tensors(0).apply(\n testing.assert_next([\"Map\"])).map(lambda x: x)\n options = options_lib.Options()\n options.experimental_optimization.apply_default_optimizations = False\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(dataset, expected_output=[0])\n\n @combinations.generate(test_base.default_test_combinations())\n def testIgnoreVersionSuffix(self):\n # The `batch` transformation creates a \"BatchV2\" dataset, but we should\n # still match that with \"Batch\".\n dataset = dataset_ops.Dataset.from_tensors(0).apply(\n testing.assert_next([\"Map\", \"Batch\"])).map(lambda x: x).batch(1)\n options = options_lib.Options()\n options.experimental_optimization.apply_default_optimizations = False\n dataset = dataset.with_options(options)\n self.assertDatasetProduces(dataset, expected_output=[[0]])\n\n @combinations.generate(test_base.default_test_combinations())\n def testAssertNextInvalid(self):\n dataset = dataset_ops.Dataset.from_tensors(0).apply(\n testing.assert_next([\"Whoops\"]))\n self.assertDatasetProduces(\n dataset,\n expected_error=(errors.InvalidArgumentError,\n \"Asserted transformation matching Whoops\"))\n\n @combinations.generate(test_base.default_test_combinations())\n def testAssertNextShort(self):\n dataset = dataset_ops.Dataset.from_tensors(0).apply(\n testing.assert_next([\"Root\", \"Whoops\"]))\n self.assertDatasetProduces(\n dataset,\n expected_error=(\n errors.InvalidArgumentError,\n \"Asserted next 2 transformations but encountered only 1.\"))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utilities for V2 control flow.\"\"\"\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import function_def_to_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework.func_graph import FuncGraph\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import control_flow_v2_func_graphs\nfrom tensorflow.python.ops import gradients_util\nfrom tensorflow.python.util import keras_deps\nfrom tensorflow.python.util import tf_contextlib\n\n\n_EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = None\n_DISABLE_LOWER_USING_SWITCH_MERGE = False\n\n\nCondBranchFuncGraph = control_flow_v2_func_graphs.CondBranchFuncGraph\nWhileCondFuncGraph = control_flow_v2_func_graphs.WhileCondFuncGraph\nWhileBodyFuncGraph = control_flow_v2_func_graphs.WhileBodyFuncGraph\n\n\ndef in_defun():\n \"\"\"Returns if the current graph is, or is nested in, a defun.\"\"\"\n if context.executing_eagerly(): return False\n\n graph = ops.get_default_graph()\n while (isinstance(graph, CondBranchFuncGraph) or\n isinstance(graph, WhileBodyFuncGraph) or\n isinstance(graph, WhileCondFuncGraph)):\n graph = graph.outer_graph\n return isinstance(graph, FuncGraph)\n\n\ndef in_while_loop_defun(graph):\n \"\"\"Returns if the graph is a while loop FuncGraph.\"\"\"\n if context.executing_eagerly(): return False\n return (isinstance(graph, WhileCondFuncGraph) or\n isinstance(graph, WhileBodyFuncGraph))\n\n\ndef create_new_tf_function(func_graph):\n \"\"\"Converts func_graph to a TF_Function and adds it to the current graph.\n\n Args:\n func_graph: FuncGraph\n\n Returns:\n The name of the new TF_Function.\n \"\"\"\n func = function._EagerDefinedFunction( # pylint: disable=protected-access\n func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {})\n func.add_to_graph(func_graph.outer_graph)\n return func_graph.name\n\n\ndef unique_fn_name(scope, name):\n \"\"\"Returns a unique name to use for a control flow function.\n\n Args:\n scope: A name scope string.\n name: An identifier for this function (e.g. 
\"true\", \"body\").\n\n Returns:\n A string, the name to use for the function.\n \"\"\"\n return (\"%s%s_%s\" % (scope, name, ops.uid())).replace(\"/\", \"_\")\n\n\ndef unique_grad_fn_name(forward_name):\n return \"%s_grad_%s\" % (forward_name, ops.uid())\n\n\ndef maybe_set_lowering_attr(op, lower_using_switch_merge=None):\n \"\"\"Sets the flag to enable lowering on `op` if necessary.\n\n Lowering allows cond_v2 and while_v2 to avoid some of the limitations of\n Functions, allowing users to specify devices & colocation inside of cond_v2\n and while_v2 input functions, and enabling non-strict evaluation & partial\n pruning. This brings v2 control flow closer to feature parity with v1 control\n flow.\n\n However, we do not lower in the following cases:\n - When the `If` or `While` ops are in the XLA context. Because it is easier\n for XLA to apply its own optimizations when dealing with un-lowered\n control flow operators than with low-level control flow primitives.\n - When the eager execution context specifies the executor of functions to\n be the single threaded executor (see context.function_executor_type()).\n Because the single threaded executor does not support v1 control flow ops.\n - When 'lower_using_switch_merge' is explicitly set to False.\n\n Args:\n op: An `If` or `While` Operation.\n lower_using_switch_merge: Explicit value to lower or not (optional).\n \"\"\"\n if lower_using_switch_merge is not None:\n # pylint: disable=protected-access\n op._set_attr(\"_lower_using_switch_merge\",\n attr_value_pb2.AttrValue(b=lower_using_switch_merge))\n # pylint: enable=protected-access\n elif (not _DISABLE_LOWER_USING_SWITCH_MERGE and\n not control_flow_util.GraphOrParentsInXlaContext(op.graph) and\n context.context().function_call_options.executor_type !=\n \"SINGLE_THREADED_EXECUTOR\"):\n # pylint: disable=protected-access\n op._set_attr(\"_lower_using_switch_merge\", attr_value_pb2.AttrValue(b=True))\n # pylint: enable=protected-access\n\n\ndef maybe_propagate_compile_time_consts_in_xla(op):\n \"\"\"Tells XLA whether to propagate compile-time consts in the loop body.\n\n This is needed to make compile time constants available to ops, for example\n `max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this\n would always be turned on, but that doesn't work with legacy functionalized\n while_loops.\n\n Args:\n op: A `While` Operation.\n \"\"\"\n if control_flow_util.GraphOrParentsInXlaContext(op.graph):\n # pylint: disable=protected-access\n op._set_attr(\"_xla_propagate_compile_time_consts\",\n attr_value_pb2.AttrValue(b=True))\n # pylint: enable=protected-access\n\n\ndef resource_input_index(tensor_name, input_names, node_defs, functions):\n \"\"\"Returns the index of the input corresponding to `tensor_name`.\n\n This method is used to find the corresponding index of an arbitrary resource\n tensor in a function (the function could be a loop body). We assume that\n resource handles are never created in functions, so that every resource\n tensor can be traced back to a function input.\n\n The awkward signature of this method is to make it work with both FuncGraphs\n and FunctionDefs. 
This is so we can recurse on function call ops without\n building the corresponding FuncGraph (note that even if a FuncGraph for a\n FunctionDef already exists, the input/output/node names may have been\n changed when the FuncGraph was serialized to the FunctionDef, which makes it\n unusable with this algorithm).\n\n Args:\n tensor_name: the name of the resource tensor to be resolved to an input.\n input_names: a list of the names of all inputs to the function.\n node_defs: a dict mapping op name -> NodeDef for every op in the function.\n functions: a dict mapping function name -> _EagerDefinedFunction.\n\n Returns:\n The index into input_names corresponding to `tensor_name`.\n \"\"\"\n while tensor_name not in input_names:\n # FunctionDefs and graphs use different tensor naming conventions.\n parts = tensor_name.split(\":\")\n if len(parts) == 3:\n op_name, _, output_idx = parts\n elif len(parts) == 2:\n op_name, output_idx = parts\n else:\n assert len(parts) == 1\n op_name = parts[0]\n output_idx = 0\n tensor_name = \"%s:%d\" % (tensor_name, output_idx)\n # Check again for cases where the tensor suffix (\":0\") is stripped out.\n if tensor_name in input_names:\n break\n output_idx = int(output_idx)\n node_def = node_defs[op_name]\n\n def _extract_input_index(function_attribute_name):\n func_name = node_def.attr[function_attribute_name].func.name\n fdef = functions[func_name].definition\n output_arg_name = fdef.signature.output_arg[output_idx].name\n output_tensor_name = fdef.ret[output_arg_name]\n return resource_input_index(\n output_tensor_name, [arg.name for arg in fdef.signature.input_arg],\n {ndef.name: ndef for ndef in fdef.node_def}, functions)\n\n if node_def.op in (\"Identity\", \"While\"):\n # Captured resources occur at the same index in the lists of inputs and\n # outputs of a while or identity op. So we lookup the input of `tensor.op`\n # at the same index as the index of `tensor` in the `tensor.op.outputs`.\n tensor_name = node_def.input[output_idx]\n elif node_def.op in (\"PartitionedCall\", \"StatefulPartitionedCall\"):\n # Functions output any captured resource tensors used by their\n # gradients. `tensor_name` is one of these outputs from a nested\n # function call, so recursively find the corresponding input in the\n # nested FunctionDef.\n tensor_name = node_def.input[_extract_input_index(\"f\")]\n elif node_def.op in (\"If\", \"StatelessIf\"):\n input_index = _extract_input_index(\"then_branch\")\n if input_index != _extract_input_index(\"else_branch\"):\n raise AssertionError(\n (\"Expected cond branches ({} op) to each have the same \"\n \"input->output mapping of resources.\").format(node_def.op))\n tensor_name = node_def.input[\n # Ignore the `cond` input; the function inputs come after.\n input_index + 1]\n else:\n # We assume there are no other ops types that will \"forward\" resource\n # handles like this, so all other handles must have been created by the\n # op. 
(Note that cond_v2 wraps resource handle outputs in optionals,\n # which we'll end up accumulating).\n raise ValueError(\"Taking gradient of a while loop which creates \"\n \"a resource in its body is not supported: %s (%s)\"\n % (op_name, node_def.op))\n\n return input_names.index(tensor_name)\n\n\n@tf_contextlib.contextmanager\ndef clear_control_inputs():\n \"\"\"Clears the control inputs but preserves the ControlFlowContext.\n\n This is needed to preserve the XLAControlFlowControl when clearing\n control inputs for the gradient accumulators in while_v2.\n `ops.control_dependencies` does not allow that.\n\n Yields:\n A context manager in which the ops created will not have any control inputs\n by default but the control flow context is the same.\n \"\"\"\n # pylint: disable=protected-access\n control_flow_context = ops.get_default_graph()._get_control_flow_context()\n with ops.control_dependencies(None):\n ops.get_default_graph()._set_control_flow_context(control_flow_context)\n yield\n # pylint: enable=protected-access\n\n\ndef _is_tpu_strategy(strategy):\n return (strategy is not None and\n strategy.__class__.__name__.startswith(\"TPUStrategy\"))\n\n\ndef _is_building_keras_layer():\n # TODO(srbs): Remove this function when we no long support session with Keras.\n keras_call_context_function = keras_deps.get_call_context_function()\n if keras_call_context_function:\n return keras_call_context_function().layer is not None\n else:\n return False\n\n\ndef output_all_intermediates():\n \"\"\"Whether to output all intermediates of a functional control flow op.\n\n The default behavior is to output intermediates only when building a Keras\n Layer in graph mode and that too when certain other conditions are met:\n 1. We do not output intermediates if the functional control flow op\n is being built inside a FuncGraph which is not a If/While graph. This\n guards against outputting intermediates in eager mode since keras adds\n tensors to a FuncGraph named \"keras_graph\" in that case. Also because we\n do not output intermediates of tf.function (since this feature is only for\n backwards compatibility) outputting intermediates of functional control\n flow ops built inside tf.function is of no value.\n 2. We do not output intermediates when the compilation is using XLA or for a\n TPU.\n 3. 
We do not output intermediates when a single threaded executor is used\n since that does not perform inlining and pruning.\n\n Returns:\n A bool telling whether to output all intermediates.\n \"\"\"\n if _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE is not None:\n return _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE\n if in_defun():\n return False\n if (control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()) or\n _is_tpu_strategy(distribution_strategy_context.get_strategy())):\n return False\n if (context.context().function_call_options.executor_type ==\n \"SINGLE_THREADED_EXECUTOR\"):\n return False\n return _is_building_keras_layer()\n\n\ndef get_func_graph(op, input_shapes, func_name):\n \"\"\"Generates and returns a FuncGraph for the given op and input_shapes.\"\"\"\n fdef = None\n graph = op.graph\n # Recursively search the func in graphs.\n while graph is not None:\n func = graph._get_function(func_name) # pylint: disable=protected-access\n if func is not None:\n fdef = func.definition\n break\n if hasattr(graph, \"outer_graph\"):\n graph = graph.outer_graph\n else:\n break\n\n if fdef is None:\n raise KeyError(\"%s cannot be found in the graph\" % func_name)\n\n # `op.graph` may not be the same as `ops.get_default_graph()` e.g.\n # in the case of nested if ops or when the gradient is being computed\n # from inside a Defun. We build the `func_graph` with `op.graph` as its\n # `outer_graph`. This resembles how the `FuncGraph` was built in the\n # forward pass. We need this so that we can resolve references to tensors\n # in `func_graph` from its gradient graph in `_resolve_grad_inputs`.\n with op.graph.as_default():\n func_graph = function_def_to_graph.function_def_to_graph(\n fdef, input_shapes)\n return func_graph\n\n\ndef get_op_and_outputs(op_or_outputs):\n if isinstance(op_or_outputs, ops.Operation):\n return op_or_outputs, []\n elif not op_or_outputs: # Empty list.\n return None, []\n else:\n return op_or_outputs[0].op, op_or_outputs\n\n\ndef graph_wrapped_for_higher_order_tape_gradients(graph):\n \"\"\"Check if `graph` is wrapped by `run_as_function_for_tape_gradients`.\"\"\"\n while graph is not None:\n if \"cflow_gradient_wrapper\" in getattr(graph, \"name\", \"\"):\n return True\n graph = getattr(graph, \"outer_graph\", None)\n return False\n\n\ndef run_as_function_for_tape_gradients(make_op, inputs):\n \"\"\"Fix higher-order tape gradients by wrapping `make_op` in a function.\n\n Args:\n make_op: A function that takes a list of inputs and returns a list of output\n tensors. This function should set any handle data relevant to its outputs\n before returning.\n inputs: A list of tensors to check for tape gradients and pass to\n `make_op`. These should include all tensors used in `make_op`.\n\n Returns:\n Tensors corresponding to `make_op`'s output.\n \"\"\"\n # GradientTapes created inside a function currently don't work well with\n # un-wrapped control flow ops in that same function. 
Wrapping in an extra\n # layer of intermediate function means we run extra logic in the function\n # gradient code to record the correct intermediates on the tape.\n #\n # The function attribute inputs to control flow ops are not hashable, so we\n # pass everything as a capture to bypass defun's caching.\n if (gradients_util.PossibleTapeGradientTypes(inputs)\n == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER\n # We only need one function between the tape and the op; if we've already\n # wrapped once, we stop wrapping to avoid infinite recursion.\n and not (ops.get_default_graph().building_function\n and \"cflow_gradient_wrapper\" in ops.get_default_graph().name)):\n results = function.defun_with_attributes(\n make_op,\n autograph=False,\n attributes=dict(func_name=\"cflow_gradient_wrapper\"))(inputs)\n return results\n else:\n return make_op(inputs)\n",
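`resource_input_index` above leans on the tensor-naming conventions of graphs and FunctionDefs. A small standalone sketch of just that parsing step (a hypothetical helper, for illustration only; the real function additionally handles the three-part FunctionDef form):

def split_tensor_name(tensor_name):
    """Split 'op_name[:output_idx]' the way resource_input_index does for graph names."""
    parts = tensor_name.split(":")
    if len(parts) == 2:
        return parts[0], int(parts[1])
    # FunctionDef values may omit the ':0' suffix entirely.
    return parts[0], 0

assert split_tensor_name("while/Identity:0") == ("while/Identity", 0)
assert split_tensor_name("cond/output:2") == ("cond/output", 2)
assert split_tensor_name("StatefulPartitionedCall") == ("StatefulPartitionedCall", 0)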
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.data_flow_ops.PriorityQueue.\"\"\"\n\nimport copy\nimport random\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import data_flow_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_v1_only(\"PriorityQueue removed from v2\")\nclass PriorityQueueTest(test.TestCase):\n\n def testRoundTripInsertReadOnceSorts(self):\n with self.cached_session() as sess:\n q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (\n (), ()))\n elem = np.random.randint(-5, 5, size=100).astype(np.int64)\n side_value_0 = np.random.rand(100).astype(bytes)\n side_value_1 = np.random.rand(100).astype(bytes)\n enq_list = [\n q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))\n for e, v0, v1 in zip(elem, side_value_0, side_value_1)\n ]\n for enq in enq_list:\n enq.run()\n\n deq = q.dequeue_many(100)\n deq_elem, deq_value_0, deq_value_1 = self.evaluate(deq)\n\n allowed = {}\n missed = set()\n for e, v0, v1 in zip(elem, side_value_0, side_value_1):\n if e not in allowed:\n allowed[e] = set()\n allowed[e].add((v0, v1))\n missed.add((v0, v1))\n\n self.assertAllEqual(deq_elem, sorted(elem))\n for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):\n self.assertTrue((dv0, dv1) in allowed[e])\n missed.remove((dv0, dv1))\n self.assertEqual(missed, set())\n\n def testRoundTripInsertMultiThreadedReadOnceSorts(self):\n # We need each thread to keep its own device stack or the device scopes\n # won't be properly nested.\n ops.get_default_graph().switch_to_thread_local()\n with self.cached_session() as sess:\n q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (\n (), ()))\n elem = np.random.randint(-5, 5, size=100).astype(np.int64)\n side_value_0 = np.random.rand(100).astype(bytes)\n side_value_1 = np.random.rand(100).astype(bytes)\n\n enqueue_ops = [\n q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))\n for e, v0, v1 in zip(elem, side_value_0, side_value_1)\n ]\n\n # Run one producer thread for each element in elems.\n def enqueue(enqueue_op):\n self.evaluate(enqueue_op)\n\n dequeue_op = q.dequeue_many(100)\n\n enqueue_threads = [\n self.checkedThread(\n target=enqueue, args=(op,)) for op in enqueue_ops\n ]\n\n for t in enqueue_threads:\n t.start()\n\n deq_elem, deq_value_0, deq_value_1 = self.evaluate(dequeue_op)\n\n for t in enqueue_threads:\n t.join()\n\n allowed = {}\n missed = set()\n for e, v0, v1 in zip(elem, side_value_0, 
side_value_1):\n if e not in allowed:\n allowed[e] = set()\n allowed[e].add((v0, v1))\n missed.add((v0, v1))\n\n self.assertAllEqual(deq_elem, sorted(elem))\n for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):\n self.assertTrue((dv0, dv1) in allowed[e])\n missed.remove((dv0, dv1))\n self.assertEqual(missed, set())\n\n def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):\n # We need each thread to keep its own device stack or the device scopes\n # won't be properly nested.\n ops.get_default_graph().switch_to_thread_local()\n with self.cached_session() as sess:\n q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))\n\n num_threads = 40\n enqueue_counts = np.random.randint(10, size=num_threads)\n enqueue_values = [\n np.random.randint(\n 5, size=count) for count in enqueue_counts\n ]\n enqueue_ops = [\n q.enqueue_many((values, values)) for values in enqueue_values\n ]\n shuffled_counts = copy.deepcopy(enqueue_counts)\n random.shuffle(shuffled_counts)\n dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]\n all_enqueued_values = np.hstack(enqueue_values)\n\n # Run one producer thread for each element in elems.\n def enqueue(enqueue_op):\n self.evaluate(enqueue_op)\n\n dequeued = []\n\n def dequeue(dequeue_op):\n (dequeue_indices, dequeue_values) = self.evaluate(dequeue_op)\n self.assertAllEqual(dequeue_indices, dequeue_values)\n dequeued.extend(dequeue_indices)\n\n enqueue_threads = [\n self.checkedThread(\n target=enqueue, args=(op,)) for op in enqueue_ops\n ]\n dequeue_threads = [\n self.checkedThread(\n target=dequeue, args=(op,)) for op in dequeue_ops\n ]\n\n # Dequeue and check\n for t in dequeue_threads:\n t.start()\n for t in enqueue_threads:\n t.start()\n for t in enqueue_threads:\n t.join()\n for t in dequeue_threads:\n t.join()\n\n self.assertAllEqual(sorted(dequeued), sorted(all_enqueued_values))\n\n def testRoundTripInsertManyMultiThreadedReadManyMultithreadedSorts(self):\n # We need each thread to keep its own device stack or the device scopes\n # won't be properly nested.\n ops.get_default_graph().switch_to_thread_local()\n with self.cached_session() as sess:\n q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))\n\n num_threads = 40\n enqueue_counts = np.random.randint(10, size=num_threads)\n enqueue_values = [\n np.random.randint(\n 5, size=count) for count in enqueue_counts\n ]\n enqueue_ops = [\n q.enqueue_many((values, values)) for values in enqueue_values\n ]\n shuffled_counts = copy.deepcopy(enqueue_counts)\n random.shuffle(shuffled_counts)\n dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]\n all_enqueued_values = np.hstack(enqueue_values)\n\n dequeue_wait = threading.Condition()\n\n # Run one producer thread for each element in elems.\n def enqueue(enqueue_op):\n self.evaluate(enqueue_op)\n\n def dequeue(dequeue_op, dequeued):\n (dequeue_indices, dequeue_values) = self.evaluate(dequeue_op)\n self.assertAllEqual(dequeue_indices, dequeue_values)\n dequeue_wait.acquire()\n dequeued.extend(dequeue_indices)\n dequeue_wait.release()\n\n dequeued = []\n enqueue_threads = [\n self.checkedThread(\n target=enqueue, args=(op,)) for op in enqueue_ops\n ]\n dequeue_threads = [\n self.checkedThread(\n target=dequeue, args=(op, dequeued)) for op in dequeue_ops\n ]\n\n for t in enqueue_threads:\n t.start()\n for t in enqueue_threads:\n t.join()\n # Dequeue and check\n for t in dequeue_threads:\n t.start()\n for t in dequeue_threads:\n t.join()\n\n # We can't guarantee full sorting because we can't guarantee\n # that the 
dequeued.extend() call runs immediately after the\n # self.evaluate() call. Here we're just happy everything came out.\n self.assertAllEqual(set(dequeued), set(all_enqueued_values))\n\n def testRoundTripInsertManyMultiThreadedReadOnceSorts(self):\n # We need each thread to keep its own device stack or the device scopes\n # won't be properly nested.\n ops.get_default_graph().switch_to_thread_local()\n with self.cached_session() as sess:\n q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (\n (), ()))\n elem = np.random.randint(-5, 5, size=100).astype(np.int64)\n side_value_0 = np.random.rand(100).astype(bytes)\n side_value_1 = np.random.rand(100).astype(bytes)\n\n batch = 5\n enqueue_ops = [\n q.enqueue_many((elem[i * batch:(i + 1) * batch],\n side_value_0[i * batch:(i + 1) * batch],\n side_value_1[i * batch:(i + 1) * batch]))\n for i in range(20)\n ]\n\n # Run one producer thread for each element in elems.\n def enqueue(enqueue_op):\n self.evaluate(enqueue_op)\n\n dequeue_op = q.dequeue_many(100)\n\n enqueue_threads = [\n self.checkedThread(\n target=enqueue, args=(op,)) for op in enqueue_ops\n ]\n\n for t in enqueue_threads:\n t.start()\n\n deq_elem, deq_value_0, deq_value_1 = self.evaluate(dequeue_op)\n\n for t in enqueue_threads:\n t.join()\n\n allowed = {}\n missed = set()\n for e, v0, v1 in zip(elem, side_value_0, side_value_1):\n if e not in allowed:\n allowed[e] = set()\n allowed[e].add((v0, v1))\n missed.add((v0, v1))\n\n self.assertAllEqual(deq_elem, sorted(elem))\n for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):\n self.assertTrue((dv0, dv1) in allowed[e])\n missed.remove((dv0, dv1))\n self.assertEqual(missed, set())\n\n def testRoundTripInsertOnceReadOnceSorts(self):\n with self.cached_session() as sess:\n q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (\n (), ()))\n elem = np.random.randint(-100, 100, size=1000).astype(np.int64)\n side_value_0 = np.random.rand(1000).astype(bytes)\n side_value_1 = np.random.rand(1000).astype(bytes)\n q.enqueue_many((elem, side_value_0, side_value_1)).run()\n deq = q.dequeue_many(1000)\n deq_elem, deq_value_0, deq_value_1 = self.evaluate(deq)\n\n allowed = {}\n for e, v0, v1 in zip(elem, side_value_0, side_value_1):\n if e not in allowed:\n allowed[e] = set()\n allowed[e].add((v0, v1))\n\n self.assertAllEqual(deq_elem, sorted(elem))\n for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):\n self.assertTrue((dv0, dv1) in allowed[e])\n\n def testRoundTripInsertOnceReadManySorts(self):\n with self.cached_session():\n q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))\n elem = np.random.randint(-100, 100, size=1000).astype(np.int64)\n q.enqueue_many((elem, elem)).run()\n deq_values = np.hstack((q.dequeue_many(100)[0].eval() for _ in range(10)))\n self.assertAllEqual(deq_values, sorted(elem))\n\n def testRoundTripInsertOnceReadOnceLotsSorts(self):\n with self.cached_session():\n q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))\n elem = np.random.randint(-100, 100, size=1000).astype(np.int64)\n q.enqueue_many((elem, elem)).run()\n dequeue_op = q.dequeue()\n deq_values = np.hstack(dequeue_op[0].eval() for _ in range(1000))\n self.assertAllEqual(deq_values, sorted(elem))\n\n def testInsertingNonInt64Fails(self):\n with self.cached_session():\n q = data_flow_ops.PriorityQueue(2000, (dtypes.string), (()))\n with self.assertRaises(TypeError):\n q.enqueue_many(([\"a\", \"b\", \"c\"], [\"a\", \"b\", \"c\"])).run()\n\n def testInsertingNonScalarFails(self):\n with 
self.cached_session() as sess:\n input_priority = array_ops.placeholder(dtypes.int64)\n input_other = array_ops.placeholder(dtypes.string)\n q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))\n\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Shape mismatch in tuple component 0. Expected \\[\\], got \\[2\\]\"):\n sess.run([q.enqueue((input_priority, input_other))],\n feed_dict={\n input_priority: np.array(\n [0, 2], dtype=np.int64),\n input_other: np.random.rand(3, 5).astype(bytes)\n })\n\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n r\"Shape mismatch in tuple component 0. Expected \\[2\\], got \\[2,2\\]\"):\n sess.run(\n [q.enqueue_many((input_priority, input_other))],\n feed_dict={\n input_priority: np.array(\n [[0, 2], [3, 4]], dtype=np.int64),\n input_other: np.random.rand(2, 3).astype(bytes)\n })\n\n\nif __name__ == \"__main__\":\n test.main()\n",
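A minimal graph-mode sketch of the behavior these tests check: elements enqueued with int64 priorities come back out of dequeue_many sorted by priority. Capacity, priorities, and values are chosen for illustration; eager execution is disabled because PriorityQueue is a v1-style construct.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# int64 priorities plus one scalar string value per element.
q = tf.queue.PriorityQueue(capacity=100, types=(tf.string,), shapes=((),))

priorities = np.array([5, 1, 3, 2, 4], dtype=np.int64)
values = np.array([b"e", b"a", b"c", b"b", b"d"])

enqueue = q.enqueue_many((priorities, values))
dequeue = q.dequeue_many(5)

with tf.Session() as sess:
  sess.run(enqueue)
  deq_priorities, deq_values = sess.run(dequeue)
  print(deq_priorities)  # [1 2 3 4 5] -- dequeued in priority order
  print(deq_values)      # [b'a' b'b' b'c' b'd' b'e']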
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# RUN: %p/control_flow_duplicate_v1 | FileCheck %s\n\n# pylint: disable=missing-docstring,line-too-long\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1\n\n# Tests handling dupliate functions after V1 control flow is functionalized.\n\n# CHECK: func {{.*}} tf_saved_model.exported_names = [\"key_1\"]\n# CHECK: \"tf.If\"\n# CHECK-SAME: else_branch = @[[else:[a-zA-Z_0-9]+]]\n# CHECK-SAME: then_branch = @[[then:[a-zA-Z_0-9]+]]\n\n# CHECK: func {{.*}} tf_saved_model.exported_names = [\"key_2\"]\n# CHECK: \"tf.If\"\n# CHECK-SAME: else_branch = @[[else]]\n# CHECK-SAME: then_branch = @[[then]]\n\n# CHECK: func private @[[else]](\n# CHECK: func private @[[then]](\n\n\ndef Test():\n\n zero = tf.constant(0)\n one = tf.constant(1)\n x = tf.placeholder(tf.int32, shape=(), name='input')\n result = tf.cond(x > zero, lambda: tf.square(x), lambda: tf.add(x, one))\n\n tensor_info_result = tf.compat.v1.saved_model.utils.build_tensor_info(result)\n\n signature_def = tf.saved_model.signature_def_utils.build_signature_def(\n inputs=None,\n outputs={'result': tensor_info_result},\n method_name='some_function')\n\n return {'key_1': signature_def, 'key_2': signature_def}, None, None\n\n\nif __name__ == '__main__':\n common_v1.set_tf_options()\n common_v1.do_test(Test)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for random-number generation ops in the XLA JIT compiler.\"\"\"\n\nimport math\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import special_math\nfrom tensorflow.python.platform import googletest\n\n\nclass RandomOpsTest(xla_test.XLATestCase, parameterized.TestCase):\n \"\"\"Test cases for random-number generating operators.\"\"\"\n\n def _random_types(self):\n return set(self.numeric_types) - set(\n self.complex_types) - {np.uint64, np.int64, np.uint8, np.int8}\n\n def _testRngIsNotConstant(self, rng, dtype):\n # Tests that 'rng' does not always return the same value.\n with self.session():\n with self.test_scope():\n x = rng(dtype)\n\n # The random-number generator, if working correctly, should produce the\n # same output multiple times with low probability.\n y = self.evaluate(x)\n z = self.evaluate(x)\n w = self.evaluate(x)\n\n # We use exact equality here. 
If the random-number generator is producing\n # deterministic output, all three outputs will be bitwise identical.\n self.assertTrue((not np.array_equal(y, z)) or\n (not np.array_equal(z, w)) or (not np.array_equal(y, w)))\n\n def testRandomUniformIsNotConstant(self):\n\n def rng(dtype):\n dtype = dtypes.as_dtype(dtype)\n return random_ops.random_uniform(shape=[2], dtype=dtype, maxval=dtype.max)\n\n for dtype in self._random_types():\n self._testRngIsNotConstant(rng, dtype)\n\n def testRandomNormalIsNotConstant(self):\n\n def rng(dtype):\n return random_ops.random_normal(shape=[2], dtype=dtype)\n\n for dtype in self._random_types() & self.float_types:\n self._testRngIsNotConstant(rng, dtype)\n\n @parameterized.parameters({\n 'mean': 1.4,\n 'stddev': 1.2\n }, {\n 'mean': 2.3,\n 'stddev': 2.0\n })\n def testRandomNormal(self, mean, stddev):\n num_elts = 1000000\n for dtype in self._random_types() & self.float_types:\n with self.session():\n with self.test_scope():\n normal = random_ops.random_normal([num_elts],\n dtype=dtype,\n mean=mean,\n stddev=stddev)\n self._checkTruncatedNormalIsInRange(\n normal,\n a=normal.dtype.min,\n b=normal.dtype.max,\n mu=mean,\n sigma=stddev,\n count=num_elts,\n stat_test=True)\n\n def testRandomUniformIsInRange(self):\n for dtype in self._random_types():\n # TODO (b/112272078): enable bfloat16 for CPU and GPU when the bug is\n # fixed.\n if (self.device in ['XLA_GPU', 'XLA_CPU'\n ]) and (dtype in [dtypes.bfloat16, dtypes.half]):\n continue\n with self.session():\n with self.test_scope():\n x = random_ops.random_uniform(\n shape=[1000], dtype=dtype, minval=-2, maxval=33)\n y = self.evaluate(x)\n msg = str(y) + str(dtype)\n self.assertEqual((y >= -2).sum(), 1000, msg)\n self.assertEqual((y < 33).sum(), 1000, msg)\n\n def testTruncatedNormalIsNotConstant(self):\n\n def rng(dtype):\n return random_ops.truncated_normal(shape=[2], dtype=dtype)\n\n # TODO(b/34339814): make this test work with 16 bit float types.\n for dtype in self._random_types() & {np.float32, np.float64}:\n self._testRngIsNotConstant(rng, dtype)\n\n def _checkTruncatedNormalIsInRange(self, x, a, b, mu, sigma, count,\n stat_test):\n\n def normal_cdf(x):\n return .5 * math.erfc(-x / math.sqrt(2))\n\n def normal_pdf(x):\n return math.exp(-(x**2) / 2.) / math.sqrt(2 * math.pi)\n\n def probit(x):\n return self.evaluate(special_math.ndtri(x))\n\n y = self.evaluate(x)\n\n alpha = (a - mu) / sigma\n beta = (b - mu) / sigma\n z = normal_cdf(beta) - normal_cdf(alpha)\n\n self.assertEqual((y >= a).sum(), count)\n self.assertEqual((y <= b).sum(), count)\n\n # Skip statistical test for low probability regions.\n if not stat_test:\n return\n\n # For more information on these calculations, see:\n # Burkardt, John. \"The Truncated Normal Distribution\".\n # Department of Scientific Computing website. Florida State University.\n expected_mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma\n actual_mean = np.mean(y, dtype=np.float64)\n if x.dtype == dtypes.bfloat16:\n atol = rtol = 1e-1\n else:\n atol = rtol = 2e-2\n self.assertAllClose(actual_mean, expected_mean, atol=atol, rtol=rtol)\n\n expected_median = mu + probit(\n (normal_cdf(alpha) + normal_cdf(beta)) / 2.) 
* sigma\n actual_median = np.median(y)\n self.assertAllClose(actual_median, expected_median, atol=atol, rtol=rtol)\n\n expected_variance = sigma**2 * (1 + (\n (alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z) - (\n (normal_pdf(alpha) - normal_pdf(beta)) / z)**2)\n actual_variance = np.var(y, dtype=np.float64)\n self.assertAllClose(\n actual_variance, expected_variance, atol=atol, rtol=rtol)\n\n def testTruncatedNormalIsInRange(self):\n count = 10000000\n # TODO(b/34339814): make this test work with 16 bit float types.\n for dtype in self._random_types() & {np.float32, np.float64}:\n with self.session():\n with self.test_scope():\n x = random_ops.truncated_normal(shape=[count], dtype=dtype)\n self._checkTruncatedNormalIsInRange(\n x, a=-2, b=2, mu=0, sigma=1, count=count, stat_test=True)\n\n def _implParameterizedTruncatedNormalIsInRange(self, a, b, mu, sigma, count,\n stat_test):\n # TODO(b/34339814): make this test work with 16 bit float types.\n for dtype in self._random_types() & {np.float32, np.float64}:\n with self.session():\n with self.test_scope():\n x = random_ops.parameterized_truncated_normal(\n shape=[count],\n dtype=dtype,\n means=mu,\n stddevs=sigma,\n minvals=a,\n maxvals=b)\n self._checkTruncatedNormalIsInRange(\n x, a=a, b=b, mu=mu, sigma=sigma, count=count, stat_test=stat_test)\n\n def testParameterizedTruncatedNormalBroadcasting(self):\n for dtype in self._random_types() & {np.float32, np.float64}:\n with self.session():\n with self.test_scope():\n a = -1.\n b = 1.\n mu = 0.\n sigma = 1.\n count = 10000000\n x = random_ops.parameterized_truncated_normal(\n shape=[1, count],\n dtype=dtype,\n means=mu,\n stddevs=sigma,\n minvals=[a],\n maxvals=[b])\n self._checkTruncatedNormalIsInRange(\n x, a=a, b=b, mu=mu, sigma=sigma, count=count, stat_test=True)\n\n def testParameterizedTruncatedNormalBatched(self):\n # TODO(b/112289993): Make this test work with dtype np.float64.\n for dtype in self._random_types() & {np.float32}:\n with self.session():\n with self.test_scope():\n count = 10000000\n a = -100.\n b = 100.\n mu0 = 0.\n mu1 = 1.\n sigma = .1\n x = random_ops.parameterized_truncated_normal(\n shape=[2, count],\n dtype=dtype,\n means=[mu0, mu1],\n stddevs=sigma,\n minvals=[a],\n maxvals=[b])\n self._checkTruncatedNormalIsInRange(\n x[0], a=a, b=b, mu=mu0, sigma=sigma, count=count, stat_test=True)\n self._checkTruncatedNormalIsInRange(\n x[1], a=a, b=b, mu=mu1, sigma=sigma, count=count, stat_test=True)\n\n def testParameterizedTruncatedNormalIsInRangeCenter(self):\n count = 10000000\n self._implParameterizedTruncatedNormalIsInRange(\n a=-10, b=20, mu=5, sigma=5, count=count, stat_test=True)\n\n def testParameterizedTruncatedNormalIsInRangeLeft(self):\n count = 10000000\n # the region is on the left side of the parent normal distribution\n self._implParameterizedTruncatedNormalIsInRange(\n a=-10, b=-4, mu=0, sigma=1, count=count, stat_test=False)\n self._implParameterizedTruncatedNormalIsInRange(\n a=-np.infty, b=-4, mu=0, sigma=1, count=count, stat_test=False)\n\n def testParameterizedTruncatedNormalIsInRangeRight(self):\n count = 10000000\n # the region is on the right side of the parent normal distribution\n self._implParameterizedTruncatedNormalIsInRange(\n a=4, b=10, mu=0, sigma=1, count=count, stat_test=False)\n self._implParameterizedTruncatedNormalIsInRange(\n a=4, b=np.infty, mu=0, sigma=1, count=count, stat_test=False)\n\n def testShuffle1d(self):\n with self.session():\n with self.test_scope():\n x = math_ops.range(1 << 16)\n shuffle = 
random_ops.random_shuffle(x)\n result = self.evaluate(shuffle)\n expected = range(1 << 16)\n # Compare sets to avoid randomness behavior changes but make sure still\n # have all the values.\n self.assertAllEqual(set(result), set(expected))\n\n def testShuffle2d(self):\n with self.session():\n with self.test_scope():\n x = array_ops.diag(math_ops.range(20))\n shuffle = random_ops.random_shuffle(x)\n result = self.evaluate(shuffle)\n expected = np.diag(range(20)).flatten()\n # Compare sets to avoid randomness behavior changes but make sure still\n # have all the values.\n self.assertAllEqual(len(result.flatten()), len(expected))\n self.assertAllEqual(set(result.flatten()), set(expected))\n\n\nif __name__ == '__main__':\n googletest.main()\n",
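The _checkTruncatedNormalIsInRange helper compares sample statistics against the closed-form truncated-normal moments (Burkardt, "The Truncated Normal Distribution"). The same check can be reproduced with plain numpy; the sketch below uses simple rejection sampling instead of the XLA op, on the a=-2, b=2, mu=0, sigma=1 case from testTruncatedNormalIsInRange:

import math
import numpy as np

def normal_cdf(x):
  return 0.5 * math.erfc(-x / math.sqrt(2.0))

def normal_pdf(x):
  return math.exp(-(x ** 2) / 2.0) / math.sqrt(2.0 * math.pi)

def truncated_normal_mean_var(a, b, mu, sigma):
  # Mean and variance of N(mu, sigma^2) truncated to [a, b].
  alpha, beta = (a - mu) / sigma, (b - mu) / sigma
  z = normal_cdf(beta) - normal_cdf(alpha)
  mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma
  var = sigma ** 2 * (
      1.0
      + (alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z
      - ((normal_pdf(alpha) - normal_pdf(beta)) / z) ** 2)
  return mean, var

a, b, mu, sigma, n = -2.0, 2.0, 0.0, 1.0, 1000000
samples = np.random.normal(mu, sigma, size=4 * n)
samples = samples[(samples >= a) & (samples <= b)][:n]  # rejection sampling

expected_mean, expected_var = truncated_normal_mean_var(a, b, mu, sigma)
print(abs(samples.mean() - expected_mean) < 2e-2)  # True with high probability
print(abs(samples.var() - expected_var) < 2e-2)    # True with high probability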
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for RegexReplace op from string_ops.\"\"\"\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gen_string_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.platform import test\n\n\[email protected](\n (gen_string_ops.regex_replace),\n (gen_string_ops.static_regex_replace))\nclass RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):\n\n @test_util.run_deprecated_v1\n def testForwarding(self, op):\n with self.cached_session():\n # Generate an input that is uniquely consumed by the regex op.\n # This exercises code paths which are optimized for this case\n # (e.g., using forwarding).\n inp = string_ops.substr(\n constant_op.constant([\"AbCdEfG\",\n \"HiJkLmN\"], dtypes.string),\n pos=0,\n len=5)\n stripped = op(inp, \"\\\\p{Ll}\", \".\")\n self.assertAllEqual([b\"A.C.E\", b\"H.J.L\"], stripped)\n\n @test_util.run_deprecated_v1\n def testRemovePrefix(self, op):\n values = [\"a:foo\", \"a:bar\", \"a:foo\", \"b:baz\", \"b:qux\", \"ca:b\"]\n with self.cached_session():\n input_vector = constant_op.constant(values, dtypes.string)\n stripped = op(input_vector, \"^(a:|b:)\", \"\", replace_global=False)\n self.assertAllEqual([b\"foo\", b\"bar\", b\"foo\", b\"baz\", b\"qux\", b\"ca:b\"],\n stripped)\n\n @test_util.run_deprecated_v1\n def testRegexReplace(self, op):\n values = [\"aba\\naba\", \"abcdabcde\"]\n with self.cached_session():\n input_vector = constant_op.constant(values, dtypes.string)\n stripped = op(input_vector, \"a.*a\", \"(\\\\0)\")\n self.assertAllEqual([b\"(aba)\\n(aba)\", b\"(abcda)bcde\"], stripped)\n\n @test_util.run_deprecated_v1\n def testEmptyMatch(self, op):\n values = [\"abc\", \"1\"]\n with self.cached_session():\n input_vector = constant_op.constant(values, dtypes.string)\n stripped = op(input_vector, \"\", \"x\")\n self.assertAllEqual([b\"xaxbxcx\", b\"x1x\"], stripped)\n\n @test_util.run_deprecated_v1\n def testInvalidPattern(self, op):\n values = [\"abc\", \"1\"]\n with self.cached_session():\n input_vector = constant_op.constant(values, dtypes.string)\n invalid_pattern = \"A[\"\n replace = op(input_vector, invalid_pattern, \"x\")\n with self.assertRaisesOpError(\"Invalid pattern\"):\n self.evaluate(replace)\n\n @test_util.run_deprecated_v1\n def testGlobal(self, op):\n values = [\"ababababab\", \"abcabcabc\", \"\"]\n with self.cached_session():\n input_vector = constant_op.constant(values, dtypes.string)\n stripped = op(input_vector, \"ab\", \"abc\", True)\n self.assertAllEqual([b\"abcabcabcabcabc\", b\"abccabccabcc\", b\"\"], stripped)\n\n\ndef as_string(s):\n return s\n\n\ndef as_tensor(s):\n return constant_op.constant(s, dtypes.string)\n\n\nclass 
RegexReplaceTest(test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\n (as_string, as_tensor),\n (as_tensor, as_string),\n (as_tensor, as_tensor))\n @test_util.run_deprecated_v1\n def testRegexReplaceDelegation(self, pattern_fn, rewrite_fn):\n with self.cached_session():\n input_vector = constant_op.constant(\"foo\", dtypes.string)\n pattern = pattern_fn(\"[a-z]\")\n replace = rewrite_fn(\".\")\n op = string_ops.regex_replace(input_vector, pattern, replace)\n self.assertTrue(op.name.startswith(\"RegexReplace\"))\n\n @test_util.run_deprecated_v1\n def testStaticRegexReplaceDelegation(self):\n with self.cached_session():\n input_vector = constant_op.constant(\"foo\", dtypes.string)\n pattern = \"[a-z]\"\n replace = \".\"\n op = string_ops.regex_replace(input_vector, pattern, replace)\n self.assertTrue(op.name.startswith(\"StaticRegexReplace\"))\n\nif __name__ == \"__main__\":\n test.main()\n",
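A small sketch of the same two behaviors through the public tf.strings.regex_replace API (TF2 eager assumed): the \0 backreference inserts the whole match, and replace_global=False rewrites only the first match per string, mirroring testRegexReplace and testRemovePrefix above.

import tensorflow as tf

values = tf.constant(["aba\naba", "abcdabcde"])
print(tf.strings.regex_replace(values, "a.*a", r"(\0)").numpy())
# [b'(aba)\n(aba)' b'(abcda)bcde']

prefixed = tf.constant(["a:foo", "b:bar", "ca:b"])
print(tf.strings.regex_replace(prefixed, "^(a:|b:)", "",
                               replace_global=False).numpy())
# [b'foo' b'bar' b'ca:b']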
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset`.\"\"\"\n\nimport collections\nimport os\nimport warnings\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python.data.experimental.ops import testing\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import optional_ops\nfrom tensorflow.python.data.ops import options as options_lib\nfrom tensorflow.python.data.ops import readers\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import structure\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.lib.io import tf_record\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import test\n\n\nclass DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(test_base.default_test_combinations())\n def testAsSerializedGraph(self):\n dataset = dataset_ops.Dataset.range(10)\n graph = graph_pb2.GraphDef().FromString(\n self.evaluate(dataset._as_serialized_graph()))\n self.assertTrue(any(node.op == \"RangeDataset\" for node in graph.node))\n\n def testAsSerializedGraphStateful(self):\n dataset = dataset_ops.Dataset.range(10).map(\n lambda _: random_ops.random_uniform(()))\n with self.assertRaises(errors.FailedPreconditionError):\n self.evaluate(\n dataset._as_serialized_graph(external_state_policy=options_lib\n .ExternalStatePolicy.FAIL))\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(\n init_source=[\"textfile\", \"keyvaluetensor\", \"dataset\"])))\n def testLookupTableGraphSerialization(self, init_source):\n vals = [10, 11]\n initializer = self.lookupTableInitializer(init_source, vals)\n table = lookup_ops.StaticHashTable(initializer, -1)\n dataset = dataset_ops.Dataset.range(3)\n dataset = dataset.map(table.lookup)\n self.evaluate(lookup_ops.tables_initializer())\n round_tripped = self.graphRoundTrip(dataset)\n del table\n del dataset\n self.assertDatasetProduces(\n round_tripped, [10, 11, -1], requires_initialization=True)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testAsFunctionWithMap(self):\n with 
ops.device(\"CPU\"):\n original_dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)\n fn = original_dataset._trace_variant_creation()\n variant = fn()\n\n revived_dataset = dataset_ops._VariantDataset(\n variant, original_dataset.element_spec)\n self.assertDatasetProduces(revived_dataset, range(0, 10, 2))\n\n @combinations.generate(test_base.eager_only_combinations())\n def testAsFunctionWithMapInFlatMap(self):\n with ops.device(\"CPU\"):\n original_dataset = dataset_ops.Dataset.range(5).flat_map(\n lambda x: dataset_ops.Dataset.range(5).map(lambda x: x * 2))\n fn = original_dataset._trace_variant_creation()\n variant = fn()\n\n revived_dataset = dataset_ops._VariantDataset(\n variant, original_dataset.element_spec)\n self.assertDatasetProduces(revived_dataset, list(original_dataset))\n\n @combinations.generate(test_base.eager_only_combinations())\n def testAsFunctionFromReader(self):\n with ops.device(\"CPU\"):\n file_path = os.path.join(self.get_temp_dir(),\n \"{}.tfrecord.gz\".format(\"tf_record_asset\"))\n with tf_record.TFRecordWriter(file_path, \"GZIP\") as f:\n for v in [\"a\", \"aa\", \"aaa\"]:\n f.write(str(v))\n original_dataset = readers.TFRecordDataset([file_path],\n compression_type=\"GZIP\")\n fn = original_dataset._trace_variant_creation()\n variant = fn()\n\n revived_dataset = dataset_ops._VariantDataset(\n variant, original_dataset.element_spec)\n self.assertDatasetProduces(revived_dataset, [\"a\", \"aa\", \"aaa\"])\n\n def _testNumInputs(self, dataset, num_inputs):\n self.assertLen(dataset._inputs(), num_inputs)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFixedLengthRecordInputs(self):\n dataset = readers.FixedLengthRecordDataset(\"\", 42)\n self._testNumInputs(dataset, 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFromGeneratorInputs(self):\n def gen():\n yield 42\n\n dataset = dataset_ops.Dataset.from_generator(gen, dtypes.int32)\n self._testNumInputs(dataset, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFromTensorsInputs(self):\n dataset = dataset_ops.Dataset.from_tensors([42])\n self._testNumInputs(dataset, 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testRangeInputs(self):\n dataset = dataset_ops.Dataset.range(10)\n self._testNumInputs(dataset, 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testTextLineInputs(self):\n dataset = readers.TextLineDataset(\"\")\n self._testNumInputs(dataset, 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testTFRecordInputs(self):\n dataset = readers.TFRecordDataset(\"\")\n self._testNumInputs(dataset, 1)\n\n @combinations.generate(\n combinations.combine(tf_api_version=1, mode=[\"eager\", \"graph\"]))\n def testDatasetComplexSourceInputs(self):\n dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices(\n sparse_tensor.SparseTensor(\n indices=np.array([[0, 0], [1, 0], [2, 0]]),\n values=np.array([0, 0, 0]),\n dense_shape=np.array([3, 1])))\n self.assertEmpty(dataset_fn._inputs())\n\n def _testUnaryInputs(self, dataset_fn):\n input_dataset = dataset_ops.Dataset.range(0)\n self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())\n\n @combinations.generate(test_base.default_test_combinations())\n def testBatchInputs(self):\n self._testUnaryInputs(lambda x: x.batch(10))\n\n @combinations.generate(test_base.default_test_combinations())\n def testCacheInputs(self):\n self._testUnaryInputs(lambda x: x.cache())\n\n 
@combinations.generate(test_base.default_test_combinations())\n def testFilterInputs(self):\n self._testUnaryInputs(lambda x: x.filter(lambda x: True))\n\n @combinations.generate(test_base.default_test_combinations())\n def testFlatMapInputs(self):\n self._testUnaryInputs(\n lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)))\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapInputs(self):\n self._testUnaryInputs(lambda x: x.map(lambda x: x))\n\n @combinations.generate(test_base.default_test_combinations())\n def testPaddedBatchInputs(self):\n self._testUnaryInputs(lambda x: x.padded_batch(10, []))\n\n @combinations.generate(test_base.default_test_combinations())\n def testParallelMapInputs(self):\n self._testUnaryInputs(lambda x: x.map(lambda x: x, num_parallel_calls=2))\n\n @combinations.generate(test_base.default_test_combinations())\n def testRepeatInputs(self):\n self._testUnaryInputs(lambda x: x.repeat())\n\n @combinations.generate(test_base.default_test_combinations())\n def testShuffleInputs(self):\n self._testUnaryInputs(lambda x: x.shuffle(10))\n\n @combinations.generate(test_base.default_test_combinations())\n def testSkipInputs(self):\n self._testUnaryInputs(lambda x: x.skip(1))\n\n @combinations.generate(test_base.default_test_combinations())\n def testTakeInputs(self):\n self._testUnaryInputs(lambda x: x.take(1))\n\n @combinations.generate(test_base.default_test_combinations())\n def testWindowInputs(self):\n self._testUnaryInputs(lambda x: x.window(10))\n\n @combinations.generate(test_base.default_test_combinations())\n def testUnaryTransformationInputsApply(self):\n input_dataset = dataset_ops.Dataset.range(0)\n dataset = input_dataset.apply(lambda dataset: dataset.cache())\n\n self.assertEqual([input_dataset], dataset._inputs())\n\n def _testInputsWithInterleaveFn(self, dataset_fn, interleave_parallelism):\n input_dataset = dataset_ops.Dataset.range(0)\n dataset = input_dataset.interleave(\n lambda x: dataset_ops.Dataset.range(0),\n cycle_length=2,\n num_parallel_calls=interleave_parallelism)\n self.assertEqual([input_dataset], dataset._inputs())\n\n @combinations.generate(test_base.default_test_combinations())\n def testParallelInterleaveInputs(self):\n self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), 2)\n\n @combinations.generate(test_base.default_test_combinations())\n def testInterleaveInputs(self):\n self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), None)\n\n @combinations.generate(test_base.default_test_combinations())\n def testDebugString(self):\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.map(lambda x: x**2)\n dataset = dataset.filter(lambda x: x > 10)\n debug_string = dataset.__debug_string__()\n for transformation in [\"Range\", \"Map\", \"Filter\"]:\n self.assertContainsSubsequence(debug_string, transformation)\n\n @combinations.generate(test_base.default_test_combinations())\n def testNoWarnings(self):\n with test.mock.patch.object(warnings, \"warn\") as mock_log:\n dataset_ops.Dataset.range(0).interleave(\n lambda x: dataset_ops.Dataset.range(0), cycle_length=2)\n self.assertEmpty(mock_log.call_args_list)\n\n def _testBinaryInputs(self, dataset_fn):\n input1 = dataset_ops.Dataset.range(0)\n input2 = dataset_ops.Dataset.range(1)\n self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())\n\n @combinations.generate(test_base.default_test_combinations())\n def testConcatenateInputs(self):\n self._testBinaryInputs(lambda x, y: x.concatenate(y))\n\n def 
_testVariadicInputs(self, dataset_fn, input_datasets):\n self.assertEqual(\n nest.flatten(input_datasets),\n dataset_fn(input_datasets)._inputs())\n\n @combinations.generate(test_base.default_test_combinations())\n def testZipOneInputs(self):\n input_datasets = dataset_ops.Dataset.range(0)\n self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)\n\n @combinations.generate(test_base.default_test_combinations())\n def testZipNestInputs(self):\n input_datasets = (dataset_ops.Dataset.range(0),\n (dataset_ops.Dataset.range(1),\n dataset_ops.Dataset.range(2)))\n self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)\n\n @combinations.generate(test_base.default_test_combinations())\n def testZipTupleInputs(self):\n input_datasets = (dataset_ops.Dataset.range(0),\n dataset_ops.Dataset.range(1))\n self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFunctions(self):\n dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)\n self.assertLen(dataset._functions(), 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testCollectInputs(self):\n ds1 = dataset_ops.Dataset.range(0)\n ds2 = ds1.concatenate(ds1)\n ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))\n\n inputs = []\n queue = [ds3]\n while queue:\n ds = queue[0]\n queue = queue[1:]\n queue.extend(ds._inputs())\n inputs.append(ds)\n\n self.assertEqual(5, inputs.count(ds1))\n self.assertEqual(2, inputs.count(ds2))\n self.assertEqual(1, inputs.count(ds3))\n\n def _testDatasetSpec(self, tf_value, expected_element_structure):\n dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value)\n dataset_structure = structure.type_spec_from_value(dataset)\n self.assertIsInstance(dataset_structure, dataset_ops.DatasetSpec)\n\n self.assertTrue(\n structure.are_compatible(\n dataset_ops.get_structure(dataset), expected_element_structure))\n self.assertEqual([dtypes.variant],\n structure.get_flat_tensor_types(dataset_structure))\n self.assertEqual([tensor_shape.TensorShape([])],\n structure.get_flat_tensor_shapes(dataset_structure))\n\n # Assert that the `Dataset` survives a round-trip via _from_tensor_list()\n # and _to_tensor_list().\n round_trip_dataset = dataset_structure._from_tensor_list(\n dataset_structure._to_tensor_list(dataset))\n\n value = tf_value\n\n if isinstance(value, dataset_ops.Dataset):\n self.assertDatasetsEqual(value, dataset.flat_map(lambda x: x))\n elif isinstance(value, optional_ops.Optional):\n self.assertDatasetProduces(\n round_trip_dataset.map(lambda opt: opt.get_value()),\n [self.evaluate(value.get_value())],\n requires_initialization=True)\n else:\n self.assertDatasetProduces(\n round_trip_dataset, [self.evaluate(tf_value)],\n requires_initialization=True)\n\n @combinations.generate(test_base.default_test_combinations())\n def testTensorDatasetSpec(self):\n self._testDatasetSpec(\n constant_op.constant(37.0), tensor_spec.TensorSpec([], dtypes.float32))\n\n @combinations.generate(test_base.default_test_combinations())\n def testSparseTensorDatasetSpec(self):\n self._testDatasetSpec(\n sparse_tensor.SparseTensor(\n indices=[[0]],\n values=constant_op.constant([0], dtype=dtypes.int32),\n dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32))\n\n @combinations.generate(test_base.default_test_combinations())\n def testNestDatasetSpec(self):\n self._testDatasetSpec(\n {\n \"a\": constant_op.constant(37.0),\n \"b\": (constant_op.constant([\"Foo\"]), constant_op.constant(\"Bar\"))\n 
}, {\n \"a\":\n tensor_spec.TensorSpec([], dtypes.float32),\n \"b\": (\n tensor_spec.TensorSpec([1], dtypes.string),\n tensor_spec.TensorSpec([], dtypes.string),\n )\n })\n\n @combinations.generate(test_base.default_test_combinations())\n def testDatasetDatasetSpec(self):\n self._testDatasetSpec(\n dataset_ops.Dataset.from_tensor_slices(\n constant_op.constant([1, 2, 3])),\n dataset_ops.DatasetSpec(tensor_spec.TensorSpec([], dtypes.int32)))\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptionalDatasetSpec(self):\n self._testDatasetSpec(\n optional_ops.Optional.from_value(37.0),\n optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32)))\n\n @combinations.generate(test_base.graph_only_combinations())\n def testSameGraphError(self):\n dataset = dataset_ops.Dataset.range(10)\n with ops.Graph().as_default():\n with self.assertRaisesRegex(ValueError, \"must be from the same graph\"):\n dataset = dataset.batch(2)\n\n @combinations.generate(\n combinations.combine(tf_api_version=[1], mode=[\"graph\"]))\n def testSameGraphErrorOneShot(self):\n dataset = dataset_ops.Dataset.range(10)\n with ops.Graph().as_default():\n with self.assertRaisesRegex(ValueError,\n \"make sure that the dataset is created in \"\n \"the same graph as the iterator\"):\n _ = dataset_ops.make_one_shot_iterator(dataset)\n\n @combinations.generate(\n combinations.combine(tf_api_version=[1], mode=[\"graph\"]))\n def testSameGraphErrorInitializable(self):\n dataset = dataset_ops.Dataset.range(10)\n with ops.Graph().as_default():\n with self.assertRaisesRegex(ValueError,\n \"make sure that the dataset is created in \"\n \"the same graph as the iterator\"):\n _ = dataset_ops.make_initializable_iterator(dataset)\n\n @combinations.generate(\n combinations.times(\n test_base.eager_only_combinations(),\n combinations.combine(execution_mode=[context.ASYNC, context.SYNC])))\n def testEagerIteration(self, execution_mode):\n with context.execution_mode(execution_mode):\n val = 0\n dataset = dataset_ops.Dataset.range(10)\n for foo in dataset:\n self.assertEqual(val, foo.numpy())\n val += 1\n\n @combinations.generate(test_base.default_test_combinations())\n def testDatasetAsFunctionArgument(self):\n\n @def_function.function\n def _uses_dataset(d):\n accumulator = array_ops.zeros([], dtype=dtypes.int64)\n for value in d:\n accumulator += value\n return accumulator\n\n with ops.device(\"CPU\"):\n first_dataset = dataset_ops.Dataset.range(10)\n self.assertEqual(45, self.evaluate(_uses_dataset(first_dataset)))\n second_dataset = dataset_ops.Dataset.range(11)\n self.assertEqual(55, self.evaluate(_uses_dataset(second_dataset)))\n first_concrete = _uses_dataset.get_concrete_function(first_dataset)\n # The dataset should not be a captured input\n self.assertEmpty(first_concrete.graph.captures)\n # The two datasets have the same structure and so should re-use a trace.\n self.assertIs(first_concrete,\n _uses_dataset.get_concrete_function(second_dataset))\n # With a different structure we should use a different trace.\n self.assertIsNot(\n first_concrete,\n _uses_dataset.get_concrete_function(\n dataset_ops.Dataset.zip((first_dataset, second_dataset))))\n\n @combinations.generate(test_base.default_test_combinations())\n def testLimitedRetracing(self):\n trace_count = [0]\n\n @def_function.function\n def f(ds):\n trace_count[0] += 1\n counter = np.int64(0)\n for elem in ds:\n counter += elem\n return counter\n\n dataset = dataset_ops.Dataset.range(5)\n dataset2 = dataset_ops.Dataset.range(10)\n\n for _ in 
range(10):\n self.assertEqual(self.evaluate(f(dataset)), 10)\n self.assertEqual(self.evaluate(f(dataset2)), 45)\n self.assertEqual(trace_count[0], 1)\n\n # pylint: disable=g-long-lambda,unnecessary-lambda\n @combinations.generate(test_base.default_test_combinations())\n def testLegacyStructureAPI(self):\n components = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5.]),\n np.array([6., 7.])),\n np.array([8, 9, 10], dtype=np.int64))\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEqual(\n (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual(([3], ([2], [2]), [3]),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.shuffle(10, 10)\n self.assertEqual(\n (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual(([3], ([2], [2]), [3]),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.repeat(-1)\n self.assertEqual(\n (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual(([3], ([2], [2]), [3]),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.filter(lambda x, y, z: True)\n self.assertEqual(\n (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual(([3], ([2], [2]), [3]),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.take(5)\n self.assertEqual(\n (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual(([3], ([2], [2]), [3]),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))\n self.assertEqual(\n ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual((([3], [3]), ([2], [2])),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.flat_map(lambda x, y: dataset_ops.Dataset.from_tensors(\n ((x[0], x[1]), (y[0], y[1]))))\n self.assertEqual(\n ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual((([3], [3]), ([2], [2])),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n dataset = dataset.batch(32)\n self.assertEqual(\n ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),\n dataset_ops.get_legacy_output_types(dataset))\n dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)\n self.assertEqual(\n (([None, 3], [None, 3]), ([None, 2], [None, 2])),\n nest.pack_sequence_as(\n dataset_output_shapes,\n [s.as_list() for s in nest.flatten(dataset_output_shapes)]))\n\n # Define a separate set of components with matching leading\n # dimension for the from-slices constructor.\n components_for_slices = (np.array([1, 2, 3],\n dtype=np.int64), (np.array([4., 5., 6.]),\n np.array([7., 8., 9.])),\n np.array([10, 11, 12], dtype=np.int64))\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)\n self.assertEqual(\n (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),\n dataset_ops.get_legacy_output_types(dataset))\n self.assertEqual(([], ([], []), []),\n dataset_ops.get_legacy_output_shapes(dataset))\n\n @combinations.generate(test_base.default_test_combinations())\n def testNoneComponent(self):\n dataset = 
dataset_ops.Dataset.from_tensors((42, None))\n if context.executing_eagerly():\n self.assertDatasetProduces(dataset, expected_output=[(42, None)])\n else:\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n next_first, next_second = iterator.get_next()\n self.assertEqual(next_second, None)\n with self.cached_session() as sess:\n self.assertEqual(sess.run(next_first), 42)\n\n @combinations.generate(test_base.default_test_combinations())\n def testNoneComponentInFunction(self):\n\n @def_function.function\n def fn(ds):\n total = 0\n it = iter(ds)\n for elem in it:\n x, _ = elem\n total += x\n return total\n\n dataset = dataset_ops.Dataset.range(\n 10, output_type=dtypes.int32).map(lambda x: (x, None))\n self.assertEqual(self.evaluate(fn(dataset)), 45)\n\n @combinations.generate(test_base.default_test_combinations())\n def testIncorrectPythonStructure(self):\n # Tests that an exception is raised (as opposed to a segfault) when the\n # Python structure assigned to a dataset is incorrect.\n dataset = dataset_ops.Dataset.range(10)\n spec = tensor_spec.TensorSpec([], dtypes.int64)\n new_structure = (spec, spec)\n dataset = dataset_ops._RestructuredDataset(dataset, new_structure)\n dataset = dataset.map(lambda x, y: y)\n\n with self.assertRaisesOpError(\"\"):\n self.getDatasetOutput(dataset)\n\n @combinations.generate(test_base.default_test_combinations())\n def testNamedTupleStructure(self):\n Foo = collections.namedtuple(\"Foo\", [\"a\", \"b\"])\n x = Foo(a=3, b=\"test\")\n dataset = dataset_ops.Dataset.from_tensors(x)\n dataset = dataset_ops.Dataset.from_tensor_slices([dataset, dataset])\n self.assertEqual(\n str(dataset.element_spec),\n \"DatasetSpec(Foo(a=TensorSpec(shape=(), dtype=tf.int32, name=None), \"\n \"b=TensorSpec(shape=(), dtype=tf.string, name=None)), TensorShape([]))\")\n\n @combinations.generate(test_base.eager_only_combinations())\n def testIterationError(self):\n\n @def_function.function(autograph=False)\n def fn(ds):\n for _ in ds:\n pass\n\n dataset = dataset_ops.Dataset.range(10)\n with self.assertRaises(ValueError):\n self.evaluate(fn(dataset))\n\n\nclass DebugDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n def setUp(self):\n super(DebugDatasetTest, self).setUp()\n dataset_ops.toggle_debug_mode(True)\n\n def tearDown(self):\n dataset_ops.toggle_debug_mode(False)\n super(DebugDatasetTest, self).tearDown()\n\n @combinations.generate(test_base.eager_only_combinations())\n def testDebugModeEagerExecution(self):\n counter = []\n ds = dataset_ops.Dataset.range(10)\n\n def map_fn(x):\n counter.append(1)\n return x\n\n ds = ds.map(map_fn)\n self.assertDatasetProduces(ds, list(range(10)))\n\n # The body of `map_fn` will be executed 11 times since the implementation\n # traces the function to figure out what the types and shapes of its\n # outputs are.\n self.assertLen(counter, 11)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testDebugModeSequentialExecution(self):\n ds = dataset_ops.Dataset.range(10)\n ds = ds.apply(\n testing.assert_next([\"Interleave\", \"Map\", \"Batch\", \"FiniteTake\"]))\n ds = ds.interleave(\n lambda x: dataset_ops.Dataset.from_tensors(x),\n cycle_length=10,\n num_parallel_calls=10)\n ds = ds.map(lambda x: x * x, num_parallel_calls=10)\n ds = ds.batch(batch_size=5, num_parallel_calls=2)\n ds = ds.prefetch(buffer_size=2)\n ds = ds.take(2)\n self.assertDatasetProduces(ds, [[0, 1, 4, 9, 16], [25, 36, 49, 64, 81]])\n\n\nif __name__ == \"__main__\":\n test.main()\n",
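Several of the tests above revolve around dataset structure: element_spec, DatasetSpec, and how datasets behave as values inside tf.function. A minimal public-API sketch (TF2 eager assumed; the element structure is arbitrary):

import tensorflow as tf

ds = tf.data.Dataset.range(5).map(
    lambda x: {"a": x, "b": (tf.cast(x, tf.float32), "const")})
print(ds.element_spec)
# {'a': TensorSpec(shape=(), dtype=tf.int64, name=None),
#  'b': (TensorSpec(shape=(), dtype=tf.float32, name=None),
#        TensorSpec(shape=(), dtype=tf.string, name=None))}

# DatasetSpec describes a dataset *as a value*, e.g. when it is passed into a
# tf.function or nested inside another dataset.
spec = tf.data.DatasetSpec.from_value(ds)
print(spec.element_spec == ds.element_spec)  # True

print(sum(int(elem["a"]) for elem in ds))    # 0 + 1 + 2 + 3 + 4 = 10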
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for conv with activations.\"\"\"\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\ndef make_conv_activation_tests(activation_op):\n \"\"\"Make a set of tests to do convolution with activation.\"\"\"\n\n def f(options):\n \"\"\"Actual function that generates examples.\"\"\"\n test_parameters = [\n {\n \"input_shape\": [[1, 3, 4, 3], [4, 6, 6, 1]],\n \"filter_shape\": [[1, 1], [2, 3], [3, 3]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n \"constant_filter\": [True, False],\n \"channel_multiplier\": [1, 2],\n \"fully_quantize\": [False],\n \"quant_16x8\": [False],\n \"dynamic_range_quantize\": [False],\n },\n # TODO(b/134702301): The fully_quantize param is just ignored by the\n # MLIR testing path now, resulting in duplicate tests. Either ignore\n # these tests or handle it properly in the mlir_convert() function.\n {\n \"input_shape\": [[1, 3, 4, 3], [4, 6, 6, 1]],\n \"filter_shape\": [[1, 1], [2, 3]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1], [1, 3, 2, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n \"constant_filter\": [True],\n \"channel_multiplier\": [1, 2],\n \"fully_quantize\": [True],\n \"quant_16x8\": [False, True],\n \"dynamic_range_quantize\": [False],\n },\n {\n \"input_shape\": [[1, 3, 4, 3]],\n \"filter_shape\": [[1, 1], [2, 3], [3, 3]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"],\n \"constant_filter\": [True],\n \"channel_multiplier\": [1, 2],\n \"fully_quantize\": [False],\n \"quant_16x8\": [False],\n \"dynamic_range_quantize\": [True],\n },\n ]\n\n def get_tensor_shapes(parameters):\n input_shape = parameters[\"input_shape\"]\n filter_size = parameters[\"filter_shape\"]\n filter_shape = filter_size + [\n input_shape[3], parameters[\"channel_multiplier\"]\n ]\n return [input_shape, filter_shape]\n\n def build_graph(parameters):\n \"\"\"Build a conv graph given `parameters`.\"\"\"\n input_shape, filter_shape = get_tensor_shapes(parameters)\n input_tensor = tf.compat.v1.placeholder(\n dtype=tf.float32, name=\"input\", shape=input_shape)\n\n # Get filter input either as a placeholder or constants. 
Also get a list\n # of the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n filter_input = create_tensor_data(\n np.float32, filter_shape, min_value=-10, max_value=10)\n input_tensors = [input_tensor]\n else:\n filter_input = tf.compat.v1.placeholder(\n dtype=tf.float32, name=\"filter\", shape=filter_shape)\n input_tensors = [input_tensor, filter_input]\n\n out = tf.nn.conv2d(\n input_tensor,\n filter_input,\n strides=parameters[\"strides\"],\n dilations=parameters[\"dilations\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n out = activation_op(out)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Build inputs for conv with activation.\"\"\"\n\n input_shape, filter_shape = get_tensor_shapes(parameters)\n values = [\n create_tensor_data(\n np.float32, input_shape, min_value=-1, max_value=1)\n ]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, filter_shape))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(\n options,\n test_parameters,\n build_graph,\n build_inputs,\n expected_tf_failures=48)\n\n return f\n\n\n@register_make_test_function()\ndef make_conv_relu6_tests(options):\n \"\"\"Make a set of tests to do conv_relu6.\"\"\"\n return make_conv_activation_tests(tf.nn.relu6)(options)\n\n\n@register_make_test_function()\ndef make_conv_relu_tests(options):\n \"\"\"Make a set of tests to do conv_relu.\"\"\"\n return make_conv_activation_tests(tf.nn.relu)(options)\n\n\ndef relu1(input_tensor):\n # Note that the following is not supported:\n # out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))\n out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))\n return out\n\n\n@register_make_test_function()\ndef make_conv_relu1_tests(options):\n \"\"\"Make a set of tests to do conv_relu1.\"\"\"\n return make_conv_activation_tests(relu1)(options)\n",
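Every generated test above builds the same graph shape: a tf.nn.conv2d followed by the activation under test (relu, relu6, or the clamped relu1 defined in this file). A minimal eager sketch of one parameter combination, with shapes picked arbitrarily for illustration:

import numpy as np
import tensorflow as tf

def relu1(x):
  # Same clamp as above: min(1, max(x, -1)).
  return tf.minimum(1.0, tf.maximum(x, -1.0))

# NHWC input [1, 3, 4, 3], 2x3 filter, channel_multiplier 1, stride 1, SAME padding.
inputs = tf.constant(np.random.uniform(-1, 1, size=(1, 3, 4, 3)), dtype=tf.float32)
filters = tf.constant(np.random.uniform(-10, 10, size=(2, 3, 3, 1)), dtype=tf.float32)

for name, activation in [("relu", tf.nn.relu), ("relu6", tf.nn.relu6),
                         ("relu1", relu1)]:
  out = activation(tf.nn.conv2d(inputs, filters, strides=[1, 1, 1, 1],
                                padding="SAME", data_format="NHWC",
                                dilations=[1, 1, 1, 1]))
  print(name, out.shape)  # (1, 3, 4, 1): SAME padding, stride 1, one output channel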
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for SimpleRNN layer.\"\"\"\n\nimport copy\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\n\n\[email protected](combinations.keras_mode_combinations())\nclass SimpleRNNLayerTest(test.TestCase, parameterized.TestCase):\n\n def test_return_sequences_SimpleRNN(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n testing_utils.layer_test(\n keras.layers.SimpleRNN,\n kwargs={'units': units,\n 'return_sequences': True},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n @testing_utils.run_v2_only\n def test_float64_SimpleRNN(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n testing_utils.layer_test(\n keras.layers.SimpleRNN,\n kwargs={'units': units,\n 'return_sequences': True,\n 'dtype': 'float64'},\n input_shape=(num_samples, timesteps, embedding_dim),\n input_dtype='float64')\n\n def test_dynamic_behavior_SimpleRNN(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))\n model = keras.models.Sequential()\n model.add(layer)\n model.compile('rmsprop', 'mse')\n x = np.random.random((num_samples, timesteps, embedding_dim))\n y = np.random.random((num_samples, units))\n model.train_on_batch(x, y)\n\n def test_dropout_SimpleRNN(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n testing_utils.layer_test(\n keras.layers.SimpleRNN,\n kwargs={'units': units,\n 'dropout': 0.1,\n 'recurrent_dropout': 0.1},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_implementation_mode_SimpleRNN(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n for mode in [0, 1, 2]:\n testing_utils.layer_test(\n keras.layers.SimpleRNN,\n kwargs={'units': units,\n 'implementation': mode},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_constraints_SimpleRNN(self):\n embedding_dim = 4\n layer_class = keras.layers.SimpleRNN\n k_constraint = keras.constraints.max_norm(0.01)\n r_constraint = keras.constraints.max_norm(0.01)\n b_constraint = keras.constraints.max_norm(0.01)\n layer = layer_class(\n 5,\n return_sequences=False,\n weights=None,\n input_shape=(None, embedding_dim),\n kernel_constraint=k_constraint,\n recurrent_constraint=r_constraint,\n bias_constraint=b_constraint)\n layer.build((None, None, embedding_dim))\n self.assertEqual(layer.cell.kernel.constraint, k_constraint)\n self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)\n 
self.assertEqual(layer.cell.bias.constraint, b_constraint)\n\n def test_with_masking_layer_SimpleRNN(self):\n layer_class = keras.layers.SimpleRNN\n inputs = np.random.random((2, 3, 4))\n targets = np.abs(np.random.random((2, 3, 5)))\n targets /= targets.sum(axis=-1, keepdims=True)\n model = keras.models.Sequential()\n model.add(keras.layers.Masking(input_shape=(3, 4)))\n model.add(layer_class(units=5, return_sequences=True, unroll=False))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)\n\n def test_from_config_SimpleRNN(self):\n layer_class = keras.layers.SimpleRNN\n for stateful in (False, True):\n l1 = layer_class(units=1, stateful=stateful)\n l2 = layer_class.from_config(l1.get_config())\n assert l1.get_config() == l2.get_config()\n\n def test_deep_copy_SimpleRNN(self):\n cell = keras.layers.SimpleRNNCell(5)\n copied_cell = copy.deepcopy(cell)\n self.assertEqual(copied_cell.units, 5)\n self.assertEqual(cell.get_config(), copied_cell.get_config())\n\n def test_regularizers_SimpleRNN(self):\n embedding_dim = 4\n layer_class = keras.layers.SimpleRNN\n layer = layer_class(\n 5,\n return_sequences=False,\n weights=None,\n input_shape=(None, embedding_dim),\n kernel_regularizer=keras.regularizers.l1(0.01),\n recurrent_regularizer=keras.regularizers.l1(0.01),\n bias_regularizer='l2',\n activity_regularizer='l1')\n layer.build((None, None, 2))\n self.assertLen(layer.losses, 3)\n\n x = keras.backend.variable(np.ones((2, 3, 2)))\n layer(x)\n if context.executing_eagerly():\n self.assertLen(layer.losses, 4)\n else:\n self.assertLen(layer.get_losses_for(x), 1)\n\n def test_statefulness_SimpleRNN(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n layer_class = keras.layers.SimpleRNN\n model = keras.models.Sequential()\n model.add(\n keras.layers.Embedding(\n 4,\n embedding_dim,\n mask_zero=True,\n input_length=timesteps,\n batch_input_shape=(num_samples, timesteps)))\n layer = layer_class(\n units, return_sequences=False, stateful=True, weights=None)\n model.add(layer)\n model.compile(\n optimizer=gradient_descent.GradientDescentOptimizer(0.01),\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n out1 = model.predict(np.ones((num_samples, timesteps)))\n self.assertEqual(out1.shape, (num_samples, units))\n\n # train once so that the states change\n model.train_on_batch(\n np.ones((num_samples, timesteps)), np.ones((num_samples, units)))\n out2 = model.predict(np.ones((num_samples, timesteps)))\n\n # if the state is not reset, output should be different\n self.assertNotEqual(out1.max(), out2.max())\n\n # check that output changes after states are reset\n # (even though the model itself didn't change)\n layer.reset_states()\n out3 = model.predict(np.ones((num_samples, timesteps)))\n self.assertNotEqual(out2.max(), out3.max())\n\n # check that container-level reset_states() works\n model.reset_states()\n out4 = model.predict(np.ones((num_samples, timesteps)))\n np.testing.assert_allclose(out3, out4, atol=1e-5)\n\n # check that the call to `predict` updated the states\n out5 = model.predict(np.ones((num_samples, timesteps)))\n self.assertNotEqual(out4.max(), out5.max())\n\n # Check masking\n layer.reset_states()\n\n left_padded_input = np.ones((num_samples, timesteps))\n left_padded_input[0, :1] = 0\n left_padded_input[1, :2] = 0\n out6 = model.predict(left_padded_input)\n\n layer.reset_states()\n\n right_padded_input = np.ones((num_samples, timesteps))\n right_padded_input[0, -1:] 
= 0\n right_padded_input[1, -2:] = 0\n out7 = model.predict(right_padded_input)\n\n np.testing.assert_allclose(out7, out6, atol=1e-5)\n\n def test_get_initial_states(self):\n batch_size = 4\n cell = keras.layers.SimpleRNNCell(20)\n initial_state = cell.get_initial_state(\n batch_size=batch_size, dtype=dtypes.float32)\n _, state = cell(np.ones((batch_size, 20), dtype=np.float32), initial_state)\n self.assertEqual(state.shape, initial_state.shape)\n\n\nif __name__ == '__main__':\n test.main()\n",
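A short usage sketch of the two behaviours these tests exercise most, return_sequences and statefulness, written against the public tf.keras API of the TF 2.x line (an assumption; the tests import internal keras modules). The sizes follow the small values used above.

import numpy as np
import tensorflow as tf

num_samples, timesteps, embedding_dim, units = 2, 3, 4, 2
x = np.random.random((num_samples, timesteps, embedding_dim)).astype(np.float32)

# return_sequences=True emits one output per timestep: (2, 3, 2) here.
print(tf.keras.layers.SimpleRNN(units, return_sequences=True)(x).shape)

# A stateful layer carries its state across predict() calls until it is reset.
rnn = tf.keras.layers.SimpleRNN(units, stateful=True)
model = tf.keras.Sequential([
    tf.keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)),
    rnn,
])
out1 = model.predict(x)
out2 = model.predict(x)   # differs from out1: the state carried over
rnn.reset_states()
out3 = model.predict(x)   # matches out1 again once the state is zeroed
np.testing.assert_allclose(out1, out3, atol=1e-5)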
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.python.client.session.Session's ClusterSpec Propagation.\n\nThese tests exercise the ClusterSpec Propagation capabilities of distributed\nSessions.\n\"\"\"\nimport numpy as np\n\nfrom tensorflow.core.protobuf import cluster_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\n# Import resource_variable_ops for the variables-to-tensor implicit conversion.\nfrom tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import server_lib\n\n\nclass SessionClusterSpecPropagationTest(test_util.TensorFlowTestCase):\n\n def testClusterSpecPropagationSimple(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config)\n output = self.evaluate(const)\n self.assertEqual(17, output)\n\n def testClusterSpecPropagationWorker2Placement(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device('/job:worker/task:1'):\n with ops.device('/cpu:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:1/device:CPU:0' ==\n dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def 
testClusterSpecPropagationWorker1Placement(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device('/job:worker/task:0'):\n const = constant_op.constant(17)\n with session.Session(server1.target, config=config, graph=g):\n output = self.evaluate(const)\n self.assertEqual(17, output)\n\n def testCanonicalDeviceNames(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device(\n '/job:worker/task:1/device:CPU:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:1/device:CPU:0' ==\n dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def testFullDeviceNames(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'renamed_worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device(\n '/job:renamed_worker/replica:0/task:1/device:CPU:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:renamed_worker/replica:0/task:1/device:CPU:0'\n == dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def testMultipleLocalDevices(self):\n # Note: CPU->CPU transfers have a fast-path in\n # BaseRemoteRendezvous::SameWorkerRecvDone that means the test doesn't\n # actually capture the motivating bug unless run on a GPU machine.\n #\n # Example error message (before bugfix -- line breaks added because lint):\n #\n # W0718 17:14:41.521534 190121 device_mgr.cc:107] Unknown device:\n # /job:worker/replica:0/task:0/device:CPU:0 all devices:\n # /job:local/replica:0/task:0/device:GPU:0,\n # /job:local/replica:0/task:0/device:GPU:0,\n # /job:local/replica:0/task:0/cpu:1, CPU:0, GPU:0,\n # /job:local/replica:0/task:0/device:CPU:1,\n # /job:local/replica:0/task:0/device:CPU:0, CPU:1,\n # /job:local/replica:0/task:0/cpu:0\n server_config = 
config_pb2.ConfigProto(device_count={'CPU': 2})\n server1 = server_lib.Server.create_local_server(config=server_config)\n server2 = server_lib.Server.create_local_server(config=server_config)\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g:\n with ops.device('/job:worker/task:1/cpu:1'):\n input1 = constant_op.constant(17, dtypes.float32)\n with ops.device('/job:worker/task:0/cpu:1'):\n input2 = constant_op.constant(3, dtypes.float32)\n with ops.device('/job:worker/task:1/cpu:0'):\n sum1 = input1 + input2\n\n if test.is_gpu_available():\n device_str = '/job:worker/task:0/device:GPU:0'\n else:\n device_str = '/job:worker/task:0/cpu:1'\n with ops.device(device_str):\n sum2 = input2 + input1\n\n with ops.device('/job:worker/task:0/cpu:0'):\n sum3 = sum1 + sum2\n with session.Session(server1.target, config=config, graph=g):\n output = self.evaluate(sum3)\n self.assertEqual(40, output)\n\n def testLegacyDeviceNames(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device('/job:worker/task:1/cpu:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:1/device:CPU:0' ==\n dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def testClusterSpecPropagationThreeServers2Graphs(self):\n \"\"\"Boots 3 servers, creates 2 sessions, ensures appropriate operations.\n\n We create 2 clusterspecs:\n 1. server2 as the master, server1 as a worker\n 2. 
server2 as the master, server3 as a worker\n\n We ensure that variables on the workers are independent.\n \"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n server3 = server_lib.Server.create_local_server()\n cluster_def1 = cluster_pb2.ClusterDef()\n job1 = cluster_def1.job.add()\n job1.name = 'worker1'\n job1.tasks[0] = server2.target[len('grpc://'):]\n job1.tasks[1] = server1.target[len('grpc://'):]\n\n cluster_def2 = cluster_pb2.ClusterDef()\n job2 = cluster_def2.job.add()\n job2.name = 'worker2'\n job2.tasks[0] = server2.target[len('grpc://'):]\n job2.tasks[1] = server3.target[len('grpc://'):]\n\n config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)\n config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)\n\n with ops.Graph().as_default() as g1:\n with ops.device('/job:worker1/task:1'):\n var1 = variables.Variable(array_ops.zeros([2]), name='var1')\n update_op1 = state_ops.assign_add(\n var1, array_ops.ones([2]), name='var1_assign_add')\n init1 = variables.global_variables_initializer()\n\n with ops.Graph().as_default() as g2:\n with ops.device('/job:worker2/task:1'):\n var2 = variables.Variable(array_ops.zeros([2]), name='var2')\n update_op2 = state_ops.assign_add(\n var2, array_ops.ones([2]), name='var2_assign_add')\n init2 = variables.global_variables_initializer()\n\n sess1 = session.Session(server2.target, graph=g1, config=config1)\n sess2 = session.Session(server2.target, graph=g2, config=config2)\n\n init1.run(session=sess1)\n init2.run(session=sess2)\n\n expected_zeros = np.zeros([2])\n expected_ones = np.ones([2])\n\n self.assertAllEqual(expected_zeros, sess1.run(var1))\n self.assertAllEqual(expected_zeros, sess2.run(var2))\n\n self.assertAllEqual(expected_ones, sess1.run(update_op1))\n self.assertAllEqual(expected_ones, sess1.run(var1))\n self.assertAllEqual(expected_zeros, sess2.run(var2))\n self.assertAllEqual(expected_ones, sess2.run(update_op2))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(update_op1))\n self.assertAllEqual(expected_ones, sess2.run(var2))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))\n\n def testClusterSpecPropagationThreeServers(self):\n \"\"\"Boots 3 servers, creates 2 sessions, ensures appropriate operations.\n\n We create 2 clusterspecs:\n 1. server2 as the master, server1 as a worker\n 2. 
server2 as the master, server3 as a worker\n\n We ensure that variables on the workers are independent.\n \"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n server3 = server_lib.Server.create_local_server()\n cluster_def1 = cluster_pb2.ClusterDef()\n job1 = cluster_def1.job.add()\n job1.name = 'worker'\n job1.tasks[0] = server2.target[len('grpc://'):]\n job1.tasks[1] = server1.target[len('grpc://'):]\n\n cluster_def2 = cluster_pb2.ClusterDef()\n job2 = cluster_def2.job.add()\n job2.name = 'worker'\n job2.tasks[0] = server2.target[len('grpc://'):]\n job2.tasks[1] = server3.target[len('grpc://'):]\n\n config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)\n config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)\n\n with ops.device('/job:worker/task:1'):\n var = variables.Variable(array_ops.zeros([2]), name='var')\n feed = array_ops.placeholder(dtypes.float32, shape=(2))\n update_op = var.assign_add(feed)\n\n sess1 = session.Session(server2.target, config=config1)\n sess2 = session.Session(server2.target, config=config2)\n\n variables.global_variables_initializer().run(session=sess1)\n variables.global_variables_initializer().run(session=sess2)\n\n expected_zeros = np.zeros([2])\n expected_ones = np.ones([2])\n\n self.assertAllEqual(expected_zeros, sess1.run(var))\n self.assertAllEqual(expected_zeros, sess2.run(var))\n self.assertAllEqual(expected_ones,\n sess1.run(update_op, feed_dict={feed: expected_ones}))\n self.assertAllEqual(expected_ones, sess1.run(var))\n self.assertAllEqual(expected_zeros, sess2.run(var))\n self.assertAllEqual(expected_ones,\n sess2.run(update_op, feed_dict={feed: expected_ones}))\n self.assertAllEqual(expected_ones + expected_ones,\n sess1.run(update_op, feed_dict={feed: expected_ones}))\n self.assertAllEqual(expected_ones, sess2.run(var))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(var))\n\n def testClusterSpecPropagationThreeServersOneCluster(self):\n \"\"\"Boots 3 servers, ensures appropriate communication across workers.\n\n Additionally, in this cluster, we ensure the master is not the 0-th worker.\n\n Note: this test only uses one session.\n \"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n server3 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server3.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n job.tasks[2] = server1.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n # Add ops to the devices in non-linear order.\n\n with ops.device('/job:worker/task:1'):\n feed1 = array_ops.placeholder(dtypes.float32, shape=(2))\n const1 = constant_op.constant(2.0)\n mul1 = const1 * feed1\n\n with ops.device('/job:worker/task:2'):\n feed2 = array_ops.placeholder(dtypes.float32, shape=(2))\n const2 = constant_op.constant(2.0)\n mul2 = const2 * feed2\n\n with ops.device('/job:worker/task:0'):\n feed0 = array_ops.placeholder(dtypes.float32, shape=(2))\n const0 = constant_op.constant(2.0)\n mul0 = const0 * feed0\n\n sum_op = mul0 + mul1 + mul2\n\n ones = np.ones([2])\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n # Run!\n with session.Session(server1.target, config=config) as sess:\n output = sess.run(\n sum_op,\n options=run_options,\n run_metadata=run_metadata,\n 
feed_dict={feed1: ones,\n feed2: ones,\n feed0: ones})\n self.assertAllEqual(6 * ones, output)\n\n self.assertEqual(\n 3,\n len([\n dev_stats.device\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:' in dev_stats.device and\n node_stats.node_name.startswith('Const')\n ]), run_metadata)\n\n def testClusterSpecPropagationIsolation(self):\n \"\"\"Test that two sessions using ClusterSpec propagation are isolated.\"\"\"\n server = server_lib.Server.create_local_server()\n init_value = array_ops.placeholder(dtypes.int32, shape=[])\n v = variables.Variable(init_value)\n\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n sess1 = session.Session(server.target, config=config)\n sess2 = session.Session(server.target, config=config)\n\n # Initially, the variable is uninitialized in both sessions.\n with self.assertRaises(errors.FailedPreconditionError):\n sess1.run(v)\n with self.assertRaises(errors.FailedPreconditionError):\n sess2.run(v)\n\n # An update in sess1 should be visible in sess1 only.\n sess1.run(v.initializer, feed_dict={init_value: 37})\n self.assertEqual(37, sess1.run(v))\n with self.assertRaises(errors.FailedPreconditionError):\n sess2.run(v)\n\n # An update in sess2 should be visible in sess2 only.\n sess2.run(v.initializer, feed_dict={init_value: 86})\n self.assertEqual(37, sess1.run(v))\n self.assertEqual(86, sess2.run(v))\n\n # Closing sess2 has no effect on the state of sess1.\n sess2.close()\n self.assertEqual(37, sess1.run(v))\n\n # Subsequent sessions will not see the state of existing sessions.\n sess3 = session.Session(server.target, config=config)\n self.assertEqual(37, sess1.run(v))\n with self.assertRaises(errors.FailedPreconditionError):\n sess3.run(v)\n\n def testClusterSpecPropagationNonIsolation(self):\n \"\"\"Test that two sessions using ClusterSpec propagation shares state.\n\n For example, the updated Variable value are visible among all worker\n sessions registered in the same server.\n \"\"\"\n server = server_lib.Server.create_local_server()\n init_value = array_ops.placeholder(dtypes.int32, shape=[])\n v = variables.Variable(init_value)\n\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n config.experimental.share_session_state_in_clusterspec_propagation = True\n\n sess1 = session.Session(server.target, config=config)\n sess2 = session.Session(server.target, config=config)\n\n # Initially, the variable is uninitialized in both sessions.\n with self.assertRaises(errors.FailedPreconditionError):\n sess1.run(v)\n with self.assertRaises(errors.FailedPreconditionError):\n sess2.run(v)\n\n # An update in sess1 should be visible in sess2.\n sess1.run(v.initializer, feed_dict={init_value: 37})\n self.assertEqual(37, sess1.run(v))\n self.assertEqual(37, sess2.run(v))\n\n # Closing sess2 has no effect on the state of sess1.\n sess2.close()\n self.assertEqual(37, sess1.run(v))\n\n # Subsequent sessions should see the state of existing sessions.\n sess3 = session.Session(server.target, config=config)\n self.assertEqual(37, sess1.run(v))\n self.assertEqual(37, sess3.run(v))\n\n def testClusterSpecPropagationNonIsolation2Graphs(self):\n \"\"\"Creates 2 sessions with each own graph, ensures appropriate 
operations.\n\n We ensure that variables on the workers shares state.\n \"\"\"\n server = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n config.experimental.share_session_state_in_clusterspec_propagation = True\n\n with ops.Graph().as_default() as g1:\n var1 = variables.Variable(array_ops.zeros([2]), name='var')\n update_op1 = state_ops.assign_add(\n var1, array_ops.ones([2]), name='var1_assign_add')\n init1 = variables.global_variables_initializer()\n\n with ops.Graph().as_default() as g2:\n var2 = variables.Variable(array_ops.zeros([2]), name='var')\n update_op2 = state_ops.assign_add(\n var2, array_ops.ones([2]), name='var2_assign_add')\n\n sess1 = session.Session(server.target, graph=g1, config=config)\n sess2 = session.Session(server.target, graph=g2, config=config)\n\n expected_zeros = np.zeros([2])\n expected_ones = np.ones([2])\n\n init1.run(session=sess1)\n self.assertAllEqual(expected_zeros, sess1.run(var1))\n self.assertAllEqual(expected_zeros, sess2.run(var2))\n\n self.assertAllEqual(expected_ones, sess1.run(update_op1))\n self.assertAllEqual(expected_ones, sess1.run(var1))\n self.assertAllEqual(expected_ones, sess2.run(var2))\n self.assertAllEqual(expected_ones + expected_ones, sess2.run(update_op2))\n self.assertAllEqual(expected_ones + expected_ones, sess2.run(var2))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))\n\n def testClusterSpecPropagationPartialRun(self):\n \"\"\"Test successful partial run with ClusterSpec propagation.\"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.device('/job:worker/task:0'):\n a = array_ops.placeholder(dtypes.float32, shape=[])\n with ops.device('/job:worker/task:1'):\n b = array_ops.placeholder(dtypes.float32, shape=[])\n c = array_ops.placeholder(dtypes.float32, shape=[])\n r1 = math_ops.add(a, b)\n with ops.device('/job:worker/task:0'):\n r2 = math_ops.multiply(r1, c)\n\n with session.Session(server1.target, config=config) as sess:\n h = sess.partial_run_setup([r1, r2], [a, b, c])\n res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})\n self.assertEqual(3, res)\n res = sess.partial_run(h, r2, feed_dict={c: 3})\n self.assertEqual(9, res)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
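All of the tests above follow one recipe: boot local servers, describe them as tasks of a job in a ClusterDef attached to the session's ConfigProto, and let the session propagate that cluster to the server it dials. A condensed sketch of that recipe through the public tf.compat.v1 / tf.distribute surface (an assumption; the tests use the internal session and server modules directly):

import tensorflow as tf

server1 = tf.distribute.Server.create_local_server()
server2 = tf.distribute.Server.create_local_server()

# Both servers become tasks of a single 'worker' job; the ClusterDef rides in
# the session config, so server1 learns about server2 at session-creation time.
cluster_def = tf.train.ClusterSpec({
    'worker': [server1.target[len('grpc://'):],
               server2.target[len('grpc://'):]],
}).as_cluster_def()
config = tf.compat.v1.ConfigProto(cluster_def=cluster_def)

g = tf.Graph()
with g.as_default(), tf.device('/job:worker/task:1'):
  const = tf.constant(17)

with tf.compat.v1.Session(server1.target, config=config, graph=g) as sess:
  print(sess.run(const))  # 17, placed on task 1 even though we dialed server1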
"# Copyright 2015-2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for SparseSoftmaxCrossEntropyWithLogits op.\"\"\"\n\nimport numpy as np\n\nfrom tensorflow.python.eager import backprop as backprop_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import nn_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\nclass SparseXentOpTestBase(test.TestCase):\n\n def _opFwdBwd(self, labels, logits):\n \"\"\"Runs the op-under-test both forwards and backwards\"\"\"\n logits = ops_lib.convert_to_tensor(logits) # needed for the gradient tape\n with backprop_lib.GradientTape() as tape:\n tape.watch(logits)\n loss = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits)\n return loss, tape.gradient(loss, logits)\n\n def _npXent(self, labels, logits):\n logits = np.reshape(logits, [-1, logits.shape[-1]])\n labels = np.reshape(labels, [-1])\n batch_dim = 0\n class_dim = 1\n batch_size = logits.shape[batch_dim]\n e = np.exp(logits -\n np.reshape(np.amax(logits, axis=class_dim), [batch_size, 1]))\n probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])\n labels_mat = np.zeros_like(probs).astype(probs.dtype)\n labels_mat[np.arange(batch_size), labels] = 1.0\n gradient = (probs - labels_mat)\n loss = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)\n return loss, gradient\n\n def _testXent(self, np_labels, np_logits):\n np_loss, np_gradient = self._npXent(labels=np_labels, logits=np_logits)\n tf_loss, tf_gradient = self._opFwdBwd(labels=np_labels, logits=np_logits)\n self.assertAllCloseAccordingToType(np_loss, tf_loss)\n self.assertAllCloseAccordingToType(np_gradient, tf_gradient)\n\n def testSingleClass(self):\n for label_dtype in np.int32, np.int64:\n tf_loss, tf_gradient = self._opFwdBwd(\n labels=np.array([0, 0, 0]).astype(label_dtype),\n logits=np.array([[1.], [-1.], [0.]]).astype(np.float32))\n self.assertAllClose([0.0, 0.0, 0.0], tf_loss)\n self.assertAllClose([[0.0], [0.0], [0.0]], tf_gradient)\n\n @test_util.run_gpu_only()\n def _testInvalidLabelGPU(self, invalid_label_gradient=np.nan):\n labels = [4, 3, 0, -1]\n logits = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],\n [1., 2., 3., 4.]]\n loss, gradient = self._opFwdBwd(labels=labels, logits=logits)\n self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],\n loss,\n rtol=1e-3,\n atol=1e-3)\n self.assertAllClose(\n [[invalid_label_gradient] * 4, [0.25, 0.25, 
0.25, -0.75],\n [-0.968, 0.087, 0.237, 0.6439], [invalid_label_gradient] * 4],\n gradient,\n rtol=1e-3,\n atol=1e-3)\n\n def testInvalidLabelGPU(self):\n \"\"\"This method is structured to be easily overridden by a child class.\"\"\"\n self._testInvalidLabelGPU()\n\n @test_util.run_in_graph_and_eager_modes(use_gpu=False)\n @test_util.disable_xla(\"XLA cannot assert inside of a kernel.\")\n def _testInvalidLabelCPU(self, expected_regex=\"Received a label value of\"):\n labels = [4, 3, 0, -1]\n logits = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],\n [1., 2., 3., 4.]]\n with self.assertRaisesRegex(\n (errors_impl.InvalidArgumentError, errors_impl.UnknownError),\n expected_regex):\n self.evaluate(\n nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits))\n\n def testInvalidLabelCPU(self):\n \"\"\"This method is structured to be easily overridden by a child class.\"\"\"\n self._testInvalidLabelCPU()\n\n def testNpXent(self):\n # We create 2 batches of logits for testing.\n # batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.\n # batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.\n labels = [3, 0]\n logits = [[1., 1., 1., 1.], [1., 2., 3., 4.]]\n\n # For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25\n # With a hard target 3, the gradient is [0.25, 0.25, 0.25, -0.75]\n # The loss for this batch is -log(0.25) = 1.386\n #\n # For batch 1, we have:\n # exp(0) = 1\n # exp(1) = 2.718\n # exp(2) = 7.389\n # exp(3) = 20.085\n # SUM = 31.192\n # So we have as probabilities:\n # exp(0) / SUM = 0.032\n # exp(1) / SUM = 0.087\n # exp(2) / SUM = 0.237\n # exp(3) / SUM = 0.644\n # With a hard 1, the gradient is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]\n # The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]\n # = [1.3862, 3.4420]\n np_loss, np_gradient = self._npXent(\n labels=np.array(labels), logits=np.array(logits))\n self.assertAllClose(\n np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),\n np_gradient,\n rtol=1.e-3,\n atol=1.e-3)\n self.assertAllClose(\n np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)\n\n def testShapeMismatch(self):\n with self.assertRaisesRegex(\n ValueError, \"`labels.shape.rank` must equal `logits.shape.rank - 1`\"):\n nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])\n\n def testScalar(self):\n with self.assertRaisesRegex(ValueError, \"`logits` cannot be a scalar\"):\n nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=constant_op.constant(0), logits=constant_op.constant(1.0))\n\n def _testLabelsPlaceholderScalar(self, expected_error_message):\n with ops_lib.Graph().as_default(), self.session():\n labels = array_ops.placeholder(np.int32)\n y = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=[[7.]])\n with self.assertRaisesOpError(expected_error_message):\n y.eval(feed_dict={labels: 0})\n\n def testLabelsPlaceholderScalar(self):\n \"\"\"This method is structured to be easily overridden by a child class.\"\"\"\n self._testLabelsPlaceholderScalar(\n expected_error_message=\"labels must be 1-D\")\n\n def testVector(self):\n loss = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=constant_op.constant(0), logits=constant_op.constant([1.0]))\n self.assertAllClose(0.0, loss)\n\n def testFloat(self):\n for label_dtype in np.int32, np.int64:\n self._testXent(\n np_labels=np.array([3, 0]).astype(label_dtype),\n np_logits=np.array([[1., 1., 1., 1.], 
[1., 2., 3.,\n 4.]]).astype(np.float32))\n\n def testDouble(self):\n for label_dtype in np.int32, np.int64:\n self._testXent(\n np_labels=np.array([0, 3]).astype(label_dtype),\n np_logits=np.array([[1., 1., 1., 1.], [1., 2., 3.,\n 4.]]).astype(np.float64))\n\n def testHalf(self):\n for label_dtype in np.int32, np.int64:\n self._testXent(\n np_labels=np.array([3, 0]).astype(label_dtype),\n np_logits=np.array([[1., 1., 1., 1.], [1., 2., 3.,\n 4.]]).astype(np.float16))\n\n def testEmpty(self):\n self._testXent(\n np_labels=np.zeros((0,), dtype=np.int32), np_logits=np.zeros((0, 3)))\n\n @test_util.run_in_graph_and_eager_modes()\n def testGradient(self):\n with self.session() as sess:\n labels = constant_op.constant([3, 0, 1], name=\"labels\")\n logits = constant_op.constant(\n [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],\n shape=[3, 4],\n dtype=dtypes.float64,\n name=\"logits\")\n\n def xent(logits):\n # gradient_checker_v2.computee_gradient doesn't take int32/int64.\n # labels must be of type int32/int64, so passing them separately here.\n return nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits, name=\"xent\")\n\n analytical, numerical = gradient_checker_v2.compute_gradient(\n xent, [logits])\n\n if not context.executing_eagerly():\n # Check that no extra computation performed. When only first derivative\n # is requested, second derivative must not be computed. So when there is\n # no second derivative, there is no `BatchMatMul` op in the graph.\n op_names = [\n op.op_def.name for op in sess.graph.get_operations() if op.op_def\n ]\n self.assertNotIn(\"BatchMatMul\", op_names)\n self.assertNotIn(\"BatchMatMulV2\", op_names)\n\n tol = 5e-8\n self.assertAllClose(analytical, numerical, atol=tol, rtol=tol)\n\n @test_util.run_in_graph_and_eager_modes()\n def testSecondGradient(self):\n with self.session() as sess:\n labels = constant_op.constant([3, 0, 1], name=\"labels\")\n logits = constant_op.constant(\n [0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],\n shape=[3, 4],\n dtype=dtypes.float64,\n name=\"logits\")\n\n def xent_grad(logits):\n with backprop_lib.GradientTape() as tape:\n tape.watch(logits)\n return tape.gradient(\n nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits, name=\"xent\"), [logits])[0]\n\n analytical, numerical = gradient_checker_v2.compute_gradient(\n xent_grad, [logits])\n\n if (not context.executing_eagerly() and\n not config.is_op_determinism_enabled()):\n # Check that second derivative is calculated.\n # (it is equivalent to being `BatchMatMul` op in the graph because of\n # implementation of xentropy grad)\n op_names = [\n op.op_def.name for op in sess.graph.get_operations() if op.op_def\n ]\n self.assertIn(\"BatchMatMulV2\", op_names)\n\n tol = 5e-8\n self.assertAllClose(analytical, numerical, atol=tol, rtol=tol)\n\n @test_util.run_in_graph_and_eager_modes()\n def _testHighDim(self, labels, logits):\n np_loss, np_gradient = self._npXent(\n labels=np.array(labels), logits=np.array(logits))\n # manually reshape loss\n np_loss = np.reshape(np_loss, np.array(labels).shape)\n tf_loss = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits)\n with backprop_lib.GradientTape() as tape:\n logits = constant_op.constant(logits)\n tape.watch(logits)\n tf_gradient = tape.gradient(\n nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits), [logits])[0]\n tf_gradient = array_ops.reshape(tf_gradient, np_gradient.shape)\n\n 
self.assertAllCloseAccordingToType(np_loss, tf_loss)\n self.assertAllCloseAccordingToType(np_gradient, tf_gradient)\n\n def testHighDim(self):\n labels = [[3], [0]]\n logits = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]\n self._testHighDim(labels, logits)\n\n def testHighDim2(self):\n labels = [[3, 2], [0, 3]]\n logits = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],\n [[1., 2., 3., 4.], [5., 6., 7., 8.]]]\n self._testHighDim(labels, logits)\n\n def _testScalarHandling(self, expected_regex):\n with ops_lib.Graph().as_default(), self.session(use_gpu=False) as sess:\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n expected_regex):\n labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])\n logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])\n ce = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(\n labels=array_ops.squeeze(labels), logits=logits)\n labels_v2 = np.zeros((1, 1), dtype=np.int32)\n logits_v2 = np.random.randn(1, 3)\n sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})\n\n def testScalarHandling(self):\n \"\"\"This method is structured to be easily overridden by a child class.\"\"\"\n self._testScalarHandling(expected_regex=\".*labels must be 1-D.*\")\n",
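A worked numeric version of the reference computation in _npXent, checked against the public op; the inputs and the approximate results mirror the comment in testNpXent.

import numpy as np
import tensorflow as tf

labels = np.array([3, 0])
logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]], dtype=np.float32)

# Stable softmax: subtract the per-row max before exponentiating.
e = np.exp(logits - logits.max(axis=1, keepdims=True))
probs = e / e.sum(axis=1, keepdims=True)
one_hot = np.eye(logits.shape[1])[labels]
np_loss = -np.sum(one_hot * np.log(probs + 1e-20), axis=1)  # approx [1.386, 3.440]
np_grad = probs - one_hot                                   # d(loss) / d(logits)

tf_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                         logits=logits)
np.testing.assert_allclose(np_loss, tf_loss.numpy(), rtol=1e-5)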
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"take-while dataset transformation.\"\"\"\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\[email protected](None, \"Use `tf.data.Dataset.take_while(...)\")\n@tf_export(\"data.experimental.take_while\")\ndef take_while(predicate):\n \"\"\"A transformation that stops dataset iteration based on a `predicate`.\n\n Args:\n predicate: A function that maps a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to a\n scalar `tf.bool` tensor.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n\n def _apply_fn(dataset):\n return dataset.take_while(predicate=predicate)\n\n return _apply_fn\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This module defines tensor utilities not found in TensorFlow.\n\nThe reason these utilities are not defined in TensorFlow is because they may\nnot be not fully robust, although they work in the vast majority of cases. So\nwe define them here in order for their behavior to be consistently verified.\n\"\"\"\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import tensor_array_ops\n\n\ndef is_dense_tensor(t):\n # TODO(mdan): Resolve this inconsistency.\n return (tensor_util.is_tf_type(t) and\n not isinstance(t, sparse_tensor.SparseTensor))\n\n\ndef is_tensor_array(t):\n return isinstance(t, tensor_array_ops.TensorArray)\n\n\ndef is_tensor_list(t):\n # TODO(mdan): This is just a heuristic.\n # With TF lacking support for templated types, this is unfortunately the\n # closest we can get right now. A dedicated op ought to be possible to\n # construct.\n return (tensor_util.is_tf_type(t) and t.dtype == dtypes.variant and\n not t.shape.ndims)\n\n\ndef is_range_tensor(t):\n \"\"\"Returns True if a tensor is the result of a tf.range op. Best effort.\"\"\"\n return tensor_util.is_tf_type(t) and hasattr(t, 'op') and t.op.type == 'Range'\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"InputSpec tests.\"\"\"\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras.engine import keras_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass KerasTensorTest(test.TestCase):\n\n def test_repr_and_string(self):\n kt = keras_tensor.KerasTensor(\n type_spec=tensor_spec.TensorSpec(shape=(1, 2, 3), dtype=dtypes.float32))\n expected_str = (\"KerasTensor(type_spec=TensorSpec(shape=(1, 2, 3), \"\n \"dtype=tf.float32, name=None))\")\n expected_repr = \"<KerasTensor: shape=(1, 2, 3) dtype=float32>\"\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kt = keras_tensor.KerasTensor(\n type_spec=tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.int32),\n inferred_value=[2, 3])\n expected_str = (\"KerasTensor(type_spec=TensorSpec(shape=(2,), \"\n \"dtype=tf.int32, name=None), inferred_value=[2, 3])\")\n expected_repr = (\n \"<KerasTensor: shape=(2,) dtype=int32 inferred_value=[2, 3]>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kt = keras_tensor.KerasTensor(\n type_spec=sparse_tensor.SparseTensorSpec(\n shape=(1, 2, 3), dtype=dtypes.float32))\n expected_str = (\"KerasTensor(type_spec=SparseTensorSpec(\"\n \"TensorShape([1, 2, 3]), tf.float32))\")\n expected_repr = (\n \"<KerasTensor: type_spec=SparseTensorSpec(\"\n \"TensorShape([1, 2, 3]), tf.float32)>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n inp = layers.Input(shape=(3, 5))\n kt = layers.Dense(10)(inp)\n expected_str = (\n \"KerasTensor(type_spec=TensorSpec(shape=(None, 3, 10), \"\n \"dtype=tf.float32, name=None), name='dense/BiasAdd:0', \"\n \"description=\\\"created by layer 'dense'\\\")\")\n expected_repr = (\n \"<KerasTensor: shape=(None, 3, 10) dtype=float32 (created \"\n \"by layer 'dense')>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kt = array_ops.reshape(kt, shape=(3, 5, 2))\n expected_str = (\n \"KerasTensor(type_spec=TensorSpec(shape=(3, 5, 2), dtype=tf.float32, \"\n \"name=None), name='tf.reshape/Reshape:0', description=\\\"created \"\n \"by layer 'tf.reshape'\\\")\")\n expected_repr = (\"<KerasTensor: shape=(3, 5, 2) dtype=float32 (created \"\n \"by layer 'tf.reshape')>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kts = array_ops.unstack(kt)\n for i in range(3):\n expected_str = (\n \"KerasTensor(type_spec=TensorSpec(shape=(5, 2), dtype=tf.float32, \"\n \"name=None), 
name='tf.unstack/unstack:%s', description=\\\"created \"\n \"by layer 'tf.unstack'\\\")\" % (i,))\n expected_repr = (\"<KerasTensor: shape=(5, 2) dtype=float32 \"\n \"(created by layer 'tf.unstack')>\")\n self.assertEqual(expected_str, str(kts[i]))\n self.assertEqual(expected_repr, repr(kts[i]))\n\nif __name__ == \"__main__\":\n ops.enable_eager_execution()\n tensor_shape.enable_v2_tensorshape()\n test.main()\n",
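For context, a minimal sketch of where such KerasTensors come from, using the public tf.keras functional API; as the expectations above suggest, the exact str/repr text depends on the Keras version in use.

import tensorflow as tf

inp = tf.keras.Input(shape=(3, 5))     # a symbolic KerasTensor, not an eager value
out = tf.keras.layers.Dense(10)(inp)   # calling a layer on it yields another KerasTensor

print(type(inp).__name__)              # KerasTensor
print(inp.shape, out.shape)            # (None, 3, 5) (None, 3, 10)
print(repr(out))                       # wording is version-dependent, as tested above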
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Strategy and optimizer combinations for combinations.combine().\"\"\"\n\nfrom tensorflow.python.distribute import strategy_combinations as strategy_combinations_base\nfrom tensorflow.python.framework import test_combinations as combinations\nfrom tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import adamax as adamax_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import ftrl as ftrl_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import nadam as nadam_keras_v2\nfrom tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2\nfrom tensorflow.python.training import adagrad\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import ftrl\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import rmsprop\n\n\ngradient_descent_optimizer_v1_fn = combinations.NamedObject(\n \"GradientDescentV1\",\n lambda: gradient_descent.GradientDescentOptimizer(0.001))\nadagrad_optimizer_v1_fn = combinations.NamedObject(\n \"AdagradV1\", lambda: adagrad.AdagradOptimizer(0.001))\nadam_optimizer_v1_fn = combinations.NamedObject(\n \"AdamV1\", lambda: adam.AdamOptimizer(0.001, epsilon=1))\nftrl_optimizer_v1_fn = combinations.NamedObject(\n \"FtrlV1\", lambda: ftrl.FtrlOptimizer(0.001))\nrmsprop_optimizer_v1_fn = combinations.NamedObject(\n \"RmsPropV1\", lambda: rmsprop.RMSPropOptimizer(0.001))\n\n# TODO(shiningsun): consider adding the other v1 optimizers\noptimizers_v1 = [\n gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn,\n ftrl_optimizer_v1_fn, rmsprop_optimizer_v1_fn\n]\n\nadadelta_optimizer_keras_v2_fn = combinations.NamedObject(\n \"AdadeltaKerasV2\", lambda: adadelta_keras_v2.Adadelta(0.001))\nadagrad_optimizer_keras_v2_fn = combinations.NamedObject(\n \"AdagradKerasV2\", lambda: adagrad_keras_v2.Adagrad(0.001))\nadam_optimizer_keras_v2_fn = combinations.NamedObject(\n \"AdamKerasV2\", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))\nadamax_optimizer_keras_v2_fn = combinations.NamedObject(\n \"AdamaxKerasV2\", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0))\nnadam_optimizer_keras_v2_fn = combinations.NamedObject(\n \"NadamKerasV2\", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0))\nftrl_optimizer_keras_v2_fn = combinations.NamedObject(\n \"FtrlKerasV2\", lambda: ftrl_keras_v2.Ftrl(0.001))\ngradient_descent_optimizer_keras_v2_fn = combinations.NamedObject(\n \"GradientDescentKerasV2\", lambda: gradient_descent_keras_v2.SGD(0.001))\nrmsprop_optimizer_keras_v2_fn = combinations.NamedObject(\n 
\"RmsPropKerasV2\", lambda: rmsprop_keras_v2.RMSprop(0.001))\n\n# TODO(shiningsun): consider adding the other v2 optimizers\noptimizers_v2 = [\n gradient_descent_optimizer_keras_v2_fn, adagrad_optimizer_keras_v2_fn\n]\n\noptimizers_v1_and_v2 = optimizers_v1 + optimizers_v2\n\n\ndef distributions_and_v1_optimizers():\n \"\"\"A common set of combination with DistributionStrategies and Optimizers.\"\"\"\n return combinations.combine(\n distribution=[\n strategy_combinations_base.one_device_strategy,\n strategy_combinations_base.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations_base.mirrored_strategy_with_two_gpus,\n strategy_combinations_base\n .mirrored_strategy_with_two_gpus_no_merge_call,\n ],\n optimizer_fn=optimizers_v1)\n\n\ndef distributions_and_v2_optimizers():\n \"\"\"A common set of combination with DistributionStrategies and Optimizers.\"\"\"\n return combinations.combine(\n distribution=[\n strategy_combinations_base.one_device_strategy,\n strategy_combinations_base.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations_base.mirrored_strategy_with_two_gpus,\n strategy_combinations_base\n .mirrored_strategy_with_two_gpus_no_merge_call,\n ],\n optimizer_fn=optimizers_v2)\n\n\ndef distributions_and_v1_and_v2_optimizers():\n \"\"\"A common set of combination with DistributionStrategies and Optimizers.\"\"\"\n return combinations.combine(\n distribution=[\n strategy_combinations_base.one_device_strategy,\n strategy_combinations_base.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations_base.mirrored_strategy_with_two_gpus,\n strategy_combinations_base\n .mirrored_strategy_with_two_gpus_no_merge_call,\n ],\n optimizer_fn=optimizers_v1_and_v2)\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for profiler_client.\"\"\"\n\nfrom tensorflow.python.eager import profiler_client\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\n\n\nclass ProfilerClientTest(test_util.TensorFlowTestCase):\n\n def testStartTracing_ProcessInvalidAddress(self):\n with self.assertRaises(errors.UnavailableError):\n profiler_client.start_tracing('localhost:6006', '/tmp/', 2000)\n\n def testMonitor_ProcessInvalidAddress(self):\n with self.assertRaises(errors.UnavailableError):\n profiler_client.monitor('localhost:6006', 2000)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model script to test TF-TensorRT integration.\"\"\"\n\nimport os\nimport numpy as np\n\nfrom tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.platform import test\n\n\nclass BinaryTensorWeightBroadcastTest(trt_test.TfTrtIntegrationTestBase):\n \"\"\"Tests for scale & elementwise layers in TF-TRT.\"\"\"\n\n def _ConstOp(self, shape):\n return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)\n\n def GraphFn(self, x):\n for weights_shape in [\n (1,), # scale\n (24, 1, 1), # scale\n (24, 24, 20), # scale\n (20,), # elementwise\n (1, 24, 1, 1), # elementwise\n (1, 24, 24, 1), # elementwise\n (1, 24, 24, 20), # elementwise\n (24, 20), # elementwise\n ]:\n a = self._ConstOp(weights_shape)\n f = x + a\n x = self.trt_incompatible_op(f)\n a = self._ConstOp(weights_shape)\n f = a + x\n x = self.trt_incompatible_op(f)\n return gen_array_ops.reshape(x, [5, -1], name=\"output_0\")\n\n def GetParams(self):\n # TODO(aaroey): test graph with different dtypes.\n return self.BuildParams(self.GraphFn, dtypes.float32, [[10, 24, 24, 20]],\n [[5, 23040]])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n # The final reshape op is converted only in dynamic shape mode. This op is\n # placed into a new engine due to the preceding trt_incompatible_ops.\n num_engines = 17 if run_params.dynamic_shape else 16\n return [\"TRTEngineOp_%d\" % i for i in range(num_engines)]\n\n # TODO(b/176540862): remove this routine to disallow native segment execution\n # for TensorRT 7+.\n def setUp(self):\n super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call\n os.environ[\"TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION\"] = \"True\"\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for RaggedTensor dispatch of tf.images.resize.\"\"\"\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_concat_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedResizeImageOpTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def make_image_batch(self, sizes, channels):\n if not sizes:\n return ragged_tensor.RaggedTensor.from_tensor(\n array_ops.zeros([0, 5, 5, channels]), ragged_rank=2)\n images = [\n array_ops.reshape(\n math_ops.range(w * h * channels * 1.0), [w, h, channels])\n for (w, h) in sizes\n ]\n return ragged_concat_ops.stack(images)\n\n @parameterized.parameters([\n dict(src_sizes=[], dst_size=(4, 4), v1=True),\n dict(src_sizes=[], dst_size=(4, 4), v1=False),\n dict(src_sizes=[(2, 2)], dst_size=(4, 4), v1=True),\n dict(src_sizes=[(2, 2)], dst_size=(4, 4), v1=False),\n dict(src_sizes=[(2, 8), (3, 5), (10, 10)], dst_size=(5, 5), v1=True),\n dict(src_sizes=[(2, 8), (3, 5), (10, 10)], dst_size=(5, 5), v1=False),\n ])\n def testResize(self, src_sizes, dst_size, v1=False):\n resize = image_ops.resize_images if v1 else image_ops.resize_images_v2\n\n # Construct the input images.\n channels = 3\n images = self.make_image_batch(src_sizes, channels)\n expected_shape = [len(src_sizes)] + list(dst_size) + [channels]\n\n # Resize the ragged batch of images.\n resized_images = resize(images, dst_size)\n self.assertIsInstance(resized_images, ops.Tensor)\n self.assertEqual(resized_images.shape.as_list(), expected_shape)\n\n # Check that results for each image matches what we'd get with the\n # non-batch version of tf.images.resize.\n for i in range(len(src_sizes)):\n actual = resized_images[i]\n expected = resize(images[i].to_tensor(), dst_size)\n self.assertAllClose(actual, expected)\n\n @parameterized.parameters([\n dict(src_shape=[None, None, None, None], src_sizes=[], dst_size=(4, 4)),\n dict(src_shape=[None, None, None, 3], src_sizes=[], dst_size=(4, 4)),\n dict(src_shape=[0, None, None, None], src_sizes=[], dst_size=(4, 4)),\n dict(src_shape=[0, None, None, 3], src_sizes=[], dst_size=(4, 4)),\n dict(\n src_shape=[None, None, None, None],\n src_sizes=[(2, 2)],\n dst_size=(4, 4)),\n dict(\n src_shape=[None, None, None, None],\n src_sizes=[(2, 8), (3, 5), (10, 10)],\n dst_size=(5, 5)),\n dict(\n src_shape=[None, None, None, 1],\n src_sizes=[(2, 8), (3, 5), (10, 10)],\n dst_size=(5, 
5)),\n dict(\n src_shape=[3, None, None, 1],\n src_sizes=[(2, 8), (3, 5), (10, 10)],\n dst_size=(5, 5)),\n ])\n def testResizeWithPartialStaticShape(self, src_shape, src_sizes, dst_size):\n channels = src_shape[-1] or 3\n images = self.make_image_batch(src_sizes, channels)\n rt_spec = ragged_tensor.RaggedTensorSpec(src_shape,\n ragged_rank=images.ragged_rank)\n expected_shape = [len(src_sizes)] + list(dst_size) + [channels]\n\n # Use @tf.function to erase static shape information.\n @def_function.function(input_signature=[rt_spec])\n def do_resize(images):\n return image_ops.resize_images_v2(images, dst_size)\n\n resized_images = do_resize(images)\n self.assertIsInstance(resized_images, ops.Tensor)\n self.assertTrue(resized_images.shape.is_compatible_with(expected_shape))\n\n # Check that results for each image matches what we'd get with the\n # non-batch version of tf.images.resize.\n for i in range(len(src_sizes)):\n actual = resized_images[i]\n expected = image_ops.resize_images_v2(images[i].to_tensor(), dst_size)\n self.assertAllClose(actual, expected)\n\n def testSizeIsTensor(self):\n @def_function.function\n def do_resize(images, new_size):\n return image_ops.resize_images_v2(images, new_size)\n\n src_images = self.make_image_batch([[5, 8], [3, 2], [10, 4]], 3)\n resized_images = do_resize(src_images, constant_op.constant([2, 2]))\n self.assertIsInstance(resized_images, ops.Tensor)\n self.assertTrue(resized_images.shape.is_compatible_with([3, 2, 2, 3]))\n\n def testBadRank(self):\n rt = ragged_tensor.RaggedTensor.from_tensor(array_ops.zeros([5, 5, 3]))\n with self.assertRaisesRegex(ValueError, 'rank must be 4'):\n image_ops.resize_images_v2(rt, [10, 10])\n\n\nif __name__ == '__main__':\n googletest.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Adadelta Optimizer.\"\"\"\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import adadelta\n\n\nclass AdadeltaOptimizerTest(xla_test.XLATestCase):\n\n def testBasic(self):\n num_updates = 4 # number of ADADELTA steps to perform\n if \"CPU\" in self.device:\n # To avoid timeout on CPU.\n all_grad = [0.2, 0.01]\n all_lr = [1.0, 0.1]\n else:\n all_grad = [0.2, 0.1, 0.01]\n all_lr = [1.0, 0.5, 0.1]\n\n for dtype in self.float_types | self.complex_types:\n with self.session(), self.test_scope():\n for grad in all_grad:\n for lr in all_lr:\n var0_init = [1.0, 2.0]\n var1_init = [3.0, 4.0]\n var0 = resource_variable_ops.ResourceVariable(\n var0_init, dtype=dtype)\n var1 = resource_variable_ops.ResourceVariable(\n var1_init, dtype=dtype)\n\n grads = constant_op.constant([grad, grad], dtype=dtype)\n\n accum = 0.0\n accum_update = 0.0\n\n # ADADELTA gradient optimizer\n rho = 0.95\n epsilon = 1e-8\n adadelta_opt = adadelta.AdadeltaOptimizer(\n learning_rate=lr, rho=rho, epsilon=epsilon)\n adadelta_update = adadelta_opt.apply_gradients(\n zip([grads, grads], [var0, var1]))\n self.evaluate(variables.global_variables_initializer())\n opt_vars = adadelta_opt.variables()\n self.assertStartsWith(opt_vars[0].name, var0._shared_name)\n self.assertStartsWith(opt_vars[1].name, var0._shared_name)\n self.assertStartsWith(opt_vars[2].name, var1._shared_name)\n self.assertStartsWith(opt_vars[3].name, var1._shared_name)\n self.assertEqual(4, len(opt_vars))\n # Assign slots\n slot = [None] * 2\n slot_update = [None] * 2\n self.assertEqual([\"accum\", \"accum_update\"],\n adadelta_opt.get_slot_names())\n slot[0] = adadelta_opt.get_slot(var0, \"accum\")\n self.assertEqual(slot[0].get_shape(), var0.get_shape())\n self.assertNotIn(slot[0], variables.trainable_variables())\n\n slot_update[0] = adadelta_opt.get_slot(var0, \"accum_update\")\n self.assertEqual(slot_update[0].get_shape(), var0.get_shape())\n self.assertNotIn(slot_update[0], variables.trainable_variables())\n\n slot[1] = adadelta_opt.get_slot(var1, \"accum\")\n self.assertEqual(slot[1].get_shape(), var1.get_shape())\n self.assertNotIn(slot[1], variables.trainable_variables())\n\n slot_update[1] = adadelta_opt.get_slot(var1, \"accum_update\")\n self.assertEqual(slot_update[1].get_shape(), var1.get_shape())\n self.assertNotIn(slot_update[1], variables.trainable_variables())\n\n # Fetch params to validate initial values\n self.assertAllClose(var0_init, self.evaluate(var0))\n self.assertAllClose(var1_init, self.evaluate(var1))\n\n update = [None] * num_updates\n tot_update = 0\n for step in 
range(num_updates):\n # Run adadelta update for comparison\n self.evaluate(adadelta_update)\n\n # Perform initial update without previous accum values\n accum = accum * rho + (grad**2) * (1 - rho)\n update[step] = (\n np.sqrt(accum_update + epsilon) *\n (1. / np.sqrt(accum + epsilon)) * grad)\n accum_update = (\n accum_update * rho + (update[step]**2) * (1.0 - rho))\n tot_update += update[step] * lr\n\n # Check that the accumulators have been updated\n for slot_idx in range(2):\n self.assertAllCloseAccordingToType(\n np.array([accum, accum], dtype=dtype),\n self.evaluate(slot[slot_idx]),\n rtol=1e-5)\n\n self.assertAllCloseAccordingToType(\n np.array([accum_update, accum_update], dtype=dtype),\n self.evaluate(slot_update[slot_idx]),\n rtol=1e-5)\n\n # Check that the parameters have been updated\n self.assertAllCloseAccordingToType(\n np.array(\n [var0_init[0] - tot_update, var0_init[1] - tot_update],\n dtype=dtype),\n self.evaluate(var0),\n rtol=1e-5)\n\n self.assertAllCloseAccordingToType(\n np.array(\n [var1_init[0] - tot_update, var1_init[1] - tot_update],\n dtype=dtype),\n self.evaluate(var1),\n rtol=1e-5)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the debug events writer Python class.\"\"\"\n\nimport glob\nimport json as json_lib\nimport os\nimport re\nimport threading\nimport time\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.core.protobuf import debug_event_pb2\nfrom tensorflow.python.debug.lib import debug_events_reader\nfrom tensorflow.python.debug.lib import debug_events_writer\nfrom tensorflow.python.debug.lib import dumping_callback_test_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.platform import googletest\n\n\nclass DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase,\n parameterized.TestCase):\n\n def testMultiThreadedConstructorCallWorks(self):\n def init_writer():\n debug_events_writer.DebugEventsWriter(self.dump_root, self.tfdbg_run_id)\n\n num_threads = 4\n threads = []\n for _ in range(num_threads):\n thread = threading.Thread(target=init_writer)\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n\n # Verify that there is only one debug event file of each type.\n metadata_paths = glob.glob(os.path.join(self.dump_root, \"*.metadata\"))\n self.assertLen(metadata_paths, 1)\n source_files_paths = glob.glob(\n os.path.join(self.dump_root, \"*.source_files\"))\n self.assertLen(source_files_paths, 1)\n stack_frames_paths = glob.glob(\n os.path.join(self.dump_root, \"*.stack_frames\"))\n self.assertLen(stack_frames_paths, 1)\n graphs_paths = glob.glob(os.path.join(self.dump_root, \"*.graphs\"))\n self.assertLen(graphs_paths, 1)\n self._readAndCheckMetadataFile()\n\n def testWriteSourceFilesAndStackFrames(self):\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n num_protos = 10\n for i in range(num_protos):\n source_file = debug_event_pb2.SourceFile()\n source_file.file_path = \"/home/tf2user/main.py\"\n source_file.host_name = \"machine.cluster\"\n source_file.lines.append(\"print(%d)\" % i)\n writer.WriteSourceFile(source_file)\n\n stack_frame = debug_event_pb2.StackFrameWithId()\n stack_frame.id = \"stack_%d\" % i\n stack_frame.file_line_col.file_index = i * 10\n writer.WriteStackFrameWithId(stack_frame)\n\n writer.FlushNonExecutionFiles()\n\n with debug_events_reader.DebugEventsReader(self.dump_root) as reader:\n actuals = list(item.debug_event.source_file\n for item in reader.source_files_iterator())\n self.assertLen(actuals, num_protos)\n for i in range(num_protos):\n self.assertEqual(actuals[i].file_path, \"/home/tf2user/main.py\")\n self.assertEqual(actuals[i].host_name, \"machine.cluster\")\n self.assertEqual(actuals[i].lines, [\"print(%d)\" % i])\n\n actuals = list(item.debug_event.stack_frame_with_id\n for item in reader.stack_frames_iterator())\n self.assertLen(actuals, 
num_protos)\n for i in range(num_protos):\n self.assertEqual(actuals[i].id, \"stack_%d\" % i)\n self.assertEqual(actuals[i].file_line_col.file_index, i * 10)\n\n def testWriteGraphOpCreationAndDebuggedGraphs(self):\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n num_op_creations = 10\n for i in range(num_op_creations):\n graph_op_creation = debug_event_pb2.GraphOpCreation()\n graph_op_creation.op_type = \"Conv2D\"\n graph_op_creation.op_name = \"Conv2D_%d\" % i\n writer.WriteGraphOpCreation(graph_op_creation)\n debugged_graph = debug_event_pb2.DebuggedGraph()\n debugged_graph.graph_id = \"deadbeaf\"\n debugged_graph.graph_name = \"MyGraph1\"\n writer.WriteDebuggedGraph(debugged_graph)\n writer.FlushNonExecutionFiles()\n\n reader = debug_events_reader.DebugEventsReader(self.dump_root)\n actuals = list(item.debug_event for item in reader.graphs_iterator())\n self.assertLen(actuals, num_op_creations + 1)\n for i in range(num_op_creations):\n self.assertEqual(actuals[i].graph_op_creation.op_type, \"Conv2D\")\n self.assertEqual(actuals[i].graph_op_creation.op_name, \"Conv2D_%d\" % i)\n self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,\n \"deadbeaf\")\n\n def testConcurrentWritesToNonExecutionFilesWorks(self):\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n\n source_file_state = {\"counter\": 0, \"lock\": threading.Lock()}\n\n def writer_source_file():\n source_file = debug_event_pb2.SourceFile()\n with source_file_state[\"lock\"]:\n source_file.file_path = \"/home/tf2user/file_%d.py\" % source_file_state[\n \"counter\"]\n source_file_state[\"counter\"] += 1\n writer.WriteSourceFile(source_file)\n # More-frequent-than-necessary concurrent flushing is not recommended,\n # but tolerated.\n writer.FlushNonExecutionFiles()\n\n stack_frame_state = {\"counter\": 0, \"lock\": threading.Lock()}\n\n def write_stack_frame():\n stack_frame = debug_event_pb2.StackFrameWithId()\n with stack_frame_state[\"lock\"]:\n stack_frame.id = \"stack_frame_%d\" % stack_frame_state[\"counter\"]\n stack_frame_state[\"counter\"] += 1\n writer.WriteStackFrameWithId(stack_frame)\n # More-frequent-than-necessary concurrent flushing is not recommended,\n # but tolerated.\n writer.FlushNonExecutionFiles()\n\n graph_op_state = {\"counter\": 0, \"lock\": threading.Lock()}\n\n def write_graph_op_creation():\n graph_op_creation = debug_event_pb2.GraphOpCreation()\n with graph_op_state[\"lock\"]:\n graph_op_creation.op_name = \"Op%d\" % graph_op_state[\"counter\"]\n graph_op_state[\"counter\"] += 1\n writer.WriteGraphOpCreation(graph_op_creation)\n # More-frequent-than-necessary concurrent flushing is not recommended,\n # but tolerated.\n writer.FlushNonExecutionFiles()\n\n num_threads = 9\n threads = []\n for i in range(num_threads):\n if i % 3 == 0:\n target = writer_source_file\n elif i % 3 == 1:\n target = write_stack_frame\n else:\n target = write_graph_op_creation\n thread = threading.Thread(target=target)\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n\n # Verify the content of the .source_files file.\n with debug_events_reader.DebugEventsReader(self.dump_root) as reader:\n source_files_iter = reader.source_files_iterator()\n actuals = list(item.debug_event.source_file for item in source_files_iter)\n file_paths = sorted([actual.file_path for actual in actuals])\n self.assertEqual(file_paths, [\n \"/home/tf2user/file_0.py\", \"/home/tf2user/file_1.py\",\n \"/home/tf2user/file_2.py\"\n ])\n\n 
# Verify the content of the .stack_frames file.\n actuals = list(item.debug_event.stack_frame_with_id\n for item in reader.stack_frames_iterator())\n stack_frame_ids = sorted([actual.id for actual in actuals])\n self.assertEqual(stack_frame_ids,\n [\"stack_frame_0\", \"stack_frame_1\", \"stack_frame_2\"])\n\n # Verify the content of the .graphs file.\n actuals = list(item.debug_event.graph_op_creation\n for item in reader.graphs_iterator())\n graph_op_names = sorted([actual.op_name for actual in actuals])\n self.assertEqual(graph_op_names, [\"Op0\", \"Op1\", \"Op2\"])\n\n def testWriteAndReadMetadata(self):\n t0 = time.time()\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n writer.Close()\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n self.assertIsInstance(reader.starting_wall_time(), float)\n self.assertGreaterEqual(reader.starting_wall_time(), t0)\n self.assertEqual(reader.tensorflow_version(), versions.__version__)\n self.assertTrue(reader.tfdbg_run_id())\n\n def testWriteExecutionEventsWithCircularBuffer(self):\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2\n for i in range(num_execution_events):\n execution = debug_event_pb2.Execution()\n execution.op_type = \"OpType%d\" % i\n writer.WriteExecution(execution)\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n # Before FlushExecutionFiles() is called. No data should have been written\n # to the file.\n reader.update()\n self.assertFalse(reader.executions())\n\n writer.FlushExecutionFiles()\n reader.update()\n executions = reader.executions()\n for i, execution in enumerate(executions):\n self.assertEqual(\n execution.op_type,\n \"OpType%d\" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))\n\n def testWriteExecutionEventsWithoutCircularBufferBehavior(self):\n # A circular buffer size of 0 abolishes the circular buffer behavior.\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id, 0)\n num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2\n for i in range(num_execution_events):\n execution = debug_event_pb2.Execution()\n execution.op_type = \"OpType%d\" % i\n writer.WriteExecution(execution)\n writer.FlushExecutionFiles()\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n reader.update()\n executions = reader.executions()\n self.assertLen(executions, num_execution_events)\n for i, execution in enumerate(executions):\n self.assertEqual(execution.op_type, \"OpType%d\" % i)\n\n def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2\n for i in range(num_execution_events):\n trace = debug_event_pb2.GraphExecutionTrace()\n trace.op_name = \"Op%d\" % i\n writer.WriteGraphExecutionTrace(trace)\n\n with debug_events_reader.DebugEventsReader(self.dump_root) as reader:\n actuals = list(reader.graph_execution_traces_iterators()[0])\n # Before FlushExecutionFiles() is called. 
No data should have been written\n # to the file.\n self.assertEmpty(actuals)\n\n writer.FlushExecutionFiles()\n actuals = list(item.debug_event.graph_execution_trace\n for item in reader.graph_execution_traces_iterators()[0])\n self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)\n for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):\n self.assertEqual(\n actuals[i].op_name,\n \"Op%d\" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))\n\n def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):\n # A circular buffer size of 0 abolishes the circular buffer behavior.\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id, 0)\n num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2\n for i in range(num_execution_events):\n trace = debug_event_pb2.GraphExecutionTrace()\n trace.op_name = \"Op%d\" % i\n writer.WriteGraphExecutionTrace(trace)\n writer.FlushExecutionFiles()\n\n with debug_events_reader.DebugEventsReader(self.dump_root) as reader:\n actuals = list(item.debug_event.graph_execution_trace\n for item in reader.graph_execution_traces_iterators()[0])\n self.assertLen(actuals, num_execution_events)\n for i in range(num_execution_events):\n self.assertEqual(actuals[i].op_name, \"Op%d\" % i)\n\n def testConcurrentWritesToExecutionFiles(self):\n circular_buffer_size = 5\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id,\n circular_buffer_size)\n debugged_graph = debug_event_pb2.DebuggedGraph(graph_id=\"graph1\",\n graph_name=\"graph1\")\n writer.WriteDebuggedGraph(debugged_graph)\n\n execution_state = {\"counter\": 0, \"lock\": threading.Lock()}\n\n def write_execution():\n execution = debug_event_pb2.Execution()\n with execution_state[\"lock\"]:\n execution.op_type = \"OpType%d\" % execution_state[\"counter\"]\n execution_state[\"counter\"] += 1\n writer.WriteExecution(execution)\n\n graph_execution_trace_state = {\"counter\": 0, \"lock\": threading.Lock()}\n\n def write_graph_execution_trace():\n with graph_execution_trace_state[\"lock\"]:\n op_name = \"Op%d\" % graph_execution_trace_state[\"counter\"]\n graph_op_creation = debug_event_pb2.GraphOpCreation(\n op_type=\"FooOp\", op_name=op_name, graph_id=\"graph1\")\n trace = debug_event_pb2.GraphExecutionTrace(\n op_name=op_name, tfdbg_context_id=\"graph1\")\n graph_execution_trace_state[\"counter\"] += 1\n writer.WriteGraphOpCreation(graph_op_creation)\n writer.WriteGraphExecutionTrace(trace)\n\n threads = []\n for i in range(circular_buffer_size * 4):\n if i % 2 == 0:\n target = write_execution\n else:\n target = write_graph_execution_trace\n thread = threading.Thread(target=target)\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n reader.update()\n # Verify the content of the .execution file.\n executions = reader.executions()\n executed_op_types = [execution.op_type for execution in executions]\n self.assertLen(executed_op_types, circular_buffer_size)\n self.assertLen(executed_op_types, len(set(executed_op_types)))\n\n # Verify the content of the .graph_execution_traces file.\n op_names = [trace.op_name for trace in reader.graph_execution_traces()]\n self.assertLen(op_names, circular_buffer_size)\n self.assertLen(op_names, len(set(op_names)))\n\n def testConcurrentSourceFileRandomReads(self):\n writer = 
debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id)\n\n for i in range(100):\n source_file = debug_event_pb2.SourceFile(\n host_name=\"localhost\", file_path=\"/tmp/file_%d.py\" % i)\n source_file.lines.append(\"# File %d\" % i)\n writer.WriteSourceFile(source_file)\n writer.FlushNonExecutionFiles()\n\n reader = debug_events_reader.DebugDataReader(self.dump_root)\n reader.update()\n lines = [None] * 100\n def read_job_1():\n # Read in the reverse order to enhance randomness of the read access.\n for i in range(49, -1, -1):\n lines[i] = reader.source_lines(\"localhost\", \"/tmp/file_%d.py\" % i)\n def read_job_2():\n for i in range(99, 49, -1):\n lines[i] = reader.source_lines(\"localhost\", \"/tmp/file_%d.py\" % i)\n thread_1 = threading.Thread(target=read_job_1)\n thread_2 = threading.Thread(target=read_job_2)\n thread_1.start()\n thread_2.start()\n thread_1.join()\n thread_2.join()\n for i in range(100):\n self.assertEqual(lines[i], [\"# File %d\" % i])\n\n def testConcurrentExecutionUpdateAndRandomRead(self):\n circular_buffer_size = -1\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id,\n circular_buffer_size)\n\n writer_state = {\"counter\": 0, \"done\": False}\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n def write_and_update_job():\n while True:\n if writer_state[\"done\"]:\n break\n execution = debug_event_pb2.Execution()\n execution.op_type = \"OpType%d\" % writer_state[\"counter\"]\n writer_state[\"counter\"] += 1\n writer.WriteExecution(execution)\n writer.FlushExecutionFiles()\n reader.update()\n # On the sub-thread, keep writing and reading new Execution protos.\n write_and_update_thread = threading.Thread(target=write_and_update_job)\n write_and_update_thread.start()\n # On the main thread, do concurrent random read.\n while True:\n exec_digests = reader.executions(digest=True)\n if exec_digests:\n exec_0 = reader.read_execution(exec_digests[0])\n self.assertEqual(exec_0.op_type, \"OpType0\")\n writer_state[\"done\"] = True\n break\n else:\n time.sleep(0.1)\n continue\n write_and_update_thread.join()\n\n def testConcurrentExecutionRandomReads(self):\n circular_buffer_size = -1\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id,\n circular_buffer_size)\n\n for i in range(100):\n execution = debug_event_pb2.Execution()\n execution.op_type = \"OpType%d\" % i\n writer.WriteExecution(execution)\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n\n reader = debug_events_reader.DebugDataReader(self.dump_root)\n reader.update()\n executions = [None] * 100\n def read_job_1():\n execution_digests = reader.executions(digest=True)\n # Read in the reverse order to enhance randomness of the read access.\n for i in range(49, -1, -1):\n execution = reader.read_execution(execution_digests[i])\n executions[i] = execution\n def read_job_2():\n execution_digests = reader.executions(digest=True)\n for i in range(99, 49, -1):\n execution = reader.read_execution(execution_digests[i])\n executions[i] = execution\n thread_1 = threading.Thread(target=read_job_1)\n thread_2 = threading.Thread(target=read_job_2)\n thread_1.start()\n thread_2.start()\n thread_1.join()\n thread_2.join()\n for i in range(100):\n self.assertEqual(executions[i].op_type, \"OpType%d\" % i)\n\n def testConcurrentGraphExecutionTraceUpdateAndRandomRead(self):\n circular_buffer_size = -1\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id,\n circular_buffer_size)\n 
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id=\"graph1\",\n graph_name=\"graph1\")\n writer.WriteDebuggedGraph(debugged_graph)\n\n writer_state = {\"counter\": 0, \"done\": False}\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n def write_and_update_job():\n while True:\n if writer_state[\"done\"]:\n break\n op_name = \"Op%d\" % writer_state[\"counter\"]\n graph_op_creation = debug_event_pb2.GraphOpCreation(\n op_type=\"FooOp\", op_name=op_name, graph_id=\"graph1\")\n writer.WriteGraphOpCreation(graph_op_creation)\n trace = debug_event_pb2.GraphExecutionTrace(\n op_name=op_name, tfdbg_context_id=\"graph1\")\n writer.WriteGraphExecutionTrace(trace)\n writer_state[\"counter\"] += 1\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n reader.update()\n # On the sub-thread, keep writing and reading new GraphExecutionTraces.\n write_and_update_thread = threading.Thread(target=write_and_update_job)\n write_and_update_thread.start()\n # On the main thread, do concurrent random read.\n while True:\n digests = reader.graph_execution_traces(digest=True)\n if digests:\n trace_0 = reader.read_graph_execution_trace(digests[0])\n self.assertEqual(trace_0.op_name, \"Op0\")\n writer_state[\"done\"] = True\n break\n else:\n time.sleep(0.1)\n continue\n write_and_update_thread.join()\n\n def testConcurrentGraphExecutionTraceRandomReads(self):\n circular_buffer_size = -1\n writer = debug_events_writer.DebugEventsWriter(self.dump_root,\n self.tfdbg_run_id,\n circular_buffer_size)\n debugged_graph = debug_event_pb2.DebuggedGraph(graph_id=\"graph1\",\n graph_name=\"graph1\")\n writer.WriteDebuggedGraph(debugged_graph)\n\n for i in range(100):\n op_name = \"Op%d\" % i\n graph_op_creation = debug_event_pb2.GraphOpCreation(\n op_type=\"FooOp\", op_name=op_name, graph_id=\"graph1\")\n writer.WriteGraphOpCreation(graph_op_creation)\n trace = debug_event_pb2.GraphExecutionTrace(\n op_name=op_name, tfdbg_context_id=\"graph1\")\n writer.WriteGraphExecutionTrace(trace)\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n\n reader = debug_events_reader.DebugDataReader(self.dump_root)\n reader.update()\n traces = [None] * 100\n def read_job_1():\n digests = reader.graph_execution_traces(digest=True)\n for i in range(49, -1, -1):\n traces[i] = reader.read_graph_execution_trace(digests[i])\n def read_job_2():\n digests = reader.graph_execution_traces(digest=True)\n for i in range(99, 49, -1):\n traces[i] = reader.read_graph_execution_trace(digests[i])\n thread_1 = threading.Thread(target=read_job_1)\n thread_2 = threading.Thread(target=read_job_2)\n thread_1.start()\n thread_2.start()\n thread_1.join()\n thread_2.join()\n for i in range(100):\n self.assertEqual(traces[i].op_name, \"Op%d\" % i)\n\n @parameterized.named_parameters(\n (\"Begin1End3\", 1, 3, 1, 3),\n (\"Begin0End3\", 0, 3, 0, 3),\n (\"Begin0EndNeg1\", 0, -1, 0, 4),\n (\"BeginNoneEnd3\", None, 3, 0, 3),\n (\"Begin2EndNone\", 2, None, 2, 5),\n (\"BeginNoneEndNone\", None, None, 0, 5),\n )\n def testRangeReadingExecutions(self, begin, end, expected_begin,\n expected_end):\n writer = debug_events_writer.DebugEventsWriter(\n self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)\n for i in range(5):\n execution = debug_event_pb2.Execution(op_type=\"OpType%d\" % i)\n writer.WriteExecution(execution)\n writer.FlushExecutionFiles()\n writer.Close()\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n reader.update()\n executions = reader.executions(begin=begin, end=end)\n 
self.assertLen(executions, expected_end - expected_begin)\n self.assertEqual(executions[0].op_type, \"OpType%d\" % expected_begin)\n self.assertEqual(executions[-1].op_type, \"OpType%d\" % (expected_end - 1))\n\n @parameterized.named_parameters(\n (\"Begin1End3\", 1, 3, 1, 3),\n (\"Begin0End3\", 0, 3, 0, 3),\n (\"Begin0EndNeg1\", 0, -1, 0, 4),\n (\"BeginNoneEnd3\", None, 3, 0, 3),\n (\"Begin2EndNone\", 2, None, 2, 5),\n (\"BeginNoneEndNone\", None, None, 0, 5),\n )\n def testRangeReadingGraphExecutionTraces(self, begin, end, expected_begin,\n expected_end):\n writer = debug_events_writer.DebugEventsWriter(\n self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)\n debugged_graph = debug_event_pb2.DebuggedGraph(\n graph_id=\"graph1\", graph_name=\"graph1\")\n writer.WriteDebuggedGraph(debugged_graph)\n for i in range(5):\n op_name = \"Op_%d\" % i\n graph_op_creation = debug_event_pb2.GraphOpCreation(\n op_name=op_name, graph_id=\"graph1\")\n writer.WriteGraphOpCreation(graph_op_creation)\n trace = debug_event_pb2.GraphExecutionTrace(\n op_name=op_name, tfdbg_context_id=\"graph1\")\n writer.WriteGraphExecutionTrace(trace)\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n writer.Close()\n\n with debug_events_reader.DebugDataReader(self.dump_root) as reader:\n reader.update()\n traces = reader.graph_execution_traces(begin=begin, end=end)\n self.assertLen(traces, expected_end - expected_begin)\n self.assertEqual(traces[0].op_name, \"Op_%d\" % expected_begin)\n self.assertEqual(traces[-1].op_name, \"Op_%d\" % (expected_end - 1))\n\n\nclass MultiSetReaderTest(dumping_callback_test_lib.DumpingCallbackTestBase):\n \"\"\"Test for DebugDataReader for multiple file sets under a dump root.\"\"\"\n\n def testReadingTwoFileSetsWithTheSameDumpRootSucceeds(self):\n # To simulate a multi-host data dump, we first generate file sets in two\n # different directories, with the same tfdbg_run_id, and then combine them.\n tfdbg_run_id = \"foo\"\n for i in range(2):\n writer = debug_events_writer.DebugEventsWriter(\n os.path.join(self.dump_root, str(i)),\n tfdbg_run_id,\n circular_buffer_size=-1)\n if i == 0:\n debugged_graph = debug_event_pb2.DebuggedGraph(\n graph_id=\"graph1\", graph_name=\"graph1\")\n writer.WriteDebuggedGraph(debugged_graph)\n op_name = \"Op_0\"\n graph_op_creation = debug_event_pb2.GraphOpCreation(\n op_type=\"FooOp\", op_name=op_name, graph_id=\"graph1\")\n writer.WriteGraphOpCreation(graph_op_creation)\n op_name = \"Op_1\"\n graph_op_creation = debug_event_pb2.GraphOpCreation(\n op_type=\"FooOp\", op_name=op_name, graph_id=\"graph1\")\n writer.WriteGraphOpCreation(graph_op_creation)\n for _ in range(10):\n trace = debug_event_pb2.GraphExecutionTrace(\n op_name=\"Op_%d\" % i, tfdbg_context_id=\"graph1\")\n writer.WriteGraphExecutionTrace(trace)\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n\n # Move all files from the subdirectory /1 to subdirectory /0.\n dump_root_0 = os.path.join(self.dump_root, \"0\")\n src_paths = glob.glob(os.path.join(self.dump_root, \"1\", \"*\"))\n for src_path in src_paths:\n dst_path = os.path.join(\n dump_root_0,\n # Rename the file set to avoid file name collision.\n re.sub(r\"(tfdbg_events\\.\\d+)\", r\"\\g<1>1\", os.path.basename(src_path)))\n os.rename(src_path, dst_path)\n\n with debug_events_reader.DebugDataReader(dump_root_0) as reader:\n reader.update()\n # Verify the content of the .graph_execution_traces file.\n trace_digests = reader.graph_execution_traces(digest=True)\n self.assertLen(trace_digests, 20)\n for 
_ in range(10):\n trace = reader.read_graph_execution_trace(trace_digests[i])\n self.assertEqual(trace.op_name, \"Op_0\")\n for _ in range(10):\n trace = reader.read_graph_execution_trace(trace_digests[i + 10])\n self.assertEqual(trace.op_name, \"Op_1\")\n\n def testReadingTwoFileSetsWithTheDifferentRootsLeadsToError(self):\n # To simulate a multi-host data dump, we first generate file sets in two\n # different directories, with different tfdbg_run_ids, and then combine\n # them.\n for i in range(2):\n writer = debug_events_writer.DebugEventsWriter(\n os.path.join(self.dump_root, str(i)),\n \"run_id_%d\" % i,\n circular_buffer_size=-1)\n writer.FlushNonExecutionFiles()\n writer.FlushExecutionFiles()\n\n # Move all files from the subdirectory /1 to subdirectory /0.\n dump_root_0 = os.path.join(self.dump_root, \"0\")\n src_paths = glob.glob(os.path.join(self.dump_root, \"1\", \"*\"))\n for src_path in src_paths:\n dst_path = os.path.join(\n dump_root_0,\n # Rename the file set to avoid file name collision.\n re.sub(r\"(tfdbg_events\\.\\d+)\", r\"\\g<1>1\", os.path.basename(src_path)))\n os.rename(src_path, dst_path)\n\n with self.assertRaisesRegex(ValueError,\n r\"Found multiple \\(2\\) tfdbg2 runs\"):\n debug_events_reader.DebugDataReader(dump_root_0)\n\n\nclass DataObjectsTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def jsonRoundTripCheck(self, obj):\n self.assertEqual(\n json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),\n json_lib.dumps(obj, sort_keys=True))\n\n def testExecutionDigestWithNoOutputToJson(self):\n execution_digest = debug_events_reader.ExecutionDigest(\n 1234, 5678, \"FooOp\", output_tensor_device_ids=None)\n json = execution_digest.to_json()\n self.jsonRoundTripCheck(json)\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"output_tensor_device_ids\"], None)\n\n def testExecutionDigestWithTwoOutputsToJson(self):\n execution_digest = debug_events_reader.ExecutionDigest(\n 1234, 5678, \"FooOp\", output_tensor_device_ids=[1357, 2468])\n json = execution_digest.to_json()\n self.jsonRoundTripCheck(json)\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"output_tensor_device_ids\"], (1357, 2468))\n\n def testExecutionNoGraphNoInputToJson(self):\n execution_digest = debug_events_reader.ExecutionDigest(\n 1234, 5678, \"FooOp\", output_tensor_device_ids=[1357])\n execution = debug_events_reader.Execution(\n execution_digest,\n \"localhost\",\n (\"a1\", \"b2\"),\n debug_event_pb2.TensorDebugMode.CURT_HEALTH,\n graph_id=None,\n input_tensor_ids=None,\n output_tensor_ids=[2468],\n debug_tensor_values=([1, 0],))\n json = execution.to_json()\n self.jsonRoundTripCheck(json)\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"output_tensor_device_ids\"], (1357,))\n self.assertEqual(json[\"host_name\"], \"localhost\")\n self.assertEqual(json[\"stack_frame_ids\"], (\"a1\", \"b2\"))\n self.assertEqual(json[\"tensor_debug_mode\"],\n debug_event_pb2.TensorDebugMode.CURT_HEALTH)\n self.assertIsNone(json[\"graph_id\"])\n self.assertIsNone(json[\"input_tensor_ids\"])\n self.assertEqual(json[\"output_tensor_ids\"], (2468,))\n self.assertEqual(json[\"debug_tensor_values\"], ([1, 0],))\n\n def testExecutionNoGraphNoInputButWithOutputToJson(self):\n execution_digest = debug_events_reader.ExecutionDigest(\n 1234, 5678, \"FooOp\", 
output_tensor_device_ids=[1357])\n execution = debug_events_reader.Execution(\n execution_digest,\n \"localhost\",\n (\"a1\", \"b2\"),\n debug_event_pb2.TensorDebugMode.FULL_HEALTH,\n graph_id=\"abcd\",\n input_tensor_ids=[13, 37],\n output_tensor_ids=None,\n debug_tensor_values=None)\n json = execution.to_json()\n self.jsonRoundTripCheck(json)\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"output_tensor_device_ids\"], (1357,))\n self.assertEqual(json[\"host_name\"], \"localhost\")\n self.assertEqual(json[\"stack_frame_ids\"], (\"a1\", \"b2\"))\n self.assertEqual(json[\"tensor_debug_mode\"],\n debug_event_pb2.TensorDebugMode.FULL_HEALTH)\n self.assertEqual(json[\"graph_id\"], \"abcd\")\n self.assertEqual(json[\"input_tensor_ids\"], (13, 37))\n self.assertIsNone(json[\"output_tensor_ids\"])\n self.assertIsNone(json[\"debug_tensor_values\"])\n\n @parameterized.named_parameters(\n (\"EmptyList\", []),\n (\"None\", None),\n )\n def testExecutionWithNoOutputTensorsReturnsZeroForNumOutputs(\n self, output_tensor_ids):\n execution = debug_events_reader.Execution(\n debug_events_reader.ExecutionDigest(1234, 5678, \"FooOp\"),\n \"localhost\", (\"a1\", \"b2\"),\n debug_event_pb2.TensorDebugMode.FULL_HEALTH,\n graph_id=\"abcd\",\n input_tensor_ids=[13, 37],\n output_tensor_ids=output_tensor_ids,\n debug_tensor_values=None)\n self.assertEqual(execution.num_outputs, 0)\n\n def testDebuggedDeviceToJons(self):\n debugged_device = debug_events_reader.DebuggedDevice(\"/TPU:3\", 4)\n self.assertEqual(debugged_device.to_json(), {\n \"device_name\": \"/TPU:3\",\n \"device_id\": 4,\n })\n\n def testDebuggedGraphToJonsWitouthNameInnerOuterGraphIds(self):\n debugged_graph = debug_events_reader.DebuggedGraph(\n None,\n \"b1c2\",\n outer_graph_id=None,\n )\n self.assertEqual(\n debugged_graph.to_json(), {\n \"name\": None,\n \"graph_id\": \"b1c2\",\n \"outer_graph_id\": None,\n \"inner_graph_ids\": [],\n })\n\n def testDebuggedGraphToJonsWithNameAndInnerOuterGraphIds(self):\n debugged_graph = debug_events_reader.DebuggedGraph(\n \"loss_function\",\n \"b1c2\",\n outer_graph_id=\"a0b1\",\n )\n debugged_graph.add_inner_graph_id(\"c2d3\")\n debugged_graph.add_inner_graph_id(\"c2d3e4\")\n self.assertEqual(\n debugged_graph.to_json(), {\n \"name\": \"loss_function\",\n \"graph_id\": \"b1c2\",\n \"outer_graph_id\": \"a0b1\",\n \"inner_graph_ids\": [\"c2d3\", \"c2d3e4\"],\n })\n\n @parameterized.named_parameters(\n (\"EmptyList\", []),\n (\"None\", None),\n )\n def testGraphOpDigestWithNoOutpusReturnsNumOutputsZero(\n self, output_tensor_ids):\n op_creation_digest = debug_events_reader.GraphOpCreationDigest(\n 1234,\n 5678,\n \"deadbeef\",\n \"FooOp\",\n \"Model_1/Foo_2\",\n output_tensor_ids,\n \"machine.cluster\", (\"a1\", \"a2\"),\n input_names=None,\n device_name=None)\n self.assertEqual(op_creation_digest.num_outputs, 0)\n\n def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):\n op_creation_digest = debug_events_reader.GraphOpCreationDigest(\n 1234,\n 5678,\n \"deadbeef\",\n \"FooOp\",\n \"Model_1/Foo_2\", [135],\n \"machine.cluster\", (\"a1\", \"a2\"),\n input_names=None,\n device_name=None)\n json = op_creation_digest.to_json()\n self.jsonRoundTripCheck(json)\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"graph_id\"], \"deadbeef\")\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"op_name\"], \"Model_1/Foo_2\")\n self.assertEqual(json[\"output_tensor_ids\"], (135,))\n 
self.assertEqual(json[\"host_name\"], \"machine.cluster\")\n self.assertEqual(json[\"stack_frame_ids\"], (\"a1\", \"a2\"))\n self.assertIsNone(json[\"input_names\"])\n self.assertIsNone(json[\"device_name\"])\n\n def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):\n op_creation_digest = debug_events_reader.GraphOpCreationDigest(\n 1234,\n 5678,\n \"deadbeef\",\n \"FooOp\",\n \"Model_1/Foo_2\", [135],\n \"machine.cluster\", (\"a1\", \"a2\"),\n input_names=[\"Bar_1\", \"Qux_2\"],\n device_name=\"/device:GPU:0\")\n json = op_creation_digest.to_json()\n self.jsonRoundTripCheck(json)\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"graph_id\"], \"deadbeef\")\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"op_name\"], \"Model_1/Foo_2\")\n self.assertEqual(json[\"output_tensor_ids\"], (135,))\n self.assertEqual(json[\"host_name\"], \"machine.cluster\")\n self.assertEqual(json[\"stack_frame_ids\"], (\"a1\", \"a2\"))\n self.assertEqual(json[\"input_names\"], (\"Bar_1\", \"Qux_2\"))\n self.assertEqual(json[\"device_name\"], \"/device:GPU:0\")\n\n def testGraphExecutionTraceDigestToJson(self):\n trace_digest = debug_events_reader.GraphExecutionTraceDigest(\n 1234, 5678, \"FooOp\", \"Model_1/Foo_2\", 1, \"deadbeef\")\n json = trace_digest.to_json()\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"op_name\"], \"Model_1/Foo_2\")\n self.assertEqual(json[\"output_slot\"], 1)\n self.assertEqual(json[\"graph_id\"], \"deadbeef\")\n\n def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):\n trace_digest = debug_events_reader.GraphExecutionTraceDigest(\n 1234, 5678, \"FooOp\", \"Model_1/Foo_2\", 1, \"deadbeef\")\n trace = debug_events_reader.GraphExecutionTrace(\n trace_digest, [\"g1\", \"g2\", \"deadbeef\"],\n debug_event_pb2.TensorDebugMode.CURT_HEALTH,\n debug_tensor_value=[3, 1], device_name=\"/device:GPU:0\")\n json = trace.to_json()\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"op_name\"], \"Model_1/Foo_2\")\n self.assertEqual(json[\"output_slot\"], 1)\n self.assertEqual(json[\"graph_id\"], \"deadbeef\")\n self.assertEqual(json[\"graph_ids\"], (\"g1\", \"g2\", \"deadbeef\"))\n self.assertEqual(json[\"tensor_debug_mode\"],\n debug_event_pb2.TensorDebugMode.CURT_HEALTH)\n self.assertEqual(json[\"debug_tensor_value\"], (3, 1))\n self.assertEqual(json[\"device_name\"], \"/device:GPU:0\")\n\n def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):\n trace_digest = debug_events_reader.GraphExecutionTraceDigest(\n 1234, 5678, \"FooOp\", \"Model_1/Foo_2\", 1, \"deadbeef\")\n trace = debug_events_reader.GraphExecutionTrace(\n trace_digest, [\"g1\", \"g2\", \"deadbeef\"],\n debug_event_pb2.TensorDebugMode.NO_TENSOR,\n debug_tensor_value=None, device_name=None)\n json = trace.to_json()\n self.assertEqual(json[\"wall_time\"], 1234)\n self.assertEqual(json[\"op_type\"], \"FooOp\")\n self.assertEqual(json[\"op_name\"], \"Model_1/Foo_2\")\n self.assertEqual(json[\"output_slot\"], 1)\n self.assertEqual(json[\"graph_id\"], \"deadbeef\")\n self.assertEqual(json[\"graph_ids\"], (\"g1\", \"g2\", \"deadbeef\"))\n self.assertEqual(json[\"tensor_debug_mode\"],\n debug_event_pb2.TensorDebugMode.NO_TENSOR)\n self.assertIsNone(json[\"debug_tensor_value\"])\n self.assertIsNone(json[\"device_name\"])\n\n\nif __name__ == \"__main__\":\n ops.enable_eager_execution()\n googletest.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n# pylint: disable=g-classes-have-attributes\n\"\"\"Contains the pooling layer classes and their functional aliases.\"\"\"\nimport warnings\n\nfrom tensorflow.python.keras import layers as keras_layers\nfrom tensorflow.python.keras.legacy_tf_layers import base\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling1D'])\n@tf_export(v1=['layers.AveragePooling1D'])\nclass AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):\n \"\"\"Average Pooling layer for 1D inputs.\n\n Args:\n pool_size: An integer or tuple/list of a single integer,\n representing the size of the pooling window.\n strides: An integer or tuple/list of a single integer, specifying the\n strides of the pooling operation.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(AveragePooling1D, self).__init__(\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name,\n **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling1d'])\n@tf_export(v1=['layers.average_pooling1d'])\ndef average_pooling1d(inputs, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n \"\"\"Average Pooling layer for 1D inputs.\n\n Args:\n inputs: The tensor over which to pool. Must have rank 3.\n pool_size: An integer or tuple/list of a single integer,\n representing the size of the pooling window.\n strides: An integer or tuple/list of a single integer, specifying the\n strides of the pooling operation.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n name: A string, the name of the layer.\n\n Returns:\n The output tensor, of rank 3.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.average_pooling1d` is deprecated and '\n 'will be removed in a future version. 
'\n 'Please use `tf.keras.layers.AveragePooling1D` instead.')\n layer = AveragePooling1D(pool_size=pool_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n return layer.apply(inputs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling1D'])\n@tf_export(v1=['layers.MaxPooling1D'])\nclass MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):\n \"\"\"Max Pooling layer for 1D inputs.\n\n Args:\n pool_size: An integer or tuple/list of a single integer,\n representing the size of the pooling window.\n strides: An integer or tuple/list of a single integer, specifying the\n strides of the pooling operation.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(MaxPooling1D, self).__init__(\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name,\n **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling1d'])\n@tf_export(v1=['layers.max_pooling1d'])\ndef max_pooling1d(inputs, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n \"\"\"Max Pooling layer for 1D inputs.\n\n Args:\n inputs: The tensor over which to pool. Must have rank 3.\n pool_size: An integer or tuple/list of a single integer,\n representing the size of the pooling window.\n strides: An integer or tuple/list of a single integer, specifying the\n strides of the pooling operation.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n name: A string, the name of the layer.\n\n Returns:\n The output tensor, of rank 3.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.max_pooling1d` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.MaxPooling1D` instead.')\n layer = MaxPooling1D(pool_size=pool_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n return layer.apply(inputs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling2D'])\n@tf_export(v1=['layers.AveragePooling2D'])\nclass AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):\n \"\"\"Average pooling layer for 2D inputs (e.g. images).\n\n Args:\n pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. 
The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, height, width)`.\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(AveragePooling2D, self).__init__(\n pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format, name=name, **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling2d'])\n@tf_export(v1=['layers.average_pooling2d'])\ndef average_pooling2d(inputs,\n pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n \"\"\"Average pooling layer for 2D inputs (e.g. images).\n\n Args:\n inputs: The tensor over which to pool. Must have rank 4.\n pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, height, width)`.\n name: A string, the name of the layer.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.average_pooling2d` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.AveragePooling2D` instead.')\n layer = AveragePooling2D(pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format,\n name=name)\n return layer.apply(inputs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling2D'])\n@tf_export(v1=['layers.MaxPooling2D'])\nclass MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):\n \"\"\"Max pooling layer for 2D inputs (e.g. images).\n\n Args:\n pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. 
The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, height, width)`.\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(MaxPooling2D, self).__init__(\n pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format, name=name, **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling2d'])\n@tf_export(v1=['layers.max_pooling2d'])\ndef max_pooling2d(inputs,\n pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n \"\"\"Max pooling layer for 2D inputs (e.g. images).\n\n Args:\n inputs: The tensor over which to pool. Must have rank 4.\n pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, height, width)`.\n name: A string, the name of the layer.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.max_pooling2d` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.MaxPooling2D` instead.')\n layer = MaxPooling2D(pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format,\n name=name)\n return layer.apply(inputs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling3D'])\n@tf_export(v1=['layers.AveragePooling3D'])\nclass AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):\n \"\"\"Average pooling layer for 3D inputs (e.g. volumes).\n\n Args:\n pool_size: An integer or tuple/list of 3 integers:\n (pool_depth, pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. 
The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, depth, height, width)`.\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(AveragePooling3D, self).__init__(\n pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format, name=name, **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling3d'])\n@tf_export(v1=['layers.average_pooling3d'])\ndef average_pooling3d(inputs,\n pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n \"\"\"Average pooling layer for 3D inputs (e.g. volumes).\n\n Args:\n inputs: The tensor over which to pool. Must have rank 5.\n pool_size: An integer or tuple/list of 3 integers:\n (pool_depth, pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, depth, height, width)`.\n name: A string, the name of the layer.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.average_pooling3d` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.AveragePooling3D` instead.')\n layer = AveragePooling3D(pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format,\n name=name)\n return layer.apply(inputs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling3D'])\n@tf_export(v1=['layers.MaxPooling3D'])\nclass MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):\n \"\"\"Max pooling layer for 3D inputs (e.g. volumes).\n\n Args:\n pool_size: An integer or tuple/list of 3 integers:\n (pool_depth, pool_height, pool_width)\n specifying the size of the pooling window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the pooling operation.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. 
The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape\n `(batch, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, depth, height, width)`.\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(MaxPooling3D, self).__init__(\n pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format, name=name, **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling3d'])\n@tf_export(v1=['layers.max_pooling3d'])\ndef max_pooling3d(inputs,\n pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n \"\"\"Max pooling layer for 3D inputs (e.g.\n\n volumes).\n\n Args:\n inputs: The tensor over which to pool. Must have rank 5.\n pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height,\n pool_width) specifying the size of the pooling window. Can be a single\n integer to specify the same value for all spatial dimensions.\n strides: An integer or tuple/list of 3 integers, specifying the strides of\n the pooling operation. Can be a single integer to specify the same value\n for all spatial dimensions.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string. The ordering of the dimensions in the inputs.\n `channels_last` (default) and `channels_first` are supported.\n `channels_last` corresponds to inputs with shape `(batch, depth, height,\n width, channels)` while `channels_first` corresponds to inputs with shape\n `(batch, channels, depth, height, width)`.\n name: A string, the name of the layer.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.max_pooling3d` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.MaxPooling3D` instead.')\n layer = MaxPooling3D(pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format,\n name=name)\n return layer.apply(inputs)\n\n# Aliases\n\nAvgPool2D = AveragePooling2D\nMaxPool2D = MaxPooling2D\nmax_pool2d = max_pooling2d\navg_pool2d = average_pooling2d\n",
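The deprecation warnings in the entry above all point at the Keras replacements (e.g. `tf.keras.layers.MaxPooling2D` instead of `tf.layers.max_pooling2d`). As a minimal usage sketch only, not part of the dataset entry, the following applies the recommended Keras layer to a small `channels_last` input; the input shape and values are illustrative assumptions.

import numpy as np
import tensorflow as tf

# (batch, height, width, channels) -- the channels_last layout documented in the entry above.
x = np.arange(16, dtype="float32").reshape(1, 4, 4, 1)
pool = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding="valid")
y = pool(x)
print(y.shape)  # (1, 2, 2, 1); the v1 functional endpoint above raises under eager execution
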
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for sparse_to_dense.\"\"\"\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_scalar_data\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_sparse_to_dense_tests(options):\n \"\"\"Make a set of tests to do sparse to dense.\"\"\"\n\n test_parameters = [{\n \"value_dtype\": [tf.float32, tf.int32, tf.int64],\n \"index_dtype\": [tf.int32, tf.int64],\n \"value_count\": [1, 3, 6, 8],\n \"dense_shape\": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],\n \"default_value\": [0, -1],\n \"value_is_scalar\": [True, False],\n }]\n\n # Return a single value for 1-D dense shape, but a tuple for other shapes.\n def generate_index(dense_shape):\n if len(dense_shape) == 1:\n return np.random.randint(dense_shape[0])\n else:\n index = []\n for shape in dense_shape:\n index.append(np.random.randint(shape))\n return tuple(index)\n\n def build_graph(parameters):\n \"\"\"Build the sparse_to_dense op testing graph.\"\"\"\n dense_shape = parameters[\"dense_shape\"]\n\n # Special handle for value_is_scalar case.\n # value_count must be 1.\n if parameters[\"value_is_scalar\"] and parameters[\"value_count\"] == 1:\n value = tf.compat.v1.placeholder(\n name=\"value\", dtype=parameters[\"value_dtype\"], shape=())\n else:\n value = tf.compat.v1.placeholder(\n name=\"value\",\n dtype=parameters[\"value_dtype\"],\n shape=[parameters[\"value_count\"]])\n indices = set()\n while len(indices) < parameters[\"value_count\"]:\n indices.add(generate_index(dense_shape))\n indices = tf.constant(tuple(indices), dtype=parameters[\"index_dtype\"])\n # TODO(renjieliu): Add test for validate_indices case.\n out = tf.sparse_to_dense(\n indices,\n dense_shape,\n value,\n parameters[\"default_value\"],\n validate_indices=False)\n\n return [value], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n if parameters[\"value_is_scalar\"] and parameters[\"value_count\"] == 1:\n input_value = create_scalar_data(parameters[\"value_dtype\"])\n else:\n input_value = create_tensor_data(parameters[\"value_dtype\"],\n [parameters[\"value_count\"]])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Special Math Ops.\"\"\"\n\nimport collections\nimport importlib\n\nimport numpy as np\n\nfrom tensorflow.python.eager import backprop as tfe_backprop\nfrom tensorflow.python.eager import context as tfe_context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops.distributions import special_math\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\ndef try_import(name): # pylint: disable=invalid-name\n module = None\n try:\n module = importlib.import_module(name)\n except ImportError as e:\n tf_logging.warning(\"Could not import %s: %s\" % (name, str(e)))\n return module\n\n\nspecial = try_import(\"scipy.special\")\nstats = try_import(\"scipy.stats\")\nsm = special_math\n\n\ndef _check_strictly_increasing(array_1d):\n diff = np.diff(array_1d)\n np.testing.assert_array_less(0, diff)\n\n\ndef _make_grid(dtype, grid_spec):\n \"\"\"Returns a uniform grid + noise, reshaped to shape argument.\"\"\"\n rng = np.random.RandomState(0)\n num_points = np.prod(grid_spec.shape)\n grid = np.linspace(grid_spec.min, grid_spec.max, num=num_points).astype(dtype)\n grid_spacing = (grid_spec.max - grid_spec.min) / num_points\n grid += 0.1 * grid_spacing * rng.randn(*grid.shape) # pylint: disable=not-an-iterable\n # More useful if it's sorted (e.g. for testing monotonicity, or debugging).\n grid = np.sort(grid)\n return np.reshape(grid, grid_spec.shape)\n\n\ndef _value_and_gradient(fn, *args):\n \"\"\"Calls `fn` and computes the gradient of the result wrt `arg`.\"\"\"\n if tfe_context.executing_eagerly():\n v, g = tfe_backprop.val_and_grad_function(fn)(args)\n else:\n v = fn(*args)\n g = gradients_impl.gradients(v, args)\n return v, g\n\n\nGridSpec = collections.namedtuple(\"GridSpec\", [\"min\", \"max\", \"shape\"])\n\nErrorSpec = collections.namedtuple(\"ErrorSpec\", [\"rtol\", \"atol\"])\n\n\nclass NdtriTest(test.TestCase):\n\n def assertAllFinite(self, x):\n is_finite = np.isfinite(x)\n all_true = np.ones_like(is_finite, dtype=np.bool_)\n self.assertAllEqual(all_true, is_finite)\n\n @test_util.run_in_graph_and_eager_modes\n def testNdtri(self):\n \"\"\"Verifies that ndtri computation is correct.\"\"\"\n if not special:\n return\n\n p = np.linspace(0., 1.0, 50).astype(np.float64)\n # Quantile performs piecewise rational approximation so adding some\n # special input values to make sure we hit all the pieces.\n p = np.hstack((p, np.exp(-32), 1. - np.exp(-32), np.exp(-2),\n 1. 
- np.exp(-2)))\n expected_x = special.ndtri(p)\n x = special_math.ndtri(p)\n self.assertAllClose(expected_x, self.evaluate(x), atol=0.)\n\n @test_util.run_deprecated_v1\n def testNdtriDynamicShape(self):\n \"\"\"Verifies that ndtri computation is correct.\"\"\"\n with self.cached_session() as sess:\n if not special:\n return\n\n p = array_ops.placeholder(np.float32)\n p_ = np.linspace(0., 1.0, 50).astype(np.float32)\n\n x = special_math.ndtri(p)\n x_ = sess.run(x, feed_dict={p: p_})\n\n expected_x_ = special.ndtri(p_)\n self.assertAllClose(expected_x_, x_, atol=0.)\n\n def _baseNdtriFiniteGradientTest(self, dtype):\n \"\"\"Verifies that ndtri has finite gradients at interesting points.\"\"\"\n # Tests gradients at 0, 1, and piece-wise boundaries.\n p = constant_op.constant(\n np.array([\n 0.,\n np.exp(-32.),\n np.exp(-2.),\n 1. - np.exp(-2.),\n 1. - np.exp(-32.),\n 1.,\n ]).astype(dtype))\n # Not having the lambda sanitizer means we'd get an `IndexError` whenever\n # the user supplied function has default args.\n _, grads = _value_and_gradient(\n lambda x: special_math.ndtri(x), p) # pylint: disable=unnecessary-lambda\n self.assertAllFinite(self.evaluate(grads[0]))\n\n @test_util.run_in_graph_and_eager_modes\n def testNdtriFiniteGradientFloat32(self):\n self._baseNdtriFiniteGradientTest(np.float32)\n\n @test_util.run_in_graph_and_eager_modes\n def testNdtriFiniteGradientFloat64(self):\n self._baseNdtriFiniteGradientTest(np.float64)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NdtrTest(test.TestCase):\n _use_log = False\n # Grid min/max chosen to ensure 0 < cdf(x) < 1.\n _grid32 = GridSpec(min=-12.9, max=5., shape=[100])\n _grid64 = GridSpec(min=-37.5, max=8., shape=[100])\n _error32 = ErrorSpec(rtol=1e-4, atol=0.)\n _error64 = ErrorSpec(rtol=1e-6, atol=0.)\n\n def _test_grid(self, dtype, grid_spec, error_spec):\n if self._use_log:\n self._test_grid_log(dtype, grid_spec, error_spec)\n else:\n self._test_grid_no_log(dtype, grid_spec, error_spec)\n\n def _test_grid_log(self, dtype, grid_spec, error_spec):\n if not special:\n return\n\n grid = _make_grid(dtype, grid_spec)\n actual = self.evaluate(sm.log_ndtr(grid))\n\n # Basic tests.\n # isfinite checks for NaN and Inf.\n self.assertTrue(np.isfinite(actual).all())\n # On the grid, -inf < log_cdf(x) < 0. In this case, we should be able\n # to use a huge grid because we have used tricks to escape numerical\n # difficulties.\n self.assertTrue((actual < 0).all())\n _check_strictly_increasing(actual)\n\n # Versus scipy.\n expected = special.log_ndtr(grid)\n # Scipy prematurely goes to zero at some places that we don't. So don't\n # include these in the comparison.\n self.assertAllClose(\n expected.astype(np.float64)[expected < 0],\n actual.astype(np.float64)[expected < 0],\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n def _test_grid_no_log(self, dtype, grid_spec, error_spec):\n if not special:\n return\n\n grid = _make_grid(dtype, grid_spec)\n actual = self.evaluate(sm.ndtr(grid))\n\n # Basic tests.\n # isfinite checks for NaN and Inf.\n self.assertTrue(np.isfinite(actual).all())\n # On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due\n # to numerical limitations of cdf.\n self.assertTrue((actual > 0).all())\n self.assertTrue((actual < 1).all())\n _check_strictly_increasing(actual)\n\n # Versus scipy.\n expected = special.ndtr(grid)\n # Scipy prematurely goes to zero at some places that we don't. 
So don't\n # include these in the comparison.\n self.assertAllClose(\n expected.astype(np.float64)[expected < 0],\n actual.astype(np.float64)[expected < 0],\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n @test_util.run_deprecated_v1\n def test_float32(self):\n self._test_grid(np.float32, self._grid32, self._error32)\n\n @test_util.run_deprecated_v1\n def test_float64(self):\n self._test_grid(np.float64, self._grid64, self._error64)\n\n\nclass LogNdtrTestLower(NdtrTest):\n _use_log = True\n _grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100])\n _grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100])\n _error32 = ErrorSpec(rtol=1e-4, atol=0.)\n _error64 = ErrorSpec(rtol=1e-4, atol=0.)\n\n\n# The errors are quite large when the input is > 6 or so. Also,\n# scipy.special.log_ndtr becomes zero very early, before 10,\n# (due to ndtr becoming 1). We approximate Log[1 + epsilon] as epsilon, and\n# avoid this issue.\nclass LogNdtrTestMid(NdtrTest):\n _use_log = True\n _grid32 = GridSpec(\n min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])\n _grid64 = GridSpec(\n min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])\n # Differences show up as soon as we're in the tail, so add some atol.\n _error32 = ErrorSpec(rtol=0.1, atol=1e-7)\n _error64 = ErrorSpec(rtol=0.1, atol=1e-7)\n\n\nclass LogNdtrTestUpper(NdtrTest):\n _use_log = True\n _grid32 = GridSpec(\n min=sm.LOGNDTR_FLOAT32_UPPER,\n max=12., # Beyond this, log_cdf(x) may be zero.\n shape=[100])\n _grid64 = GridSpec(\n min=sm.LOGNDTR_FLOAT64_UPPER,\n max=35., # Beyond this, log_cdf(x) may be zero.\n shape=[100])\n _error32 = ErrorSpec(rtol=1e-6, atol=1e-14)\n _error64 = ErrorSpec(rtol=1e-6, atol=1e-14)\n\n\nclass NdtrGradientTest(test.TestCase):\n _use_log = False\n _grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8])\n _error32 = ErrorSpec(rtol=1e-4, atol=0)\n _error64 = ErrorSpec(rtol=1e-7, atol=0)\n\n def assert_all_true(self, v):\n self.assertAllEqual(np.ones_like(v, dtype=np.bool_), v)\n\n def assert_all_false(self, v):\n self.assertAllEqual(np.zeros_like(v, dtype=np.bool_), v)\n\n def _test_grad_finite(self, dtype):\n x = constant_op.constant([-100., 0., 100.], dtype=dtype)\n output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x))\n fn = sm.log_ndtr if self._use_log else sm.ndtr\n # Not having the lambda sanitizer means we'd get an `IndexError` whenever\n # the user supplied function has default args.\n output, grad_output = _value_and_gradient(\n lambda x_: fn(x_), x) # pylint: disable=unnecessary-lambda\n # isfinite checks for NaN and Inf.\n output_, grad_output_ = self.evaluate([output, grad_output])\n self.assert_all_true(np.isfinite(output_))\n self.assert_all_true(np.isfinite(grad_output_[0]))\n\n def _test_grad_accuracy(self, dtype, grid_spec, error_spec):\n raw_grid = _make_grid(dtype, grid_spec)\n grid = ops.convert_to_tensor(raw_grid)\n with self.cached_session():\n fn = sm.log_ndtr if self._use_log else sm.ndtr\n\n # If there are N points in the grid,\n # grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of\n # the ith output point w.r.t. the jth grid point. 
We only expect the\n # diagonal to be nonzero.\n # TODO(b/31131137): Replace tf.compat.v1.test.compute_gradient with our\n # own custom gradient evaluation to ensure we correctly handle small\n # function delta.\n grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,\n fn(grid),\n grid_spec.shape)\n grad_eval = np.diag(grad_eval)\n\n # Check for NaN separately in order to get informative failures.\n self.assert_all_false(np.isnan(grad_eval))\n self.assert_all_true(grad_eval > 0.)\n # isfinite checks for NaN and Inf.\n self.assert_all_true(np.isfinite(grad_eval))\n\n # Do the same checks but explicitly compute the gradient.\n # (We did this because we're not sure if we trust\n # tf.test.compute_gradient.)\n grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()\n self.assert_all_false(np.isnan(grad_eval))\n if self._use_log:\n g = np.reshape(grad_eval, [-1])\n half = np.ceil(len(g) / 2)\n self.assert_all_true(g[:int(half)] > 0.)\n self.assert_all_true(g[int(half):] >= 0.)\n else:\n # The ndtr gradient will only be non-zero in the range [-14, 14] for\n # float32 and [-38, 38] for float64.\n self.assert_all_true(grad_eval >= 0.)\n # isfinite checks for NaN and Inf.\n self.assert_all_true(np.isfinite(grad_eval))\n\n # Versus scipy.\n if not (special and stats):\n return\n\n expected = stats.norm.pdf(raw_grid)\n if self._use_log:\n expected /= special.ndtr(raw_grid)\n expected[np.isnan(expected)] = 0.\n # Scipy prematurely goes to zero at some places that we don't. So don't\n # include these in the comparison.\n self.assertAllClose(\n expected.astype(np.float64)[expected < 0],\n grad_eval.astype(np.float64)[expected < 0],\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n @test_util.run_deprecated_v1\n def test_float32(self):\n self._test_grad_accuracy(np.float32, self._grid, self._error32)\n self._test_grad_finite(np.float32)\n\n @test_util.run_deprecated_v1\n def test_float64(self):\n self._test_grad_accuracy(np.float64, self._grid, self._error64)\n self._test_grad_finite(np.float64)\n\n\nclass LogNdtrGradientTest(NdtrGradientTest):\n _use_log = True\n\n\nclass ErfInvTest(test.TestCase):\n\n def testErfInvValues(self):\n with self.cached_session():\n if not special:\n return\n\n x = np.linspace(0., 1.0, 50).astype(np.float64)\n\n expected_x = special.erfinv(x)\n x = special_math.erfinv(x)\n self.assertAllClose(expected_x, self.evaluate(x), atol=0.)\n\n def testErfInvIntegerInput(self):\n with self.cached_session():\n\n with self.assertRaises(TypeError):\n x = np.array([1, 2, 3]).astype(np.int32)\n special_math.erfinv(x)\n\n with self.assertRaises(TypeError):\n x = np.array([1, 2, 3]).astype(np.int64)\n special_math.erfinv(x)\n\n\nclass LogCDFLaplaceTest(test.TestCase):\n # Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot\n # rely on scipy to cross check the extreme values.\n\n # Test will be done differently over different ranges. These are the values\n # such that when exceeded by x, produce output that causes the naive (scipy)\n # implementation to have numerical issues.\n #\n # If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps.\n # With inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is\n # the value of x such that any larger value will result in\n # 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a\n # log # of zero. We therefore choose these as our cutoffs for testing.\n CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1.\n CUTOFF_FLOAT32_UPPER = np.log(1. / (2. 
* np.finfo(np.float32).eps)) - 1.\n\n def assertAllTrue(self, x):\n self.assertAllEqual(np.ones_like(x, dtype=np.bool_), x)\n\n def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec):\n with self.cached_session():\n grid = _make_grid(dtype, grid_spec)\n actual = sm.log_cdf_laplace(grid).eval()\n\n # Basic tests.\n # isfinite checks for NaN and Inf.\n self.assertAllTrue(np.isfinite(actual))\n self.assertAllTrue((actual < 0))\n _check_strictly_increasing(actual)\n\n # Versus scipy.\n if not stats:\n return\n\n scipy_dist = stats.laplace(loc=0., scale=1.)\n expected = scipy_dist.logcdf(grid.astype(scipy_dtype))\n self.assertAllClose(\n expected.astype(np.float64),\n actual.astype(np.float64),\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n @test_util.run_deprecated_v1\n def test_float32_lower_and_mid_segment_scipy_float32_ok(self):\n # Choose values mild enough that we can use scipy in float32, which will\n # allow for a high accuracy match to scipy (since we both use float32).\n self._test_grid_log(\n np.float32, # dtype\n np.float32, # scipy_dtype\n GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]),\n ErrorSpec(rtol=5e-4, atol=0))\n\n @test_util.run_deprecated_v1\n def test_float32_all_segments_with_scipy_float64_ok(self):\n # Choose values outside the range where scipy float32 works.\n # Let scipy use float64. This means we\n # won't be exactly the same since we are in float32.\n self._test_grid_log(\n np.float32, # dtype\n np.float64, # scipy_dtype\n GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]),\n ErrorSpec(rtol=0.05, atol=0))\n\n @test_util.run_deprecated_v1\n def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self):\n with self.cached_session() as sess:\n # On the lower branch, log_cdf_laplace(x) = x, so we know this will be\n # fine, but test to -200 anyways.\n grid = _make_grid(\n np.float32, GridSpec(min=-200, max=80, shape=[20, 100]))\n grid = ops.convert_to_tensor(grid)\n\n actual = sm.log_cdf_laplace(grid)\n grad = gradients_impl.gradients(actual, grid)[0]\n\n actual_, grad_ = self.evaluate([actual, grad])\n\n # isfinite checks for NaN and Inf.\n self.assertAllTrue(np.isfinite(actual_))\n self.assertAllTrue(np.isfinite(grad_))\n self.assertFalse(np.any(actual_ == 0))\n self.assertFalse(np.any(grad_ == 0))\n\n @test_util.run_deprecated_v1\n def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self):\n with self.cached_session() as sess:\n # On the lower branch, log_cdf_laplace(x) = x, so we know this will be\n # fine, but test to -200 anyways.\n grid = _make_grid(\n np.float64, GridSpec(min=-200, max=700, shape=[20, 100]))\n grid = ops.convert_to_tensor(grid)\n\n actual = sm.log_cdf_laplace(grid)\n grad = gradients_impl.gradients(actual, grid)[0]\n\n actual_, grad_ = self.evaluate([actual, grad])\n\n # isfinite checks for NaN and Inf.\n self.assertAllTrue(np.isfinite(actual_))\n self.assertAllTrue(np.isfinite(grad_))\n self.assertFalse(np.any(actual_ == 0))\n self.assertFalse(np.any(grad_ == 0))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
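As a quick standalone companion to the `NdtrTest` grid checks above, the sketch below compares `special_math.ndtr` against `scipy.special.ndtr` on a small hand-picked grid. It assumes TF2 eager execution and an installed SciPy, and the grid bounds are illustrative rather than the test's `GridSpec` values.

import numpy as np
from scipy import special
from tensorflow.python.ops.distributions import special_math

grid = np.linspace(-5.0, 5.0, 11).astype(np.float64)
actual = special_math.ndtr(grid).numpy()  # TF implementation exercised by the tests above
expected = special.ndtr(grid)             # SciPy reference
np.testing.assert_allclose(actual, expected, rtol=1e-6)
print("ndtr matches SciPy on the sample grid")
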
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for device placement.\"\"\"\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import remote\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\n\n\nclass SoftDevicePlacementTest(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(SoftDevicePlacementTest, self).setUp()\n context._reset_context()\n context.ensure_initialized()\n config.set_soft_device_placement(enabled=True)\n context.context().log_device_placement = True\n\n @test_util.run_gpu_only\n def testDefaultPlacement(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n c = a + b\n with ops.device('CPU'):\n d = a + b\n self.assertIn('GPU', c.device)\n self.assertIn('CPU', d.device)\n\n @test_util.run_gpu_only\n def testUnsupportedDevice(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n s = constant_op.constant(list('hello world'))\n with ops.device('GPU:0'):\n c = a + b\n t = s[a]\n self.assertIn('GPU:0', c.device)\n self.assertIn('CPU', t.device)\n\n @test_util.run_gpu_only\n def testUnknownDevice(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n with ops.device('GPU:42'):\n c = a + b\n self.assertIn('GPU:0', c.device)\n\n def testNoGpu(self):\n if test_util.is_gpu_available():\n # CPU only test.\n return\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n c = a + b\n with ops.device('GPU'):\n d = a + b\n self.assertIn('CPU', c.device)\n self.assertIn('CPU', d.device)\n\n @test_util.run_gpu_only\n def testSoftPlacedGPU(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n with ops.device('GPU:110'):\n c = a + b\n self.assertIn('GPU:0', c.device)\n\n @test_util.run_gpu_only\n def testNestedDeviceScope(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n with ops.device('CPU:0'):\n with ops.device('GPU:42'):\n c = a + b\n # We don't support nested device placement right now.\n self.assertIn('GPU:0', c.device)\n\n @parameterized.named_parameters(('float', 1.0, None),\n ('int32', [1], dtypes.int32),\n ('string', ['a'], None))\n def testSoftPlacedCPUConstant(self, value, dtype):\n if test_util.is_gpu_available():\n self.skipTest('CPU only test')\n with ops.device('GPU:0'):\n a = constant_op.constant(value, dtype=dtype)\n self.assertIn('CPU:0', a.device)\n self.assertIn('CPU:0', 
a.backing_device)\n\n def testPlacedToDeviceInFunction(self):\n\n @def_function.function\n def f():\n a = random_ops.random_uniform([32, 32])\n return math_ops.matmul(a, a)\n\n gpus = config.list_physical_devices('GPU')\n if not gpus:\n self.assertIn('CPU:0', f().device)\n else:\n self.assertIn('GPU:0', f().device)\n\n @test_util.disable_tfrt('b/173726713: Support properly inserting device at '\n 'tf_to_corert lowering.')\n def testUnknownDeviceInFunction(self):\n\n @def_function.function\n def f():\n with ops.device('GPU:42'):\n # With placer, the unknown GPU:42 will be replaced with GPU:0.\n a = constant_op.constant(1) + constant_op.constant(2)\n return a + constant_op.constant(2)\n\n gpus = config.list_physical_devices('GPU')\n if not gpus:\n self.assertIn('CPU:0', f().device)\n else:\n self.assertIn('GPU:0', f().device)\n\n\nclass HardDevicePlacementTest(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(HardDevicePlacementTest, self).setUp()\n context._reset_context()\n config.set_soft_device_placement(enabled=False)\n context.context().log_device_placement = True\n cpus = context.context().list_physical_devices('CPU')\n # Set 2 virtual CPUs\n context.context().set_logical_device_configuration(cpus[0], [\n context.LogicalDeviceConfiguration(),\n context.LogicalDeviceConfiguration()\n ])\n self.assertEqual(config.get_soft_device_placement(), False)\n self.assertEqual(context.context().soft_device_placement, False)\n\n @test_util.run_gpu_only\n def testIdentityCanCopy(self):\n config.set_device_policy('explicit')\n with ops.device('CPU:0'):\n x = constant_op.constant(1.0)\n self.assertIn('CPU:0', x.device)\n self.assertIn('CPU:0', x.backing_device)\n with ops.device('GPU:0'):\n y = array_ops.identity(x)\n self.assertIn('GPU:0', y.device)\n self.assertIn('GPU:0', y.backing_device)\n\n @test_util.run_gpu_only\n def testSimpleConstantsExplicitGPU(self):\n config.set_device_policy('explicit')\n with ops.device('GPU:0'):\n self.assertAllClose(1., array_ops.ones([]))\n self.assertAllClose(0., array_ops.zeros([]))\n self.assertAllClose([1.], array_ops.fill([1], 1.))\n\n def testSimpleConstantsExplicitCPU(self):\n config.set_device_policy('explicit')\n with ops.device('CPU:1'):\n self.assertAllClose(1., array_ops.ones([]))\n self.assertAllClose(0., array_ops.zeros([]))\n self.assertAllClose([1.], array_ops.fill([1], 1.))\n self.assertAllClose(2., constant_op.constant(1.) 
* 2.)\n\n @parameterized.named_parameters(\n ('float_cpu0', 'CPU:0', 1.0, None),\n ('int32_cpu0', 'CPU:0', [1], dtypes.int32),\n ('string_cpu0', 'CPU:0', ['a'], None),\n ('float_cpu1', 'CPU:1', 1.0, None),\n ('int32_cpu1', 'CPU:1', [1], dtypes.int32),\n ('string_cpu1', 'CPU:1', ['a'], None),\n )\n def testHardPlacedCPUConstant(self, device, value, dtype):\n with ops.device(device):\n a = constant_op.constant(value, dtype=dtype)\n self.assertIn(device, a.device)\n\n @parameterized.named_parameters(\n ('float', 'GPU:0', 1.0, None),\n ('int32', 'GPU:0', [1], dtypes.int32),\n ('string', 'GPU:0', ['a'], None),\n )\n def testHardPlacedGPUConstant(self, device, value, dtype):\n if not test_util.is_gpu_available():\n self.skipTest('Test requires a GPU')\n with ops.device(device):\n a = constant_op.constant(value, dtype=dtype)\n self.assertIn(device, a.device)\n if a.dtype == dtypes.float32:\n self.assertIn(device, a.backing_device)\n\n\nclass ClusterPlacementTest(test.TestCase):\n\n def setUp(self):\n super(ClusterPlacementTest, self).setUp()\n context._reset_context()\n config.set_soft_device_placement(enabled=True)\n context.context().log_device_placement = True\n workers, _ = test_util.create_local_cluster(2, 0)\n remote.connect_to_remote_host([workers[0].target, workers[1].target])\n\n @test_util.disable_tfrt('remote host not supported yet.')\n def testNotFullySpecifiedTask(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n with ops.device('/job:worker'):\n c = a + b\n self.assertIn('/job:worker/replica:0/task:0', c.device)\n\n @test_util.disable_tfrt('remote host not supported yet.')\n def testRemoteUnknownDevice(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n # Right now we don't support soft device place on remote worker.\n with self.assertRaises(errors.InvalidArgumentError) as cm:\n with ops.device('/job:worker/replica:0/task:0/device:GPU:42'):\n c = a + b\n del c\n self.assertIn('unknown device', cm.exception.message)\n\n @test_util.disable_tfrt('remote host not supported yet.')\n def testUnknownDeviceInFunctionReturnUnknowDevice(self):\n\n @def_function.function\n def f():\n with ops.device('GPU:42'):\n return constant_op.constant(1) + constant_op.constant(2)\n\n gpus = config.list_physical_devices('GPU')\n if not gpus:\n self.assertIn('CPU:0', f().device)\n else:\n self.assertIn('GPU:0', f().device)\n\n @test_util.disable_tfrt('remote host not supported yet.')\n def testUnknownDeviceInFunction(self):\n\n @def_function.function\n def f():\n with ops.device('GPU:42'):\n a = constant_op.constant(1) + constant_op.constant(2)\n return a + constant_op.constant(2)\n\n gpus = config.list_physical_devices('GPU')\n if not gpus:\n self.assertIn('CPU:0', f().device)\n else:\n self.assertIn('GPU:0', f().device)\n\n\nif __name__ == '__main__':\n test.main()\n",
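A minimal sketch of the soft-placement behaviour that `SoftDevicePlacementTest.testNoGpu` above relies on: with soft device placement enabled, an op pinned to an unavailable `GPU` device silently falls back to the CPU instead of raising. The snippet assumes a machine without a visible GPU, mirroring that test's CPU-only path.

import tensorflow as tf

tf.config.set_soft_device_placement(True)  # same switch the tests flip in setUp()
with tf.device('GPU'):                     # no GPU available -> soft placement falls back
    d = tf.constant(1) + tf.constant(2)
print(d.device)                            # ends in .../device:CPU:0 on a CPU-only machine
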
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Container for origin source code information before AutoGraph compilation.\"\"\"\nimport collections\nimport difflib\nimport os\nimport tokenize\n\nimport gast\nimport six\n\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import ast_util\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import pretty_printer\nfrom tensorflow.python.util import tf_inspect\n\n\nclass LineLocation(\n collections.namedtuple('LineLocation', ('filename', 'lineno'))):\n \"\"\"Similar to Location, but without column information.\n\n Attributes:\n filename: Text\n lineno: int, 1-based\n \"\"\"\n pass\n\n\nclass Location(\n collections.namedtuple('Location', ('filename', 'lineno', 'col_offset'))):\n \"\"\"Encodes code location information.\n\n Attributes:\n filename: Text\n lineno: int, 1-based\n col_offset: int\n line_loc: LineLocation\n \"\"\"\n\n @property\n def line_loc(self):\n return LineLocation(self.filename, self.lineno)\n\n\nclass OriginInfo(\n collections.namedtuple(\n 'OriginInfo',\n ('loc', 'function_name', 'source_code_line', 'comment'))):\n \"\"\"Container for information about the source code before conversion.\n\n Attributes:\n loc: Location\n function_name: Optional[Text]\n source_code_line: Text\n comment: Optional[Text]\n \"\"\"\n\n def as_frame(self):\n \"\"\"Returns a 4-tuple consistent with the return of traceback.extract_tb.\"\"\"\n return (self.loc.filename, self.loc.lineno, self.function_name,\n self.source_code_line)\n\n def __repr__(self):\n if self.loc.filename:\n return '{}:{}:{}'.format(\n os.path.split(self.loc.filename)[1], self.loc.lineno,\n self.loc.col_offset)\n return '<no file>:{}:{}'.format(self.loc.lineno, self.loc.col_offset)\n\n\n# TODO(mdan): This source map should be a class - easier to refer to.\ndef create_source_map(nodes, code, filepath):\n \"\"\"Creates a source map between an annotated AST and the code it compiles to.\n\n Note: this function assumes nodes nodes, code and filepath correspond to the\n same code.\n\n Args:\n nodes: Iterable[ast.AST, ...], one or more AST modes.\n code: Text, the source code in which nodes are found.\n filepath: Text\n\n Returns:\n Dict[LineLocation, OriginInfo], mapping locations in code to locations\n indicated by origin annotations in node.\n \"\"\"\n reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False)\n for node in reparsed_nodes:\n resolve(node, code, filepath, node.lineno, node.col_offset)\n\n source_map = {}\n\n try:\n for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):\n # Note: generated code might not be mapped back to its origin.\n # TODO(mdan): Generated code should always be mapped to something.\n origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)\n final_info = anno.getanno(after, 
anno.Basic.ORIGIN, default=None)\n if origin_info is None or final_info is None:\n continue\n\n # Note: the keys are by line only, excluding the column offset.\n line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)\n\n existing_origin = source_map.get(line_loc)\n if existing_origin is not None:\n # Overlaps may exist because of child nodes, but almost never to\n # different line locations. Exception make decorated functions, where\n # both lines are mapped to the same line in the AST.\n\n # Line overlaps: keep bottom node.\n if existing_origin.loc.line_loc == origin_info.loc.line_loc:\n if existing_origin.loc.lineno >= origin_info.loc.lineno:\n continue\n\n # In case of column overlaps, keep the leftmost node.\n if existing_origin.loc.col_offset <= origin_info.loc.col_offset:\n continue\n\n source_map[line_loc] = origin_info\n\n except ValueError as err:\n new_msg = 'Inconsistent ASTs detected. This is a bug. Cause: \\n'\n new_msg += str(err)\n new_msg += 'Diff:\\n'\n\n for n, rn in zip(nodes, reparsed_nodes):\n nodes_str = pretty_printer.fmt(n, color=False, noanno=True)\n reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)\n diff = difflib.context_diff(\n nodes_str.split('\\n'),\n reparsed_nodes_str.split('\\n'),\n fromfile='Original nodes',\n tofile='Reparsed nodes',\n n=7)\n diff = '\\n'.join(diff)\n new_msg += diff + '\\n'\n raise ValueError(new_msg)\n\n return source_map\n\n\nclass _Function(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass OriginResolver(gast.NodeVisitor):\n \"\"\"Annotates an AST with additional source information like file name.\"\"\"\n\n def __init__(self, root_node, source_lines, comments_map,\n context_lineno, context_col_offset,\n filepath):\n self._source_lines = source_lines\n self._comments_map = comments_map\n\n if (hasattr(root_node, 'decorator_list') and root_node.decorator_list and\n hasattr(root_node.decorator_list[0], 'lineno')):\n # Typical case: functions. The line number of the first decorator\n # is more accurate than the line number of the function itself in\n # 3.8+. 
In earier versions they coincide.\n self._lineno_offset = context_lineno - root_node.decorator_list[0].lineno\n else:\n # Fall back to the line number of the root node.\n self._lineno_offset = context_lineno - root_node.lineno\n\n self._col_offset = context_col_offset - root_node.col_offset\n\n self._filepath = filepath\n\n self._function_stack = []\n\n def _absolute_lineno(self, lineno):\n return lineno + self._lineno_offset\n\n def _absolute_col_offset(self, col_offset):\n if col_offset is None:\n return 0\n return col_offset + self._col_offset\n\n def _attach_origin_info(self, node):\n lineno = getattr(node, 'lineno', None)\n col_offset = getattr(node, 'col_offset', None)\n\n if lineno is None:\n return\n\n if self._function_stack:\n function_name = self._function_stack[-1].name\n else:\n function_name = None\n\n source_code_line = self._source_lines[lineno - 1]\n comment = self._comments_map.get(lineno)\n\n loc = Location(self._filepath, self._absolute_lineno(lineno),\n self._absolute_col_offset(col_offset))\n origin = OriginInfo(loc, function_name, source_code_line, comment)\n anno.setanno(node, 'lineno', lineno)\n anno.setanno(node, anno.Basic.ORIGIN, origin)\n\n def visit(self, node):\n entered_function = False\n if isinstance(node, gast.FunctionDef):\n entered_function = True\n self._function_stack.append(_Function(node.name))\n\n self._attach_origin_info(node)\n self.generic_visit(node)\n\n if entered_function:\n self._function_stack.pop()\n\n\ndef resolve(node, source, context_filepath, context_lineno, context_col_offset):\n \"\"\"Adds origin information to an AST, based on the source it was loaded from.\n\n This allows us to map the original source code line numbers to generated\n source code.\n\n Note: the AST may be a part of a larger context (e.g. a function is part of\n a module that may contain other things). However, this function does not\n assume the source argument contains the entire context, nor that it contains\n only code corresponding to node itself. However, it assumes that node was\n parsed from the given source code.\n For this reason, two extra arguments are required, and they indicate the\n location of the node in the original context.\n\n Args:\n node: gast.AST, the AST to annotate.\n source: Text, the source code representing node.\n context_filepath: Text\n context_lineno: int\n context_col_offset: int\n \"\"\"\n # TODO(mdan): Pull this to a separate utility.\n code_reader = six.StringIO(source)\n comments_map = {}\n try:\n for token in tokenize.generate_tokens(code_reader.readline):\n tok_type, tok_string, loc, _, _ = token\n srow, _ = loc\n if tok_type == tokenize.COMMENT:\n comments_map[srow] = tok_string.strip()[1:].strip()\n except tokenize.TokenError:\n if isinstance(node, gast.Lambda):\n # Source code resolution in older Python versions is brittle for\n # lambda functions, and may contain garbage.\n pass\n else:\n raise\n\n source_lines = source.split('\\n')\n visitor = OriginResolver(node, source_lines, comments_map,\n context_lineno, context_col_offset,\n context_filepath)\n visitor.visit(node)\n\n\ndef resolve_entity(node, source, entity):\n \"\"\"Like resolve, but extracts the context information from an entity.\"\"\"\n lines, lineno = tf_inspect.getsourcelines(entity)\n filepath = tf_inspect.getsourcefile(entity)\n\n # Poor man's attempt at guessing the column offset: count the leading\n # whitespace. 
This might not work well with tabs.\n definition_line = lines[0]\n col_offset = len(definition_line) - len(definition_line.lstrip())\n\n resolve(node, source, filepath, lineno, col_offset)\n\n\ndef copy_origin(from_node, to_node):\n \"\"\"Copies the origin info from a node to another, recursively.\"\"\"\n origin = anno.Basic.ORIGIN.of(from_node, default=None)\n if origin is None:\n return\n if not isinstance(to_node, (list, tuple)):\n to_node = (to_node,)\n for node in to_node:\n for n in gast.walk(node):\n anno.setanno(n, anno.Basic.ORIGIN, origin)\n"
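The `resolve()` helper above builds its comment map with the standard-library tokenizer before walking the AST. The fragment below is a self-contained sketch of just that step, run on a made-up three-line source string so the `{lineno: comment}` shape it produces is easy to see.

import io
import tokenize

source = "x = 1  # first\ny = 2\n# trailing comment\n"
comments_map = {}
for tok_type, tok_string, (srow, _), _, _ in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok_type == tokenize.COMMENT:
        # Same normalisation as resolve(): drop the leading '#' and surrounding whitespace.
        comments_map[srow] = tok_string.strip()[1:].strip()
print(comments_map)  # {1: 'first', 3: 'trailing comment'}
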
] | [
[
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.sparse_tensor.SparseTensor.from_value",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.framework.sparse_tensor.SparseTensorSpec",
"tensorflow.python.ops.sparse_ops.sparse_tensor_dense_matmul",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"numpy.ones",
"tensorflow.python.ops.sparse_ops.sparse_transpose"
],
[
"tensorflow.python.eager.def_function.function",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.executing_eagerly"
],
[
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.util.tf_export.tf_export"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.embedding_ops.embedding_lookup_v2",
"tensorflow.python.tpu.tpu.outside_compilation",
"tensorflow.python.training.tracking.base.ShardInfo",
"tensorflow.python.tpu.ops.tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.embedding_ops.embedding_lookup_ragged",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.embedding_ops.embedding_lookup_sparse_v2",
"tensorflow.python.distribute.distribute_utils.select_replica",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.tpu.tpu.initialize_system_for_tpu_embedding",
"tensorflow.python.util.nest.flatten_with_joined_string_paths",
"tensorflow.core.protobuf.tpu.tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.embedding_ops.safe_embedding_lookup_sparse_v2",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.tpu.ops.tpu_ops.is_tpu_embedding_initialized",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.saved_model.save_context.in_save_context",
"tensorflow.python.ops.sparse_ops.sparse_slice",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.tpu.tpu_embedding_v2_utils.log_tpu_embedding_configuration",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.util.tf_inspect.getargspec",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.util.nest.is_nested_or_composite",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.framework.op_def_library.apply_op",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto",
"tensorflow.python.util.compat.as_str",
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.distribute.multi_process_lib.Process",
"tensorflow.python.distribute.multi_process_lib.initialized",
"tensorflow.python.distribute.multi_process_lib.test_main",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.tf2.enabled",
"tensorflow.python.framework.test_util.is_tsan_enabled",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly"
],
[
"tensorflow.python.autograph.pyct.templates.replace",
"tensorflow.python.autograph.pyct.templates.replace_as_expression"
],
[
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.compat.v1.app.run"
],
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.compat.v1.nn.embedding_lookup",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function"
],
[
"tensorflow.python.debug.lib.grpc_debug_test_server.start_server_on_separate_thread",
"tensorflow.python.framework.test_util.TensorFlowTestCase.tearDownClass",
"tensorflow.python.debug.lib.source_remote.send_eager_tracebacks",
"tensorflow.python.debug.lib.source_remote.send_graph_tracebacks",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.mock.patch.object",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.debug.lib.source_utils.guess_is_tensorflow_py_library",
"tensorflow.python.framework.test_util.TensorFlowTestCase.setUpClass",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.util.tf_inspect.stack"
],
[
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_image_ops.crop_and_resize_grad_boxes",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.div_no_nan",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.compat.v1.compat.v1.nn.rnn_cell.GRUCell",
"tensorflow.compat.v1.compat.v1.global_variables_initializer",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.nn.static_bidirectional_rnn",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function"
],
[
"tensorflow.python.platform.test.main",
"tensorflow.python.util.lock_util.GroupLock"
],
[
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.string_ops.string_upper"
],
[
"tensorflow.python.ops.math_ops.log",
"numpy.sqrt",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.math_ops.erfc",
"numpy.exp",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.less",
"numpy.arange",
"tensorflow.python.ops.math_ops.log1p",
"numpy.log",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.python.ops.math_ops.erf",
"numpy.expm1",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.lite.python.schema_py_generated.Model.GetRootAsModel",
"tensorflow.lite.tools.visualize.BuiltinCodeToName",
"tensorflow.lite.python.schema_util.get_builtin_code_from_operator_code"
],
[
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.nn.relu",
"numpy.ones",
"tensorflow.python.platform.test.main",
"numpy.random.randn",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.ops.nn.conv2d",
"tensorflow.python.compiler.tensorrt.test.tf_trt_integration_test_base.IsQuantizationMode",
"tensorflow.python.ops.array_ops.identity"
],
[
"tensorflow.python.keras.backend.zeros",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.keras.backend.variable",
"tensorflow.python.keras.backend.dtype",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.training.training_util.get_global_step",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.keras.backend.gradients",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.clip_ops.clip_by_norm",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.backend.cast_to_floatx",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.backend.sqrt",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.backend.epsilon"
],
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.data.ops.options.Options",
"tensorflow.python.data.experimental.ops.testing.assert_next",
"tensorflow.python.platform.test.main"
],
[
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.ops.uid",
"tensorflow.python.util.keras_deps.get_call_context_function",
"tensorflow.python.ops.gradients_util.PossibleTapeGradientTypes",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.ops.control_flow_util.GraphOrParentsInXlaContext",
"tensorflow.python.framework.function_def_to_graph.function_def_to_graph",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.eager.function._EagerDefinedFunction"
],
[
"numpy.hstack",
"tensorflow.python.ops.data_flow_ops.PriorityQueue",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.platform.test.main",
"numpy.random.rand",
"numpy.array",
"tensorflow.python.framework.constant_op.constant",
"numpy.random.randint"
],
[
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.add",
"tensorflow.compat.v1.saved_model.signature_def_utils.build_signature_def",
"tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model.common_v1.do_test",
"tensorflow.compat.v1.placeholder",
"tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model.common_v1.set_tf_options",
"tensorflow.compat.v1.compat.v1.saved_model.utils.build_tensor_info",
"tensorflow.compat.v1.constant"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.distributions.special_math.ndtri",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.random_ops.random_shuffle",
"numpy.array_equal",
"numpy.median",
"tensorflow.python.ops.random_ops.parameterized_truncated_normal",
"tensorflow.python.framework.dtypes.as_dtype",
"numpy.mean",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.random_ops.random_normal",
"numpy.var",
"tensorflow.python.ops.random_ops.random_uniform"
],
[
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.string_ops.regex_replace",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.data.ops.readers.TextLineDataset",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.data.util.structure.type_spec_from_value",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.data.ops.optional_ops.Optional.from_value",
"tensorflow.python.data.ops.dataset_ops._RestructuredDataset",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.lib.io.tf_record.TFRecordWriter",
"tensorflow.python.data.kernel_tests.test_base.graph_only_combinations",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.framework.ops.device",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_types",
"tensorflow.python.data.util.structure.get_flat_tensor_types",
"tensorflow.python.eager.context.execution_mode",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.kernel_tests.test_base.eager_only_combinations",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.sparse_tensor.SparseTensorSpec",
"tensorflow.python.data.ops.readers.TFRecordDataset",
"tensorflow.python.ops.lookup_ops.tables_initializer",
"tensorflow.python.data.util.structure.get_flat_tensor_shapes",
"tensorflow.python.data.ops.dataset_ops.make_initializable_iterator",
"tensorflow.python.data.ops.dataset_ops.toggle_debug_mode",
"numpy.int64",
"tensorflow.python.data.experimental.ops.testing.assert_next",
"tensorflow.python.ops.lookup_ops.StaticHashTable",
"tensorflow.python.data.util.nest.flatten",
"numpy.array",
"tensorflow.python.data.ops.dataset_ops._VariantDataset",
"tensorflow.python.framework.combinations.combine",
"tensorflow.python.data.ops.dataset_ops.range",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.data.ops.dataset_ops.get_structure",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_generator",
"tensorflow.python.data.ops.dataset_ops.Dataset.zip",
"tensorflow.python.platform.test.mock.patch.object",
"tensorflow.python.data.ops.readers.FixedLengthRecordDataset",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.compat.v1.maximum",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function"
],
[
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"numpy.random.random",
"tensorflow.python.keras.layers.Embedding",
"tensorflow.python.keras.regularizers.l1",
"tensorflow.python.keras.layers.SimpleRNN",
"tensorflow.python.keras.combinations.keras_mode_combinations",
"tensorflow.python.keras.layers.Masking",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.keras.testing_utils.layer_test",
"numpy.ones",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.layers.SimpleRNNCell",
"numpy.testing.assert_allclose",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.constraints.max_norm"
],
[
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.array_ops.ones",
"numpy.zeros",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.training.server_lib.Server.create_local_server",
"tensorflow.python.client.session.Session",
"tensorflow.python.framework.ops.Graph",
"numpy.ones",
"tensorflow.core.protobuf.cluster_pb2.ClusterDef",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.amax",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.framework.config.is_op_determinism_enabled",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.squeeze",
"numpy.zeros_like",
"numpy.random.randn",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.test_util.disable_xla",
"numpy.reshape",
"numpy.arange",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"numpy.zeros",
"tensorflow.python.framework.test_util.run_gpu_only",
"numpy.log",
"tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits_v2",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"numpy.sum",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.deprecation.deprecated"
],
[
"tensorflow.python.framework.tensor_util.is_tf_type"
],
[
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.framework.sparse_tensor.SparseTensorSpec",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.framework.tensor_shape.enable_v2_tensorshape"
],
[
"tensorflow.python.keras.optimizer_v2.ftrl.Ftrl",
"tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop",
"tensorflow.python.keras.optimizer_v2.gradient_descent.SGD",
"tensorflow.python.keras.optimizer_v2.adagrad.Adagrad",
"tensorflow.python.keras.optimizer_v2.nadam.Nadam",
"tensorflow.python.training.adagrad.AdagradOptimizer",
"tensorflow.python.keras.optimizer_v2.adamax.Adamax",
"tensorflow.python.keras.optimizer_v2.adadelta.Adadelta",
"tensorflow.python.training.adam.AdamOptimizer",
"tensorflow.python.training.rmsprop.RMSPropOptimizer",
"tensorflow.python.training.ftrl.FtrlOptimizer",
"tensorflow.python.keras.optimizer_v2.adam.Adam",
"tensorflow.python.framework.test_combinations.combine",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer"
],
[
"tensorflow.python.eager.profiler_client.start_tracing",
"tensorflow.python.eager.profiler_client.monitor",
"tensorflow.python.eager.test.main"
],
[
"tensorflow.python.ops.gen_array_ops.reshape",
"tensorflow.python.platform.test.main",
"numpy.random.randn"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.ragged.ragged_concat_ops.stack",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensorSpec",
"tensorflow.python.ops.image_ops.resize_images_v2",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.sqrt",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.training.adadelta.AdadeltaOptimizer",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.debug.lib.debug_events_reader.GraphExecutionTrace",
"tensorflow.core.protobuf.debug_event_pb2.SourceFile",
"tensorflow.python.debug.lib.debug_events_reader.Execution",
"tensorflow.core.protobuf.debug_event_pb2.DebuggedGraph",
"tensorflow.python.debug.lib.debug_events_reader.DebuggedDevice",
"tensorflow.core.protobuf.debug_event_pb2.GraphOpCreation",
"tensorflow.python.debug.lib.debug_events_reader.DebugEventsReader",
"tensorflow.core.protobuf.debug_event_pb2.StackFrameWithId",
"tensorflow.core.protobuf.debug_event_pb2.Execution",
"tensorflow.core.protobuf.debug_event_pb2.GraphExecutionTrace",
"tensorflow.python.debug.lib.debug_events_reader.GraphExecutionTraceDigest",
"tensorflow.python.debug.lib.debug_events_reader.ExecutionDigest",
"tensorflow.python.debug.lib.debug_events_reader.DebugDataReader",
"tensorflow.python.debug.lib.debug_events_reader.DebuggedGraph",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.debug.lib.debug_events_writer.DebugEventsWriter",
"tensorflow.python.debug.lib.debug_events_reader.GraphOpCreationDigest"
],
[
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.util.tf_export.tf_export"
],
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.sparse_to_dense",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.lite.testing.zip_test_utils.create_scalar_data",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function",
"numpy.random.randint"
],
[
"numpy.diag",
"numpy.linspace",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.zeros_like",
"numpy.any",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.exp",
"tensorflow.python.ops.distributions.special_math.ndtri",
"numpy.ones_like",
"numpy.reshape",
"tensorflow.python.ops.distributions.special_math.erfinv",
"numpy.finfo",
"numpy.diff",
"tensorflow.python.platform.test.main",
"tensorflow.python.eager.backprop.val_and_grad_function",
"numpy.isnan",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.random.RandomState",
"numpy.array",
"numpy.isfinite",
"numpy.sort",
"numpy.testing.assert_array_less",
"numpy.prod",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.config.list_physical_devices",
"tensorflow.python.framework.config.set_soft_device_placement",
"tensorflow.python.eager.context._reset_context",
"tensorflow.python.framework.test_util.create_local_cluster",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.ensure_initialized",
"tensorflow.python.framework.test_util.is_gpu_available",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.eager.test.main",
"tensorflow.python.framework.config.get_soft_device_placement",
"tensorflow.python.framework.config.set_device_policy",
"tensorflow.python.eager.context.LogicalDeviceConfiguration",
"tensorflow.python.framework.test_util.disable_tfrt",
"tensorflow.python.eager.remote.connect_to_remote_host",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.autograph.pyct.parser.parse",
"tensorflow.python.util.tf_inspect.getsourcefile",
"tensorflow.python.autograph.pyct.anno.setanno",
"tensorflow.python.util.tf_inspect.getsourcelines",
"tensorflow.python.autograph.pyct.ast_util.parallel_walk",
"tensorflow.python.autograph.pyct.pretty_printer.fmt",
"tensorflow.python.autograph.pyct.anno.getanno",
"tensorflow.python.autograph.pyct.anno.Basic.ORIGIN.of"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.9",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.9",
"2.6",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.9",
"2.5",
"2.6",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.10",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.9",
"2.8",
"2.7",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"1.4",
"2.2",
"1.13",
"2.3",
"2.4",
"2.6",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.9",
"2.8",
"2.7",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.9",
"2.5",
"2.6",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChenjunZou/katib | [
"6a07daae796c29d24f63375cce71b75c4eee8d9c",
"6a07daae796c29d24f63375cce71b75c4eee8d9c"
] | [
"examples/v1alpha3/nas/darts-cnn-cifar10/model.py",
"pkg/suggestion/v1alpha3/bayesianoptimization/global_optimizer.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom operations import FactorizedReduce, StdConv, MixedOp\n\n\nclass Cell(nn.Module):\n \"\"\" Cell for search\n Each edge is mixed and continuous relaxed.\n \"\"\"\n\n def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):\n \"\"\"\n Args:\n num_nodes: Number of intermediate cell nodes\n c_prev_prev: channels_out[k-2]\n c_prev : Channels_out[k-1]\n c_cur : Channels_in[k] (current)\n reduction_prev: flag for whether the previous cell is reduction cell or not\n reduction_cur: flag for whether the current cell is reduction cell or not\n \"\"\"\n\n super(Cell, self).__init__()\n self.reduction_cur = reduction_cur\n self.num_nodes = num_nodes\n\n # If previous cell is reduction cell, current input size does not match with\n # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing\n if reduction_prev:\n self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)\n else:\n self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)\n self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)\n\n # Generate dag from mixed operations\n self.dag_ops = nn.ModuleList()\n\n for i in range(self.num_nodes):\n self.dag_ops.append(nn.ModuleList())\n # Include 2 input nodes\n for j in range(2+i):\n # Reduction with stride = 2 must be only for the input node\n stride = 2 if reduction_cur and j < 2 else 1\n op = MixedOp(c_cur, stride, search_space)\n self.dag_ops[i].append(op)\n\n def forward(self, s0, s1, w_dag):\n s0 = self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n for edges, w_list in zip(self.dag_ops, w_dag):\n state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate((zip(states, w_list))))\n states.append(state_cur)\n\n state_out = torch.cat(states[2:], dim=1)\n return state_out\n\n\nclass NetworkCNN(nn.Module):\n\n def __init__(self, init_channels, input_channels, num_classes,\n num_layers, criterion, search_space, num_nodes, stem_multiplier):\n super(NetworkCNN, self).__init__()\n\n self.init_channels = init_channels\n self.num_classes = num_classes\n self.num_layers = num_layers\n self.criterion = criterion\n\n # TODO: Algorithm settings?\n self.num_nodes = num_nodes\n self.stem_multiplier = stem_multiplier\n\n c_cur = self.stem_multiplier*self.init_channels\n\n self.stem = nn.Sequential(\n nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),\n nn.BatchNorm2d(c_cur)\n )\n\n # In first Cell stem is used for s0 and s1\n # c_prev_prev and c_prev - output channels size\n # c_cur - init channels size\n c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels\n\n self.cells = nn.ModuleList()\n\n reduction_prev = False\n for i in range(self.num_layers):\n # For Network with 1 layer: Only Normal Cell\n if self.num_layers == 1:\n reduction_cur = False\n else:\n # For Network with two layers: First layer - Normal, Second - Reduction\n # For Other Networks: [1/3, 2/3] Layers - Reduction cell with double channels\n # Others - Normal cell\n if ((self.num_layers == 2 and i == 1) or\n (self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):\n c_cur *= 2\n reduction_cur = True\n else:\n reduction_cur = False\n\n cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)\n reduction_prev = reduction_cur\n self.cells.append(cell)\n\n c_cur_out = c_cur * self.num_nodes\n c_prev_prev, c_prev = c_prev, c_cur_out\n\n self.global_pooling = 
nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(c_prev, self.num_classes)\n\n # Initialize alphas parameters\n num_ops = len(search_space.primitives)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n for i in range(self.num_nodes):\n self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))\n if self.num_layers > 1:\n self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))\n\n # Setup alphas list\n self.alphas = []\n for name, parameter in self.named_parameters():\n if \"alpha\" in name:\n self.alphas.append((name, parameter))\n\n def forward(self, x):\n\n weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]\n weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]\n\n s0 = s1 = self.stem(x)\n\n for cell in self.cells:\n weights = weights_reduce if cell.reduction_cur else weights_normal\n s0, s1 = s1, cell(s0, s1, weights)\n\n out = self.global_pooling(s1)\n\n # Make out flatten\n out = out.view(out.size(0), -1)\n\n logits = self.classifier(out)\n return logits\n\n def print_alphas(self):\n\n print(\"\\n>>> Alphas Normal <<<\")\n for alpha in self.alpha_normal:\n print(F.softmax(alpha, dim=-1))\n\n if self.num_layers > 1:\n print(\"\\n>>> Alpha Reduce <<<\")\n for alpha in self.alpha_reduce:\n print(F.softmax(alpha, dim=-1))\n print(\"\\n\")\n\n def getWeights(self):\n return self.parameters()\n\n def getAlphas(self):\n for _, parameter in self.alphas:\n yield parameter\n\n def loss(self, x, y):\n logits = self.forward(x)\n return self.criterion(logits, y)\n\n def genotype(self, search_space):\n gene_normal = search_space.parse(self.alpha_normal, k=2)\n gene_reduce = search_space.parse(self.alpha_reduce, k=2)\n # concat all intermediate nodes\n concat = range(2, 2 + self.num_nodes)\n\n return search_space.genotype(normal=gene_normal, normal_concat=concat,\n reduce=gene_reduce, reduce_concat=concat)\n",
"\"\"\" module for the global optimizer\nDIRECT algorithm is used in this case\n\"\"\"\nimport copy\n\nimport numpy as np\n\nfrom pkg.suggestion.v1alpha3.bayesianoptimization.acquisition_func import AcquisitionFunc\nfrom pkg.suggestion.v1alpha3.bayesianoptimization.model.gp import GaussianProcessModel\nfrom pkg.suggestion.v1alpha3.bayesianoptimization.model.rf import RandomForestModel\nfrom pkg.suggestion.v1alpha3.bayesianoptimization.utils import get_logger\n\n\nclass RectPack:\n \"\"\" class for the rectangular\n including border, center and acquisition function value\n \"\"\"\n\n def __init__(self, l, u, division_num, dim, scaler, aq_func):\n self.l = l\n self.u = u\n self.center = (l + u) / 2\n j = np.mod(division_num, dim)\n k = (division_num - j) / dim\n self.d = np.sqrt(j * np.power(3, float(-2 * (k + 1))) +\n (dim - j) * np.power(3, float(-2 * k))) / 2\n self.division_num = division_num\n self.fc, _, _ = aq_func.compute(scaler.inverse_transform(self.center))\n self.fc = -self.fc\n\n\nclass RectBucket:\n \"\"\" class for the rectangular bucket\n rectangular with the same size are put in the same bucket\n the rectangular is sorted by the acquisition function value\n \"\"\"\n\n def __init__(self, diff, pack):\n self.diff = diff\n self.array = [pack]\n\n def insert(self, new_pack):\n \"\"\" insert a new rectangular to a bucket \"\"\"\n for i in range(len(self.array)):\n if new_pack.fc < self.array[i].fc:\n self.array.insert(i, new_pack)\n return\n self.array.append(new_pack)\n\n def delete(self):\n \"\"\" delete the first rectangular\"\"\"\n del self.array[0]\n\n def diff_exist(self, diff):\n \"\"\" detect the size difference \"\"\"\n return abs(self.diff - diff) < 0.00001\n\n\nclass OptimalPoint:\n \"\"\" helper class to find potential optimal points\"\"\"\n\n def __init__(self, point, prev, slope):\n self.point = point\n self.prev = prev\n self.slope = slope\n\n\nclass DimPack:\n def __init__(self, dim, fc):\n self.dim = dim\n self.fc = fc\n\n\nclass GlobalOptimizer:\n \"\"\" class for the global optimizer \"\"\"\n\n def __init__(self, N, l, u, scaler, X_train, y_train, current_optimal,\n experiment_name, mode, trade_off, length_scale,\n noise, nu, kernel_type, n_estimators, max_features, model_type, logger=None):\n self.logger = logger if (logger is not None) else get_logger()\n self.N = N\n self.l = l\n self.u = u\n self.scaler = scaler\n self.buckets = []\n self.dim = None\n self._experiment_name = experiment_name\n if model_type == \"gp\":\n model = GaussianProcessModel(\n length_scale=length_scale,\n noise=noise,\n nu=nu,\n kernel_type=kernel_type,\n )\n else:\n model = RandomForestModel(\n n_estimators=n_estimators,\n max_features=max_features,\n )\n self.logger.debug(\"before model fit\", extra={\n \"Experiment\": self._experiment_name})\n model.fit(X_train, y_train)\n self.logger.debug(\"after model fit\", extra={\n \"Experiment\": self._experiment_name})\n self.aq_func = AcquisitionFunc(\n model=model,\n current_optimal=current_optimal,\n mode=mode,\n trade_off=trade_off,\n )\n\n def potential_opt(self, f_min):\n \"\"\" find the potential optimal rectangular \"\"\"\n b = []\n for i in range(len(self.buckets)):\n b.append(self.buckets[i].array[0])\n b.sort(key=lambda x: x.d)\n index = 0\n min_fc = b[0].fc\n for i in range(len(b)):\n if b[i].fc < min_fc:\n min_fc = b[i].fc\n index = i\n\n opt_list = [OptimalPoint(b[index], 0, 0)]\n for i in range(index + 1, len(b)):\n prev = len(opt_list) - 1\n diff1 = b[i].d\n diff2 = opt_list[prev].point.d\n current_slope = (\n b[i].fc - 
opt_list[prev].point.fc) / (diff1 - diff2)\n prev_slope = opt_list[prev].slope\n\n while prev >= 0 and current_slope < prev_slope:\n temp = opt_list[prev].prev\n opt_list[prev].prev = -1\n prev = temp\n prev_slope = opt_list[prev].slope\n diff1 = b[i].d\n diff2 = opt_list[prev].point.d\n current_slope = (\n b[i].fc - opt_list[prev].point.fc) / (diff1 - diff2)\n\n opt_list.append(OptimalPoint(b[i], prev, current_slope))\n\n opt_list2 = []\n for i in range(len(opt_list)):\n if opt_list[i].prev != -1:\n opt_list2.append(opt_list[i])\n\n for i in range(len(opt_list2) - 1):\n c1 = opt_list2[i].point.d\n c2 = opt_list2[i + 1].point.d\n fc1 = opt_list2[i].point.fc\n fc2 = opt_list2[i + 1].point.fc\n if fc1 - c1 * (fc1 - fc2) / (c1 - c2) > (1 - 0.001) * f_min:\n # if abs(fc1-fc2)<0.0001:\n opt_list2[i] = None\n while None in opt_list2:\n index = opt_list2.index(None)\n del opt_list2[index]\n # for opt in opt_list2:\n # print(opt.point.fc)\n return opt_list2\n\n def direct(self, request_num):\n \"\"\" main algorithm \"\"\"\n self.dim = self.l.shape[1]\n division_num = 0\n\n # create the first rectangle and put it in the first bucket\n first_rect = RectPack(self.l, self.u, division_num, self.dim,\n self.scaler, self.aq_func)\n self.buckets.append(RectBucket(first_rect.d, first_rect))\n\n ei_min = []\n f_min = first_rect.fc\n x_next = first_rect.center\n ei_min.append(f_min)\n\n for _ in range(self.N):\n opt_set = self.potential_opt(f_min)\n\n # for bucket in self.buckets:\n # for i in range(len(bucket.array)):\n # print(bucket.array[i].fc)\n # plt.plot(bucket.diff, bucket.array[i].fc, 'b.')\n #\n # for opt in opt_set:\n # plt.plot(opt.point.d, opt.point.fc, 'r.')\n # plt.show()\n\n for opt in opt_set:\n f_min, x_next = self.divide_rect(\n opt.point,\n f_min,\n x_next,\n self.aq_func,\n self.scaler\n )\n for bucket in self.buckets:\n if bucket.diff_exist(opt.point.d):\n bucket.delete()\n if not bucket.array:\n index = self.buckets.index(bucket)\n del self.buckets[index]\n ei_min.append(f_min)\n x_next_candidate = self.sample_buckets(request_num)\n return f_min, x_next_candidate\n\n def sample_buckets(self, request_num):\n self.logger.debug(\"In lne self.buckets: %r\", len(self.buckets))\n bucket_index = []\n fc_sum = 0.0\n x_next_candidate = []\n for bucket in self.buckets:\n for a in bucket.array:\n self.logger.debug(\"fc: %r, %r\", a.fc, a.center)\n fc_sum -= a.fc\n bucket_index.append([-a.fc, a.center])\n bucket_index = sorted(bucket_index, key=lambda x: x[0])\n for _ in range(request_num):\n sample = np.random.rand()\n stick = 0.0\n for b in bucket_index:\n stick += b[0]/fc_sum\n if stick > sample:\n x_next_candidate.append(b[1])\n break\n return x_next_candidate\n\n def divide_rect(self, opt_rect, f_min, x_next, aq_func, scaler):\n \"\"\" divide the rectangular into smaller ones \"\"\"\n rect = copy.deepcopy(opt_rect)\n division_num = rect.division_num\n j = np.mod(division_num, self.dim)\n k = (division_num - j) / self.dim\n max_side_len = np.power(3, float(-k))\n delta = max_side_len / 3\n dim_set = []\n for i in range(self.dim):\n if abs(max_side_len - (rect.u[0, i] - rect.l[0, i])) < 0.0000001:\n dim_set.append(i)\n\n dim_list = []\n for i in dim_set:\n e = np.zeros((1, self.dim))\n e[0, i] = 1\n function_value = min(\n aq_func.compute(scaler.inverse_transform(\n rect.center + delta * e)),\n aq_func.compute(scaler.inverse_transform(\n rect.center - delta * e))\n )\n dim_list.append(DimPack(i, function_value))\n dim_list.sort(key=lambda x: x.fc)\n\n for i in range(len(dim_list)):\n 
division_num = division_num + 1\n temp = np.zeros((1, self.dim))\n temp[0, dim_list[i].dim] = delta\n left_rect = RectPack(\n rect.l,\n rect.u - 2 * temp,\n division_num,\n self.dim,\n self.scaler,\n aq_func\n )\n middle_rect = RectPack(\n rect.l + temp,\n rect.u - temp,\n division_num,\n self.dim,\n self.scaler,\n aq_func\n )\n right_rect = RectPack(\n rect.l + 2 * temp,\n rect.u,\n division_num,\n self.dim,\n self.scaler,\n aq_func\n )\n if left_rect.fc < f_min:\n f_min = left_rect.fc\n x_next = left_rect.center\n if right_rect.fc < f_min:\n f_min = right_rect.fc\n x_next = right_rect.center\n\n insert = 0\n for bucket in self.buckets:\n if bucket.diff_exist(left_rect.d):\n bucket.insert(left_rect)\n bucket.insert(right_rect)\n if i == len(dim_list) - 1:\n bucket.insert(middle_rect)\n insert = 1\n break\n if insert == 0:\n new_bucket = RectBucket(left_rect.d, left_rect)\n new_bucket.insert(right_rect)\n if i == len(dim_list) - 1:\n new_bucket.insert(middle_rect)\n self.buckets.append(new_bucket)\n rect = middle_rect\n return f_min, x_next\n"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.randn",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ParameterList",
"torch.nn.BatchNorm2d"
],
[
"numpy.mod",
"numpy.zeros",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
enourbakhsh/skylink | [
"83270f3351ff637abeb0af25786412d4dd09134a"
] | [
"tests/test_networkx.py"
] | [
"import os\nimport skylink\nfrom skylink import testing\nimport numpy as np\nfrom astropy.table import Table\nimport FoFCatalogMatching\nimport pytest # noqa\n\n# TODO: test the matching with more than two catalogs\n# TODO: test N-way matching with `linking_lengths` as a dictionary\n# TODO: test if we catch illegal footprints that are not gnomonic-projectable\n# TODO: test MPI implementation\n# TODO: test a wide range of linking lengths\n\ngraph_lib = \"networkx\"\nncpus_max = os.cpu_count() # maximum number of cpus\nlinking_lengths_default = 0.75 # arcsec\nn = 2_000 # number of objects for the mock-up data\n\n\ndef make_mockup():\n def tnormal(mu=None, sigma=None, n=None, lower=-0.5, upper=0.5):\n return np.clip(np.random.normal(np.repeat(mu, n), sigma), lower, upper)\n\n np.random.seed(2)\n ra = np.random.uniform(4, 6, n)\n dec = np.random.uniform(-1, 1, n)\n\n cat_a = Table({\"ra\": ra, \"dec\": dec})\n cat_b = Table(\n {\n \"ra\": np.append(ra + tnormal(0, 0.0004, n), ra + tnormal(0, 0.0001, n)),\n \"dec\": np.append(dec + tnormal(0, 0.0002, n), dec + tnormal(0, 0.0002, n)),\n }\n )\n\n return cat_a, cat_b\n\n\ndef run_FoFCatalogMatching(cat_a, cat_b, return_pandas=False):\n \"\"\" Genetare an output using `FoFCatalogMatching` as our benchmark \"\"\"\n res_fcm = FoFCatalogMatching.match(\n {\"a\": cat_a, \"b\": cat_b}, linking_lengths_default\n )\n if return_pandas:\n return res_fcm.to_pandas()\n else:\n return res_fcm\n\n\ndef test_graph_lib():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl = skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n testing.assert_equal(res_fcm, res_sl)\n\n\ndef run_with_ncpus(cat_a, cat_b, ncpus):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n nprocs=ncpus,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_nprocs():\n # TODO: test equality with more than 2 catalogs\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_ncpus(cat_a, cat_b, 1)\n res_sl2 = run_with_ncpus(cat_a, cat_b, 2)\n res_sl3 = run_with_ncpus(cat_a, cat_b, ncpus_max)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n testing.assert_equal(res_sl2, res_sl3)\n\n\ndef run_with_overlap(cat_a, cat_b, overlap):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n overlap=overlap,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_overlap():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_overlap(cat_a, cat_b, 1.0)\n res_sl2 = run_with_overlap(cat_a, cat_b, 1.1)\n res_sl3 = run_with_overlap(cat_a, cat_b, 1.2)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n testing.assert_equal(res_sl2, res_sl3)\n\n\ndef run_with_linked_mask(cat_a, cat_b, use_linked_mask):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n use_linked_mask=use_linked_mask,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n )\n\n\[email protected](\n reason=\"FIXME: The `networkx` graph library does not give the right 
results with use_linked_mask=True\"\n)\ndef test_linked_mask():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_linked_mask(cat_a, cat_b, True)\n res_sl2 = run_with_linked_mask(cat_a, cat_b, False)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n\n\ndef run_with_order(cat_a, cat_b, reverse=False):\n cats = {\"b\": cat_b, \"a\": cat_a} if reverse else {\"a\": cat_a, \"b\": cat_b}\n return skylink.match(\n cats,\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_cat_orders():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_order(cat_a, cat_b, False)\n res_sl2 = run_with_order(cat_a, cat_b, True)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n\n\ndef run_with_sort(cat_a, cat_b, sort):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n sort=sort,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_sort():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_sort(cat_a, cat_b, True)\n res_sl2 = run_with_sort(cat_a, cat_b, False)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n\n\ndef run_with_storekdtree(cat_a, cat_b, storekdtree):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n storekdtree=storekdtree,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_storekdtree():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl2 = run_with_storekdtree(cat_a, cat_b, False)\n res_sl1 = run_with_storekdtree(cat_a, cat_b, True)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n"
] | [
[
"numpy.random.uniform",
"numpy.repeat",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
radiantprism/StarCraft-2 | [
"1f159ae84feaed17c5e0bd70e272c06992ae0c48",
"1f159ae84feaed17c5e0bd70e272c06992ae0c48"
] | [
"pysc2/lib/features_test.py",
"pysc2/lib/renderer_human.py"
] | [
"#!/usr/bin/python\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for features.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport pickle\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom future.builtins import range # pylint: disable=redefined-builtin\nimport numpy\nimport six\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nfrom pysc2.lib import point\n\nfrom google.protobuf import text_format\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\n\n\n# Heavily trimmed, so this is useful for testing actions, but not observations.\nobservation_text_proto = \"\"\"\nplayer_common {\n player_id: 1\n minerals: 0\n vespene: 0\n food_cap: 10\n food_used: 0\n food_army: 0\n food_workers: 0\n idle_worker_count: 0\n army_count: 0\n warp_gate_count: 0\n larva_count: 0\n}\ngame_loop: 20\n\"\"\"\n\n\nRECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))\nSQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)\n\n\nclass AvailableActionsTest(absltest.TestCase):\n\n always_expected = {\n \"no_op\", \"move_camera\", \"select_point\", \"select_rect\",\n \"select_control_group\"\n }\n\n def setUp(self):\n super(AvailableActionsTest, self).setUp()\n self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())\n self.hideSpecificActions(True)\n\n def hideSpecificActions(self, hide_specific_actions): # pylint: disable=invalid-name\n self.features = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n hide_specific_actions=hide_specific_actions))\n\n def assertAvail(self, expected):\n actual = self.features.available_actions(self.obs)\n actual_names = {actions.FUNCTIONS[i].name for i in actual}\n self.assertEqual(actual_names, set(expected) | self.always_expected)\n\n def testAlways(self):\n self.assertAvail([])\n\n def testSelectUnit(self):\n self.obs.ui_data.multi.units.add(unit_type=1)\n self.assertAvail([\"select_unit\"])\n\n def testSelectIdleWorkder(self):\n self.obs.player_common.idle_worker_count = 1\n self.assertAvail([\"select_idle_worker\"])\n\n def testSelectArmy(self):\n self.obs.player_common.army_count = 3\n self.assertAvail([\"select_army\"])\n\n def testSelectWarpGates(self):\n self.obs.player_common.warp_gate_count = 1\n self.assertAvail([\"select_warp_gates\"])\n\n def testSelectLarva(self):\n self.obs.player_common.larva_count = 2\n self.assertAvail([\"select_larva\"])\n\n def testQuick(self):\n self.obs.abilities.add(ability_id=32)\n self.assertAvail([\"Effect_Salvage_quick\"])\n\n def testScreen(self):\n self.obs.abilities.add(ability_id=326, requires_point=True)\n self.assertAvail([\"Build_SensorTower_screen\"])\n\n def testScreenMinimap(self):\n self.obs.abilities.add(ability_id=17, requires_point=True)\n self.assertAvail([\"Patrol_screen\", \"Patrol_minimap\"])\n\n def testScreenAutocast(self):\n 
self.obs.abilities.add(ability_id=386, requires_point=True)\n self.assertAvail([\"Effect_Heal_screen\", \"Effect_Heal_autocast\"])\n\n def testScreenQuick(self):\n a = self.obs.abilities.add(ability_id=421)\n\n self.hideSpecificActions(True)\n a.requires_point = False\n self.assertAvail([\"Build_TechLab_quick\"])\n a.requires_point = True\n self.assertAvail([\"Build_TechLab_screen\"])\n\n self.hideSpecificActions(False)\n a.requires_point = False\n self.assertAvail([\"Build_TechLab_Barracks_quick\", \"Build_TechLab_quick\"])\n a.requires_point = True\n self.assertAvail([\"Build_TechLab_Barracks_screen\", \"Build_TechLab_screen\"])\n\n def testGeneral(self):\n self.obs.abilities.add(ability_id=1374)\n self.hideSpecificActions(False)\n self.assertAvail([\"BurrowDown_quick\", \"BurrowDown_Baneling_quick\"])\n self.hideSpecificActions(True)\n self.assertAvail([\"BurrowDown_quick\"])\n\n def testGeneralType(self):\n a = self.obs.abilities.add(ability_id=1376)\n self.hideSpecificActions(False)\n self.assertAvail([\"BurrowUp_quick\", \"BurrowUp_Baneling_quick\",\n \"BurrowUp_autocast\", \"BurrowUp_Baneling_autocast\"])\n self.hideSpecificActions(True)\n self.assertAvail([\"BurrowUp_quick\", \"BurrowUp_autocast\"])\n\n a.ability_id = 2110\n self.hideSpecificActions(False)\n self.assertAvail([\"BurrowUp_quick\", \"BurrowUp_Lurker_quick\"])\n self.hideSpecificActions(True)\n self.assertAvail([\"BurrowUp_quick\"])\n\n def testMany(self):\n add = [\n (23, True), # Attack\n (318, True), # Build_CommandCenter\n (320, True), # Build_Refinery\n (319, True), # Build_SupplyDepot\n (316, True), # Effect_Repair_SCV\n (295, True), # Harvest_Gather_SCV\n (16, True), # Move\n (17, True), # Patrol\n (4, False), # Stop\n ]\n for a, r in add:\n self.obs.abilities.add(ability_id=a, requires_point=r)\n self.hideSpecificActions(False)\n self.assertAvail([\n \"Attack_Attack_minimap\",\n \"Attack_Attack_screen\",\n \"Attack_minimap\",\n \"Attack_screen\",\n \"Build_CommandCenter_screen\",\n \"Build_Refinery_screen\",\n \"Build_SupplyDepot_screen\",\n \"Effect_Repair_screen\",\n \"Effect_Repair_autocast\",\n \"Effect_Repair_SCV_autocast\",\n \"Effect_Repair_SCV_screen\",\n \"Harvest_Gather_screen\",\n \"Harvest_Gather_SCV_screen\",\n \"Move_minimap\",\n \"Move_screen\",\n \"Move_Move_minimap\",\n \"Move_Move_screen\",\n \"Patrol_minimap\",\n \"Patrol_screen\",\n \"Patrol_Patrol_minimap\",\n \"Patrol_Patrol_screen\",\n \"Stop_quick\",\n \"Stop_Stop_quick\"\n ])\n self.hideSpecificActions(True)\n self.assertAvail([\n \"Attack_minimap\",\n \"Attack_screen\",\n \"Build_CommandCenter_screen\",\n \"Build_Refinery_screen\",\n \"Build_SupplyDepot_screen\",\n \"Effect_Repair_screen\",\n \"Effect_Repair_autocast\",\n \"Harvest_Gather_screen\",\n \"Move_minimap\",\n \"Move_screen\",\n \"Patrol_minimap\",\n \"Patrol_screen\",\n \"Stop_quick\",\n ])\n\n\nclass ToPointTest(absltest.TestCase):\n\n def testIntAsString(self):\n value = features._to_point(\"32\")\n self.assertEqual(value, point.Point(32, 32))\n\n def testIntStringTwoTuple(self):\n value = features._to_point((\"32\", 64))\n self.assertEqual(value, point.Point(32, 64))\n\n def testNoneInputReturnsNoneOutput(self):\n with self.assertRaises(AssertionError):\n features._to_point(None)\n\n def testNoneAsFirstElementOfTupleRaises(self):\n with self.assertRaises(TypeError):\n features._to_point((None, 32))\n\n def testNoneAsSecondElementOfTupleRaises(self):\n with self.assertRaises(TypeError):\n features._to_point((32, None))\n\n def testSingletonTupleRaises(self):\n with 
self.assertRaises(ValueError):\n features._to_point((32,))\n\n def testThreeTupleRaises(self):\n with self.assertRaises(ValueError):\n features._to_point((32, 32, 32))\n\n\nclass DimensionsTest(absltest.TestCase):\n\n def testScreenSizeWithoutMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=84)\n\n def testScreenWidthWithoutHeightRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(84, 0), minimap=64)\n\n def testScreenWidthHeightWithoutMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(84, 80))\n\n def testMinimapWidthAndHeightWithoutScreenRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(minimap=(64, 67))\n\n def testNoneNoneRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=None, minimap=None)\n\n def testSingularZeroesRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=0, minimap=0)\n\n def testTwoZeroesRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(0, 0), minimap=(0, 0))\n\n def testThreeTupleScreenRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(1, 2, 3), minimap=32)\n\n def testThreeTupleMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=64, minimap=(1, 2, 3))\n\n def testNegativeScreenRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=-64, minimap=32)\n\n def testNegativeMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=64, minimap=-32)\n\n def testNegativeScreenTupleRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(-64, -64), minimap=32)\n\n def testNegativeMinimapTupleRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=64, minimap=(-32, -32))\n\n def testEquality(self):\n self.assertEqual(features.Dimensions(screen=64, minimap=64),\n features.Dimensions(screen=64, minimap=64))\n self.assertNotEqual(features.Dimensions(screen=64, minimap=64),\n features.Dimensions(screen=64, minimap=32))\n self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)\n\n\nclass TestParseAgentInterfaceFormat(parameterized.TestCase):\n\n def test_no_arguments_raises(self):\n with self.assertRaises(ValueError):\n features.parse_agent_interface_format()\n\n @parameterized.parameters((32, None), (None, 32))\n def test_invalid_feature_combinations_raise(self, screen, minimap):\n with self.assertRaises(ValueError):\n features.parse_agent_interface_format(\n feature_screen=screen,\n feature_minimap=minimap)\n\n def test_valid_feature_specification_is_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24))\n\n self.assertEqual(\n agent_interface_format.feature_dimensions.screen,\n point.Point(32, 32))\n\n self.assertEqual(\n agent_interface_format.feature_dimensions.minimap,\n point.Point(24, 24))\n\n @parameterized.parameters((32, None), (None, 32), (32, 64))\n def test_invalid_minimap_combinations_raise(self, screen, minimap):\n with self.assertRaises(ValueError):\n features.parse_agent_interface_format(\n rgb_screen=screen,\n rgb_minimap=minimap)\n\n def test_valid_minimap_specification_is_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n rgb_screen=32,\n rgb_minimap=(24, 24))\n\n self.assertEqual(\n agent_interface_format.rgb_dimensions.screen,\n point.Point(32, 32))\n\n 
self.assertEqual(\n agent_interface_format.rgb_dimensions.minimap,\n point.Point(24, 24))\n\n def test_invalid_action_space_raises(self):\n with self.assertRaises(KeyError):\n features.parse_agent_interface_format(\n feature_screen=64,\n feature_minimap=64,\n action_space=\"UNKNOWN_ACTION_SPACE\")\n\n @parameterized.parameters(actions.ActionSpace.__members__.keys())\n def test_valid_action_space_is_parsed(self, action_space):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24),\n rgb_screen=64,\n rgb_minimap=(48, 48),\n use_raw_units=True,\n action_space=action_space)\n\n self.assertEqual(\n agent_interface_format.action_space,\n actions.ActionSpace[action_space])\n\n def test_camera_width_world_units_are_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24),\n camera_width_world_units=77)\n\n self.assertEqual(agent_interface_format.camera_width_world_units, 77)\n\n def test_use_feature_units_is_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24),\n use_feature_units=True)\n\n self.assertEqual(agent_interface_format.use_feature_units, True)\n\n\nclass FeaturesTest(absltest.TestCase):\n\n def testFunctionsIdsAreConsistent(self):\n for i, f in enumerate(actions.FUNCTIONS):\n self.assertEqual(i, f.id, \"id doesn't match for %s\" % f.id)\n\n def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):\n for ability_id, funcs in six.iteritems(actions.ABILITY_IDS):\n self.assertLen({f.general_id for f in funcs}, 1,\n \"Multiple generals for %s\" % ability_id)\n\n def testValidFunctionsAreConsistent(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n\n valid_funcs = feats.action_spec()\n for func_def in valid_funcs.functions:\n func = actions.FUNCTIONS[func_def.id]\n self.assertEqual(func_def.id, func.id)\n self.assertEqual(func_def.name, func.name)\n self.assertEqual(len(func_def.args), len(func.args)) # pylint: disable=g-generic-assert\n\n def gen_random_function_call(self, action_spec, func_id):\n args = [[numpy.random.randint(0, size) for size in arg.sizes] # pylint: disable=g-complex-comprehension\n for arg in action_spec.functions[func_id].args]\n return actions.FunctionCall(func_id, args)\n\n def testIdsMatchIndex(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n action_spec = feats.action_spec()\n for func_index, func_def in enumerate(action_spec.functions):\n self.assertEqual(func_index, func_def.id)\n for type_index, type_def in enumerate(action_spec.types):\n self.assertEqual(type_index, type_def.id)\n\n def testReversingUnknownAction(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n hide_specific_actions=False))\n sc2_action = sc_pb.Action()\n sc2_action.action_feature_layer.unit_command.ability_id = 6 # Cheer\n func_call = feats.reverse_action(sc2_action)\n self.assertEqual(func_call.function, 0) # No-op\n\n def testSpecificActionsAreReversible(self):\n \"\"\"Test that the `transform_action` and `reverse_action` are inverses.\"\"\"\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n hide_specific_actions=False))\n action_spec = feats.action_spec()\n\n for func_def in action_spec.functions:\n for _ in range(10):\n func_call = 
self.gen_random_function_call(action_spec, func_def.id)\n\n sc2_action = feats.transform_action(\n None, func_call, skip_available=True)\n func_call2 = feats.reverse_action(sc2_action)\n sc2_action2 = feats.transform_action(\n None, func_call2, skip_available=True)\n if func_def.id == actions.FUNCTIONS.select_rect.id:\n # Need to check this one manually since the same rect can be\n # defined in multiple ways.\n def rect(a):\n return point.Rect(point.Point(*a[1]).floor(),\n point.Point(*a[2]).floor())\n\n self.assertEqual(func_call.function, func_call2.function)\n self.assertEqual(len(func_call.arguments), len(func_call2.arguments)) # pylint: disable=g-generic-assert\n self.assertEqual(func_call.arguments[0], func_call2.arguments[0])\n self.assertEqual(rect(func_call.arguments),\n rect(func_call2.arguments))\n else:\n self.assertEqual(func_call, func_call2, msg=sc2_action)\n self.assertEqual(sc2_action, sc2_action2)\n\n def testRawActionUnitTags(self):\n feats = features.Features(\n features.AgentInterfaceFormat(\n use_raw_units=True,\n action_space=actions.ActionSpace.RAW),\n map_size=point.Point(100, 100))\n\n tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]\n ntags = numpy.array(tags, dtype=numpy.int64)\n tag = tags[0]\n ntag = numpy.array(tag, dtype=numpy.int64)\n\n def transform(fn, *args):\n func_call = actions.RAW_FUNCTIONS[fn](\"now\", *args)\n proto = feats.transform_action(None, func_call, skip_available=True)\n return proto.action_raw.unit_command\n\n self.assertEqual(transform(\"Attack_pt\", tag, [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", ntag, [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", [tag], [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", [ntag], [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", tags, [15, 20]).unit_tags, tags)\n self.assertEqual(transform(\"Attack_pt\", ntags, [15, 20]).unit_tags, tags)\n # Weird, but needed for backwards compatibility\n self.assertEqual(transform(\"Attack_pt\", [tags], [15, 20]).unit_tags, tags)\n self.assertEqual(transform(\"Attack_pt\", [ntags], [15, 20]).unit_tags, tags)\n\n self.assertEqual(transform(\"Attack_unit\", tag, tag).target_unit_tag, tag)\n self.assertEqual(transform(\"Attack_unit\", tag, ntag).target_unit_tag, tag)\n self.assertEqual(transform(\"Attack_unit\", tag, [tag]).target_unit_tag, tag)\n self.assertEqual(transform(\"Attack_unit\", tag, [ntag]).target_unit_tag, tag)\n\n def testCanPickleSpecs(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=SQUARE_DIMENSIONS))\n action_spec = feats.action_spec()\n observation_spec = feats.observation_spec()\n\n self.assertEqual(action_spec, pickle.loads(pickle.dumps(action_spec)))\n self.assertEqual(observation_spec,\n pickle.loads(pickle.dumps(observation_spec)))\n\n def testCanPickleFunctionCall(self):\n func = actions.FUNCTIONS.select_point(\"select\", [1, 2])\n self.assertEqual(func, pickle.loads(pickle.dumps(func)))\n\n def testCanDeepcopyNumpyFunctionCall(self):\n arguments = [numpy.float32] * len(actions.Arguments._fields)\n dtypes = actions.FunctionCall(\n function=numpy.float32,\n arguments=actions.Arguments(*arguments))\n self.assertEqual(dtypes, copy.deepcopy(dtypes))\n\n def testSizeConstructors(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=SQUARE_DIMENSIONS))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 84))\n 
self.assertEqual(spec.types.screen2.sizes, (84, 84))\n self.assertEqual(spec.types.minimap.sizes, (64, 64))\n\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 80))\n self.assertEqual(spec.types.screen2.sizes, (84, 80))\n self.assertEqual(spec.types.minimap.sizes, (64, 67))\n\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 80))\n self.assertEqual(spec.types.screen2.sizes, (84, 80))\n self.assertEqual(spec.types.minimap.sizes, (64, 67))\n\n # Missing one or the other of game_info and dimensions.\n with self.assertRaises(ValueError):\n features.Features()\n\n # Resolution/action space mismatch.\n with self.assertRaises(ValueError):\n features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n action_space=actions.ActionSpace.RGB))\n with self.assertRaises(ValueError):\n features.Features(features.AgentInterfaceFormat(\n rgb_dimensions=RECTANGULAR_DIMENSIONS,\n action_space=actions.ActionSpace.FEATURES))\n with self.assertRaises(ValueError):\n features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=RECTANGULAR_DIMENSIONS))\n\n def testFlRgbActionSpec(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),\n action_space=actions.ActionSpace.FEATURES))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 80))\n self.assertEqual(spec.types.screen2.sizes, (84, 80))\n self.assertEqual(spec.types.minimap.sizes, (64, 67))\n\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),\n action_space=actions.ActionSpace.RGB))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (128, 132))\n self.assertEqual(spec.types.screen2.sizes, (128, 132))\n self.assertEqual(spec.types.minimap.sizes, (74, 77))\n\n def testFlRgbObservationSpec(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),\n action_space=actions.ActionSpace.FEATURES))\n obs_spec = feats.observation_spec()\n self.assertEqual(obs_spec[\"feature_screen\"], # pylint: disable=g-generic-assert\n (len(features.SCREEN_FEATURES), 80, 84))\n self.assertEqual(obs_spec[\"feature_minimap\"], # pylint: disable=g-generic-assert\n (len(features.MINIMAP_FEATURES), 67, 64))\n self.assertEqual(obs_spec[\"rgb_screen\"], (132, 128, 3))\n self.assertEqual(obs_spec[\"rgb_minimap\"], (77, 74, 3))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A viewer for starcraft observations/replays.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport ctypes\nimport functools\nimport itertools\nfrom absl import logging\nimport math\nimport os\nimport platform\nimport re\nimport subprocess\nimport threading\nimport time\n\nimport enum\nfrom future.builtins import range # pylint: disable=redefined-builtin\nimport numpy as np\nimport pygame\nimport queue\nfrom pysc2.lib import buffs\nfrom pysc2.lib import colors\nfrom pysc2.lib import features\nfrom pysc2.lib import memoize\nfrom pysc2.lib import point\nfrom pysc2.lib import remote_controller\nfrom pysc2.lib import stopwatch\nfrom pysc2.lib import transform\n\nfrom pysc2.lib import video_writer\nfrom s2clientprotocol import error_pb2 as sc_err\nfrom s2clientprotocol import raw_pb2 as sc_raw\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\nfrom s2clientprotocol import spatial_pb2 as sc_spatial\nfrom s2clientprotocol import ui_pb2 as sc_ui\n\n# Disable attribute-error because of the multiple stages of initialization for\n# RendererHuman.\n# pytype: disable=attribute-error\n\nsw = stopwatch.sw\n\nrender_lock = threading.Lock() # Serialize all window/render operations.\n\n\ndef with_lock(lock):\n \"\"\"Make sure the lock is held while in this function.\"\"\"\n def decorator(func):\n @functools.wraps(func)\n def _with_lock(*args, **kwargs):\n with lock:\n return func(*args, **kwargs)\n return _with_lock\n return decorator\n\n\ndef clamp(n, smallest, largest):\n return max(smallest, min(n, largest))\n\n\nclass MouseButtons(enum.IntEnum):\n # https://www.pygame.org/docs/ref/mouse.html\n LEFT = 1\n MIDDLE = 2\n RIGHT = 3\n WHEEL_UP = 4\n WHEEL_DOWN = 5\n\n\nclass SurfType(enum.IntEnum):\n \"\"\"Used to tell what a mouse click refers to.\"\"\"\n CHROME = 1 # ie help, feature layer titles, etc\n SCREEN = 2\n MINIMAP = 4\n FEATURE = 8\n RGB = 16\n\n\nclass ActionCmd(enum.Enum):\n STEP = 1\n RESTART = 2\n QUIT = 3\n\n\nclass _Ability(collections.namedtuple(\"_Ability\", [\n \"ability_id\", \"name\", \"footprint_radius\", \"requires_point\", \"hotkey\"])):\n \"\"\"Hold the specifics of available abilities.\"\"\"\n\n def __new__(cls, ability, static_data):\n specific_data = static_data[ability.ability_id]\n if specific_data.remaps_to_ability_id:\n general_data = static_data[specific_data.remaps_to_ability_id]\n else:\n general_data = specific_data\n return super(_Ability, cls).__new__(\n cls,\n ability_id=general_data.ability_id,\n name=(general_data.friendly_name or general_data.button_name or\n general_data.link_name),\n footprint_radius=general_data.footprint_radius,\n requires_point=ability.requires_point,\n hotkey=specific_data.hotkey)\n\n\nclass _Surface(object):\n \"\"\"A surface to display on screen.\"\"\"\n\n def __init__(self, surf, surf_type, surf_rect, world_to_surf, world_to_obs,\n draw):\n 
\"\"\"A surface to display on screen.\n\n Args:\n surf: The actual pygame.Surface (or subsurface).\n surf_type: A SurfType, used to tell how to treat clicks in that area.\n surf_rect: Rect of the surface relative to the window.\n world_to_surf: Convert a world point to a pixel on the surface.\n world_to_obs: Convert a world point to a pixel in the observation.\n draw: A function that draws onto the surface.\n \"\"\"\n self.surf = surf\n self.surf_type = surf_type\n self.surf_rect = surf_rect\n self.world_to_surf = world_to_surf\n self.world_to_obs = world_to_obs\n self.draw = draw\n\n def draw_line(self, color, start_loc, end_loc, thickness=1):\n \"\"\"Draw a line using world coordinates and thickness.\"\"\"\n pygame.draw.line(self.surf, color,\n self.world_to_surf.fwd_pt(start_loc).round(),\n self.world_to_surf.fwd_pt(end_loc).round(),\n max(1, thickness))\n\n def draw_arc(self, color, world_loc, world_radius, start_angle, stop_angle,\n thickness=1):\n \"\"\"Draw an arc using world coordinates, radius, start and stop angles.\"\"\"\n center = self.world_to_surf.fwd_pt(world_loc).round()\n radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))\n rect = pygame.Rect(center - radius, (radius * 2, radius * 2))\n pygame.draw.arc(self.surf, color, rect, start_angle, stop_angle,\n thickness if thickness < radius else 0)\n\n def draw_circle(self, color, world_loc, world_radius, thickness=0):\n \"\"\"Draw a circle using world coordinates and radius.\"\"\"\n if world_radius > 0:\n center = self.world_to_surf.fwd_pt(world_loc).round()\n radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))\n pygame.draw.circle(self.surf, color, center, radius,\n thickness if thickness < radius else 0)\n\n def draw_rect(self, color, world_rect, thickness=0):\n \"\"\"Draw a rectangle using world coordinates.\"\"\"\n tl = self.world_to_surf.fwd_pt(world_rect.tl).round()\n br = self.world_to_surf.fwd_pt(world_rect.br).round()\n rect = pygame.Rect(tl, br - tl)\n pygame.draw.rect(self.surf, color, rect, thickness)\n\n def blit_np_array(self, array):\n \"\"\"Fill this surface using the contents of a numpy array.\"\"\"\n with sw(\"make_surface\"):\n raw_surface = pygame.surfarray.make_surface(array.transpose([1, 0, 2]))\n with sw(\"draw\"):\n pygame.transform.scale(raw_surface, self.surf.get_size(), self.surf)\n\n def write_screen(self, font, color, screen_pos, text, align=\"left\",\n valign=\"top\"):\n \"\"\"Write to the screen in font.size relative coordinates.\"\"\"\n pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize()\n text_surf = font.render(str(text), True, color)\n rect = text_surf.get_rect()\n if pos.x >= 0:\n setattr(rect, align, pos.x)\n else:\n setattr(rect, align, self.surf.get_width() + pos.x)\n if pos.y >= 0:\n setattr(rect, valign, pos.y)\n else:\n setattr(rect, valign, self.surf.get_height() + pos.y)\n self.surf.blit(text_surf, rect)\n\n def write_world(self, font, color, world_loc, text):\n text_surf = font.render(text, True, color)\n rect = text_surf.get_rect()\n rect.center = self.world_to_surf.fwd_pt(world_loc)\n self.surf.blit(text_surf, rect)\n\n\nclass MousePos(collections.namedtuple(\"MousePos\", [\"world_pos\", \"surf\"])):\n \"\"\"Holds the mouse position in world coordinates and the surf it came from.\"\"\"\n __slots__ = ()\n\n @property\n def surf_pos(self):\n return self.surf.world_to_surf.fwd_pt(self.world_pos)\n\n @property\n def obs_pos(self):\n return self.surf.world_to_obs.fwd_pt(self.world_pos)\n\n def action_spatial(self, action):\n \"\"\"Given an 
Action, return the right spatial action.\"\"\"\n if self.surf.surf_type & SurfType.FEATURE:\n return action.action_feature_layer\n elif self.surf.surf_type & SurfType.RGB:\n return action.action_render\n else:\n assert self.surf.surf_type & (SurfType.RGB | SurfType.FEATURE)\n\n\nclass PastAction(collections.namedtuple(\"PastAction\", [\n \"ability\", \"color\", \"pos\", \"time\", \"deadline\"])):\n \"\"\"Holds a past action for drawing over time.\"\"\"\n\n\[email protected]\ndef _get_desktop_size():\n \"\"\"Get the desktop size.\"\"\"\n if platform.system() == \"Linux\":\n try:\n xrandr_query = subprocess.check_output([\"xrandr\", \"--query\"])\n sizes = re.findall(r\"\\bconnected primary (\\d+)x(\\d+)\", str(xrandr_query))\n if sizes[0]:\n return point.Point(int(sizes[0][0]), int(sizes[0][1]))\n except: # pylint: disable=bare-except\n logging.error(\"Failed to get the resolution from xrandr.\")\n\n # Most general, but doesn't understand multiple monitors.\n display_info = pygame.display.Info()\n return point.Point(display_info.current_w, display_info.current_h)\n\n\ndef circle_mask(shape, pt, radius):\n # ogrid is confusing but seems to be the best way to generate a circle mask.\n # http://docs.scipy.org/doc/numpy/reference/generated/numpy.ogrid.html\n # http://stackoverflow.com/questions/8647024/how-to-apply-a-disc-shaped-mask-to-a-numpy-array\n y, x = np.ogrid[-pt.y:shape.y - pt.y, -pt.x:shape.x - pt.x]\n # <= is important as radius will often come in as 0 due to rounding.\n return x**2 + y**2 <= radius**2\n\n\nclass RendererHuman(object):\n \"\"\"Render starcraft obs with pygame such that it's playable by humans.\"\"\"\n camera_actions = { # camera moves by 3 world units.\n pygame.K_LEFT: point.Point(-3, 0),\n pygame.K_RIGHT: point.Point(3, 0),\n pygame.K_UP: point.Point(0, 3),\n pygame.K_DOWN: point.Point(0, -3),\n }\n\n cmd_group_keys = {\n pygame.K_0: 0,\n pygame.K_1: 1,\n pygame.K_2: 2,\n pygame.K_3: 3,\n pygame.K_4: 4,\n pygame.K_5: 5,\n pygame.K_6: 6,\n pygame.K_7: 7,\n pygame.K_8: 8,\n pygame.K_9: 9,\n }\n\n shortcuts = [\n (\"F1\", \"Select idle worker\"),\n (\"F2\", \"Select army\"),\n (\"F3\", \"Select larva (zerg) or warp gates (protoss)\"),\n (\"F4\", \"Quit the game\"),\n (\"F5\", \"Restart the map\"),\n (\"F8\", \"Save a replay\"),\n (\"F9\", \"Toggle RGB rendering\"),\n (\"F10\", \"Toggle rendering the player_relative layer.\"),\n (\"F11\", \"Toggle synchronous rendering\"),\n (\"F12\", \"Toggle raw/feature layer actions\"),\n (\"Ctrl++\", \"Zoom in\"),\n (\"Ctrl+-\", \"Zoom out\"),\n (\"PgUp/PgDn\", \"Increase/decrease the max game speed\"),\n (\"Ctrl+PgUp/PgDn\", \"Increase/decrease the step multiplier\"),\n (\"Pause\", \"Pause the game\"),\n (\"?\", \"This help screen\"),\n ]\n\n upgrade_colors = [\n colors.black, # unused...\n colors.white * 0.6,\n colors.white * 0.8,\n colors.white,\n ]\n\n def __init__(self, fps=22.4, step_mul=1, render_sync=False,\n render_feature_grid=True, video=None):\n \"\"\"Create a renderer for use by humans.\n\n Make sure to call `init` with the game info, or just use `run`.\n\n Args:\n fps: How fast should the game be run.\n step_mul: How many game steps to take per observation.\n render_sync: Whether to wait for the obs to render before continuing.\n render_feature_grid: When RGB and feature layers are available, whether\n to render the grid of feature layers.\n video: A filename to write the video to. 
Implicitly enables render_sync.\n \"\"\"\n self._fps = fps\n self._step_mul = step_mul\n self._render_sync = render_sync or bool(video)\n self._raw_actions = False\n self._render_player_relative = False\n self._render_rgb = None\n self._render_feature_grid = render_feature_grid\n self._window = None\n self._window_scale = 0.75\n self._obs_queue = queue.Queue()\n self._render_thread = threading.Thread(target=self.render_thread,\n name=\"Renderer\")\n self._render_thread.start()\n self._game_times = collections.deque(maxlen=100) # Avg FPS over 100 frames. # pytype: disable=wrong-keyword-args\n self._render_times = collections.deque(maxlen=100) # pytype: disable=wrong-keyword-args\n self._last_time = time.time()\n self._last_game_loop = 0\n self._name_lengths = {}\n self._video_writer = video_writer.VideoWriter(video, fps) if video else None\n\n def close(self):\n if self._obs_queue:\n self._obs_queue.put(None)\n self._render_thread.join()\n self._obs_queue = None\n self._render_thread = None\n if self._video_writer:\n self._video_writer.close()\n self._video_writer = None\n\n def init(self, game_info, static_data):\n \"\"\"Take the game info and the static data needed to set up the game.\n\n This must be called before render or get_actions for each game or restart.\n\n Args:\n game_info: A `sc_pb.ResponseGameInfo` object for this game.\n static_data: A `StaticData` object for this game.\n\n Raises:\n ValueError: if there is nothing to render.\n \"\"\"\n self._game_info = game_info\n self._static_data = static_data\n\n if not game_info.HasField(\"start_raw\"):\n raise ValueError(\"Raw observations are required for the renderer.\")\n\n self._map_size = point.Point.build(game_info.start_raw.map_size)\n self._playable = point.Rect(\n point.Point.build(game_info.start_raw.playable_area.p0),\n point.Point.build(game_info.start_raw.playable_area.p1))\n\n if game_info.options.HasField(\"feature_layer\"):\n fl_opts = game_info.options.feature_layer\n self._feature_screen_px = point.Point.build(fl_opts.resolution)\n self._feature_minimap_px = point.Point.build(fl_opts.minimap_resolution)\n self._feature_camera_width_world_units = fl_opts.width\n self._render_rgb = False\n if not fl_opts.crop_to_playable_area:\n self._playable = point.Rect(self._map_size)\n else:\n self._feature_screen_px = self._feature_minimap_px = None\n if game_info.options.HasField(\"render\"):\n render_opts = game_info.options.render\n self._rgb_screen_px = point.Point.build(render_opts.resolution)\n self._rgb_minimap_px = point.Point.build(render_opts.minimap_resolution)\n self._render_rgb = True\n else:\n self._rgb_screen_px = self._rgb_minimap_px = None\n\n if not self._feature_screen_px and not self._rgb_screen_px:\n raise ValueError(\"Nothing to render.\")\n\n try:\n self.init_window()\n self._initialized = True\n except pygame.error as e:\n self._initialized = False\n logging.error(\"-\" * 60)\n logging.error(\"Failed to initialize pygame: %s\", e)\n logging.error(\"Continuing without pygame.\")\n logging.error(\"If you're using ssh and have an X server, try ssh -X.\")\n logging.error(\"-\" * 60)\n\n self._obs = sc_pb.ResponseObservation()\n self._queued_action = None\n self._queued_hotkey = \"\"\n self._select_start = None\n self._alerts = {}\n self._past_actions = []\n self._help = False\n self._last_zoom_time = 0\n\n @with_lock(render_lock)\n @sw.decorate\n def init_window(self):\n \"\"\"Initialize the pygame window and lay out the surfaces.\"\"\"\n if platform.system() == \"Windows\":\n # Enable DPI awareness on Windows 
to give the correct window size.\n ctypes.windll.user32.SetProcessDPIAware() # pytype: disable=module-attr\n\n pygame.init()\n\n if self._render_rgb and self._rgb_screen_px:\n main_screen_px = self._rgb_screen_px\n else:\n main_screen_px = self._feature_screen_px\n\n window_size_ratio = main_screen_px\n num_feature_layers = 0\n if self._render_feature_grid:\n # Want a roughly square grid of feature layers, each being roughly square.\n if self._game_info.options.raw:\n num_feature_layers += 5\n if self._feature_screen_px:\n num_feature_layers += len(features.SCREEN_FEATURES)\n num_feature_layers += len(features.MINIMAP_FEATURES)\n if num_feature_layers > 0:\n feature_cols = math.ceil(math.sqrt(num_feature_layers))\n feature_rows = math.ceil(num_feature_layers / feature_cols)\n features_layout = point.Point(\n feature_cols, feature_rows * 1.05) # Make room for titles.\n\n # Scale features_layout to main_screen_px height so we know its width.\n features_aspect_ratio = (features_layout * main_screen_px.y /\n features_layout.y)\n window_size_ratio += point.Point(features_aspect_ratio.x, 0)\n\n window_size_px = window_size_ratio.scale_max_size(\n _get_desktop_size() * self._window_scale).ceil()\n\n # Create the actual window surface. This should only be blitted to from one\n # of the sub-surfaces defined below.\n self._window = pygame.display.set_mode(window_size_px, 0, 32)\n pygame.display.set_caption(\"Starcraft Viewer\")\n\n # The sub-surfaces that the various draw functions will draw to.\n self._surfaces = []\n def add_surface(surf_type, surf_loc, world_to_surf, world_to_obs, draw_fn):\n \"\"\"Add a surface. Drawn in order and intersect in reverse order.\"\"\"\n sub_surf = self._window.subsurface(\n pygame.Rect(surf_loc.tl, surf_loc.size))\n self._surfaces.append(_Surface(\n sub_surf, surf_type, surf_loc, world_to_surf, world_to_obs, draw_fn))\n\n self._scale = window_size_px.y // 32\n self._font_small = pygame.font.Font(None, int(self._scale * 0.5))\n self._font_large = pygame.font.Font(None, self._scale)\n\n def check_eq(a, b):\n \"\"\"Used to run unit tests on the transforms.\"\"\"\n assert (a - b).len() < 0.0001, \"%s != %s\" % (a, b)\n\n # World has origin at bl, world_tl has origin at tl.\n self._world_to_world_tl = transform.Linear(\n point.Point(1, -1), point.Point(0, self._map_size.y))\n\n check_eq(self._world_to_world_tl.fwd_pt(point.Point(0, 0)),\n point.Point(0, self._map_size.y))\n check_eq(self._world_to_world_tl.fwd_pt(point.Point(5, 10)),\n point.Point(5, self._map_size.y - 10))\n\n # Move the point to be relative to the camera. 
This gets updated per frame.\n self._world_tl_to_world_camera_rel = transform.Linear(\n offset=-self._map_size / 4)\n\n check_eq(self._world_tl_to_world_camera_rel.fwd_pt(self._map_size / 4),\n point.Point(0, 0))\n check_eq(\n self._world_tl_to_world_camera_rel.fwd_pt(\n (self._map_size / 4) + point.Point(5, 10)),\n point.Point(5, 10))\n\n if self._feature_screen_px:\n # Feature layer locations in continuous space.\n feature_world_per_pixel = (self._feature_screen_px /\n self._feature_camera_width_world_units)\n world_camera_rel_to_feature_screen = transform.Linear(\n feature_world_per_pixel, self._feature_screen_px / 2)\n\n check_eq(world_camera_rel_to_feature_screen.fwd_pt(point.Point(0, 0)),\n self._feature_screen_px / 2)\n check_eq(\n world_camera_rel_to_feature_screen.fwd_pt(\n point.Point(-0.5, -0.5) * self._feature_camera_width_world_units),\n point.Point(0, 0))\n\n self._world_to_feature_screen = transform.Chain(\n self._world_to_world_tl,\n self._world_tl_to_world_camera_rel,\n world_camera_rel_to_feature_screen)\n self._world_to_feature_screen_px = transform.Chain(\n self._world_to_feature_screen,\n transform.PixelToCoord())\n\n world_tl_to_feature_minimap = transform.Linear(\n self._feature_minimap_px / self._playable.diagonal.max_dim())\n world_tl_to_feature_minimap.offset = world_tl_to_feature_minimap.fwd_pt(\n -self._world_to_world_tl.fwd_pt(self._playable.bl))\n\n self._world_to_feature_minimap = transform.Chain(\n self._world_to_world_tl,\n world_tl_to_feature_minimap)\n self._world_to_feature_minimap_px = transform.Chain(\n self._world_to_feature_minimap,\n transform.PixelToCoord())\n\n # These are confusing since self._playable is in world coords which is\n # (bl <= tr), but stored in a Rect that is (tl <= br).\n check_eq(self._world_to_feature_minimap.fwd_pt(self._playable.bl),\n point.Point(0, 0))\n check_eq(self._world_to_feature_minimap.fwd_pt(self._playable.tr),\n self._playable.diagonal.scale_max_size(self._feature_minimap_px))\n\n if self._rgb_screen_px:\n # RGB pixel locations in continuous space.\n\n # TODO(tewalds): Use a real 3d projection instead of orthogonal.\n rgb_world_per_pixel = (self._rgb_screen_px / 24)\n world_camera_rel_to_rgb_screen = transform.Linear(\n rgb_world_per_pixel, self._rgb_screen_px / 2)\n\n check_eq(world_camera_rel_to_rgb_screen.fwd_pt(point.Point(0, 0)),\n self._rgb_screen_px / 2)\n check_eq(\n world_camera_rel_to_rgb_screen.fwd_pt(\n point.Point(-0.5, -0.5) * 24),\n point.Point(0, 0))\n\n self._world_to_rgb_screen = transform.Chain(\n self._world_to_world_tl,\n self._world_tl_to_world_camera_rel,\n world_camera_rel_to_rgb_screen)\n self._world_to_rgb_screen_px = transform.Chain(\n self._world_to_rgb_screen,\n transform.PixelToCoord())\n\n world_tl_to_rgb_minimap = transform.Linear(\n self._rgb_minimap_px / self._map_size.max_dim())\n\n check_eq(world_tl_to_rgb_minimap.fwd_pt(point.Point(0, 0)),\n point.Point(0, 0))\n check_eq(world_tl_to_rgb_minimap.fwd_pt(self._map_size),\n self._map_size.scale_max_size(self._rgb_minimap_px))\n\n self._world_to_rgb_minimap = transform.Chain(\n self._world_to_world_tl,\n world_tl_to_rgb_minimap)\n self._world_to_rgb_minimap_px = transform.Chain(\n self._world_to_rgb_minimap,\n transform.PixelToCoord())\n\n # Renderable space for the screen.\n screen_size_px = main_screen_px.scale_max_size(window_size_px)\n minimap_size_px = self._playable.diagonal.scale_max_size(screen_size_px / 4)\n minimap_offset = point.Point(0, (screen_size_px.y - minimap_size_px.y))\n\n if self._render_rgb:\n 
rgb_screen_to_main_screen = transform.Linear(\n screen_size_px / self._rgb_screen_px)\n add_surface(SurfType.RGB | SurfType.SCREEN,\n point.Rect(point.origin, screen_size_px),\n transform.Chain( # surf\n self._world_to_rgb_screen,\n rgb_screen_to_main_screen),\n self._world_to_rgb_screen_px,\n self.draw_screen)\n rgb_minimap_to_main_minimap = transform.Linear(\n minimap_size_px / self._rgb_minimap_px)\n add_surface(SurfType.RGB | SurfType.MINIMAP,\n point.Rect(minimap_offset,\n minimap_offset + minimap_size_px),\n transform.Chain( # surf\n self._world_to_rgb_minimap,\n rgb_minimap_to_main_minimap),\n self._world_to_rgb_minimap_px,\n self.draw_mini_map)\n else: # Feature layer main screen\n feature_screen_to_main_screen = transform.Linear(\n screen_size_px / self._feature_screen_px)\n add_surface(SurfType.FEATURE | SurfType.SCREEN,\n point.Rect(point.origin, screen_size_px),\n transform.Chain( # surf\n self._world_to_feature_screen,\n feature_screen_to_main_screen),\n self._world_to_feature_screen_px,\n self.draw_screen)\n feature_minimap_to_main_minimap = transform.Linear(\n minimap_size_px.max_dim() / self._feature_minimap_px.max_dim())\n add_surface(SurfType.FEATURE | SurfType.MINIMAP,\n point.Rect(minimap_offset,\n minimap_offset + minimap_size_px),\n transform.Chain( # surf\n self._world_to_feature_minimap,\n feature_minimap_to_main_minimap),\n self._world_to_feature_minimap_px,\n self.draw_mini_map)\n\n if self._render_feature_grid and num_feature_layers > 0:\n # Add the raw and feature layers\n features_loc = point.Point(screen_size_px.x, 0)\n feature_pane = self._window.subsurface(\n pygame.Rect(features_loc, window_size_px - features_loc))\n feature_pane.fill(colors.white / 2)\n feature_pane_size = point.Point(*feature_pane.get_size())\n feature_grid_size = feature_pane_size / point.Point(feature_cols,\n feature_rows)\n feature_layer_area = point.Point(1, 1).scale_max_size(\n feature_grid_size)\n feature_layer_padding = feature_layer_area // 20\n feature_layer_size = feature_layer_area - feature_layer_padding * 2\n\n feature_font_size = int(feature_grid_size.y * 0.09)\n feature_font = pygame.font.Font(None, feature_font_size)\n\n feature_counter = itertools.count()\n def add_layer(surf_type, world_to_surf, world_to_obs, name, fn):\n \"\"\"Add a layer surface.\"\"\"\n i = next(feature_counter)\n grid_offset = point.Point(i % feature_cols,\n i // feature_cols) * feature_grid_size\n text = feature_font.render(name, True, colors.white)\n rect = text.get_rect()\n rect.center = grid_offset + point.Point(feature_grid_size.x / 2,\n feature_font_size)\n feature_pane.blit(text, rect)\n surf_loc = (features_loc + grid_offset + feature_layer_padding +\n point.Point(0, feature_font_size))\n add_surface(surf_type,\n point.Rect(surf_loc, surf_loc + feature_layer_size).round(),\n world_to_surf, world_to_obs, fn)\n\n raw_world_to_obs = transform.Linear()\n raw_world_to_surf = transform.Linear(feature_layer_size / self._map_size)\n def add_raw_layer(from_obs, name, color):\n add_layer(SurfType.FEATURE | SurfType.MINIMAP,\n raw_world_to_surf, raw_world_to_obs, \"raw \" + name,\n lambda surf: self.draw_raw_layer(surf, from_obs, name, color))\n\n if self._game_info.options.raw:\n add_raw_layer(False, \"terrain_height\", colors.height_map(256))\n add_raw_layer(False, \"pathing_grid\", colors.winter(2))\n add_raw_layer(False, \"placement_grid\", colors.winter(2))\n add_raw_layer(True, \"visibility\", colors.VISIBILITY_PALETTE)\n add_raw_layer(True, \"creep\", colors.CREEP_PALETTE)\n\n def 
add_feature_layer(feature, surf_type, world_to_surf, world_to_obs):\n add_layer(surf_type, world_to_surf, world_to_obs, feature.full_name,\n lambda surf: self.draw_feature_layer(surf, feature))\n\n if self._feature_minimap_px:\n # Add the minimap feature layers\n feature_minimap_to_feature_minimap_surf = transform.Linear(\n feature_layer_size / self._feature_minimap_px)\n world_to_feature_minimap_surf = transform.Chain(\n self._world_to_feature_minimap,\n feature_minimap_to_feature_minimap_surf)\n for feature in features.MINIMAP_FEATURES:\n add_feature_layer(feature, SurfType.FEATURE | SurfType.MINIMAP,\n world_to_feature_minimap_surf,\n self._world_to_feature_minimap_px)\n\n if self._feature_screen_px:\n # Add the screen feature layers\n feature_screen_to_feature_screen_surf = transform.Linear(\n feature_layer_size / self._feature_screen_px)\n world_to_feature_screen_surf = transform.Chain(\n self._world_to_feature_screen,\n feature_screen_to_feature_screen_surf)\n for feature in features.SCREEN_FEATURES:\n add_feature_layer(feature, SurfType.FEATURE | SurfType.SCREEN,\n world_to_feature_screen_surf,\n self._world_to_feature_screen_px)\n\n # Add the help screen\n help_size = point.Point(\n (max(len(s) for s, _ in self.shortcuts) +\n max(len(s) for _, s in self.shortcuts)) * 0.4 + 4,\n len(self.shortcuts) + 3) * self._scale\n help_rect = point.Rect(window_size_px / 2 - help_size / 2,\n window_size_px / 2 + help_size / 2)\n add_surface(SurfType.CHROME, help_rect, None, None, self.draw_help)\n\n # Arbitrarily set the initial camera to the center of the map.\n self._update_camera(self._map_size / 2)\n\n def _update_camera(self, camera_center):\n \"\"\"Update the camera transform based on the new camera center.\"\"\"\n self._world_tl_to_world_camera_rel.offset = (\n -self._world_to_world_tl.fwd_pt(camera_center) *\n self._world_tl_to_world_camera_rel.scale)\n\n if self._feature_screen_px:\n camera_radius = (self._feature_screen_px / self._feature_screen_px.x *\n self._feature_camera_width_world_units / 2)\n center = camera_center.bound(camera_radius,\n self._map_size - camera_radius)\n self._camera = point.Rect(\n (center - camera_radius).bound(self._map_size),\n (center + camera_radius).bound(self._map_size))\n\n def zoom(self, factor):\n \"\"\"Zoom the window in/out.\"\"\"\n self._window_scale *= factor\n if time.time() - self._last_zoom_time < 1:\n # Avoid a deadlock in pygame if you zoom too quickly.\n time.sleep(time.time() - self._last_zoom_time)\n self.init_window()\n self._last_zoom_time = time.time()\n\n def get_mouse_pos(self, window_pos=None):\n \"\"\"Return a MousePos filled with the world position and surf it hit.\"\"\"\n window_pos = window_pos or pygame.mouse.get_pos()\n # +0.5 to center the point on the middle of the pixel.\n window_pt = point.Point(*window_pos) + 0.5\n for surf in reversed(self._surfaces):\n if (surf.surf_type != SurfType.CHROME and\n surf.surf_rect.contains_point(window_pt)):\n surf_rel_pt = window_pt - surf.surf_rect.tl\n world_pt = surf.world_to_surf.back_pt(surf_rel_pt)\n return MousePos(world_pt, surf)\n\n def clear_queued_action(self):\n self._queued_hotkey = \"\"\n self._queued_action = None\n\n def save_replay(self, run_config, controller):\n if controller.status in (remote_controller.Status.in_game,\n remote_controller.Status.ended):\n prefix, _ = os.path.splitext(\n os.path.basename(self._game_info.local_map_path))\n replay_path = run_config.save_replay(\n controller.save_replay(), \"local\", prefix)\n print(\"Wrote replay to:\", replay_path)\n\n 
@sw.decorate\n def get_actions(self, run_config, controller):\n \"\"\"Get actions from the UI, apply to controller, and return an ActionCmd.\"\"\"\n if not self._initialized:\n return ActionCmd.STEP\n\n for event in pygame.event.get():\n ctrl = pygame.key.get_mods() & pygame.KMOD_CTRL\n shift = pygame.key.get_mods() & pygame.KMOD_SHIFT\n alt = pygame.key.get_mods() & pygame.KMOD_ALT\n if event.type == pygame.QUIT:\n return ActionCmd.QUIT\n elif event.type == pygame.KEYDOWN:\n if self._help:\n self._help = False\n elif event.key in (pygame.K_QUESTION, pygame.K_SLASH):\n self._help = True\n elif event.key == pygame.K_PAUSE:\n pause = True\n while pause:\n time.sleep(0.1)\n for event2 in pygame.event.get():\n if event2.type == pygame.KEYDOWN:\n if event2.key in (pygame.K_PAUSE, pygame.K_ESCAPE):\n pause = False\n elif event2.key == pygame.K_F4:\n return ActionCmd.QUIT\n elif event2.key == pygame.K_F5:\n return ActionCmd.RESTART\n elif event.key == pygame.K_F4:\n return ActionCmd.QUIT\n elif event.key == pygame.K_F5:\n return ActionCmd.RESTART\n elif event.key == pygame.K_F9: # Toggle rgb rendering.\n if self._rgb_screen_px and self._feature_screen_px:\n self._render_rgb = not self._render_rgb\n print(\"Rendering\", self._render_rgb and \"RGB\" or \"Feature Layers\")\n self.init_window()\n elif event.key == pygame.K_F11: # Toggle synchronous rendering.\n self._render_sync = not self._render_sync\n print(\"Rendering\", self._render_sync and \"Sync\" or \"Async\")\n elif event.key == pygame.K_F12:\n self._raw_actions = not self._raw_actions\n print(\"Action space:\", self._raw_actions and \"Raw\" or \"Spatial\")\n elif event.key == pygame.K_F10: # Toggle player_relative layer.\n self._render_player_relative = not self._render_player_relative\n elif event.key == pygame.K_F8: # Save a replay.\n self.save_replay(run_config, controller)\n elif event.key in (pygame.K_PLUS, pygame.K_EQUALS) and ctrl:\n self.zoom(1.1) # zoom in\n elif event.key in (pygame.K_MINUS, pygame.K_UNDERSCORE) and ctrl:\n self.zoom(1 / 1.1) # zoom out\n elif event.key in (pygame.K_PAGEUP, pygame.K_PAGEDOWN):\n if ctrl:\n if event.key == pygame.K_PAGEUP:\n self._step_mul += 1\n elif self._step_mul > 1:\n self._step_mul -= 1\n print(\"New step mul:\", self._step_mul)\n else:\n self._fps *= 1.25 if event.key == pygame.K_PAGEUP else 1 / 1.25\n print(\"New max game speed: %.1f\" % self._fps)\n elif event.key == pygame.K_F1:\n if self._obs.observation.player_common.idle_worker_count > 0:\n controller.act(self.select_idle_worker(ctrl, shift))\n elif event.key == pygame.K_F2:\n if self._obs.observation.player_common.army_count > 0:\n controller.act(self.select_army(shift))\n elif event.key == pygame.K_F3:\n if self._obs.observation.player_common.warp_gate_count > 0:\n controller.act(self.select_warp_gates(shift))\n if self._obs.observation.player_common.larva_count > 0:\n controller.act(self.select_larva())\n elif event.key in self.cmd_group_keys:\n controller.act(self.control_group(self.cmd_group_keys[event.key],\n ctrl, shift, alt))\n elif event.key in self.camera_actions:\n if self._obs:\n pt = point.Point.build(self._obs.observation.raw_data.player.camera)\n pt += self.camera_actions[event.key]\n controller.act(self.camera_action_raw(pt))\n controller.observer_act(self.camera_action_observer_pt(pt))\n elif event.key == pygame.K_ESCAPE:\n controller.observer_act(self.camera_action_observer_player(\n self._obs.observation.player_common.player_id))\n if self._queued_action:\n self.clear_queued_action()\n else:\n cmds = 
self._abilities(lambda cmd: cmd.hotkey == \"escape\") # Cancel\n for cmd in cmds: # There could be multiple cancels.\n assert not cmd.requires_point\n controller.act(self.unit_action(cmd, None, shift))\n else:\n if not self._queued_action:\n key = pygame.key.name(event.key).lower()\n new_cmd = self._queued_hotkey + key\n cmds = self._abilities(lambda cmd, n=new_cmd: ( # pylint: disable=g-long-lambda\n cmd.hotkey != \"escape\" and cmd.hotkey.startswith(n)))\n if cmds:\n self._queued_hotkey = new_cmd\n if len(cmds) == 1:\n cmd = cmds[0]\n if cmd.hotkey == self._queued_hotkey:\n if cmd.requires_point:\n self.clear_queued_action()\n self._queued_action = cmd\n else:\n controller.act(self.unit_action(cmd, None, shift))\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = self.get_mouse_pos(event.pos)\n if event.button == MouseButtons.LEFT and mouse_pos:\n if self._queued_action:\n controller.act(self.unit_action(\n self._queued_action, mouse_pos, shift))\n elif mouse_pos.surf.surf_type & SurfType.MINIMAP:\n controller.act(self.camera_action(mouse_pos))\n controller.observer_act(self.camera_action_observer_pt(\n mouse_pos.world_pos))\n else:\n self._select_start = mouse_pos\n elif event.button == MouseButtons.RIGHT:\n if self._queued_action:\n self.clear_queued_action()\n cmds = self._abilities(lambda cmd: cmd.name == \"Smart\")\n if cmds:\n controller.act(self.unit_action(cmds[0], mouse_pos, shift))\n elif event.type == pygame.MOUSEBUTTONUP:\n mouse_pos = self.get_mouse_pos(event.pos)\n if event.button == MouseButtons.LEFT and self._select_start:\n if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and\n mouse_pos.surf.surf_type == self._select_start.surf.surf_type):\n controller.act(self.select_action(\n self._select_start, mouse_pos, ctrl, shift))\n self._select_start = None\n return ActionCmd.STEP\n\n def camera_action(self, mouse_pos):\n \"\"\"Return a `sc_pb.Action` with the camera movement filled.\"\"\"\n action = sc_pb.Action()\n action_spatial = mouse_pos.action_spatial(action)\n mouse_pos.obs_pos.assign_to(action_spatial.camera_move.center_minimap)\n return action\n\n def camera_action_raw(self, world_pos):\n \"\"\"Return a `sc_pb.Action` with the camera movement filled.\"\"\"\n action = sc_pb.Action()\n world_pos.assign_to(action.action_raw.camera_move.center_world_space)\n return action\n\n def camera_action_observer_pt(self, world_pos):\n \"\"\"Return a `sc_pb.ObserverAction` with the camera movement filled.\"\"\"\n action = sc_pb.ObserverAction()\n world_pos.assign_to(action.camera_move.world_pos)\n return action\n\n def camera_action_observer_player(self, player_id):\n \"\"\"Return a `sc_pb.ObserverAction` with the camera movement filled.\"\"\"\n action = sc_pb.ObserverAction()\n action.camera_follow_player.player_id = player_id\n return action\n\n def select_action(self, pos1, pos2, ctrl, shift):\n \"\"\"Return a `sc_pb.Action` with the selection filled.\"\"\"\n assert pos1.surf.surf_type == pos2.surf.surf_type\n assert pos1.surf.world_to_obs == pos2.surf.world_to_obs\n\n action = sc_pb.Action()\n if self._raw_actions:\n unit_command = action.action_raw.unit_command\n unit_command.ability_id = 0 # no-op\n player_id = self._obs.observation.player_common.player_id\n if pos1.world_pos == pos2.world_pos: # select a point\n for u, p in reversed(list(self._visible_units())):\n if (pos1.world_pos.contained_circle(p, u.radius) and\n u.owner == player_id):\n unit_command.unit_tags.append(u.tag)\n break\n else:\n rect = point.Rect(pos1.world_pos, pos2.world_pos)\n 
unit_command.unit_tags.extend([\n u.tag for u, p in self._visible_units()\n if u.owner == player_id and rect.intersects_circle(p, u.radius)])\n else:\n action_spatial = pos1.action_spatial(action)\n if pos1.world_pos == pos2.world_pos: # select a point\n select = action_spatial.unit_selection_point\n pos1.obs_pos.assign_to(select.selection_screen_coord)\n mod = sc_spatial.ActionSpatialUnitSelectionPoint\n if ctrl:\n select.type = mod.AddAllType if shift else mod.AllType\n else:\n select.type = mod.Toggle if shift else mod.Select\n else:\n select = action_spatial.unit_selection_rect\n rect = select.selection_screen_coord.add()\n pos1.obs_pos.assign_to(rect.p0)\n pos2.obs_pos.assign_to(rect.p1)\n select.selection_add = shift\n\n # Clear the queued action if something will be selected. An alternative\n # implementation may check whether the selection changed next frame.\n units = self._units_in_area(point.Rect(pos1.world_pos, pos2.world_pos))\n if units:\n self.clear_queued_action()\n\n return action\n\n def select_idle_worker(self, ctrl, shift):\n \"\"\"Select an idle worker.\"\"\"\n action = sc_pb.Action()\n mod = sc_ui.ActionSelectIdleWorker\n if ctrl:\n select_worker = mod.AddAll if shift else mod.All\n else:\n select_worker = mod.Add if shift else mod.Set\n action.action_ui.select_idle_worker.type = select_worker\n return action\n\n def select_army(self, shift):\n \"\"\"Select the entire army.\"\"\"\n action = sc_pb.Action()\n action.action_ui.select_army.selection_add = shift\n return action\n\n def select_warp_gates(self, shift):\n \"\"\"Select all warp gates.\"\"\"\n action = sc_pb.Action()\n action.action_ui.select_warp_gates.selection_add = shift\n return action\n\n def select_larva(self):\n \"\"\"Select all larva.\"\"\"\n action = sc_pb.Action()\n action.action_ui.select_larva.SetInParent() # Adds the empty proto field.\n return action\n\n def control_group(self, control_group_id, ctrl, shift, alt):\n \"\"\"Act on a control group, selecting, setting, etc.\"\"\"\n action = sc_pb.Action()\n select = action.action_ui.control_group\n\n mod = sc_ui.ActionControlGroup\n if not ctrl and not shift and not alt:\n select.action = mod.Recall\n elif ctrl and not shift and not alt:\n select.action = mod.Set\n elif not ctrl and shift and not alt:\n select.action = mod.Append\n elif not ctrl and not shift and alt:\n select.action = mod.SetAndSteal\n elif not ctrl and shift and alt:\n select.action = mod.AppendAndSteal\n else:\n return # unknown\n select.control_group_index = control_group_id\n return action\n\n def unit_action(self, cmd, pos, shift):\n \"\"\"Return a `sc_pb.Action` filled with the cmd and appropriate target.\"\"\"\n action = sc_pb.Action()\n if self._raw_actions:\n unit_command = action.action_raw.unit_command\n unit_command.ability_id = cmd.ability_id\n unit_command.queue_command = shift\n player_id = self._obs.observation.player_common.player_id\n unit_command.unit_tags.extend([u.tag for u, _ in self._visible_units()\n if u.is_selected and u.owner == player_id])\n if pos:\n for u, p in reversed(list(self._visible_units())):\n if pos.world_pos.contained_circle(p, u.radius):\n unit_command.target_unit_tag = u.tag\n break\n else:\n pos.world_pos.assign_to(unit_command.target_world_space_pos)\n else:\n if pos:\n action_spatial = pos.action_spatial(action)\n unit_command = action_spatial.unit_command\n unit_command.ability_id = cmd.ability_id\n unit_command.queue_command = shift\n if pos.surf.surf_type & SurfType.SCREEN:\n pos.obs_pos.assign_to(unit_command.target_screen_coord)\n 
elif pos.surf.surf_type & SurfType.MINIMAP:\n pos.obs_pos.assign_to(unit_command.target_minimap_coord)\n else:\n if self._feature_screen_px:\n action.action_feature_layer.unit_command.ability_id = cmd.ability_id\n else:\n action.action_render.unit_command.ability_id = cmd.ability_id\n\n self.clear_queued_action()\n return action\n\n def _abilities(self, fn=None):\n \"\"\"Return the list of abilities filtered by `fn`.\"\"\"\n out = {}\n for cmd in self._obs.observation.abilities:\n ability = _Ability(cmd, self._static_data.abilities)\n if not fn or fn(ability):\n out[ability.ability_id] = ability\n return list(out.values())\n\n def _visible_units(self):\n \"\"\"A generator of visible units and their positions as `Point`s, sorted.\"\"\"\n # Sort the units by elevation, then owned (eg refinery) above world (ie 16)\n # (eg geiser), small above big, and otherwise arbitrary but stable.\n for u in sorted(self._obs.observation.raw_data.units,\n key=lambda u: (u.pos.z, u.owner != 16, -u.radius, u.tag)):\n yield u, point.Point.build(u.pos)\n\n def _units_in_area(self, rect):\n \"\"\"Return the list of units that intersect the rect.\"\"\"\n player_id = self._obs.observation.player_common.player_id\n return [u for u, p in self._visible_units()\n if rect.intersects_circle(p, u.radius) and u.owner == player_id]\n\n def get_unit_name(self, surf, name, radius):\n \"\"\"Get a length limited unit name for drawing units.\"\"\"\n key = (name, radius)\n if key not in self._name_lengths:\n max_len = surf.world_to_surf.fwd_dist(radius * 1.6)\n for i in range(len(name)):\n if self._font_small.size(name[:i + 1])[0] > max_len:\n self._name_lengths[key] = name[:i]\n break\n else:\n self._name_lengths[key] = name\n return self._name_lengths[key]\n\n @sw.decorate\n def draw_units(self, surf):\n \"\"\"Draw the units and buildings.\"\"\"\n unit_dict = None # Cache the units {tag: unit_proto} for orders.\n tau = 2 * math.pi\n for u, p in self._visible_units():\n if self._camera.intersects_circle(p, u.radius):\n fraction_damage = clamp((u.health_max - u.health) / (u.health_max or 1),\n 0, 1)\n if u.display_type == sc_raw.Placeholder:\n surf.draw_circle(colors.PLAYER_ABSOLUTE_PALETTE[u.owner] // 3, p,\n u.radius)\n else:\n surf.draw_circle(colors.PLAYER_ABSOLUTE_PALETTE[u.owner], p, u.radius)\n\n if fraction_damage > 0:\n surf.draw_circle(colors.PLAYER_ABSOLUTE_PALETTE[u.owner] // 2,\n p, u.radius * fraction_damage)\n surf.draw_circle(colors.black, p, u.radius, thickness=1)\n\n if self._static_data.unit_stats[u.unit_type].movement_speed > 0:\n surf.draw_arc(colors.white, p, u.radius, u.facing - 0.1,\n u.facing + 0.1, thickness=1)\n\n def draw_arc_ratio(color, world_loc, radius, start, end, thickness=1):\n surf.draw_arc(color, world_loc, radius, start * tau, end * tau,\n thickness)\n\n if u.shield and u.shield_max:\n draw_arc_ratio(colors.blue, p, u.radius - 0.05, 0,\n u.shield / u.shield_max)\n if u.energy and u.energy_max:\n draw_arc_ratio(colors.purple * 0.9, p, u.radius - 0.1, 0,\n u.energy / u.energy_max)\n if 0 < u.build_progress < 1:\n draw_arc_ratio(colors.cyan, p, u.radius - 0.15, 0, u.build_progress)\n elif u.orders and 0 < u.orders[0].progress < 1:\n draw_arc_ratio(colors.cyan, p, u.radius - 0.15, 0,\n u.orders[0].progress)\n\n if u.buff_duration_remain and u.buff_duration_max:\n draw_arc_ratio(colors.white, p, u.radius - 0.2, 0,\n u.buff_duration_remain / u.buff_duration_max)\n\n if u.attack_upgrade_level:\n draw_arc_ratio(self.upgrade_colors[u.attack_upgrade_level], p,\n u.radius - 0.25, 0.18, 0.22, 
thickness=3)\n\n if u.armor_upgrade_level:\n draw_arc_ratio(self.upgrade_colors[u.armor_upgrade_level], p,\n u.radius - 0.25, 0.23, 0.27, thickness=3)\n\n if u.shield_upgrade_level:\n draw_arc_ratio(self.upgrade_colors[u.shield_upgrade_level], p,\n u.radius - 0.25, 0.28, 0.32, thickness=3)\n\n def write_small(loc, s):\n surf.write_world(self._font_small, colors.white, loc, str(s))\n\n name = self.get_unit_name(\n surf, self._static_data.units.get(u.unit_type, \"<none>\"), u.radius)\n if name:\n write_small(p, name)\n if u.ideal_harvesters > 0:\n write_small(p + point.Point(0, 0.5),\n \"%s / %s\" % (u.assigned_harvesters, u.ideal_harvesters))\n if u.mineral_contents > 0:\n write_small(p - point.Point(0, 0.5), u.mineral_contents)\n elif u.vespene_contents > 0:\n write_small(p - point.Point(0, 0.5), u.vespene_contents)\n elif u.display_type == sc_raw.Snapshot:\n write_small(p - point.Point(0, 0.5), \"snapshot\")\n elif u.display_type == sc_raw.Placeholder:\n write_small(p - point.Point(0, 0.5), \"placeholder\")\n elif u.is_hallucination:\n write_small(p - point.Point(0, 0.5), \"hallucination\")\n elif u.is_burrowed:\n write_small(p - point.Point(0, 0.5), \"burrowed\")\n elif u.cloak != sc_raw.NotCloaked:\n write_small(p - point.Point(0, 0.5), \"cloaked\")\n\n if u.is_selected:\n surf.draw_circle(colors.green, p, u.radius + 0.1, 1)\n\n # Draw the orders of selected units.\n start_point = p\n for o in u.orders:\n target_point = None\n if o.HasField(\"target_world_space_pos\"):\n target_point = point.Point.build(o.target_world_space_pos)\n elif o.HasField(\"target_unit_tag\"):\n if unit_dict is None:\n unit_dict = {t.tag: t\n for t in self._obs.observation.raw_data.units}\n target_unit = unit_dict.get(o.target_unit_tag)\n if target_unit:\n target_point = point.Point.build(target_unit.pos)\n if target_point:\n surf.draw_line(colors.cyan * 0.75, start_point, target_point)\n start_point = target_point\n else:\n break\n for rally in u.rally_targets:\n surf.draw_line(colors.cyan * 0.75, p,\n point.Point.build(rally.point))\n\n @sw.decorate\n def draw_effects(self, surf):\n \"\"\"Draw the effects.\"\"\"\n for effect in self._obs.observation.raw_data.effects:\n color = [\n colors.effects[effect.effect_id],\n colors.effects[effect.effect_id],\n colors.PLAYER_ABSOLUTE_PALETTE[effect.owner],\n ]\n name = self.get_unit_name(\n surf, features.Effects(effect.effect_id).name, effect.radius)\n for pos in effect.pos:\n p = point.Point.build(pos)\n # pygame alpha transparency doesn't work, so just draw thin circles.\n for r in range(1, int(effect.radius * 3)):\n surf.draw_circle(color[r % 3], p, r / 3, thickness=2)\n if name:\n surf.write_world(self._font_small, colors.white, p, name)\n\n @sw.decorate\n def draw_selection(self, surf):\n \"\"\"Draw the selection rectange.\"\"\"\n select_start = self._select_start # Cache to avoid a race condition.\n if select_start:\n mouse_pos = self.get_mouse_pos()\n if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and\n mouse_pos.surf.surf_type == select_start.surf.surf_type):\n rect = point.Rect(select_start.world_pos, mouse_pos.world_pos)\n surf.draw_rect(colors.green, rect, 1)\n\n @sw.decorate\n def draw_build_target(self, surf):\n \"\"\"Draw the build target.\"\"\"\n round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v)\n\n queued_action = self._queued_action\n if queued_action:\n radius = queued_action.footprint_radius\n if radius:\n pos = self.get_mouse_pos()\n if pos:\n pos = point.Point(round_half(pos.world_pos.x, (radius * 2) % 2),\n 
round_half(pos.world_pos.y, (radius * 2) % 2))\n surf.draw_circle(\n colors.PLAYER_ABSOLUTE_PALETTE[\n self._obs.observation.player_common.player_id],\n pos, radius)\n\n @sw.decorate\n def draw_overlay(self, surf):\n \"\"\"Draw the overlay describing resources.\"\"\"\n obs = self._obs.observation\n player = obs.player_common\n surf.write_screen(\n self._font_large, colors.green, (0.2, 0.2),\n \"Minerals: %s, Vespene: %s, Food: %s / %s\" % (\n player.minerals, player.vespene, player.food_used, player.food_cap))\n times, steps = zip(*self._game_times)\n sec = obs.game_loop // 22.4 # http://liquipedia.net/starcraft2/Game_Speed\n surf.write_screen(\n self._font_large, colors.green, (-0.2, 0.2),\n \"Score: %s, Step: %s, %.1f/s, Time: %d:%02d\" % (\n obs.score.score, obs.game_loop, sum(steps) / (sum(times) or 1),\n sec // 60, sec % 60),\n align=\"right\")\n surf.write_screen(\n self._font_large, colors.green * 0.8, (-0.2, 1.2),\n \"APM: %d, EPM: %d, FPS: O:%.1f, R:%.1f\" % (\n obs.score.score_details.current_apm,\n obs.score.score_details.current_effective_apm,\n len(times) / (sum(times) or 1),\n len(self._render_times) / (sum(self._render_times) or 1)),\n align=\"right\")\n line = 3\n for alert, ts in sorted(self._alerts.items(), key=lambda item: item[1]):\n if time.time() < ts + 3: # Show for 3 seconds.\n surf.write_screen(self._font_large, colors.red, (20, line), alert)\n line += 1\n else:\n del self._alerts[alert]\n\n @sw.decorate\n def draw_help(self, surf):\n \"\"\"Draw the help dialog.\"\"\"\n if not self._help:\n return\n\n def write(loc, text):\n surf.write_screen(self._font_large, colors.black, loc, text)\n\n surf.surf.fill(colors.white * 0.8)\n write((1, 1), \"Shortcuts:\")\n\n max_len = max(len(s) for s, _ in self.shortcuts)\n for i, (hotkey, description) in enumerate(self.shortcuts, start=2):\n write((2, i), hotkey)\n write((3 + max_len * 0.7, i), description)\n\n @sw.decorate\n def draw_commands(self, surf):\n \"\"\"Draw the list of upgrades and available commands.\"\"\"\n line = itertools.count(2)\n\n def write(loc, text, color=colors.yellow):\n surf.write_screen(self._font_large, color, loc, text)\n def write_line(x, *args, **kwargs):\n write((x, next(line)), *args, **kwargs)\n\n action_count = len(self._obs.observation.abilities)\n if action_count > 0:\n write_line(0.2, \"Available Actions:\", colors.green)\n past_abilities = {act.ability\n for act in self._past_actions if act.ability}\n for cmd in sorted(self._abilities(lambda c: c.name != \"Smart\"),\n key=lambda c: c.name):\n if self._queued_action and cmd == self._queued_action:\n color = colors.green\n elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):\n color = colors.green * 0.75\n elif cmd.ability_id in past_abilities:\n color = colors.red\n else:\n color = colors.yellow\n hotkey = cmd.hotkey[0:3] # truncate \"escape\" -> \"esc\"\n y = next(line)\n write((1, y), hotkey, color)\n write((4, y), cmd.name, color)\n next(line)\n\n upgrade_count = len(self._obs.observation.raw_data.player.upgrade_ids)\n if upgrade_count > 0:\n write_line(0.2, \"Upgrades: %s\" % upgrade_count, colors.green)\n upgrades = [\n self._static_data.upgrades[upgrade_id].name\n for upgrade_id in self._obs.observation.raw_data.player.upgrade_ids]\n for name in sorted(upgrades):\n write_line(1, name)\n\n @sw.decorate\n def draw_panel(self, surf):\n \"\"\"Draw the unit selection or build queue.\"\"\"\n\n left = -14 # How far from the right border\n line = itertools.count(3)\n\n def unit_name(unit_type):\n return 
self._static_data.units.get(unit_type, \"<unknown>\")\n\n def write(loc, text, color=colors.yellow):\n surf.write_screen(self._font_large, color, loc, text)\n def write_line(x, *args, **kwargs):\n write((left + x, next(line)), *args, **kwargs)\n\n def write_single(unit):\n \"\"\"Write a description of a single selected unit.\"\"\"\n write_line(1, unit_name(unit.unit_type), colors.cyan)\n write_line(1, \"Health: %s / %s\" % (unit.health, unit.max_health))\n if unit.max_shields:\n write_line(1, \"Shields: %s / %s\" % (unit.shields, unit.max_shields))\n if unit.max_energy:\n write_line(1, \"Energy: %s / %s\" % (unit.energy, unit.max_energy))\n if unit.build_progress > 0:\n write_line(1, \"Progress: %d%%\" % (unit.build_progress * 100))\n if unit.transport_slots_taken > 0:\n write_line(1, \"Slots: %s\" % unit.transport_slots_taken)\n\n def write_multi(units):\n \"\"\"Write a description of multiple selected units.\"\"\"\n counts = collections.defaultdict(int)\n for unit in units:\n counts[unit_name(unit.unit_type)] += 1\n for name, count in sorted(counts.items()):\n y = next(line)\n write((left + 1, y), count)\n write((left + 3, y), name)\n\n ui = self._obs.observation.ui_data\n\n if ui.groups:\n write_line(0, \"Control Groups:\", colors.green)\n for group in ui.groups:\n y = next(line)\n write((left + 1, y), \"%s:\" % group.control_group_index, colors.green)\n write((left + 3, y), \"%s %s\" % (group.count,\n unit_name(group.leader_unit_type)))\n next(line)\n\n if ui.HasField(\"single\"):\n write_line(0, \"Selection:\", colors.green)\n write_single(ui.single.unit)\n if (ui.single.attack_upgrade_level or\n ui.single.armor_upgrade_level or\n ui.single.shield_upgrade_level):\n write_line(1, \"Upgrades:\")\n if ui.single.attack_upgrade_level:\n write_line(2, \"Attack: %s\" % ui.single.attack_upgrade_level)\n if ui.single.armor_upgrade_level:\n write_line(2, \"Armor: %s\" % ui.single.armor_upgrade_level)\n if ui.single.shield_upgrade_level:\n write_line(2, \"Shield: %s\" % ui.single.shield_upgrade_level)\n if ui.single.buffs:\n write_line(1, \"Buffs:\")\n for b in ui.single.buffs:\n write_line(2, buffs.Buffs(b).name)\n elif ui.HasField(\"multi\"):\n write_line(0, \"Selection:\", colors.green)\n write_multi(ui.multi.units)\n elif ui.HasField(\"cargo\"):\n write_line(0, \"Selection:\", colors.green)\n write_single(ui.cargo.unit)\n next(line)\n write_line(0, \"Cargo:\", colors.green)\n write_line(1, \"Empty slots: %s\" % ui.cargo.slots_available)\n write_multi(ui.cargo.passengers)\n elif ui.HasField(\"production\"):\n write_line(0, \"Selection:\", colors.green)\n write_single(ui.production.unit)\n next(line)\n if ui.production.production_queue:\n write_line(0, \"Production:\", colors.green)\n for item in ui.production.production_queue:\n specific_data = self._static_data.abilities[item.ability_id]\n if specific_data.remaps_to_ability_id:\n general_data = self._static_data.abilities[\n specific_data.remaps_to_ability_id]\n else:\n general_data = specific_data\n s = (general_data.friendly_name or general_data.button_name or\n general_data.link_name)\n s = s.replace(\"Research \", \"\").replace(\"Train \", \"\")\n if item.build_progress > 0:\n s += \": %d%%\" % (item.build_progress * 100)\n write_line(1, s)\n elif ui.production.build_queue: # Handle old binaries, no research.\n write_line(0, \"Build Queue:\", colors.green)\n for unit in ui.production.build_queue:\n s = unit_name(unit.unit_type)\n if unit.build_progress > 0:\n s += \": %d%%\" % (unit.build_progress * 100)\n write_line(1, s)\n\n 
@sw.decorate\n def draw_actions(self):\n \"\"\"Draw the actions so that they can be inspected for accuracy.\"\"\"\n now = time.time()\n for act in self._past_actions:\n if act.pos and now < act.deadline:\n remain = (act.deadline - now) / (act.deadline - act.time)\n if isinstance(act.pos, point.Point):\n size = remain / 3\n self.all_surfs(_Surface.draw_circle, act.color, act.pos, size, 1)\n else:\n # Fade with alpha would be nice, but doesn't seem to work.\n self.all_surfs(_Surface.draw_rect, act.color, act.pos, 1)\n\n @sw.decorate\n def prepare_actions(self, obs):\n \"\"\"Keep a list of the past actions so they can be drawn.\"\"\"\n now = time.time()\n while self._past_actions and self._past_actions[0].deadline < now:\n self._past_actions.pop(0)\n\n def add_act(ability_id, color, pos, timeout=1):\n if ability_id:\n ability = self._static_data.abilities[ability_id]\n if ability.remaps_to_ability_id: # Prefer general abilities.\n ability_id = ability.remaps_to_ability_id\n self._past_actions.append(\n PastAction(ability_id, color, pos, now, now + timeout))\n\n for act in obs.actions:\n if (act.HasField(\"action_raw\") and\n act.action_raw.HasField(\"unit_command\") and\n act.action_raw.unit_command.HasField(\"target_world_space_pos\")):\n pos = point.Point.build(\n act.action_raw.unit_command.target_world_space_pos)\n add_act(act.action_raw.unit_command.ability_id, colors.yellow, pos)\n if act.HasField(\"action_feature_layer\"):\n act_fl = act.action_feature_layer\n if act_fl.HasField(\"unit_command\"):\n if act_fl.unit_command.HasField(\"target_screen_coord\"):\n pos = self._world_to_feature_screen_px.back_pt(\n point.Point.build(act_fl.unit_command.target_screen_coord))\n add_act(act_fl.unit_command.ability_id, colors.cyan, pos)\n elif act_fl.unit_command.HasField(\"target_minimap_coord\"):\n pos = self._world_to_feature_minimap_px.back_pt(\n point.Point.build(act_fl.unit_command.target_minimap_coord))\n add_act(act_fl.unit_command.ability_id, colors.cyan, pos)\n else:\n add_act(act_fl.unit_command.ability_id, None, None)\n if (act_fl.HasField(\"unit_selection_point\") and\n act_fl.unit_selection_point.HasField(\"selection_screen_coord\")):\n pos = self._world_to_feature_screen_px.back_pt(point.Point.build(\n act_fl.unit_selection_point.selection_screen_coord))\n add_act(None, colors.cyan, pos)\n if act_fl.HasField(\"unit_selection_rect\"):\n for r in act_fl.unit_selection_rect.selection_screen_coord:\n rect = point.Rect(\n self._world_to_feature_screen_px.back_pt(\n point.Point.build(r.p0)),\n self._world_to_feature_screen_px.back_pt(\n point.Point.build(r.p1)))\n add_act(None, colors.cyan, rect, 0.3)\n if act.HasField(\"action_render\"):\n act_rgb = act.action_render\n if act_rgb.HasField(\"unit_command\"):\n if act_rgb.unit_command.HasField(\"target_screen_coord\"):\n pos = self._world_to_rgb_screen_px.back_pt(\n point.Point.build(act_rgb.unit_command.target_screen_coord))\n add_act(act_rgb.unit_command.ability_id, colors.red, pos)\n elif act_rgb.unit_command.HasField(\"target_minimap_coord\"):\n pos = self._world_to_rgb_minimap_px.back_pt(\n point.Point.build(act_rgb.unit_command.target_minimap_coord))\n add_act(act_rgb.unit_command.ability_id, colors.red, pos)\n else:\n add_act(act_rgb.unit_command.ability_id, None, None)\n if (act_rgb.HasField(\"unit_selection_point\") and\n act_rgb.unit_selection_point.HasField(\"selection_screen_coord\")):\n pos = self._world_to_rgb_screen_px.back_pt(point.Point.build(\n act_rgb.unit_selection_point.selection_screen_coord))\n add_act(None, 
colors.red, pos)\n if act_rgb.HasField(\"unit_selection_rect\"):\n for r in act_rgb.unit_selection_rect.selection_screen_coord:\n rect = point.Rect(\n self._world_to_rgb_screen_px.back_pt(\n point.Point.build(r.p0)),\n self._world_to_rgb_screen_px.back_pt(\n point.Point.build(r.p1)))\n add_act(None, colors.red, rect, 0.3)\n\n @sw.decorate\n def draw_base_map(self, surf):\n \"\"\"Draw the base map.\"\"\"\n hmap_feature = features.SCREEN_FEATURES.height_map\n hmap = hmap_feature.unpack(self._obs.observation)\n if not hmap.any():\n hmap = hmap + 100 # pylint: disable=g-no-augmented-assignment\n hmap_color = hmap_feature.color(hmap)\n out = hmap_color * 0.6\n\n creep_feature = features.SCREEN_FEATURES.creep\n creep = creep_feature.unpack(self._obs.observation)\n creep_mask = creep > 0\n creep_color = creep_feature.color(creep)\n out[creep_mask, :] = (0.4 * out[creep_mask, :] +\n 0.6 * creep_color[creep_mask, :])\n\n power_feature = features.SCREEN_FEATURES.power\n power = power_feature.unpack(self._obs.observation)\n power_mask = power > 0\n power_color = power_feature.color(power)\n out[power_mask, :] = (0.7 * out[power_mask, :] +\n 0.3 * power_color[power_mask, :])\n\n if self._render_player_relative:\n player_rel_feature = features.SCREEN_FEATURES.player_relative\n player_rel = player_rel_feature.unpack(self._obs.observation)\n player_rel_mask = player_rel > 0\n player_rel_color = player_rel_feature.color(player_rel)\n out[player_rel_mask, :] = player_rel_color[player_rel_mask, :]\n\n visibility = features.SCREEN_FEATURES.visibility_map.unpack(\n self._obs.observation)\n visibility_fade = np.array([[0.5] * 3, [0.75]*3, [1]*3])\n out *= visibility_fade[visibility]\n\n surf.blit_np_array(out)\n\n @sw.decorate\n def draw_mini_map(self, surf):\n \"\"\"Draw the minimap.\"\"\"\n if (self._render_rgb and self._obs.observation.HasField(\"render_data\") and\n self._obs.observation.render_data.HasField(\"minimap\")):\n # Draw the rendered version.\n surf.blit_np_array(features.Feature.unpack_rgb_image(\n self._obs.observation.render_data.minimap))\n else: # Render it manually from feature layer data.\n hmap_feature = features.MINIMAP_FEATURES.height_map\n hmap = hmap_feature.unpack(self._obs.observation)\n if not hmap.any():\n hmap = hmap + 100 # pylint: disable=g-no-augmented-assignment\n hmap_color = hmap_feature.color(hmap)\n\n creep_feature = features.MINIMAP_FEATURES.creep\n creep = creep_feature.unpack(self._obs.observation)\n creep_mask = creep > 0\n creep_color = creep_feature.color(creep)\n\n if self._obs.observation.player_common.player_id in (0, 16): # observer\n # If we're the observer, show the absolute since otherwise all player\n # units are friendly, making it pretty boring.\n player_feature = features.MINIMAP_FEATURES.player_id\n else:\n player_feature = features.MINIMAP_FEATURES.player_relative\n player_data = player_feature.unpack(self._obs.observation)\n player_mask = player_data > 0\n player_color = player_feature.color(player_data)\n\n visibility = features.MINIMAP_FEATURES.visibility_map.unpack(\n self._obs.observation)\n visibility_fade = np.array([[0.5] * 3, [0.75]*3, [1]*3])\n\n # Compose and color the different layers.\n out = hmap_color * 0.6\n out[creep_mask, :] = (0.4 * out[creep_mask, :] +\n 0.6 * creep_color[creep_mask, :])\n out[player_mask, :] = player_color[player_mask, :]\n out *= visibility_fade[visibility]\n\n # Render the bit of the composited layers that actually correspond to the\n # map. 
This isn't all of it on non-square maps.\n shape = self._playable.diagonal.scale_max_size(\n self._feature_minimap_px).floor()\n surf.blit_np_array(out[:shape.y, :shape.x, :])\n\n surf.draw_rect(colors.white * 0.8, self._camera, 1) # Camera\n\n # Sensor rings.\n for radar in self._obs.observation.raw_data.radar:\n surf.draw_circle(colors.white / 2, point.Point.build(radar.pos),\n radar.radius, 1)\n\n if self._obs.observation.game_loop < 22.4 * 20:\n for loc in self._game_info.start_raw.start_locations:\n surf.draw_circle(colors.red, point.Point.build(loc), 5, 1)\n\n pygame.draw.rect(surf.surf, colors.red, surf.surf.get_rect(), 1) # Border\n\n def check_valid_queued_action(self):\n # Make sure the existing command is still valid\n if (self._queued_hotkey and not self._abilities(\n lambda cmd: cmd.hotkey.startswith(self._queued_hotkey))):\n self._queued_hotkey = \"\"\n if (self._queued_action and not self._abilities(\n lambda cmd: self._queued_action == cmd)):\n self._queued_action = None\n\n @sw.decorate\n def draw_rendered_map(self, surf):\n \"\"\"Draw the rendered pixels.\"\"\"\n surf.blit_np_array(features.Feature.unpack_rgb_image(\n self._obs.observation.render_data.map))\n\n def draw_screen(self, surf):\n \"\"\"Draw the screen area.\"\"\"\n # surf.fill(colors.black)\n if (self._render_rgb and self._obs.observation.HasField(\"render_data\") and\n self._obs.observation.render_data.HasField(\"map\")):\n self.draw_rendered_map(surf)\n else:\n self.draw_base_map(surf)\n self.draw_effects(surf)\n self.draw_units(surf)\n self.draw_selection(surf)\n self.draw_build_target(surf)\n self.draw_overlay(surf)\n self.draw_commands(surf)\n self.draw_panel(surf)\n\n @sw.decorate\n def draw_feature_layer(self, surf, feature):\n \"\"\"Draw a feature layer.\"\"\"\n layer = feature.unpack(self._obs.observation)\n if layer is not None:\n surf.blit_np_array(feature.color(layer))\n else: # Ignore layers that aren't in this version of SC2.\n surf.surf.fill(colors.black)\n\n @sw.decorate\n def draw_raw_layer(self, surf, from_obs, name, color):\n \"\"\"Draw a raw layer.\"\"\"\n if from_obs:\n layer = getattr(self._obs.observation.raw_data.map_state, name)\n else:\n layer = getattr(self._game_info.start_raw, name)\n layer = features.Feature.unpack_layer(layer)\n if layer is not None:\n surf.blit_np_array(color[layer])\n else: # Ignore layers that aren't in this version of SC2.\n surf.surf.fill(colors.black)\n\n def all_surfs(self, fn, *args, **kwargs):\n for surf in self._surfaces:\n if surf.world_to_surf:\n fn(surf, *args, **kwargs)\n\n @sw.decorate\n def render(self, obs):\n \"\"\"Push an observation onto the queue to be rendered.\"\"\"\n if not self._initialized:\n return\n now = time.time()\n self._game_times.append(\n (now - self._last_time,\n max(1, obs.observation.game_loop - self._obs.observation.game_loop)))\n self._last_time = now\n self._last_game_loop = self._obs.observation.game_loop\n self._obs_queue.put(obs)\n if self._render_sync:\n self._obs_queue.join()\n\n def render_thread(self):\n \"\"\"A render loop that pulls observations off the queue to render.\"\"\"\n obs = True\n while obs: # Send something falsy through the queue to shut down.\n obs = self._obs_queue.get()\n if obs:\n for alert in obs.observation.alerts:\n self._alerts[sc_pb.Alert.Name(alert)] = time.time()\n for err in obs.action_errors:\n if err.result != sc_err.Success:\n self._alerts[sc_err.ActionResult.Name(err.result)] = time.time()\n self.prepare_actions(obs)\n if self._obs_queue.empty():\n # Only render the latest observation 
so we keep up with the game.\n self.render_obs(obs)\n if self._video_writer:\n self._video_writer.add(np.transpose(\n pygame.surfarray.pixels3d(self._window), axes=(1, 0, 2)))\n self._obs_queue.task_done()\n\n @with_lock(render_lock)\n @sw.decorate\n def render_obs(self, obs):\n \"\"\"Render a frame given an observation.\"\"\"\n start_time = time.time()\n self._obs = obs\n self.check_valid_queued_action()\n self._update_camera(point.Point.build(\n self._obs.observation.raw_data.player.camera))\n\n for surf in self._surfaces:\n # Render that surface.\n surf.draw(surf)\n\n mouse_pos = self.get_mouse_pos()\n if mouse_pos:\n # Draw a small mouse cursor\n self.all_surfs(_Surface.draw_circle, colors.green, mouse_pos.world_pos,\n 0.1)\n\n self.draw_actions()\n\n with sw(\"flip\"):\n pygame.display.flip()\n\n self._render_times.append(time.time() - start_time)\n\n def run(self, run_config, controller, max_game_steps=0, max_episodes=0,\n game_steps_per_episode=0, save_replay=False):\n \"\"\"Run loop that gets observations, renders them, and sends back actions.\"\"\"\n is_replay = (controller.status == remote_controller.Status.in_replay)\n total_game_steps = 0\n start_time = time.time()\n num_episodes = 0\n\n try:\n while True:\n self.init(controller.game_info(), controller.data())\n episode_steps = 0\n num_episodes += 1\n\n controller.step()\n\n while True:\n total_game_steps += self._step_mul\n episode_steps += self._step_mul\n frame_start_time = time.time()\n\n obs = controller.observe()\n self.render(obs)\n\n if obs.player_result:\n break\n\n cmd = self.get_actions(run_config, controller)\n if cmd == ActionCmd.STEP:\n pass\n elif cmd == ActionCmd.QUIT:\n if not is_replay and save_replay:\n self.save_replay(run_config, controller)\n return\n elif cmd == ActionCmd.RESTART:\n break\n else:\n raise Exception(\"Unexpected command: %s\" % cmd)\n\n controller.step(self._step_mul)\n\n if max_game_steps and total_game_steps >= max_game_steps:\n if not is_replay and save_replay:\n self.save_replay(run_config, controller)\n return\n\n if game_steps_per_episode and episode_steps >= game_steps_per_episode:\n break\n\n with sw(\"sleep\"):\n elapsed_time = time.time() - frame_start_time\n time.sleep(max(0, 1 / self._fps - elapsed_time))\n\n if is_replay:\n break\n\n if save_replay:\n self.save_replay(run_config, controller)\n\n if max_episodes and num_episodes >= max_episodes:\n break\n\n print(\"Restarting\")\n controller.restart()\n except KeyboardInterrupt:\n pass\n finally:\n self.close()\n elapsed_time = time.time() - start_time\n print(\"took %.3f seconds for %s steps: %.3f fps\" %\n (elapsed_time, total_game_steps, total_game_steps / elapsed_time))\n\n def __del__(self):\n self.close()\n"
] | [
[
"numpy.array",
"numpy.random.randint"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
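Annotation (not part of the dataset row above): the draw_base_map()/draw_mini_map() code stored in this row dims fog-of-war pixels with a single NumPy fancy-indexing lookup, out *= visibility_fade[visibility]. A minimal sketch of that trick, with made-up array sizes:

import numpy as np

h, w = 4, 6
out = np.ones((h, w, 3))                      # stand-in for the colored height-map image
visibility = np.random.randint(0, 3, (h, w))  # per-pixel visibility level 0..2, as in the feature layer
visibility_fade = np.array([[0.5] * 3, [0.75] * 3, [1] * 3])

# Integer indexing turns the (h, w) visibility layer into an (h, w, 3) RGB
# multiplier, so the least-visible pixels end up at half brightness.
out *= visibility_fade[visibility]
print(out.shape)  # (4, 6, 3)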
ccoulombe/thinc | [
"8d891b61ddef3ca00266ca0ec7c47e2d063a3a83"
] | [
"examples/wrap_pytorch.py"
] | [
"import plac\nimport numpy\n\nimport torch\nfrom torch import autograd\nfrom torch import nn\nimport torch.optim\nimport torch.cuda\nfrom thinc.neural.ops import CupyOps\n\nfrom thinc.extra.wrappers import PyTorchWrapper\nfrom thinc.v2v import Model\n\n\ndef main(length=1000, nO=32, nI=32):\n if CupyOps.xp != None:\n print(\"Use GPU\")\n Model.ops = CupyOps()\n Model.Ops = CupyOps\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n pt_model = nn.Linear(nI, nO)\n optimizer = torch.optim.Adam(pt_model.parameters())\n\n model = PyTorchWrapper(pt_model)\n\n X = Model.ops.xp.ones((length, nI), dtype='f')\n y = 1. / X\n for i in range(10):\n yh, get_dX = model.begin_update(X)\n dY = (yh - y) / len(y)\n dX = get_dX(dY)\n\n\nif __name__ == '__main__':\n plac.call(main)\n"
] | [
[
"torch.nn.Linear",
"torch.set_default_tensor_type"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
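Annotation: the examples/wrap_pytorch.py row above builds the backward pass (begin_update / get_dX) but stops before any weight update. Below is a plain-PyTorch sketch of the same y = 1/x fit with the optimizer step included; it is an illustrative analogue, not thinc's PyTorchWrapper API:

import torch
from torch import nn

length, nO, nI = 1000, 32, 32
model = nn.Linear(nI, nO)
optimizer = torch.optim.Adam(model.parameters())

X = torch.ones(length, nI)
y = 1.0 / X

for _ in range(10):
    optimizer.zero_grad()
    loss = ((model(X) - y) ** 2).mean()  # squared-error objective for the y = 1/x target
    loss.backward()
    optimizer.step()
print(float(loss))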
mdraw/AlphaPose | [
"bed8e0798f6deed4789b9ae2646f72b9fd138c5b"
] | [
"video_demo.py"
] | [
"import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\n\nimport torch.nn as nn\nimport torch.utils.data\nimport numpy as np\nfrom opt import opt\n\nfrom dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco\nfrom yolo.util import write_results, dynamic_write_results\nfrom SPPE.src.main_fast_inference import *\n\nimport ntpath\nimport os\nimport sys\nfrom tqdm import tqdm\nimport time\nfrom fn import getTime\nimport cv2\n\nfrom pPose_nms import pose_nms, write_json\n\nargs = opt\nargs.dataset = 'coco'\nif not args.sp:\n torch.multiprocessing.set_start_method('forkserver', force=True)\n torch.multiprocessing.set_sharing_strategy('file_system')\n\nif __name__ == \"__main__\":\n videofile = args.video\n mode = args.mode\n if not os.path.exists(args.outputpath):\n os.mkdir(args.outputpath)\n \n if not len(videofile):\n raise IOError('Error: must contain --video')\n\n # Load input video\n data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()\n (fourcc,fps,frameSize) = data_loader.videoinfo()\n\n # Load detection loader\n print('Loading YOLO model..')\n sys.stdout.flush()\n det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()\n det_processor = DetectionProcessor(det_loader).start()\n \n # Load pose model\n pose_dataset = Mscoco()\n if args.fast_inference:\n print('Using fast inference...')\n pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)\n else:\n print('Using slow, more accurate inference...')\n pose_model = InferenNet(4 * 1 + 1, pose_dataset)\n pose_model\n pose_model.eval()\n\n runtime_profile = {\n 'dt': [],\n 'pt': [],\n 'pn': []\n }\n\n # Data writer\n save_path = os.path.join(args.outputpath, 'AlphaPose_'+ntpath.basename(videofile).split('.')[0]+'.avi')\n writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()\n\n im_names_desc = tqdm(range(data_loader.length()))\n batchSize = args.posebatch\n for i in im_names_desc:\n start_time = getTime()\n with torch.no_grad():\n (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()\n if orig_img is None:\n break\n if boxes is None or boxes.nelement() == 0:\n writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])\n continue\n\n ckpt_time, det_time = getTime(start_time)\n runtime_profile['dt'].append(det_time)\n # Pose Estimation\n \n datalen = inps.size(0)\n leftover = 0\n if (datalen) % batchSize:\n leftover = 1\n num_batches = datalen // batchSize + leftover\n hm = []\n for j in range(num_batches):\n inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)]\n hm_j = pose_model(inps_j)\n hm.append(hm_j)\n hm = torch.cat(hm)\n ckpt_time, pose_time = getTime(ckpt_time)\n runtime_profile['pt'].append(pose_time)\n\n hm = hm.cpu().data\n writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])\n\n ckpt_time, post_time = getTime(ckpt_time)\n runtime_profile['pn'].append(post_time)\n\n if args.profile:\n # TQDM\n im_names_desc.set_description(\n 'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(\n dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))\n )\n\n print('===========================> Finish Model Running.')\n if (args.save_img or args.save_video) and not args.vis_fast:\n print('===========================> Rendering remaining images in the queue...')\n print('===========================> If this step takes too 
long, you can enable the --vis_fast flag to use fast rendering (real-time).')\n while(writer.running()):\n pass\n writer.stop()\n final_result = writer.results()\n write_json(final_result, args.outputpath)\n"
] | [
[
"torch.multiprocessing.set_start_method",
"torch.cat",
"torch.no_grad",
"numpy.mean",
"torch.multiprocessing.set_sharing_strategy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
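Annotation: video_demo.py above runs pose estimation over detections in fixed-size chunks and concatenates the heatmaps. A self-contained sketch of that batching pattern, with an identity function standing in for pose_model:

import torch

def run_in_batches(model, inps, batchSize):
    datalen = inps.size(0)
    leftover = 1 if datalen % batchSize else 0
    num_batches = datalen // batchSize + leftover
    outs = []
    for j in range(num_batches):
        chunk = inps[j * batchSize:min((j + 1) * batchSize, datalen)]  # last chunk may be short
        outs.append(model(chunk))
    return torch.cat(outs)

hm = run_in_batches(lambda x: x, torch.randn(10, 3), batchSize=4)
print(hm.shape)  # torch.Size([10, 3])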
aiedward/OCR-1 | [
"82ce764fb0071917360ea8b1ec5372035d0897b5"
] | [
"ctpn/show_model.py"
] | [
"from tensorflow.python import pywrap_tensorflow\ncheckpoint_path = 'checkpoints/VGGnet_fast_rcnn_iter_50000.ckpt'\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nfor key in var_to_shape_map:\n print(\"tensor_name: \", key)\n"
] | [
[
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
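Annotation: ctpn/show_model.py above lists checkpoint variables via the private pywrap_tensorflow module. A sketch of the same inspection through the public tf.train.load_checkpoint API (available in recent TensorFlow releases), also printing shapes; the checkpoint path is the one from the row and is assumed to exist:

import tensorflow as tf

checkpoint_path = 'checkpoints/VGGnet_fast_rcnn_iter_50000.ckpt'
reader = tf.train.load_checkpoint(checkpoint_path)
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print("tensor_name:", name, "shape:", shape)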
fligt/inktime | [
"45f20602ef07cc8f62e0192318913cf910eb925b"
] | [
"inktime/rgbkm.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/00_rgbkm.ipynb (unless otherwise specified).\n\n__all__ = ['reflectance']\n\n# Cell\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cv2\n\nimport scipy.optimize as optimize\n\n\ndef reflectance(K, S, D, Rg):\n '''Calculates reflectance for single colorant Kubelka-Munk model.\n\n Based on Nobbs (1997) formulation with modified Saunderson expression for infinite reflectance.\n Function works for single channel, 3 RGB channels, and spectral data/images with muliple wavelength channels.\n\n\n Parameters:\n -----------\n K: tuple-like (n channels)\n Colorant absorption coefficients for wavelength or RGB channels\n S: tuple-like (n channels)\n Colorant scattering coefficients for wavelength or RGB channels\n D: array ( height x width)\n Colorant thickness image\n Rg: array (height x width x n) or rgb tuple with shape (3,)\n Background reflectance image or background color\n\n Returns:\n --------\n refl: array (height x width x n)\n n-channel reflectance image\n\n '''\n\n Rg = np.array(Rg)\n shape = Rg.shape\n\n\n # create uniform background image if Rg is rgb tuple\n\n if len(shape) == 1: # understood as rgb tuple\n\n h, w = D.shape\n\n Rg_img = np.ones([h, w, 3])\n Rg_img[:,:] = Rg\n Rg = Rg_img\n\n shape = Rg.shape\n\n #print('created uniform rgb background image Rg with shape: {}'.format(shape))\n\n\n n_channels = shape[-1]\n\n K = np.array(K).reshape(1, n_channels)\n S = np.array(S).reshape(1, n_channels)\n\n D = np.array(D).reshape(-1, 1)\n Rg = Rg.reshape(-1, n_channels)\n\n # need to return infinity for K =< 0 or S < 0 in optimization code\n #pos_S = S >= 0\n #pos_K = K > 0 # also non-zero\n #ok = pos_S & pos_K\n\n #Rinf = np.zeros([1, n_channels])\n Rinf = (S/K) / ((S/K) + 1 + np.sqrt(1 + 2 * (S/K)))\n #Rinf[ok] = (S[ok]/K[ok]) / ((S[ok]/K[ok]) + 1 + np.sqrt(1 + 2 * (S[ok]/K[ok])))\n #Rinf[~ok] = np.infty\n\n Z = D * np.sqrt(K * (K + 2 * S))\n\n Z = np.clip(Z, a_min=0, a_max=50)\n\n beta = np.exp(2 * Z) - 1\n alpha = (1 - Rinf**2) / (1 - Rg * Rinf)\n\n refl = (alpha * Rg + beta * Rinf) / (alpha + beta)\n refl = refl.reshape(shape)\n\n return refl"
] | [
[
"numpy.sqrt",
"numpy.clip",
"numpy.ones",
"numpy.array",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
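Annotation: a usage sketch for the reflectance() function stored in the inktime/rgbkm.py row above, assuming the package is installed so the module imports; K, S, the thickness map and the background color are made-up values:

import numpy as np
from inktime.rgbkm import reflectance

K = (0.8, 0.9, 1.2)          # absorption per RGB channel (hypothetical colorant)
S = (0.1, 0.1, 0.1)          # scattering per RGB channel
D = np.full((64, 64), 0.5)   # uniform colorant thickness image
Rg = (0.9, 0.9, 0.85)        # paper-like background color as an RGB tuple

refl = reflectance(K, S, D, Rg)
print(refl.shape)  # (64, 64, 3): per-pixel RGB reflectance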
beldonl/gpkit | [
"4c422d3f3b65b85f5baacc36305064aee4341ebe"
] | [
"gpkit/constraints/sgp.py"
] | [
"\"\"\"Implement the SequentialGeometricProgram class\"\"\"\nfrom time import time\nfrom collections import OrderedDict\nimport numpy as np\nfrom ..exceptions import InvalidGPConstraint, Infeasible, UnnecessarySGP\nfrom ..keydict import KeyDict\nfrom ..nomials import Variable\nfrom .gp import GeometricProgram\nfrom ..nomials import PosynomialInequality\nfrom .. import NamedVariables\nfrom .costed import CostedConstraintSet\n\n\nEPS = 1e-6 # 1 +/- this is used in a few relative differences\n\n# pylint: disable=too-many-instance-attributes\nclass SequentialGeometricProgram(CostedConstraintSet):\n \"\"\"Prepares a collection of signomials for a SP solve.\n\n Arguments\n ---------\n cost : Posynomial\n Objective to minimize when solving\n constraints : list of Constraint or SignomialConstraint objects\n Constraints to maintain when solving (implicitly Signomials <= 1)\n verbosity : int (optional)\n Currently has no effect: SequentialGeometricPrograms don't know\n anything new after being created, unlike GeometricPrograms.\n\n Attributes with side effects\n ----------------------------\n `gps` is set during a solve\n `result` is set at the end of a solve\n\n Examples\n --------\n >>> gp = gpkit.geometric_program.SequentialGeometricProgram(\n # minimize\n x,\n [ # subject to\n 1/x - y/x, # <= 1, implicitly\n y/10 # <= 1\n ])\n >>> gp.solve()\n \"\"\"\n gps = solver_outs = _results = result = model = None\n _gp = _spvars = _lt_approxs = pccp_penalty = None\n with NamedVariables(\"SGP\"):\n slack = Variable(\"PCCPslack\")\n\n def __init__(self, cost, model, substitutions, *,\n use_pccp=True, pccp_penalty=2e2, **initgpargs):\n # pylint: disable=super-init-not-called,non-parent-init-called\n if cost.any_nonpositive_cs:\n raise UnnecessarySGP(\"\"\"Sequential GPs need Posynomial objectives.\n\n The equivalent of a Signomial objective can be constructed by constraining\n a dummy variable `z` to be greater than the desired Signomial objective `s`\n (z >= s) and then minimizing that dummy variable.\"\"\")\n self.model = model\n self._original_cost = cost\n self.externalfn_vars = \\\n frozenset(Variable(v) for v in self.model.varkeys if v.externalfn)\n if not self.externalfn_vars:\n try:\n sgpconstraints = {\"SP constraints\": [], \"GP constraints\": []}\n self._lt_approxs = []\n for cs in model.flat():\n try:\n if not isinstance(cs, PosynomialInequality):\n cs.as_hmapslt1(substitutions) # gp-compatible?\n sgpconstraints[\"GP constraints\"].append(cs)\n except InvalidGPConstraint:\n sgpconstraints[\"SP constraints\"].append(cs)\n if use_pccp:\n lts = [lt/self.slack for lt in cs.as_approxlts()]\n else:\n lts = cs.as_approxlts()\n self._lt_approxs.append(lts)\n if not sgpconstraints[\"SP constraints\"]:\n raise UnnecessarySGP(\"\"\"Model valid as a Geometric Program.\n\n SequentialGeometricPrograms should only be created with Models containing\n Signomial Constraints, since Models without Signomials have global\n solutions and can be solved with 'Model.solve()'.\"\"\")\n if use_pccp:\n self.pccp_penalty = pccp_penalty\n self.cost = cost * self.slack**pccp_penalty\n sgpconstraints[\"GP constraints\"].append(self.slack >= 1)\n else:\n self.cost = cost\n self.idxlookup = {k: i for i, k in enumerate(sgpconstraints)}\n list.__init__(self, sgpconstraints.values())\n self.substitutions = substitutions\n self._gp = self.init_gp(**initgpargs)\n self.blackboxconstraints = False\n return\n except AttributeError:\n pass # some constraint lacked\n self.blackboxconstraints = True\n self.__bare_init__(cost, model, 
substitutions)\n\n # pylint: disable=too-many-locals,too-many-branches\n # pylint: disable=too-many-arguments\n # pylint: disable=too-many-statements\n def localsolve(self, solver=None, *, verbosity=1, x0=None, reltol=1e-4,\n iteration_limit=50, mutategp=True, **solveargs):\n \"\"\"Locally solves a SequentialGeometricProgram and returns the solution.\n\n Arguments\n ---------\n solver : str or function (optional)\n By default uses one of the solvers found during installation.\n If set to \"mosek\", \"mosek_cli\", or \"cvxopt\", uses that solver.\n If set to a function, passes that function cs, A, p_idxs, and k.\n verbosity : int (optional)\n If greater than 0, prints solve time and number of iterations.\n Each GP is created and solved with verbosity one less than this, so\n if greater than 1, prints solver name and time for each GP.\n x0 : dict (optional)\n Initial location to approximate signomials about.\n reltol : float\n Iteration ends when this is greater than the distance between two\n consecutive solve's objective values.\n iteration_limit : int\n Maximum GP iterations allowed.\n mutategp: boolean\n Prescribes whether to mutate the previously generated GP\n or to create a new GP with every solve.\n **solveargs :\n Passed to solver function.\n\n Returns\n -------\n result : dict\n A dictionary containing the translated solver result.\n \"\"\"\n self.gps, self.solver_outs, self._results = [], [], []\n # if there's external functions we can't mutate the GP\n mutategp = mutategp and not self.blackboxconstraints\n if not mutategp and not x0:\n raise ValueError(\"Solves with arbitrary constraint generators\"\n \" must specify an initial starting point x0.\")\n if mutategp:\n if x0:\n self._gp = self.init_gp(x0)\n gp = self._gp\n starttime = time()\n if verbosity > 0:\n print(\"Starting a sequence of GP solves\")\n if self.externalfn_vars:\n print(\" for %i variables defined by externalfns\"\n % len(self.externalfn_vars))\n elif mutategp:\n print(\" for %i free variables\" % len(self._spvars))\n print(\" in %i signomial constraints\"\n % len(self[\"SP constraints\"]))\n print(\" and for %i free variables\" % len(gp.varlocs))\n print(\" in %i posynomial inequalities.\" % len(gp.k))\n prevcost, cost, rel_improvement = None, None, None\n while rel_improvement is None or rel_improvement > reltol:\n prevcost = cost\n if len(self.gps) > iteration_limit:\n raise Infeasible(\n \"Unsolved after %s iterations. 
Check `m.program.results`;\"\n \" if they're converging, try `.localsolve(...,\"\n \" iteration_limit=NEWLIMIT)`.\" % len(self.gps))\n if mutategp:\n self.update_gp(x0)\n else:\n gp = self.gp(x0)\n gp.model = self.model\n self.gps.append(gp) # NOTE: SIDE EFFECTS\n if verbosity > 1:\n print(\"\\nGP Solve %i\" % len(self.gps))\n if verbosity > 2:\n print(\"===============\")\n solver_out = gp.solve(solver, verbosity=verbosity-1,\n gen_result=False, **solveargs)\n self.solver_outs.append(solver_out)\n cost = float(solver_out[\"objective\"])\n x0 = dict(zip(gp.varlocs, np.exp(solver_out[\"primal\"])))\n if verbosity > 2 and self._spvars:\n result = gp.generate_result(solver_out, verbosity=verbosity-3)\n self._results.append(result)\n print(result.table(self._spvars))\n elif verbosity > 1:\n print(\"Solved cost was %.4g.\" % cost)\n if prevcost is None:\n continue\n rel_improvement = (prevcost - cost)/(prevcost + cost)\n if cost*(1 - EPS) > prevcost + EPS and verbosity > -1:\n print(\"SGP not convergent: Cost rose by %.2g%% on GP solve %i.\"\n \" Details can be found in `m.program.results` or by\"\n \" solving at a higher verbosity. Note that convergence is\"\n \" not guaranteed for models with SignomialEqualities.\\n\"\n % (100*(cost - prevcost)/prevcost, len(self.gps)))\n rel_improvement = cost = None\n # solved successfully!\n self.result = gp.generate_result(solver_out, verbosity=verbosity-3)\n self.result[\"soltime\"] = time() - starttime\n if verbosity > 1:\n print()\n if verbosity > 0:\n print(\"Solving took %.3g seconds and %i GP solves.\"\n % (self.result[\"soltime\"], len(self.gps)))\n self.model.process_result(self.result)\n if self.externalfn_vars:\n for v in self.externalfn_vars:\n self[0].insert(0, v.key.externalfn) # for constraint senss\n if self.slack.key in self.result[\"variables\"]:\n excess_slack = self.result[\"variables\"][self.slack.key] - 1\n if excess_slack <= EPS:\n del self.result[\"freevariables\"][self.slack.key]\n del self.result[\"variables\"][self.slack.key]\n del self.result[\"sensitivities\"][\"variables\"][self.slack.key]\n slackconstraint = self[\"GP constraints\"][-1]\n del self.result[\"sensitivities\"][\"constraints\"][slackconstraint]\n elif verbosity > -1:\n print(\"Final solution let signomial constraints slacken by\"\n \" %.2g%%. Calling .localsolve with a higher\"\n \" `pccp_penalty` (it was %.3g this time) will reduce\"\n \" final slack if the model is solvable with less. 
If\"\n \" you think it might not be, check by solving with \"\n \"`use_pccp=False, x0=(this model's final solution)`.\\n\"\n % (100*excess_slack, self.pccp_penalty))\n return self.result\n\n # pylint: disable=too-many-locals\n def localsolveonce(self, solver=None, verbosity=1, x0=None, reltol=1e-4,\n iteration_limit=50, mutategp=True, **kwargs):\n \"\"\"Locally solves a SequentialGeometricProgram ONCE and returns the solution.\n\n Arguments\n ---------\n solver : str or function (optional)\n By default uses one of the solvers found during installation.\n If set to \"mosek\", \"mosek_cli\", or \"cvxopt\", uses that solver.\n If set to a function, passes that function cs, A, p_idxs, and k.\n verbosity : int (optional)\n If greater than 0, prints solve time and number of iterations.\n Each GP is created and solved with verbosity one less than this, so\n if greater than 1, prints solver name and time for each GP.\n x0 : dict (optional)\n Initial location to approximate signomials about.\n reltol : float\n Iteration ends when this is greater than the distance between two\n consecutive solve's objective values.\n iteration_limit : int\n Maximum GP iterations allowed.\n *args, **kwargs :\n Passed to solver function.\n\n\n Returns\n -------\n result : dict\n A dictionary containing the translated solver result.\n \"\"\"\n starttime = time()\n if verbosity > 0:\n print(\"Beginning signomial solve.\")\n self.gps = [] # NOTE: SIDE EFFECTS\n self.results = []\n if x0 and mutategp:\n self._gp = self.init_gp(self.substitutions, x0)\n slackvar = Variable()\n prevcost, cost, rel_improvement = None, None, None\n while (rel_improvement is None or rel_improvement > reltol) and len(self.gps) < iteration_limit:\n if len(self.gps) > iteration_limit:\n raise RuntimeWarning(\"\"\"problem unsolved after %s iterations.\n\n The last result is available in Model.program.gps[-1].result. If the gps\n appear to be converging, you may wish to increase the iteration limit by\n calling .localsolve(..., iteration_limit=NEWLIMIT).\"\"\" % len(self.gps))\n gp = self.gp(x0, mutategp)\n self.gps.append(gp) # NOTE: SIDE EFFECTS\n try:\n result = gp.solve(solver, verbosity-1,\n warn_on_check=True, **kwargs)\n self.results.append(result)\n except (RuntimeWarning, ValueError):\n feas_constrs = ([slackvar >= 1] +\n [posy <= slackvar\n for posy in gp.posynomials[1:]])\n primal_feas = GeometricProgram(slackvar**100 * gp.cost,\n feas_constrs, None)\n self.gps.append(primal_feas)\n result = primal_feas.solve(solver, verbosity-1, **kwargs)\n result[\"cost\"] = None # reset the cost-counting\n x0 = result[\"freevariables\"]\n prevcost, cost = cost, result[\"cost\"]\n if prevcost is None or cost is None:\n rel_improvement = None\n elif prevcost < (1-reltol)*cost:\n print(\"SP is not converging! Last GP iteration had a higher\"\n \" cost (%.2g) than the previous one (%.2g). Results for\"\n \" each iteration are in (Model).program.results. 
If your\"\n \" model contains SignomialEqualities, note that\"\n \" convergence is not guaranteed: try replacing any\"\n \" SigEqs you can and solving again.\" % (cost, prevcost))\n else:\n rel_improvement = abs(prevcost-cost)/(prevcost + cost)\n # solved successfully!\n soltime = time() - starttime\n if verbosity > 0:\n print(\"Solving took %i GP solves\" % len(self.gps)\n + \" and %.3g seconds.\" % soltime)\n self.process_result(result)\n self.result = SolutionArray(result.copy()) # NOTE: SIDE EFFECTS\n self.result[\"soltime\"] = soltime\n if self.externalfn_vars:\n for v in self.externalfn_vars:\n self[0].insert(0, v.key.externalfn) # for constraint senss\n return self.result\n\n @property\n def results(self):\n \"Creates and caches results from the raw solver_outs\"\n if not self._results:\n self._results = [o[\"generate_result\"]() for o in self.solver_outs]\n return self._results\n\n\n def _fill_x0(self, x0):\n \"Returns a copy of x0 with subsitutions added.\"\n x0kd = KeyDict()\n x0kd.varkeys = self.varkeys\n if x0:\n x0kd.update(x0) # has to occur after the setting of varkeys\n x0kd.update(self.substitutions)\n return x0kd\n\n def init_gp(self, x0=None, **initgpargs):\n \"Generates a simplified GP representation for later modification\"\n x0 = self._fill_x0(x0)\n constraints = OrderedDict({\"SP approximations\": []})\n constraints[\"GP constraints\"] = self[\"GP constraints\"]\n self._spvars = set([self.slack])\n for cs, lts in zip(self[\"SP constraints\"], self._lt_approxs):\n for lt, gt in zip(lts, cs.as_approxgts(x0)):\n constraint = (lt <= gt)\n constraint.generated_by = cs\n constraints[\"SP approximations\"].append(constraint)\n self._spvars.update({vk for vk in gt.varkeys\n if vk not in self.substitutions})\n gp = GeometricProgram(self.cost, constraints, self.substitutions,\n **initgpargs)\n gp.x0 = x0\n return gp\n\n def update_gp(self, x0):\n \"Update self._gp for x0.\"\n if not self.gps:\n return # we've already generated the first gp\n gp = self._gp\n gp.x0.update({k: v for (k, v) in x0.items() if k in self._spvars})\n hmap_idx = 0\n for sp_constraint, lts in zip(self[\"SP constraints\"], self._lt_approxs):\n for lt, gt in zip(lts, sp_constraint.as_approxgts(gp.x0)):\n unsubbed = lt/gt\n gp[\"SP approximations\"][hmap_idx].unsubbed = [unsubbed]\n hmap = unsubbed.hmap.sub(self.substitutions, unsubbed.varkeys)\n hmap.parent = gp[\"SP approximations\"][hmap_idx]\n hmap_idx += 1 # here because gp.hmaps[0] is the cost hmap\n gp.hmaps[hmap_idx] = hmap\n gp.gen()\n\n def gp(self, x0=None, **gpinitargs):\n \"The GP approximation of this SP at x0.\"\n x0 = self._fill_x0(x0)\n constraints = OrderedDict(\n {\"SP constraints\": [c.as_gpconstr(x0) for c in self.model.flat()]})\n if self.externalfn_vars:\n constraints[\"Generated by externalfns\"] = []\n for v in self.externalfn_vars:\n constraint = v.key.externalfn(v, x0)\n constraint.generated_by = v.key.externalfn\n constraints[\"Generated by externalfns\"].append(constraint)\n gp = GeometricProgram(self._original_cost,\n constraints, self.substitutions, **gpinitargs)\n gp.x0 = x0\n return gp\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
romain-fontugne/roa-counter | [
"35413f036a0a75088ae318dfa3df58b3cbce6095"
] | [
"count.py"
] | [
"from datetime import datetime\nfrom matplotlib import pylab as plt\nfrom requests_cache import CachedSession\n\nCACHE_EXPIRATION_SECS = 3600*24*356\nYEAR_RANGE = range(2018, 2022)\nMARKERS = [\"o\", \"s\", \"d\", \"+\", \"*\"]\n\nRIRS = {\n 'AFRINIC': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/',\n 'marker': 'o',\n },\n 'APNIC': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/',\n 'marker': 's',\n },\n 'ARIN': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/',\n 'marker': 'd'\n },\n 'LACNIC': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/',\n 'marker': '+',\n },\n 'RIPE': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/',\n 'marker': '*',\n }\n }\n\nsession = CachedSession(ExpirationTime = CACHE_EXPIRATION_SECS)\nplt.figure(figsize=(7,4))\n\nfor rir, rir_info in RIRS.items():\n x = []\n y = []\n for year in YEAR_RANGE:\n for month in range(1,13):\n\n roa_count = -1 # skip the header\n parsed_url = f'{rir_info[\"url\"]}/{year}/{month:02d}/15/roas.csv'\n csv = session.get( parsed_url )\n if csv.status_code != 200:\n print(parsed_url)\n print(csv.status_code)\n continue\n\n for line in csv.iter_lines(decode_unicode=True):\n roa_count += 1\n\n\n if roa_count > 0:\n x.append( datetime(year, month, 15) )\n y.append( roa_count )\n \n\n plt.plot(x, y, label=rir, marker=rir_info['marker'])\n\nplt.grid( True )\nplt.legend()\nplt.ylabel('Number of ROAs')\nplt.xticks(rotation=45)\nplt.tight_layout()\nplt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png')\nplt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')\n"
] | [
[
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.grid",
"matplotlib.pylab.xticks",
"matplotlib.pylab.figure",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.plot",
"matplotlib.pylab.legend",
"matplotlib.pylab.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
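Annotation: count.py above caches the RIPE ROA archives with requests_cache. A minimal sketch of fetching and counting one month's roas.csv; note that in current requests_cache releases the expiration keyword is expire_after, and the availability of this exact file is an assumption:

from requests_cache import CachedSession

session = CachedSession('roa_cache', expire_after=3600 * 24 * 356)
url = 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/2021/06/15/roas.csv'
resp = session.get(url)
if resp.status_code == 200:
    roa_count = sum(1 for _ in resp.iter_lines(decode_unicode=True)) - 1  # minus the header line
    print(url, roa_count)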
NagisaZj/ProMP | [
"539739ae2b7d5fdcad00855da695f643b23df4b3",
"539739ae2b7d5fdcad00855da695f643b23df4b3"
] | [
"rlkit/torch/networks.py",
"pro-mp_run_cheetah.py"
] | [
"\"\"\"\nGeneral networks for pytorch.\n\nAlgorithm-specific networks should go else-where.\n\"\"\"\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom rlkit.policies.base import Policy\nfrom rlkit.torch import pytorch_util as ptu\nfrom rlkit.torch.core import PyTorchModule\nfrom rlkit.torch.data_management.normalizer import TorchFixedNormalizer\nfrom rlkit.torch.modules import LayerNorm\nimport math\n\ndef identity(x):\n return x\n\n\nclass Mlp(PyTorchModule):\n def __init__(\n self,\n hidden_sizes,\n output_size,\n input_size,\n init_w=3e-3,\n hidden_activation=F.relu,\n output_activation=identity,\n hidden_init=ptu.fanin_init,\n b_init_value=0.1,\n layer_norm=False,\n layer_norm_kwargs=None,\n ):\n self.save_init_params(locals())\n super().__init__()\n\n if layer_norm_kwargs is None:\n layer_norm_kwargs = dict()\n\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_sizes = hidden_sizes\n self.hidden_activation = hidden_activation\n self.output_activation = output_activation\n self.layer_norm = layer_norm\n self.fcs = []\n self.layer_norms = []\n in_size = input_size\n\n for i, next_size in enumerate(hidden_sizes):\n fc = nn.Linear(in_size, next_size)\n in_size = next_size\n hidden_init(fc.weight)\n fc.bias.data.fill_(b_init_value)\n self.__setattr__(\"fc{}\".format(i), fc)\n self.fcs.append(fc)\n\n if self.layer_norm:\n ln = LayerNorm(next_size)\n self.__setattr__(\"layer_norm{}\".format(i), ln)\n self.layer_norms.append(ln)\n\n self.last_fc = nn.Linear(in_size, output_size)\n self.last_fc.weight.data.uniform_(-init_w, init_w)\n self.last_fc.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, input, return_preactivations=False):\n h = input\n for i, fc in enumerate(self.fcs):\n h = fc(h)\n if self.layer_norm and i < len(self.fcs) - 1:\n h = self.layer_norms[i](h)\n h = self.hidden_activation(h)\n preactivation = self.last_fc(h)\n output = self.output_activation(preactivation)\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n\nclass FlattenMlp(Mlp):\n \"\"\"\n if there are multiple inputs, concatenate along dim 1\n \"\"\"\n\n def forward(self, *inputs, **kwargs):\n flat_inputs = torch.cat(inputs, dim=1)\n return super().forward(flat_inputs, **kwargs)\n\n\nclass MlpPolicy(Mlp, Policy):\n \"\"\"\n A simpler interface for creating policies.\n \"\"\"\n\n def __init__(\n self,\n *args,\n obs_normalizer: TorchFixedNormalizer = None,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.obs_normalizer = obs_normalizer\n\n def forward(self, obs, **kwargs):\n if self.obs_normalizer:\n obs = self.obs_normalizer.normalize(obs)\n return super().forward(obs, **kwargs)\n\n def get_action(self, obs_np):\n actions = self.get_actions(obs_np[None])\n return actions[0, :], {}\n\n def get_actions(self, obs):\n return self.eval_np(obs)\n\n\nclass TanhMlpPolicy(MlpPolicy):\n \"\"\"\n A helper class since most policies have a tanh output activation.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.save_init_params(locals())\n super().__init__(*args, output_activation=torch.tanh, **kwargs)\n\n\nclass MlpEncoder(FlattenMlp):\n '''\n encode context via MLP\n '''\n\n def reset(self, num_tasks=1):\n pass\n\n def forward_seq(self,context):\n t,b,_ = context.size()\n input = context.view(t*b,-1)\n out = self.forward(input)\n return out.view(t,b,-1)\n\nclass RecurrentEncoder(FlattenMlp):\n '''\n encode context via recurrent network\n '''\n\n def __init__(self,\n *args,\n 
**kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.hidden_dim = self.hidden_sizes[-1]\n self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))\n\n # input should be (task, seq, feat) and hidden should be (task, 1, feat)\n\n self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)\n\n def forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out[:, -1, :]\n\n # output layer\n preactivation = self.last_fc(out)\n output = self.output_activation(preactivation)\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n def reset(self, num_tasks=1):\n self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)\n\n\nclass RNN(FlattenMlp):\n '''\n encode context via recurrent network\n '''\n\n def __init__(self,\n *args,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.hidden_dim = self.hidden_sizes[-1]\n self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))\n\n # input should be (task, seq, feat) and hidden should be (task, 1, feat)\n\n self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)\n\n def inner_forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out.contiguous()\n out = out.view(task * seq, -1)\n\n # output layer\n #preactivation = self.last_fc(out)\n #output = self.output_activation(preactivation)\n if return_preactivations:\n return out, out\n else:\n return out\n\n def forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out.contiguous()\n out = out.view(task * seq, -1)\n\n # output layer\n preactivation = self.last_fc(out)\n output = self.output_activation(preactivation)\n if return_preactivations:\n return output, output\n else:\n return output\n\n def inner_reset(self, num_tasks=1):\n self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)\n\nclass SnailEncoder(FlattenMlp):\n def __init__(self,\n input_length,\n *args,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.hidden_dim = self.hidden_sizes[-1]\n self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))\n self.input_length = input_length\n # input should be (task, seq, feat) and hidden should be (1, task, 
feat)\n\n #self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)\n layer_count = math.ceil(math.log(input_length)/math.log(2))\n self.TC1 = TCBlock(self.hidden_dim,input_length,16)\n self.atten1 = AttentionBlock(self.hidden_dim+16*layer_count,32,32)\n self.TC2 = TCBlock(self.hidden_dim+16*layer_count+32,input_length,16)\n self.atten2 = AttentionBlock(self.hidden_dim+16*layer_count*2+32,32,32)\n self.out_layer = nn.Linear(self.hidden_dim+16*layer_count*2+32+32,self.output_size)\n self.var_start = int(self.output_size / 2)\n\n def forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out = out.permute(0,2,1)\n #print(out.shape)\n out = self.TC1(out)\n out = self.atten1(out)\n out = self.TC2(out)\n out = self.atten2(out)\n out = out[:, :, -1]\n #print('o',out.shape)\n # output layer\n preactivation = self.out_layer(out)\n output = self.output_activation(preactivation)\n #temp = F.softplus(output[..., self.var_start:])\n #output[..., self.var_start:] = temp\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n def forward_seq(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n in_ = in_.contiguous()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out = out.permute(0,2,1)\n #print(out.shape)\n out = self.TC1(out)\n out = self.atten1(out)\n out = self.TC2(out)\n out = self.atten2(out)\n out = out.permute(0,2,1)\n out = out.view(task * seq,-1)\n\n\n preactivation = self.out_layer(out)\n output = self.output_activation(preactivation)\n #temp = F.softplus(output[..., self.var_start:])\n #output[..., self.var_start:] = temp\n #output = output.view(task,seq,-1)\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n def reset(self,num_tasks=1):\n return\n\nclass MyMlpEncoder(FlattenMlp):\n '''\n encode context via MLP\n '''\n\n def reset(self, num_tasks=1):\n pass\n\n def forward_seq(self,context):\n t,b,_ = context.size()\n input = context.view(t*b,-1)\n out = self.forward(input)\n return out\n\n def forward(self,context):\n t,b,_ = context.size()\n input = context.view(t*b,-1)\n out = self.forward(input)\n return out\n\nclass CausalConv1d(nn.Module):\n \"\"\"A 1D causal convolution layer.\n\n Input: (B, D_in, T), where B is the minibatch size, D_in is the number of\n dimensions per step, and T is the number of steps.\n Output: (B, D_out, T), where B is the minibatch size, D_out is the number\n of dimensions in the output, and T is the number of steps.\n\n Arguments:\n in_channels (int): number of input channels\n out_channels (int): number of output channels\n \"\"\"\n def __init__(self, in_channels, out_channels, dilation=1):\n super(CausalConv1d, self).__init__()\n self.padding = dilation\n self.causal_conv = nn.Conv1d(\n in_channels,\n out_channels,\n 2,\n padding = self.padding,\n dilation = dilation\n )\n\n def forward(self, minibatch):\n return self.causal_conv(minibatch)[:, :, :-self.padding]\n\n\nclass DenseBlock(nn.Module):\n \"\"\"Two parallel 1D causal convolution layers w/tanh and sigmoid activations\n\n Input: (B, D_in, T), where B is the 
minibatch size, D_in is the number of\n dimensions of the input, and T is the number of steps.\n Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the\n number of dimensions of the input, `F` is the number of filters, and `T`\n is the length of the input sequence.\n\n Arguments:\n in_channels (int): number of input channels\n filters (int): number of filters per channel\n \"\"\"\n def __init__(self, in_channels, filters, dilation=1):\n super(DenseBlock, self).__init__()\n self.causal_conv1 = CausalConv1d(\n in_channels,\n filters,\n dilation=dilation\n )\n self.causal_conv2 = CausalConv1d(\n in_channels,\n filters,\n dilation=dilation\n )\n\n def forward(self, minibatch):\n tanh = F.tanh(self.causal_conv1(minibatch))\n sig = F.sigmoid(self.causal_conv2(minibatch))\n out = torch.cat([minibatch, tanh*sig], dim=1)\n return out\n\n\nclass TCBlock(nn.Module):\n \"\"\"A stack of DenseBlocks which dilates to desired sequence length\n\n The TCBlock adds `ceil(log_2(seq_len))*filters` channels to the output.\n\n Input: (B, D_in, T), where B is the minibatch size, D_in is the number of\n dimensions of the input, and T is the number of steps.\n Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the\n number of dimensions of the input, `F` is the number of filters, and `T`\n is the length of the input sequence.\n\n Arguments:\n in_channels (int): channels for the input\n seq_len (int): length of the sequence. The number of denseblock layers\n is log base 2 of `seq_len`.\n filters (int): number of filters per channel\n \"\"\"\n def __init__(self, in_channels, seq_len, filters):\n super(TCBlock, self).__init__()\n layer_count = math.ceil(math.log(seq_len)/math.log(2))\n blocks = []\n channel_count = in_channels\n for layer in range(layer_count):\n block = DenseBlock(channel_count, filters, dilation=2**layer)\n blocks.append(block)\n channel_count += filters\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, minibatch):\n return self.blocks(minibatch)\n\n\nclass AttentionBlock(nn.Module):\n \"\"\"An attention mechanism similar to Vaswani et al (2017)\n\n The input of the AttentionBlock is `BxDxT` where `B` is the input\n minibatch size, `D` is the dimensions of each feature, `T` is the length of\n the sequence.\n\n The output of the AttentionBlock is `Bx(D+V)xT` where `V` is the size of the\n attention values.\n\n Arguments:\n input_dims (int): the number of dimensions (or channels) of each element\n in the input sequence\n k_size (int): the size of the attention keys\n v_size (int): the size of the attention values\n \"\"\"\n def __init__(self, input_dims, k_size, v_size):\n super(AttentionBlock, self).__init__()\n self.key_layer = nn.Linear(input_dims, k_size)\n self.query_layer = nn.Linear(input_dims, k_size)\n self.value_layer = nn.Linear(input_dims, v_size)\n self.sqrt_k = math.sqrt(k_size)\n\n def forward(self, minibatch):\n minibatch = minibatch.permute(0,2,1)\n keys = self.key_layer(minibatch)\n queries = self.query_layer(minibatch)\n values = self.value_layer(minibatch)\n logits = torch.bmm(queries, keys.transpose(2,1))\n mask = logits.data.new(logits.size(1), logits.size(2)).fill_(1).byte()\n mask = torch.triu(mask, 1)\n mask = mask.unsqueeze(0).expand_as(logits)\n logits.data.masked_fill_(mask, float('-inf'))\n probs = F.softmax(logits / self.sqrt_k, dim=2)\n read = torch.bmm(probs, values)\n return torch.cat([minibatch, read], dim=2).permute(0,2,1)",
"from meta_policy_search.baselines.linear_baseline import LinearFeatureBaseline\nfrom meta_policy_search.envs.mujoco_envs.half_cheetah_rand_vel import HalfCheetahRandVelEnvSparse\nfrom meta_policy_search.envs.normalized_env import normalize\nfrom meta_policy_search.meta_algos.pro_mp import ProMP\nfrom meta_policy_search.meta_trainer import Trainer\nfrom meta_policy_search.samplers.meta_sampler import MetaSampler\nfrom meta_policy_search.samplers.meta_sample_processor import MetaSampleProcessor\nfrom meta_policy_search.policies.meta_gaussian_mlp_policy import MetaGaussianMLPPolicy\nfrom meta_policy_search.utils import logger\nfrom meta_policy_search.utils.utils import set_seed, ClassEncoder\n\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport json\nimport argparse\nimport time\n\nmeta_policy_search_path = '/'.join(os.path.realpath(os.path.dirname(__file__)).split('/')[:-1])\nmeta_policy_search_path = '.'\ndef main(config):\n set_seed(config['seed'])\n\n\n baseline = globals()[config['baseline']]() #instantiate baseline\n\n env = globals()[config['env']]() # instantiate env\n env = normalize(env) # apply normalize wrapper to env\n\n policy = MetaGaussianMLPPolicy(\n name=\"meta-policy\",\n obs_dim=np.prod(env.observation_space.shape),\n action_dim=np.prod(env.action_space.shape),\n meta_batch_size=config['meta_batch_size'],\n hidden_sizes=config['hidden_sizes'],\n )\n\n sampler = MetaSampler(\n env=env,\n policy=policy,\n rollouts_per_meta_task=config['rollouts_per_meta_task'], # This batch_size is confusing\n meta_batch_size=config['meta_batch_size'],\n max_path_length=config['max_path_length'],\n parallel=config['parallel'],\n )\n\n sample_processor = MetaSampleProcessor(\n baseline=baseline,\n discount=config['discount'],\n gae_lambda=config['gae_lambda'],\n normalize_adv=config['normalize_adv'],\n )\n\n algo = ProMP(\n policy=policy,\n inner_lr=config['inner_lr'],\n meta_batch_size=config['meta_batch_size'],\n num_inner_grad_steps=config['num_inner_grad_steps'],\n learning_rate=config['learning_rate'],\n num_ppo_steps=config['num_promp_steps'],\n clip_eps=config['clip_eps'],\n target_inner_step=config['target_inner_step'],\n init_inner_kl_penalty=config['init_inner_kl_penalty'],\n adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],\n )\n\n trainer = Trainer(\n algo=algo,\n policy=policy,\n env=env,\n sampler=sampler,\n sample_processor=sample_processor,\n n_itr=config['n_itr'],\n num_inner_grad_steps=config['num_inner_grad_steps'],\n )\n\n trainer.train()\n\nif __name__==\"__main__\":\n idx = int(time.time())\n\n parser = argparse.ArgumentParser(description='ProMP: Proximal Meta-Policy Search')\n parser.add_argument('--config_file', type=str, default='', help='json file with run specifications')\n parser.add_argument('--dump_path', type=str, default=meta_policy_search_path + '/data/pro-mp/cheetah/run_%d' % idx)\n\n args = parser.parse_args()\n\n\n if args.config_file: # load configuration from json file\n with open(args.config_file, 'r') as f:\n config = json.load(f)\n\n else: # use default config\n\n config = {\n 'seed': 1000,\n\n 'baseline': 'LinearFeatureBaseline',\n\n 'env': 'HalfCheetahRandVelEnvSparse',\n\n # sampler config\n 'rollouts_per_meta_task': 2,\n 'max_path_length': 64,\n 'parallel': True,\n\n # sample processor config\n 'discount': 0.99,\n 'gae_lambda': 1,\n 'normalize_adv': True,\n\n # policy config\n 'hidden_sizes': (64, 64),\n 'learn_std': True, # whether to learn the standard deviation of the gaussian policy\n\n # ProMP config\n 'inner_lr': 
0.1, # adaptation step size\n 'learning_rate': 1e-3, # meta-policy gradient step size\n 'num_promp_steps': 5, # number of ProMp steps without re-sampling\n 'clip_eps': 0.3, # clipping range\n 'target_inner_step': 0.01,\n 'init_inner_kl_penalty': 5e-4,\n 'adaptive_inner_kl_penalty': False, # whether to use an adaptive or fixed KL-penalty coefficient\n 'n_itr': 10001, # number of overall training iterations\n 'meta_batch_size': 40, # number of sampled meta-tasks per iterations\n 'num_inner_grad_steps': 1, # number of inner / adaptation gradient steps\n \"util_params\":\n {'base_log_dir':'outputProMP'}\n ,\n }\n\n # configure logger\n logger.configure(dir=args.dump_path, format_strs=['stdout', 'log', 'csv'],\n snapshot_mode='last_gap')\n\n # dump run configuration before starting training\n json.dump(config, open(args.dump_path + '/params.json', 'w'), cls=ClassEncoder)\n\n # start the actual algorithm\n main(config)"
] | [
[
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.nn.LSTM",
"torch.cat",
"torch.zeros",
"torch.nn.Linear",
"torch.bmm",
"torch.nn.Conv1d",
"torch.triu"
],
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
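Annotation: the AttentionBlock in rlkit/torch/networks.py above builds a causal mask with torch.triu and -inf fills before the softmax. A small self-contained sketch of that masking step:

import torch
import torch.nn.functional as F

B, T = 2, 5
logits = torch.randn(B, T, T)                                      # query x key attention scores
mask = torch.triu(torch.ones(T, T, dtype=torch.bool), diagonal=1)  # strict upper triangle = future steps
logits = logits.masked_fill(mask, float('-inf'))                   # forbid attending to the future
probs = F.softmax(logits, dim=-1)
print(probs[0].sum(dim=-1))  # each row is still a valid distribution (sums to 1)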
ShellySrivastava/Machine-Learning | [
"bfdea30c06abe4228c103ae525adcf990015983f"
] | [
"ML_CW1/assgn_1_part_1/2_multiple_variables/plot_cost.py"
] | [
"import matplotlib.pyplot as plt\nimport os\n\ndef plot_cost(cost):\n \n fig, ax1 = plt.subplots()\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Cost')\n plt.plot(cost)\n fig.tight_layout()\n plot_filename = os.path.join(os.getcwd(), 'figures', 'cost.png')\n plt.savefig(plot_filename)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
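Annotation: a trivial usage sketch for plot_cost() in the row above, assuming the file is importable as plot_cost; the cost values are made up and the figures/ directory it saves into is created first:

import os
from plot_cost import plot_cost

os.makedirs(os.path.join(os.getcwd(), 'figures'), exist_ok=True)
plot_cost([10.0, 6.2, 4.4, 3.6, 3.2, 3.0])  # a typical decreasing gradient-descent cost curve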
e2thenegpii/EnergyCalc | [
"6036b08d01eafae33e80e8754c0e0215c78db6fe"
] | [
"src/TOU.py"
] | [
"from enum import Enum\n\nfrom datetime import datetime, date\nfrom dateutil.relativedelta import relativedelta, MO\nimport argparse\nimport holidays\nimport pandas as pd\n\nclass BGEHolidays(holidays.HolidayBase):\n def _populate(self, year):\n holidays.UnitedStates._populate(self, year)\n\n # Remove Martin Luther King Day\n self.pop(date(year, 1, 1) + relativedelta(weekday=MO(+3)), None)\n\n # Remove Columbus Day\n self.pop(date(year, 10, 1) + relativedelta(weekday=MO(+2)), None)\n\n # Remove Veterans Day\n self.pop(date(year, 11, 11), None)\n\n # Add good friday\n self[holidays.easter(year) + relativedelta(days=-2)] = 'Good Friday'\n\nclass TimeOfUse(Enum):\n peak = 0\n shoulder = 1\n offpeak = 2\n\nclass Season(Enum):\n Winter = 0\n Summer = 1\n\n @classmethod\n def get(cls, dt):\n d = dt.date()\n if date(dt.year, 6, 1) <= d and date(dt.year, 9, 30) >= d:\n return cls.Summer\n return cls.Winter\n\nclass Schedule(Enum):\n R = 'R'\n RL = 'RL'\n EV = 'EV'\n EVP = 'EVP'\n\n def getTOU(self, dt):\n d = dt.date()\n t = dt.time()\n bge_holidays = BGEHolidays(dt.year)\n\n if self == self.R:\n return TimeOfUse.offpeak\n elif self == self.RL:\n if Season.get(dt) == Season.Summer:\n if (t.hour >=10 and t.hour < 20) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n elif ((t.hour >= 7 and t.hour < 10) or (t.hour >= 20 and t.hour < 23)) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.shoulder\n else:\n return TimeOfUse.offpeak\n else:\n if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n elif (t.hour >= 11 and t.hour < 17) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.shoulder\n else:\n return TimeOfUse.offpeak\n\n elif self in (self.EV, self.EVP):\n if Season.get(dt) == Season.Summer:\n if (t.hour >= 10 and t.hour < 20) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n else:\n return TimeOfUse.offpeak\n else:\n if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n else:\n return TimeOfUse.offpeak\n\nrates = {\n (Schedule.R, Season.Summer, TimeOfUse.offpeak): .06722,\n (Schedule.R, Season.Winter, TimeOfUse.offpeak): .07805,\n (Schedule.RL, Season.Summer, TimeOfUse.peak): .08465,\n (Schedule.RL, Season.Summer, TimeOfUse.shoulder): .06069,\n (Schedule.RL, Season.Summer, TimeOfUse.offpeak): .05744,\n (Schedule.RL, Season.Winter, TimeOfUse.peak): .09053,\n (Schedule.RL, Season.Winter, TimeOfUse.shoulder): .07944,\n (Schedule.RL, Season.Winter, TimeOfUse.offpeak): .07166,\n (Schedule.EV, Season.Summer, TimeOfUse.peak): .1227,\n (Schedule.EV, Season.Summer, TimeOfUse.offpeak): .03886,\n (Schedule.EV, Season.Winter, TimeOfUse.peak): .18474,\n (Schedule.EV, Season.Winter, TimeOfUse.offpeak): .0426,\n (Schedule.EVP, Season.Summer, TimeOfUse.peak): .03886,\n (Schedule.EVP, Season.Summer, TimeOfUse.offpeak): .03886,\n (Schedule.EVP, Season.Winter, TimeOfUse.peak): .0426,\n (Schedule.EVP, Season.Winter, TimeOfUse.offpeak): .0426\n}\n\ndef get_rate(dt, schedule = Schedule.R):\n bge_holidays = BGEHolidays(dt.year)\n\n season = Season.get(dt)\n tou = schedule.getTOU(dt)\n\n return rates[(schedule, season, tou)]\n\ndef process_row(x):\n dt = x['DATE_START TIME']\n val = x['USAGE']\n return pd.Series([dt] + [get_rate(dt, x) * (val + .0700) for x in Schedule], 
index=['DATE_START TIME'] + [x.value for x in Schedule])\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_file', type=argparse.FileType('r'))\n\n args = parser.parse_args()\n\n df = pd.read_csv(args.input_file, parse_dates=[['DATE', 'START TIME']])[['DATE_START TIME', 'USAGE']]\n\n schedules = df.apply(process_row, axis=1)\n print(schedules[['R', 'RL', 'EV', 'EVP']].sum())\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
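Annotation: a usage sketch for the BGE time-of-use helpers in src/TOU.py above, assuming the module is importable as TOU; the timestamps are arbitrary non-holiday weekdays:

from datetime import datetime
import TOU

summer_peak = datetime(2021, 7, 14, 15, 0)  # Wednesday afternoon in summer
winter_night = datetime(2021, 1, 12, 2, 0)  # Tuesday overnight in winter

print(TOU.Schedule.RL.getTOU(summer_peak))          # TimeOfUse.peak
print(TOU.get_rate(summer_peak, TOU.Schedule.RL))   # 0.08465
print(TOU.get_rate(winter_night, TOU.Schedule.EV))  # 0.0426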
AshwinRameshP/AttendanceSystem_FaceRecognition | [
"23c590c10ac296816d7cff23445d28c3863d0138"
] | [
"FRAMS_STUDENT.py"
] | [
"import tkinter as tk\nfrom tkinter import *\nimport cv2\nimport csv\nimport os\nimport numpy as np\nfrom PIL import Image,ImageTk\nimport pandas as pd\nimport datetime\nimport time\n\n\n##Error screen2\ndef del_sc2():\n sc2.destroy()\ndef err_screen1():\n global sc2\n sc2 = tk.Tk()\n sc2.geometry('300x100')\n sc2.iconbitmap('FRAMS.ico')\n sc2.title('Warning!!')\n sc2.configure(background='snow')\n Label(sc2,text='Please enter your subject name!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack()\n Button(sc2,text='OK',command=del_sc2,fg=\"black\" ,bg=\"lawn green\" ,width=9 ,height=1, activebackground = \"Red\" ,font=('times', 15, ' bold ')).place(x=90,y= 50)\n\ndef Fillattendances():\n sub = tx.get()\n now = time.time() ###For calculate seconds of video\n future = now + 20\n if time.time() < future:\n if sub == '':\n err_screen1()\n else:\n recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()\n try:\n recognizer.read(\"TrainingImageLabel\\Trainner.yml\")\n except:\n e = 'Model not found,Please train model'\n Notifica.configure(text=e, bg=\"red\", fg=\"black\", width=33, font=('times', 15, 'bold'))\n Notifica.place(x=20, y=250)\n\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(harcascadePath)\n df = pd.read_csv(\"StudentDetails\\StudentDetails.csv\")\n cam = cv2.VideoCapture(0)\n font = cv2.FONT_HERSHEY_SIMPLEX\n col_names = ['Enrollment', 'Name', 'Date', 'Time']\n attendance = pd.DataFrame(columns=col_names)\n while True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n for (x, y, w, h) in faces:\n global Id\n\n Id, conf = recognizer.predict(gray[y:y + h, x:x + w])\n if (conf < 70):\n print(conf)\n global Subject\n global aa\n global date\n global timeStamp\n Subject = tx.get()\n ts = time.time()\n date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n aa = df.loc[df['Enrollment'] == Id]['Name'].values\n global tt\n tt = str(Id) + \"-\" + aa\n En = '15624031' + str(Id)\n attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]\n cv2.rectangle(im, (x, y), (x + w, y + h), (0, 260, 0), 7)\n cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0,), 4)\n\n else:\n Id = 'Unknown'\n tt = str(Id)\n cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7)\n cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4)\n if time.time() > future:\n break\n\n attendance = attendance.drop_duplicates(['Enrollment'], keep='first')\n cv2.imshow('Filling attedance..', im)\n key = cv2.waitKey(30) & 0xff\n if key == 27:\n break\n\n ts = time.time()\n date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n Hour, Minute, Second = timeStamp.split(\":\")\n fileName = \"Attendance/\" + Subject + \"_\" + date + \"_\" + Hour + \"-\" + Minute + \"-\" + Second + \".csv\"\n attendance = attendance.drop_duplicates(['Enrollment'], keep='first')\n print(attendance)\n attendance.to_csv(fileName, index=False)\n\n M = 'Attendance filled Successfully'\n Notifica.configure(text=M, bg=\"Green\", fg=\"white\", width=33, font=('times', 15, 'bold'))\n Notifica.place(x=20, y=250)\n cam.release()\n cv2.destroyAllWindows()\n\n import csv\n import tkinter\n root = tkinter.Tk()\n root.title(\"Attendance of \" + Subject)\n root.configure(background='snow')\n cs = './' + fileName\n with open(cs, 
newline=\"\") as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n # i've added some styling\n label = tkinter.Label(root, width=8, height=1, fg=\"black\", font=('times', 15, ' bold '),\n bg=\"lawn green\", text=row, relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\n root.mainloop()\n print(attendance)\n\n\nif __name__ == '__main__':\n\n ###windo is frame for subject choosing\n windo = tk.Tk()\n windo.iconbitmap('FRAMS.ico')\n windo.title(\"Enter subject name...\")\n windo.geometry('580x320')\n windo.configure(background='snow')\n Notifica = tk.Label(windo, text=\"Attendance filled Successfully\", bg=\"Green\", fg=\"white\", width=33,\n height=2, font=('times', 15, 'bold'))\n\n\n def Attf():\n import subprocess\n subprocess.Popen(\n r'explorer /select,\".\\Attendance\\Manually Attendance\\\"') # open attendance sheet window\n\n\n attf = tk.Button(windo, text=\"Check Sheets\", command=Attf, fg=\"black\", bg=\"lawn green\", width=12, height=1,\n activebackground=\"Red\", font=('times', 14, ' bold '))\n attf.place(x=430, y=255)\n\n sub = tk.Label(windo, text=\"Enter Subject\", width=15, height=2, fg=\"white\", bg=\"blue2\",\n font=('times', 15, ' bold '))\n sub.place(x=30, y=100)\n\n tx = tk.Entry(windo, width=20, bg=\"yellow\", fg=\"red\", font=('times', 23, ' bold '))\n tx.place(x=250, y=105)\n\n fill_a = tk.Button(windo, text=\"Fill Attendance\", fg=\"white\", command=Fillattendances, bg=\"deep pink\", width=20,\n height=2,\n activebackground=\"Red\", font=('times', 15, ' bold '))\n fill_a.place(x=250, y=160)\n windo.mainloop()"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
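The attendance script in the row above hinges on one small pandas pattern: look up a student name by enrollment ID, append one record per recognised face, de-duplicate, and write a CSV. The sketch below isolates that pattern with toy data; the inline DataFrame stands in for the StudentDetails.csv read and the output file name is a placeholder, not part of the original project.

import datetime
import time

import pandas as pd

# Stand-in for pd.read_csv("StudentDetails/StudentDetails.csv")
students = pd.DataFrame({"Enrollment": [101, 102], "Name": ["Alice", "Bob"]})

attendance = pd.DataFrame(columns=["Enrollment", "Name", "Date", "Time"])
for recognized_id in (101, 101, 102):  # IDs as the recognizer would emit them
    name = students.loc[students["Enrollment"] == recognized_id, "Name"].iloc[0]
    ts = time.time()
    attendance.loc[len(attendance)] = [
        recognized_id,
        name,
        datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d"),
        datetime.datetime.fromtimestamp(ts).strftime("%H:%M:%S"),
    ]

attendance = attendance.drop_duplicates(["Enrollment"], keep="first")
attendance.to_csv("Attendance_example.csv", index=False)  # placeholder file name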
hanneshapke/text | [
"8bebbbe28749de5509be474bc475cef83490f013"
] | [
"tensorflow_text/python/ops/bert_tokenizer.py"
] | [
"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic tokenization ops for BERT preprocessing.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow_text.python.ops import regex_split_ops\nfrom tensorflow_text.python.ops.normalize_ops import case_fold_utf8\nfrom tensorflow_text.python.ops.normalize_ops import normalize_utf8\nfrom tensorflow_text.python.ops.tokenization import TokenizerWithOffsets\nfrom tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer\n\n_DELIM_REGEX = [\n r\"\\s+\",\n r\"|\".join([\n r\"[!-/]\",\n r\"[:-@]\",\n r\"[\\[-`]\",\n r\"[{-~]\",\n r\"[\\p{P}]\",\n ]),\n r\"|\".join([\n r\"[\\x{4E00}-\\x{9FFF}]\",\n r\"[\\x{3400}-\\x{4DBF}]\",\n r\"[\\x{20000}-\\x{2A6DF}]\",\n r\"[\\x{2A700}-\\x{2B73F}]\",\n r\"[\\x{2B740}-\\x{2B81F}]\",\n r\"[\\x{2B820}-\\x{2CEAF}]\",\n r\"[\\x{F900}-\\x{FAFF}]\",\n r\"[\\x{2F800}-\\x{2FA1F}]\",\n ]),\n]\n\n_DELIM_REGEX_PATTERN = \"|\".join(_DELIM_REGEX)\n_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)\n_KEEP_DELIM_NO_WHITESPACE.remove(r\"\\s+\")\n_UNUSED_TOKEN_REGEX = \"\\\\[unused\\\\d+\\\\]\"\n_KEEP_DELIM_NO_WHITESPACE_PATTERN = \"|\".join(_KEEP_DELIM_NO_WHITESPACE)\n\n\nclass BasicTokenizer(TokenizerWithOffsets):\n r\"\"\"Basic tokenizer for for tokenizing text.\n\n A basic tokenizer that tokenizes using some deterministic rules:\n - For most languages, this tokenizer will split on whitespace.\n - For Chinese, Japanese, and Korean characters, this tokenizer will split on\n Unicode characters.\n\n Attributes:\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead of\n stripping them away.\n normalization_form: If true and lower_case=False, the input text will be\n normalized to `normalization_form`. 
See normalize_utf8() op for a list of\n valid values.\n preserve_unused_token: If true, text in the regex format \"\\\\[unused\\\\d+\\\\]\"\n will be treated as a token and thus remain preserved as is to be looked up\n in the vocabulary.\n \"\"\"\n\n def __init__(self,\n lower_case=False,\n keep_whitespace=False,\n normalization_form=None,\n preserve_unused_token=False):\n self._lower_case = lower_case\n if not keep_whitespace:\n self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN\n else:\n self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN\n self._normalization_form = normalization_form\n\n if preserve_unused_token:\n self._delim_regex_pattern = \"|\".join(\n [_UNUSED_TOKEN_REGEX, _DELIM_REGEX_PATTERN])\n self._keep_delim_regex_pattern = \"|\".join(\n [_UNUSED_TOKEN_REGEX, self._keep_delim_regex_pattern])\n else:\n self._delim_regex_pattern = _DELIM_REGEX_PATTERN\n\n def tokenize(self, text_input):\n tokens, _, _ = self.tokenize_with_offsets(text_input)\n return tokens\n\n def tokenize_with_offsets(self, text_input):\n \"\"\"Performs basic word tokenization for BERT.\n\n Args:\n text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.\n\n Returns:\n A `RaggedTensor` of tokenized strings from text_input.\n \"\"\"\n # lowercase and strip accents (if option is set)\n if self._lower_case:\n text_input = case_fold_utf8(text_input)\n text_input = normalize_utf8(text_input, \"NFD\")\n text_input = string_ops.regex_replace(text_input, r\"\\p{Mn}\", \"\")\n else:\n # utf8 normalization\n if self._normalization_form is not None:\n text_input = normalize_utf8(text_input, self._normalization_form)\n\n # strip out control characters\n text_input = string_ops.regex_replace(text_input, r\"\\p{Cc}|\\p{Cf}\", \" \")\n return regex_split_ops.regex_split_with_offsets(\n text_input, self._delim_regex_pattern, self._keep_delim_regex_pattern,\n \"BertBasicTokenizer\")\n\n\nclass BertTokenizer(TokenizerWithOffsets):\n r\"\"\"Tokenizer used for BERT.\n\n This tokenizer applies an end-to-end, text string to wordpiece tokenization.\n It first applies basic tokenization, and then follwed by wordpiece\n tokenization.\n\n See BasicTokenizer and WordpieceTokenizer for their respective details.\n\n Attributes:\n vocab_lookup_table: A lookup table implementing the LookupInterface\n containing the vocabulary of subwords or a string which is the file path\n to the vocab.txt file.\n suffix_indicator: (optional) The characters prepended to a wordpiece to\n indicate that it is a suffix to another subword. Default is '##'.\n max_bytes_per_word: (optional) Max size of input token. Default is 100.\n max_chars_per_token: (optional) Max size of subwords, excluding suffix\n indicator. If known, providing this improves the efficiency of decoding\n long words.\n token_out_type: (optional) The type of the token to return. This can be\n `tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.\n unknown_token: (optional) The value to use when an unknown token is found.\n Default is \"[UNK]\". If this is set to a string, and `token_out_type` is\n `tf.int64`, the `vocab_lookup_table` is used to convert the\n `unknown_token` to an integer. If this is set to `None`, out-of-vocabulary\n tokens are left as is.\n split_unknown_characters: (optional) Whether to split out single unknown\n characters as subtokens. 
If False (default), words containing unknown\n characters will be treated as single unknown tokens.\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead of\n stripping them away.\n normalization_form: If true and lower_case=False, the input text will be\n normalized to `normalization_form`. See normalize_utf8() op for a list of\n valid values.\n preserve_unused_token: If true, text in the regex format `\\\\[unused\\\\d+\\\\]`\n will be treated as a token and thus remain preserved as is to be looked up\n in the vocabulary.\n \"\"\"\n\n def __init__(self,\n vocab_lookup_table,\n suffix_indicator=\"##\",\n max_bytes_per_word=100,\n max_chars_per_token=None,\n token_out_type=dtypes.int64,\n unknown_token=\"[UNK]\",\n split_unknown_characters=False,\n lower_case=False,\n keep_whitespace=False,\n normalization_form=None,\n preserve_unused_token=False):\n if isinstance(vocab_lookup_table, str) or isinstance(\n vocab_lookup_table, ops.Tensor):\n init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)\n vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(\n init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)\n\n print(\"Before \", type(lower_case))\n if isinstance(lower_case, ops.Tensor): \n lower_case = tf.compat.v1.get_default_session().run(lower_case)\n print(\"After \", type(lower_case))\n\n self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,\n normalization_form,\n preserve_unused_token)\n self._wordpiece_tokenizer = WordpieceTokenizer(\n vocab_lookup_table, suffix_indicator, max_bytes_per_word,\n max_chars_per_token, token_out_type, unknown_token,\n split_unknown_characters)\n\n def tokenize_with_offsets(self, text_input):\n tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)\n wordpieces, wp_begin, wp_end = (\n self._wordpiece_tokenizer.tokenize_with_offsets(tokens))\n begin_expanded = array_ops.expand_dims(begin, axis=2)\n final_begin = begin_expanded + wp_begin\n final_end = begin_expanded + wp_end\n return wordpieces, final_begin, final_end\n\n def tokenize(self, text_input):\n \"\"\"Performs untokenized text to wordpiece tokenization for BERT.\n\n Args:\n text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8\n strings.\n\n Returns:\n A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string\n contents (or ID in the vocab_lookup_table representing that string)\n of the `jth` token in `input[i1...iN]`\n \"\"\"\n tokens = self._basic_tokenizer.tokenize(text_input)\n return self._wordpiece_tokenizer.tokenize(tokens)\n"
] | [
[
"tensorflow.python.ops.lookup_ops.TextFileIdTableInitializer",
"tensorflow.python.ops.lookup_ops.StaticVocabularyTableV1",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.string_ops.regex_replace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
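The BertTokenizer in the row above is normally consumed through the public tensorflow_text package rather than instantiated from these internal modules. The sketch below shows that typical entry point, assuming tensorflow and tensorflow_text are installed; the vocabulary file is a toy stand-in written on the fly, not a real BERT vocab.

import tensorflow as tf
import tensorflow_text as tf_text

# Toy vocabulary written on the fly; a real vocab.txt would be used in practice.
vocab = ["[UNK]", "[CLS]", "[SEP]", "hello", "world", "great", "##ing"]
with open("toy_vocab.txt", "w") as f:
    f.write("\n".join(vocab))

tokenizer = tf_text.BertTokenizer("toy_vocab.txt", lower_case=True)
wordpieces = tokenizer.tokenize(tf.constant(["Hello greating world"]))
print(wordpieces)  # RaggedTensor of wordpiece ids, shape [batch, words, pieces]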
riotu-lab/tf2trt_with_onnx | [
"f9828ed99af5530836bf6ee608e631502dfb0f02"
] | [
"inference.py"
] | [
"import tensorrt as trt\nimport pycuda.driver as cuda\nimport numpy as np\nimport pycuda.autoinit \n\ndef allocate_buffers(engine, batch_size, data_type):\n\n \"\"\"\n This is the function to allocate buffers for input and output in the device\n Args:\n engine : The path to the TensorRT engine. \n batch_size : The batch size for execution time.\n data_type: The type of the data for input and output, for example trt.float32. \n \n Output:\n h_input_1: Input in the host.\n d_input_1: Input in the device. \n h_output_1: Output in the host. \n d_output_1: Output in the device. \n stream: CUDA stream.\n\n \"\"\"\n\n # Determine dimensions and create page-locked memory buffers (which won't be swapped to disk) to hold host inputs/outputs.\n h_input_1 = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(data_type))\n h_output = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(data_type))\n # Allocate device memory for inputs and outputs.\n d_input_1 = cuda.mem_alloc(h_input_1.nbytes)\n\n d_output = cuda.mem_alloc(h_output.nbytes)\n # Create a stream in which to copy inputs/outputs and run inference.\n stream = cuda.Stream()\n return h_input_1, d_input_1, h_output, d_output, stream \n\ndef load_images_to_buffer(pics, pagelocked_buffer):\n preprocessed = np.asarray(pics).ravel()\n np.copyto(pagelocked_buffer, preprocessed) \n\ndef do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):\n \"\"\"\n This is the function to run the inference\n Args:\n engine : Path to the TensorRT engine \n pics_1 : Input images to the model. \n h_input_1: Input in the host \n d_input_1: Input in the device \n h_output_1: Output in the host \n d_output_1: Output in the device \n stream: CUDA stream\n batch_size : Batch size for execution time\n height: Height of the output image\n width: Width of the output image\n \n Output:\n The list of output images\n\n \"\"\"\n print('load images to buffer')\n load_images_to_buffer(pics_1, h_input_1)\n\n with engine.create_execution_context() as context:\n context.debug_sync = False\n # Transfer input data to the GPU.\n cuda.memcpy_htod_async(d_input_1, h_input_1, stream)\n\n # Run inference.\n print('load profiler')\n context.profiler = trt.Profiler()\n print('execute')\n context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)])\n print('Transfer predictions back from the GPU.')\n # Transfer predictions back from the GPU.\n cuda.memcpy_dtoh_async(h_output, d_output, stream)\n # Synchronize the stream\n stream.synchronize()\n # Return the host output.\n print(h_output.shape)\n out = h_output.reshape((1,-1))\n return out \n"
] | [
[
"numpy.copyto",
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
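Once the CUDA specifics are stripped away, allocate_buffers() and load_images_to_buffer() in the row above reduce to a host-side flatten-and-copy. The numpy-only sketch below reproduces that copy with a plain ndarray standing in for the page-locked buffer, so it runs without TensorRT or pycuda.

import numpy as np

batch_size, height, width, channels = 1, 4, 4, 3
# A plain ndarray stands in for the buffer from cuda.pagelocked_empty().
host_buffer = np.empty(batch_size * height * width * channels, dtype=np.float32)

images = np.random.rand(batch_size, height, width, channels).astype(np.float32)
np.copyto(host_buffer, np.asarray(images).ravel())  # same copy as load_images_to_buffer()

assert np.array_equal(host_buffer, images.ravel())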
adines/imagepy | [
"d7cdf3273d25e06046626ef2ef9200b1846ea49a",
"d7cdf3273d25e06046626ef2ef9200b1846ea49a"
] | [
"imagepy/menus/File/Import/roi_plg.py",
"imagepy/menus/Process/Hydrology/hydrology_plgs.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 12/21/2018\n@author: BioinfoTongLI\n\"\"\"\nimport numpy as np\nimport read_roi\nfrom imagepy.core.engine import Free\nfrom imagepy import IPy\nfrom skimage.draw import polygon\n\nclass Plugin(Free):\n \"\"\"load_ij_roi: use read_roi and th pass to shapely objects\"\"\"\n title = 'Import Rois from IJ'\n\n para = {'path': '', 'name': 'Undefined', 'width': 512, 'height': 512}\n\n view = [(str, 'name', 'name', ''),\n (int, 'width', (1, 3000), 0, 'width', 'pix'),\n (int, 'height', (1, 3000), 0, 'height', 'pix')]\n\n def load(self):\n filt = '|'.join(['%s files (*.%s)|*.%s' % (i.upper(), i, i) for i in [\"zip\"]])\n return IPy.getpath(self.title, filt, 'open', self.para)\n\n def run(self, para=None):\n ls = read_roi.read_roi_zip(para['path'])\n img = np.zeros((para['height'], para['width']), dtype=np.int32)\n for i in ls:\n img[polygon(ls[i]['y'], ls[i]['x'], img.shape)] = int(i)\n IPy.show_img([img], para['name'])\n",
"import scipy.ndimage as ndimg\nimport numpy as np\nfrom numba import jit\nfrom imagepy.core.engine import Filter\nfrom imagepy.ipyalg import find_maximum, ridge, stair, isoline, watershed\nfrom imagepy.core.roi import PointRoi\n#from skimage.morphology import watershed, disk\nfrom skimage.filters import rank\nfrom skimage.filters import sobel\nfrom imagepy import IPy\n\nclass IsoLine(Filter):\n title = 'Find IsoLine'\n note = ['8-bit', 'not_slice', 'auto_snap', 'not_channel', 'preview']\n \n para = {'low':0, 'high':255, 'step':20, 'type':'stair'}\n view = [(int, 'low', (0,255), 0, 'low', 'value'),\n (int, 'high', (0,255), 0, 'high', 'value'),\n (int, 'step', (0, 50), 0, 'step', ''),\n (list, 'type', ['stair', 'white line', 'gray line', 'white line on ori'], str, 'output', '')]\n\n #process\n def run(self, ips, snap, img, para = None):\n img[:] = snap\n stair(img, para['low'], para['high'], para['step'])\n if para['type']=='stair':\n stair(img, para['low'], para['high'], para['step'])\n else: mark = isoline(img, para['low'], para['high'], para['step'])\n if para['type'] == 'stair':return\n elif para['type'] == 'white line':\n img[:] = mark\n elif para['type'] == 'gray line':\n np.minimum(snap, mark, out=img)\n if para['type'] == 'white line on ori':\n np.maximum(snap, mark, out=img)\n\nclass FindMax(Filter):\n title = 'Find Maximum'\n note = ['8-bit', 'not_slice', 'auto_snap', 'not_channel', 'preview']\n \n para = {'tol':2, 'mode':False, 'wsd':False}\n view = [(int, 'tol', (0,100), 0, 'tolerance', 'value')]\n\n def run(self, ips, snap, img, para = None):\n pts = find_maximum(self.ips.img, para['tol'])\n self.ips.roi = PointRoi([tuple(i) for i in pts[:,::-1]])\n self.ips.update()\n\nclass FindMin(Filter):\n title = 'Find Minimum'\n note = ['8-bit', 'not_slice', 'auto_snap', 'not_channel', 'preview']\n \n para = {'tol':2, 'mode':False, 'wsd':False}\n view = [(int, 'tol', (0,100), 0, 'tolerance', 'value')]\n\n def run(self, ips, snap, img, para = None):\n pts = find_maximum(self.ips.img, para['tol'], False)\n self.ips.roi = PointRoi([tuple(i) for i in pts[:,::-1]])\n self.ips.update()\n\nclass UPRidge(Filter):\n title = 'Find Riedge'\n note = ['8-bit', 'not_slice', 'auto_snap', 'not_channel', 'preview']\n \n para = {'sigma':1.0, 'thr':0, 'ud':True, 'type':'white line'}\n view = [(float, 'sigma', (0,5), 1, 'sigma', 'pix'),\n ('slide', 'thr', (0,255), 0, 'Low'),\n (bool, 'ud', 'ascend'),\n (list, 'type', ['white line', 'gray line', 'white line on ori'], str, 'output', '')]\n\n def load(self, ips):\n self.buflut = ips.lut\n ips.lut = ips.lut.copy()\n return True\n \n def preview(self, ips, para):\n ips.lut[:] = self.buflut\n if para['ud']:\n ips.lut[:para['thr']] = [0,255,0]\n else:\n ips.lut[para['thr']:] = [255,0,0]\n ips.update()\n\n #process\n def run(self, ips, snap, img, para = None):\n self.ips.lut[:] = self.buflut\n ndimg.gaussian_filter(snap, para['sigma'], output=img)\n mark = img<para['thr'] if para['ud'] else img>para['thr']\n mark = mark.astype(np.uint8)\n\n ridge(img, mark, para['ud'])\n if para['type'] == 'white line':\n img[:] = mark\n if para['type'] == 'gray line':\n np.minimum(snap, mark, out=img)\n if para['type'] == 'white line on ori':\n #img //=2\n np.maximum(snap, mark, out=img)\n\nclass ARidge(Filter):\n title = 'Active Ridge'\n note = ['8-bit', 'not_slice', 'auto_snap', 'not_channel']\n \n para = {'sigma':1.0, 'ud':True, 'type':'white line'}\n view = [(float, (0,5), 1, 'sigma', 'sigma', 'pix'),\n (list, 'type', ['white line', 'gray line', 'white line on ori'], str, 
'output', ''),\n (bool, 'ud', 'ascend')]\n \n def run(self, ips, snap, img, para = None):\n mark = np.zeros_like(img, dtype=np.uint8)\n ips.roi.sketch(mark, color=1)\n ridge(img, mark, para['ud'])\n if para['type'] == 'white line':\n img[:] = mark\n if para['type'] == 'gray line':\n np.minimum(snap, mark, out=img)\n if para['type'] == 'white line on ori':\n #img //=2\n np.maximum(snap, mark, out=img)\n\nclass Watershed(Filter):\n title = 'Find Watershed'\n note = ['8-bit', 'auto_snap', 'not_channel', 'preview']\n \n para = {'sigma':1.0, 'thr':0, 'con':False, 'ud':True, 'type':'white line'}\n view = [(float, 'sigma', (0,5), 1, 'sigma', 'pix'),\n ('slide', 'thr', (0,255), 0, 'Low'),\n (bool, 'con', 'full connectivity'),\n (bool, 'ud', 'ascend'),\n (list, 'type', ['white line', 'gray line', 'white line on ori'], str, 'output', '')]\n\n def load(self, ips):\n self.buflut = ips.lut\n ips.lut = ips.lut.copy()\n return True\n \n def preview(self, ips, para):\n ips.lut[:] = self.buflut\n if para['ud']:\n ips.lut[:para['thr']] = [0,255,0]\n else:\n ips.lut[para['thr']:] = [255,0,0]\n ips.update()\n\n #process\n def run(self, ips, snap, img, para = None):\n self.ips.lut[:] = self.buflut\n ndimg.gaussian_filter(snap, para['sigma'], output=img)\n mark = img<para['thr'] if para['ud'] else img>para['thr']\n\n markers, n = ndimg.label(mark, np.ones((3,3)), output=np.uint16)\n if not para['ud']:img[:] = 255-img\n mark = watershed(img, markers, line=True, conn=para['con']+1)\n mark = np.multiply((mark==0), 255, dtype=np.uint8)\n if para['type'] == 'white line':\n img[:] = mark\n if para['type'] == 'gray line':\n np.minimum(snap, mark, out=img)\n if para['type'] == 'white line on ori':\n #img //=2\n np.maximum(snap, mark, out=img)\n\nclass UPWatershed(Filter):\n title = 'Up And Down Watershed'\n note = ['8-bit', 'auto_msk', 'auto_snap', 'preview']\n \n para = {'thr1':0, 'thr2':255, 'type':'line'}\n view = [('slide', 'thr1', (0,255), 0, 'Low'),\n ('slide', 'thr2', (0,255), 0, 'High'),\n (list, 'type', ['line', 'up area', 'down area'], str, 'output', '')]\n\n def load(self, ips):\n self.buflut = ips.lut\n ips.lut = ips.lut.copy()\n return True\n \n def preview(self, ips, para):\n ips.lut[:] = self.buflut\n ips.lut[:para['thr1']] = [0,255,0]\n ips.lut[para['thr2']:] = [255,0,0]\n ips.update()\n\n def cancel(self, ips):\n ips.lut = self.buflut\n ips.update()\n\n #process\n def run(self, ips, snap, img, para = None):\n edge = sobel(snap)\n img[:] = 0\n img[snap>para['thr2']] = 2\n img[snap<para['thr1']] = 1\n ips.lut = self.buflut\n mark = watershed(edge, img, line=True)\n img[:] = ips.range[0]\n if para['type'] == 'line': \n img[mark==0] = ips.range[1]\n elif para['type'] == 'up area':\n img[mark!=1] = ips.range[1]\n elif para['type'] == 'down area':\n img[mark!=2] = ips.range[1]\n\nclass ROIWatershed(Filter):\n title = 'Watershed With ROI'\n note = ['8-bit', 'auto_snap', 'not_channel']\n \n para = {'sigma':0, 'type':'white line', 'con':False, 'ud':True}\n view = [(bool, 'con', 'full connectivity'),\n (bool, 'ud', 'ascend'),\n (list, 'type', ['white line', 'gray line', 'white line on ori'], str, 'output', '')]\n \n def run(self, ips, snap, img, para = None):\n #denoised = rank.median(img, disk(para['sigma']))\n #gradient = rank.gradient(denoised, disk(para['gdt']))\n ndimg.gaussian_filter(snap, para['sigma'], output=img)\n\n markers, n = ndimg.label(ips.get_msk(), np.ones((3,3)), output=np.uint32)\n if not para['ud']:img[:] = 255-img\n mark = watershed(img, markers, line=True, conn=para['con']+1)\n mark = 
np.multiply((mark==0), 255, dtype=np.uint8)\n\n if para['type'] == 'white line':\n img[:] = mark\n if para['type'] == 'gray line':\n np.minimum(snap, mark, out=img)\n if para['type'] == 'white line on ori':\n np.maximum(snap, mark, out=img)\n\nplgs = [FindMax, FindMin, IsoLine, '-', UPRidge, ARidge, '-', Watershed, UPWatershed, ROIWatershed]"
] | [
[
"numpy.zeros"
],
[
"numpy.maximum",
"scipy.ndimage.gaussian_filter",
"numpy.multiply",
"numpy.minimum",
"numpy.ones",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
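The hydrology plugins in the row above share one marker/overlay recipe: Gaussian-smooth the snapshot, threshold it into markers, label the connected components, then blend a line mask back onto the original with np.minimum / np.maximum. The sketch below walks through that recipe on random data and deliberately leaves out the imagepy watershed call itself.

import numpy as np
import scipy.ndimage as ndimg

snap = (np.random.rand(64, 64) * 255).astype(np.uint8)  # stand-in image
img = np.zeros_like(snap)

ndimg.gaussian_filter(snap, 2.0, output=img)         # smoothed copy
mark = (img < 100).astype(np.uint8)                  # threshold into markers
markers, n = ndimg.label(mark, np.ones((3, 3)))      # connected components

line = np.multiply(markers == 0, 255).astype(np.uint8)  # line mask
white_on_ori = np.maximum(snap, line)                   # "white line on ori"
gray_line = np.minimum(snap, line)                      # "gray line"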
bagustris/emotion | [
"5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36",
"5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36",
"5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36",
"5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36"
] | [
"ertk/stats.py",
"scripts/results/view_history.py",
"ertk/tensorflow/utils.py",
"scripts/utils/combine_datasets.py"
] | [
"from functools import partial\nfrom typing import Callable, List, Union\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import friedmanchisquare, rankdata\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom statsmodels.stats.libqsturng import qsturng\n\nMatrix = List[List[float]]\n\n\ndef friedman_nemenyi(table: pd.DataFrame, alpha: float = 0.05):\n \"\"\"Runs Friedman test on given table and optionally graphs a\n critical-difference diagram.\n\n Args:\n -----\n table: DataFrame\n The data table, with subjects as rows and independent variable\n (condition) as columns.\n alpha: float\n Significance level, must be in the range (0, 1), default is\n 0.05.\n\n Returns:\n --------\n pval: float\n The p-value for the Friedman test.\n cd: float\n The critical difference from the Nemenyi post-hoc test.\n df: pd.DataFrame\n A table containing statistics relating to ranking and average\n values of the condiions. The dataframe has these columns:\n \"mean_rank\", \"mean\", \"std\", \"median\", \"mad\", \"effect_size\".\n \"\"\"\n _, pval = friedmanchisquare(*table.transpose().to_numpy())\n names = list(table.columns)\n avgrank = rankdata(-table.to_numpy(), axis=1).mean(0)\n df = pd.DataFrame(\n {\n \"mean_rank\": avgrank,\n \"mean\": table.mean(),\n \"std\": table.std(),\n \"median\": table.median(),\n \"mad\": table.mad(),\n },\n index=names,\n ).sort_values(\"mean_rank\")\n\n topclf = df.index[0]\n n, k = table.shape\n # Effect size is calculated in terms of differences in MAD\n df[\"effect_size\"] = (df.loc[topclf, \"median\"] - df[\"median\"]) / np.sqrt(\n ((n - 1) * df.loc[topclf, \"mad\"] ** 2 + (n - 1) * df[\"mad\"] ** 2) / (2 * n - 2)\n )\n cd = qsturng(1 - alpha, k, np.inf) * np.sqrt((k * (k + 1)) / (12 * n))\n return pval, cd, df\n\n\ndef _get_dist_func(metric: Union[Callable, str], **kwargs):\n if callable(metric):\n return partial(metric, **kwargs)\n else:\n if metric != \"minkowski\" and \"p\" in kwargs:\n del kwargs[\"p\"]\n if metric != \"mahalanobis\" and \"VI\" in kwargs:\n del kwargs[\"VI\"]\n return partial(pairwise_distances, metric=metric, **kwargs)\n\n\ndef bhattacharyya_dist(x: np.ndarray, y: np.ndarray, pinv: bool = False):\n \"\"\"Calculate Bhattacharyya distance between multivariate Gaussian\n distributions.\n\n Args:\n -----\n x: array-like\n Data matrix of shape (n1_samples, n_features) corresponding to\n the first group.\n y: array-like\n Data matrix of shape (n2_samples, n_features) corresponding to\n the second group.\n pinv: bool\n Use pseudoinverse instead of inverse. This is useful if the\n covariance matrices don't have full rank or otherwise aren't\n invertible.\n \"\"\"\n mu1 = np.expand_dims(np.mean(x, axis=0), 1)\n mu2 = np.expand_dims(np.mean(y, axis=0), 1)\n cov1 = np.cov(x, rowvar=False)\n cov2 = np.cov(y, rowvar=False)\n cov = (cov1 + cov2) / 2\n _, ldet1 = np.linalg.slogdet(cov1)\n _, ldet2 = np.linalg.slogdet(cov2)\n _, ldet = np.linalg.slogdet(cov)\n if pinv:\n covinv = np.linalg.pinv(cov, hermitian=True, rcond=1e-8)\n else:\n covinv = np.linalg.inv(cov)\n db = (mu1 - mu2).T.dot(covinv).dot(mu1 - mu2) / 8 + ldet / 2 - ldet1 / 4 - ldet2 / 4\n\n return db.item()\n\n\ndef corr_ratio(x: np.ndarray, groups: Union[List[int], np.ndarray]):\n \"\"\"Calculates correlation ratio for each feature using the given\n groups.\n\n Args:\n -----\n data: numpy.ndarray\n Data matrix, with shape (n_instances, n_features).\n groups: list or numpy.ndarray\n 1D array of groups assignments of length n_instances. 
Groups\n should be labelled from 0 to G - 1 inclusive, where G is the\n number of groups.\n\n Returns:\n --------\n eta: numpy.ndarray\n 1D array of correlation coefficients of length n_features. Each\n value is in [0, 1] except if a feature takes only one value, in\n which case eta will be nan.\n \"\"\"\n groups = np.array(groups)\n n_groups = groups.max() + 1\n counts = np.bincount(groups)\n mean = x.mean(0)\n g_means = np.empty((n_groups, x.shape[1]))\n for g in range(n_groups):\n g_means[g, :] = x[groups == g].mean(0)\n num = np.sum(counts[:, None] * (g_means - mean) ** 2, axis=0)\n den = np.sum((x - mean) ** 2, axis=0)\n old_err = np.seterr(divide=\"ignore\", invalid=\"ignore\")\n eta2 = num / den\n np.seterr(**old_err)\n return np.sqrt(eta2)\n\n\ndef dunn(\n x: np.ndarray,\n clusters: Union[List[int], np.ndarray],\n intra_method: str = \"mean\",\n inter_method: str = \"cent\",\n metric: Union[Callable, str] = \"l2\",\n p: int = 2,\n):\n \"\"\"Calculates the Dunn index for cluster \"goodness\".\n\n Args:\n -----\n data: numpy.ndarray\n Data matrix, with shape (n_instances, n_features).\n clusters: list or numpy.ndarray\n 1D array of cluster assignments of length n_instances. Clusters\n should be labelled from 0 to C - 1 inclusive, where C is the\n number of clusters.\n intra_method: str\n Method for calculating intra-cluster distance. One of \"max\",\n \"mean\", \"cent\".\n inter_method: str\n Method for calculating inter-cluster distance. One of \"cent\".\n metric: str or callable\n Distance metric. If str, must be one of the sklearn or scipy\n distance methods. If callable, must take one positional argument\n and return a pairwise distance matrix.\n p: int\n Value of p for p-norm when using \"lp\" distance metric.\n\n Returns:\n --------\n dunn: float\n The Dunn index for this data and cluster assignment.\n \"\"\"\n clusters = np.array(clusters, dtype=int)\n n_clusters = clusters.max() + 1\n d = _get_dist_func(metric, p=p)\n\n intra = np.zeros(n_clusters)\n for c in range(n_clusters):\n clust_data = x[clusters == c]\n if intra_method == \"max\":\n idx = np.triu_indices(len(clust_data))\n intra[c] = d(clust_data)[idx].max()\n elif intra_method == \"mean\":\n idx = np.triu_indices(len(clust_data))\n intra[c] = d(clust_data)[idx].mean()\n elif intra_method == \"cent\":\n mean = clust_data.mean(0)\n intra[c] = d(clust_data, mean[None, :]).mean()\n\n inter = np.zeros((n_clusters, n_clusters))\n for i in range(n_clusters):\n inter[i, i] = np.inf # To avoid min = 0\n for j in range(i + 1, n_clusters):\n if inter_method == \"cent\":\n mean_i = x[clusters == i].mean(0)\n mean_j = x[clusters == j].mean(0)\n inter[i, j] = inter[j, i] = d(mean_i[None, :], mean_j[None, :])\n\n return inter.min() / intra.max()\n\n\ndef kappa(data: np.ndarray):\n \"\"\"Calculates Fleiss' kappa for inter-rater agreement.\n\n Args:\n -----\n data: numpy.ndarray\n The data matrix, in the form (raters x units).\n \"\"\"\n cats = np.unique(data)\n n, N = data.shape\n\n counts = np.stack([np.sum(data == c, 0) for c in cats], 1)\n\n p_j = np.sum(counts, axis=0) / (N * n)\n assert np.isclose(np.sum(p_j), 1)\n Pe = np.sum(p_j ** 2)\n\n P = (np.sum(counts ** 2, 1) - n) / (n * (n - 1))\n Pbar = np.mean(P)\n\n return (Pbar - Pe) / (1 - Pe)\n\n\nclass Deltas:\n @staticmethod\n def nominal(c: int, k: int):\n return float(c != k)\n\n @staticmethod\n def interval(c: float, k: float):\n return (c - k) ** 2\n\n\ndef alpha(\n data: np.ndarray,\n delta: Union[Callable[[int, int], float], List[List[float]], str] = \"nominal\",\n):\n 
\"\"\"Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for\n inter-rater agreement.\n\n [1] K. Krippendorff, Content analysis: An introduction to its\n methodology. Sage publications, 2004.\n\n Args:\n -----\n data: numpy.ndarray\n The data matrix, shape (n_raters, n_units). Each cell (i, j)\n represents the value assigned to unit j by rater i, or 0\n representing no response.\n delta: callable, 2-D array-like or str\n The delta metric. Default is the nominal metric, which takes the\n value 1 in case c != k and 0 otherwise.\n \"\"\"\n # The following implementation was based off the Wikipedia article:\n # https://en.wikipedia.org/wiki/Krippendorff%27s_alpha\n\n # Response categories go from 1 to R, 0 represents no response\n R = np.max(data)\n counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T\n count_sum = np.sum(counts, 0)\n assert len(count_sum) == R + 1\n\n def ordinal(c: int, k: int):\n if k < c:\n c, k = k, c\n s = (\n sum(count_sum[g] for g in range(c, k + 1))\n - (count_sum[c] + count_sum[k]) / 2\n )\n return s ** 2\n\n if isinstance(delta, str):\n delta = {\n \"nominal\": Deltas.nominal,\n \"ordinal\": ordinal,\n \"interval\": Deltas.interval,\n }[delta]\n\n if not callable(delta):\n try:\n delta[0][0]\n except IndexError:\n raise TypeError(\"delta must be either str, callable or 2D array.\")\n\n def _delta(c, k):\n new_delta = delta\n return new_delta[c][k]\n\n delta = _delta\n\n m_u = np.sum(counts[:, 1:], 1)\n\n valid = m_u >= 2\n counts = counts[valid]\n m_u = m_u[valid]\n data = data[:, valid]\n\n n = np.sum(m_u)\n\n n_cku = np.matmul(counts[:, :, None], counts[:, None, :])\n for i in range(R + 1):\n n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1)\n\n D_o = 0\n for c in range(1, R + 1):\n for k in range(1, R + 1):\n D_o += delta(c, k) * n_cku[:, c, k]\n D_o = np.sum(D_o / (n * (m_u - 1)))\n\n D_e = 0\n P_ck = np.bincount(data.flat)\n for c in range(1, R + 1):\n for k in range(1, R + 1):\n D_e += delta(c, k) * P_ck[c] * P_ck[k]\n D_e /= n * (n - 1)\n\n return 1 - D_o / D_e\n",
"from pathlib import Path\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom ertk.utils import PathlibPath\n\n\[email protected]()\[email protected](\"input\", type=PathlibPath(exists=True, dir_okay=False))\[email protected](\"--individual\", help=\"Plot individual folds for each metric.\")\ndef main(input: Path, individual: bool):\n \"\"\"Displays plot of training epochs for cross-validation rounds.\"\"\"\n df = pd.read_csv(input, header=[0, 1], index_col=0)\n\n metrics = df.columns.get_level_values(1).unique()\n n_folds = len(df.columns.get_level_values(0).unique())\n metric_types = [x for x in metrics if not x.startswith(\"val_\")]\n mean = df.mean(axis=1, level=1)\n std = df.std(axis=1, level=1)\n std_err = std / np.sqrt(n_folds)\n for metric in metric_types:\n cols = [metric, \"val_\" + metric]\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.set_title(\"mean \" + metric)\n ax.set_xlabel(\"epoch\")\n for col in cols:\n x = mean.index\n y = mean[col]\n err = std_err[col]\n ax.plot(x, y, label=\"valid\" if col.startswith(\"val_\") else \"train\")\n ax.fill_between(x, y - 2 * err, y + 2 * err, alpha=0.2)\n ax.legend()\n\n if individual:\n metric_dfs = {}\n for key in df.columns.get_level_values(1).unique():\n metric_dfs[key] = df.xs(key, axis=1, level=1)\n for key, df in metric_dfs.items():\n df.plot(title=key)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from functools import wraps\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.pipeline import Pipeline\nfrom tensorflow.keras.layers import Layer, Wrapper\nfrom tensorflow.keras.losses import Loss\nfrom tensorflow.keras.metrics import Metric\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam, Optimizer\nfrom tensorflow.keras.utils import Sequence\n\nfrom ..utils import batch_arrays, shuffle_multiple\n\nTFModelFunction = Callable[..., Union[Model, Pipeline]]\nDataFunction = Callable[..., tf.data.Dataset]\n\n\ndef compile_wrap(\n model_fn: Optional[TFModelFunction] = None,\n opt_cls: Type[Optimizer] = Adam,\n opt_kwargs: Dict[str, Any] = dict(learning_rate=0.0001),\n metrics: List[Union[str, Metric]] = [\"sparse_categorical_accuracy\"],\n loss: Union[str, Loss] = \"sparse_categorical_crossentropy\",\n **compile_kwargs,\n):\n \"\"\"Wrapper that takes a model creation function and gives a new\n function which returns a compiled model with the given compile\n parameters.\n\n Args:\n -----\n model_fn: callable, optional\n A method that returns an uncompiled model.\n opt_cls: type\n The Optimizer class to use.\n opt_kwargs: dict\n Keyword arguments to pass to opt_cls.\n metrics: list\n List of metrics to use.\n loss: Loss\n The loss function to use.\n **compile_kwargs: dict\n Other keyword arguments to pass to the model's compile() method.\n \"\"\"\n\n def _wrapper(func: Callable[..., Model]):\n @wraps(func)\n def new_model_fn(*args, **kwargs) -> Model:\n model = func(*args, **kwargs)\n model.compile(\n optimizer=opt_cls(**opt_kwargs),\n metrics=metrics,\n loss=loss,\n **compile_kwargs,\n )\n return model\n\n return new_model_fn\n\n if model_fn is not None:\n return _wrapper(model_fn)\n\n return _wrapper\n\n\ndef test_fit(\n model_fn: TFModelFunction,\n input_size: Tuple[int, ...],\n *args,\n batch_size: int = 64,\n num_instances: int = 7000,\n **kwargs,\n):\n \"\"\"Tests the given model architecture/structure by training it on\n dummy data.\n\n Args:\n -----\n model_fn: callable\n Function that returns a Keras model. Called as model_fn(*args,\n **kwargs).\n input_size: tuple of int\n Input shape to the model. 
This is used to generate dummy data of\n the correct shape.\n *args\n Positional arguments to pass to model_fn().\n batch_size: int\n The batch size to use.\n num_instances: int\n The number of instances to generate.\n **kwargs\n Keyword arguments to pass to model_fn().\n \"\"\"\n for gpu in tf.config.get_visible_devices(\"GPU\"):\n tf.config.experimental.set_memory_growth(gpu, True)\n\n compiled_fn = compile_wrap(model_fn)\n model = compiled_fn(*args, n_classes=7, **kwargs)\n model.summary()\n\n valid = num_instances // 10\n rng = np.random.default_rng()\n x = rng.normal(size=(num_instances,) + input_size)\n y = rng.integers(7, size=num_instances)\n train_data = tf.data.Dataset.from_tensor_slices((x[valid:], y[valid:]))\n train_data = train_data.batch(batch_size)\n valid_data = tf.data.Dataset.from_tensor_slices((x[:valid], y[:valid]))\n valid_data = valid_data.batch(batch_size)\n model.fit(train_data, validation_data=valid_data, epochs=2, verbose=1)\n\n\ndef tf_dataset_gen(\n x: np.ndarray,\n y: np.ndarray,\n sample_weight: Optional[np.ndarray] = None,\n *,\n batch_size: int = 64,\n shuffle: bool = True,\n):\n \"\"\"Returns a TensorFlow generator Dataset instance with the given\n data.\n\n Args:\n -----\n x: numpy.ndarray\n A 2- or 3-D data matrix of shape (n_instances, n_features) or\n (n_instances, seq_len, n_features).\n y: numpy.ndarray\n A 1-D array of length n_instances containing numeric class\n labels.\n sample_weight: numpy.ndarray, optional\n A 1-D array of length n_instances containing sample weights.\n Added as third item in dataset if present.\n batch_size: int\n The batch size to use.\n shuffle: boolean\n Whether or not to shuffle the dataset. Note that shuffling is\n done *before* batching, unlike in `create_tf_dataset_ragged()`.\n \"\"\"\n\n def gen_inst():\n if shuffle:\n perm = np.random.permutation(len(x))\n else:\n perm = np.arange(len(x))\n\n if sample_weight is None:\n for i in perm:\n yield x[i], y[i]\n else:\n for i in perm:\n yield x[i], y[i], sample_weight[i]\n\n sig: Tuple[tf.TensorSpec, ...] = (\n tf.TensorSpec(shape=x[0].shape, dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.int64),\n )\n if sample_weight is not None:\n sig += (tf.TensorSpec(shape=(), dtype=tf.float32),)\n data = tf.data.Dataset.from_generator(gen_inst, output_signature=sig)\n return data.batch(batch_size).prefetch(2)\n\n\ndef tf_dataset_mem(\n x: np.ndarray,\n y: np.ndarray,\n sample_weight: Optional[np.ndarray] = None,\n *,\n batch_size: int = 64,\n shuffle: bool = True,\n) -> tf.data.Dataset:\n \"\"\"Returns a TensorFlow in-memory Dataset instance with the given\n data.\n\n Args:\n -----\n x: numpy.ndarray\n A 2- or 3-D data matrix of shape (n_instances, n_features) or\n (n_instances, seq_len, n_features).\n y: numpy.ndarray\n A 1-D array of length n_instances containing numeric class\n labels.\n sample_weight: numpy.ndarray, optional\n A 1-D array of length n_instances containing sample weights.\n Added as third item in dataset if present.\n batch_size: int\n The batch size to use.\n shuffle: boolean\n Whether or not to shuffle the dataset. 
Note that shuffling is\n done *before* batching, unlike in `create_tf_dataset_ragged()`.\n \"\"\"\n with tf.device(\"CPU\"):\n if sample_weight is None:\n data = tf.data.Dataset.from_tensor_slices((x, y))\n else:\n data = tf.data.Dataset.from_tensor_slices((x, y, sample_weight))\n\n if shuffle:\n data = data.shuffle(len(x))\n return data.batch(batch_size).prefetch(2)\n\n\ndef tf_dataset_mem_ragged(\n x: np.ndarray,\n y: np.ndarray,\n sample_weight: Optional[np.ndarray] = None,\n *,\n batch_size: int = 64,\n shuffle: bool = True,\n) -> tf.data.Dataset:\n \"\"\"Returns a TensorFlow in-memory Dataset instance from\n variable-length features.\n\n Args:\n -----\n x: numpy.ndarray\n A 3-D data matrix of shape (n_instances, length[i], n_features)\n with variable length axis 1.\n y: numpy.ndarray\n A 1-D array of length n_instances containing numeric class\n labels.\n sample_weight: numpy.ndarray, optional\n A 1-D array of length n_instances containing sample weights.\n Added as third item in dataset if present.\n batch_size: int\n The batch size to use.\n shuffle: boolean\n Whether or not to shuffle the dataset. Note that shuffling is\n done *after* batching, because sequences are sorted by length,\n then batched in similar lengths.\n \"\"\"\n\n def ragged_to_dense(x: tf.RaggedTensor, y):\n return x.to_tensor(), y\n\n def ragged_to_dense_weighted(x: tf.RaggedTensor, y, sample_weight):\n return x.to_tensor(), y, sample_weight\n\n # Sort according to length\n perm = np.argsort([len(a) for a in x])\n x = x[perm]\n y = y[perm]\n if sample_weight is not None:\n sample_weight = sample_weight[perm]\n\n ragged = tf.RaggedTensor.from_row_lengths(\n np.concatenate(list(x)), [len(a) for a in x]\n )\n with tf.device(\"CPU\"):\n if sample_weight is None:\n data = tf.data.Dataset.from_tensor_slices((ragged, y))\n else:\n data = tf.data.Dataset.from_tensor_slices((ragged, y, sample_weight))\n\n # Group similar lengths in batches, then shuffle batches\n data = data.batch(batch_size)\n if shuffle:\n data = data.shuffle(len(x) // batch_size + 1)\n\n if sample_weight is None:\n data = data.map(ragged_to_dense)\n else:\n data = data.map(ragged_to_dense_weighted)\n return data.prefetch(2)\n\n\nclass BatchedFrameSequence(Sequence):\n \"\"\"Creates a sequence of batches of frames to process.\n\n Parameters:\n -----------\n x: ndarray or list of ndarray\n Sequences of vectors.\n y: ndarray\n Labels corresponding to sequences in x.\n prebatched: bool, default = False\n Whether or not x has already been grouped into batches.\n batch_size: int, default = 32\n Batch size to use. Each generated batch will be at most this size.\n shuffle: bool, default = True\n Whether to shuffle the order of the batches.\n \"\"\"\n\n def __init__(\n self,\n x: Union[np.ndarray, List[np.ndarray]],\n y: np.ndarray,\n prebatched: bool = False,\n batch_size: int = 32,\n shuffle: bool = True,\n ):\n self.x = x\n self.y = y\n if not prebatched:\n self.x, self.y = batch_arrays(\n self.x, self.y, batch_size=batch_size, shuffle=shuffle\n )\n if shuffle:\n self.x, self.y = shuffle_multiple(self.x, self.y, numpy_indexing=True)\n\n def __len__(self):\n return len(self.x)\n\n def __getitem__(self, idx: int):\n return self.x[idx], self.y[idx]\n\n\nclass BatchedSequence(Sequence):\n \"\"\"Creates a sequence of batches to process.\n\n Parameters:\n -----------\n x: ndarray or list of ndarray\n Instance feature vectors. 
Each vector is assumed to be for a different\n instance.\n y: ndarray\n Labels corresponding to sequences in x.\n prebatched: bool, default = False\n Whether or not x has already been grouped into batches.\n batch_size: int, default = 32\n Batch size to use. Each generated batch will be at most this size.\n shuffle: bool, default = True\n Whether to shuffle the instances.\n \"\"\"\n\n def __init__(\n self, x: np.ndarray, y: np.ndarray, batch_size: int = 32, shuffle: bool = True\n ):\n self.x = x\n self.y = y\n self.batch_size = batch_size\n if shuffle:\n self.x, self.y = shuffle_multiple(self.x, self.y, numpy_indexing=True)\n\n def __len__(self):\n return int(np.ceil(len(self.x) / self.batch_size))\n\n def __getitem__(self, idx: int):\n sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)\n return self.x[sl], self.y[sl]\n\n\ndef print_linear_model_structure(model: Model):\n \"\"\"Prints the structure of a \"sequential\" model by listing the layer\n types and shapes in order.\n\n Args:\n -----\n model: Model\n The model to describe.\n \"\"\"\n\n def print_inner(model: Layer, depth: int = 0):\n indent = \"\\t\" * depth\n if not isinstance(model, Model):\n print(indent, model.name, model.output_shape)\n return\n\n for layer in model.layers:\n name = layer.name\n if name.startswith(\"tf_op_layer_\"):\n name = name[12:]\n\n print(indent, name, layer.output_shape)\n if isinstance(layer, Model):\n print_inner(layer, depth + 1)\n elif isinstance(layer, Wrapper):\n print_inner(layer.layer, depth + 1)\n\n print_inner(model)\n\n\ndef init_gpu_memory_growth():\n \"\"\"Sets TensorFlow to allocate memory on GPU as needed instead of\n all at once.\n \"\"\"\n for gpu in tf.config.list_physical_devices(\"GPU\"):\n tf.config.experimental.set_memory_growth(gpu, True)\n",
"from pathlib import Path\nfrom typing import Tuple\n\nimport click\nimport netCDF4\nimport numpy as np\n\nfrom ertk.dataset import write_features\nfrom ertk.utils import PathlibPath\n\n\[email protected]()\[email protected](\"input\", type=PathlibPath(exists=True, dir_okay=False), nargs=-1)\[email protected](\"output\", type=Path)\ndef main(input: Tuple[Path], output: Path):\n \"\"\"Combines multiple INPUT netCDF datasets into a larger dataset\n and writes to OUTPUT.\n \"\"\"\n\n if len(input) == 0:\n raise ValueError(\"No input files specified.\")\n\n total_length = 0\n total_instances = 0\n num_features = 0\n feature_names = []\n for filename in input:\n data = netCDF4.Dataset(filename)\n if num_features == 0:\n num_features = len(data.dimensions[\"features\"])\n elif len(data.dimensions[\"features\"]) != num_features:\n raise ValueError(\"Feature size of all datasets must match.\")\n total_length += len(data.dimensions[\"concat\"])\n total_instances += len(data.dimensions[\"instance\"])\n feature_names = list(data.variables[\"feature_names\"])\n data.close()\n\n # Preallocate arrays to save storing as lists in memory\n features = np.empty((total_length, num_features), dtype=np.float32)\n names = np.empty(total_instances, dtype=str)\n slices = np.empty(total_instances, dtype=int)\n l_idx = 0\n i_idx = 0\n for filename in input:\n print(f\"Opened netCDF4 dataset {filename}\")\n data = netCDF4.Dataset(filename)\n length = len(data.dimensions[\"concat\"])\n instances = len(data.dimensions[\"instance\"])\n features[l_idx : l_idx + length, :] = data.variables[\"features\"][:]\n names[i_idx : i_idx + instances] = data.variables[\"name\"][:]\n slices[i_idx : i_idx + instances] = data.variables[\"slices\"][:]\n l_idx += length\n i_idx += instances\n data.close()\n assert l_idx == total_length and i_idx == total_instances\n\n output.parent.mkdir(parents=True, exist_ok=True)\n write_features(\n output,\n corpus=\"combined\",\n names=names,\n slices=slices,\n features=features,\n feature_names=feature_names,\n )\n print(f\"Wrote netCDF4 dataset to {output}\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.sqrt",
"numpy.unique",
"numpy.linalg.inv",
"numpy.linalg.slogdet",
"numpy.matmul",
"numpy.seterr",
"numpy.max",
"numpy.cov",
"numpy.mean",
"numpy.bincount",
"numpy.linalg.pinv",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"pandas.read_csv",
"numpy.sqrt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"tensorflow.config.get_visible_devices",
"tensorflow.device",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.config.list_physical_devices",
"tensorflow.data.Dataset.from_generator",
"tensorflow.TensorSpec",
"numpy.random.default_rng"
],
[
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
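Of the dataset builders in the row above, tf_dataset_mem() is the simplest: slice (x, y) into a tf.data.Dataset, shuffle, batch, and prefetch. The sketch below reproduces that pipeline with toy shapes under TensorFlow 2.x; sample weights and the ragged variant are omitted.

import numpy as np
import tensorflow as tf

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 20)).astype(np.float32)
y = rng.integers(4, size=100)

data = tf.data.Dataset.from_tensor_slices((x, y))
data = data.shuffle(len(x)).batch(16).prefetch(2)

for batch_x, batch_y in data.take(1):
    print(batch_x.shape, batch_y.shape)  # (16, 20) (16,)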
sdickler/FINE | [
"3114fd009e80a7eadacffe26bf5ff8e6a126ac61"
] | [
"FINE/expansionModules/robustPipelineSizing.py"
] | [
"\"\"\"\nLast edited: January 20 2020\n\n|br| @author: FINE Developer Team (FZJ IEK-3) \\n\\n\nThe approaches used are described in\nRobinius et. al. (2019) \"Robust Optimal Discrete Arc Sizing for Tree-Shaped Potential Networks\"\nand they are further developed with the help of\nTheorem 10 of Labbé et. al. (2019) \"Bookings in the European gas market: characterisation of feasibility and\ncomputational complexity results\"\nand Lemma 3.4 and 3.5 of Schewe et. al. (preprint 2020) \"Computing Technical Capacities in the European Entry-Exit\nGas Market is NP-Hard\"\n\"\"\"\nimport pandas as pd\nfrom FINE import utils\nimport networkx as nx\nimport math\nimport pyomo.environ as py\nimport warnings\nfrom pyomo.opt import SolverFactory, SolverStatus, TerminationCondition\nimport numpy as np\nimport copy\nfrom scipy.optimize import fsolve\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport shapely as shp\nimport time\nfrom multiprocessing import Pool\nimport sys\nfrom functools import partial\n\ntry:\n import geopandas as gpd\nexcept ImportError:\n warnings.warn('The GeoPandas python package could not be imported.')\n\n\n# local type und value checker\n\ndef isPandasDataFrameNumber(dataframe):\n # check if dataframe is a pandas dataframe and if each value is float or int\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError(\"The input argument has to be a pandas DataFrame\")\n else:\n if not dataframe.select_dtypes(exclude=[\"float\", \"int\"]).empty:\n raise ValueError(\"The input pandas DataFrame has to contain only floats or ints\")\n\n\ndef isPandasSeriesPositiveNumber(pandasSeries):\n # Check if the input argument is a pandas series and it contains only positive numbers\n if not isinstance(pandasSeries, pd.Series):\n raise TypeError(\"The input argument has to be a pandas series\")\n else:\n for index in pandasSeries.index:\n utils.isPositiveNumber(pandasSeries[index])\n\n\ndef isNetworkxGraph(graph):\n # Check if the input argument is a networkx graph\n if not isinstance(graph, nx.Graph):\n raise TypeError(\"The input argument has to be a networkx graph\")\n\n\ndef isDictionaryPositiveNumber(dictionary):\n # Check if the input argument is a dictionary with positive numbers as values\n if not isinstance(dictionary, dict):\n raise TypeError(\"The input argument has to be a dictionary\")\n else:\n for key in dictionary.keys():\n utils.isPositiveNumber(dictionary[key])\n\n\ndef checkLowerUpperBoundsOfDicts(lowerDict, upperDict):\n # check if lowerDict and upperDict have the same keys and if lowerDict[key] <= upperDict[key] holds\n if not (lowerDict.keys() == upperDict.keys()):\n raise ValueError(\"The input arguments have to have the same keys\")\n else:\n for key in lowerDict.keys():\n if lowerDict[key] > upperDict[key]:\n raise ValueError(\"The lower bound has to be the smaller than the upper bound\")\n\n\ndef isListOfStrings(strings):\n # check if strings is list of strings\n if not isinstance(strings, list):\n raise TypeError(\"The input argument has to be a list\")\n else:\n for string in strings:\n utils.isString(string)\n\n\ndef isBool(boolean):\n # check if boolean is a bool\n if not isinstance(boolean, bool):\n raise TypeError(\"The input argument has to be a bool\")\n\n\n# End utils checks\n\n\ndef getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None):\n \"\"\"\n Determines the injection and withdrawal rates into a network from a component in an\n EnergySystemModel object or based on the fluid flow data.\n\n :param 
componentName: name of the network component in the EnergySystemModel class\n (only required the fluid flows are to be obtained from the EnergySystemModel class)\n |br| * the default value is ''\n :type componentName: string\n\n :param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be\n specified if the operationVariablesOptimumData are to be obtained from the\n EnergySystemModel object) \n |br| * the default value is None\n :type esM: FINE EnergySystemModel\n\n :param operationVariablesOptimumData: the injection and withdrawal rates into and out of the\n network can either be obtained from a DataFrame with the original fluid flows or an\n EnergySystemModel with an optimized Pyomo instance.\n In the former case, the argument is a pandas DataFrame with two index columns (specifying\n the names of the start and end node of a pipeline) and one index row (for the time steps).\n The data in the DataFrame denotes the flow coming from the start node and going to the end\n node [e.g. in kWh or Nm^3]. Example:\n\n 0 1 ... 8759\n node1 node2 0.1 0.0 ... 0.9\n node2 node3 0.0 0.3 ... 0.4\n node2 node1 0.9 0.9 ... 0.2\n node3 node2 1.1 0.2 ... 0.9\n\n |br| * the default value is None\n :type operationVariablesOptimumData: pandas DataFrame with non-negative floats\n\n :return: injection and withdrawal rates (withdrawals from the network are positive while\n injections are negative)\n :rtype: pandas DataFrame\n \"\"\"\n #TODO check type and value correctness\n\n # Get the original optimal operation variables\n if operationVariablesOptimumData is not None:\n op = operationVariablesOptimumData\n else:\n op = esM.componentModelingDict[esM.componentNames[componentName]]. \\\n getOptimalValues('operationVariablesOptimum')['values'].loc[componentName]\n\n # Get a map of the component's network\n if esM is None:\n mapN = {}\n for conn in operationVariablesOptimumData.index:\n loc, loc_ = conn\n mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_})\n mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc})\n else:\n mapN = esM.getComponent(componentName)._mapL\n\n # Initialize list for nodal injection and withdrawal time series data\n injectionWithdrawalRates, nodeIx = [], []\n\n # Reset connections set (not all indices might be in the operationVariablesOptimumData data)\n connections = set()\n\n # For each node loc, compute the injection and withdrawal rates \n for loc, locConn in mapN.items():\n # As in a few cases zero columns/ rows are dropped from data frames, two lists\n # of eligible connection indices are created.\n ixIn, ixOut = [], []\n for loc_, conn in locConn.items():\n if (loc, loc_) in op.index:\n ixOut.append((loc, loc_)), connections.add((loc, loc_))\n if (loc_, loc) in op.index:\n ixIn.append((loc_, loc)), connections.add((loc_, loc))\n # If either list has at least one entry, the incoming and outgoing flows are selected\n # from the original optimal flow variables and aggregated. 
The resulting commodity\n # withdrawals from the network are positive while injections are negative.\n if (len(ixIn) != 0) | (len(ixOut) != 0):\n injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum())\n nodeIx.append(loc)\n\n # Concat data to a pandas dataframe\n injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1)\n\n return injectionWithdrawalRates\n\ndef getNetworkLengthsFromESM(componentName, esM):\n \"\"\"\n Obtains the pipeline lengths of a transmission component in an EnergySystemModel class.\n \n :param componentName: name of the network component in the EnergySystemModel class\n (only required if the fluid flows are to be obtained from the EnergySystemModel class)\n |br| * the default value is ''\n :type componentName: string\n\n :param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be\n specified if the operationVariablesOptimumData are to be obtained from the\n EnergySystemModel object) \n |br| * the default value is None\n :type esM: FINE EnergySystemModel\n\n :return: pipeline distances in the length unit specified in the esM object\n :rtype: pandas series\n \"\"\"\n utils.isString(componentName)\n utils.isEnergySystemModelInstance(esM)\n\n distances = esM.getComponent(componentName).distances.copy()\n indexMap = esM.getComponent(componentName)._mapC\n distances.index = [indexMap[ix] for ix in distances.index]\n\n return distances\n\n\ndef getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength):\n \"\"\" \n If a pipe is longer than maxPipeLength than it will be split into several pipes with equidistant length,\n i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1\n\n :param shapeFilePath: path to a shape file which connects the gas injection/ withdrawal nodes with each other. The rows of the\n file describe connections between the injection/ withdrawal nodes. The required geometry of these connections is a shapely\n LineString. 
Additionally, the file has two columns holding the names of the two injection/ withdrawal nodes (start and end\n point of the LineString).\n :type shapeFilePath: string\n\n :param regColumn1: name of the column which holds the name of the injection/ withdrawal node at the beginning of the line\n :type regColumn1: string\n\n :param regColumn2: name of the column which holds the name of the injection/ withdrawal node at the end of the line\n :type regColumn2: string\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar].\n It holds: dic_node_minPress[index] <= dic_node_maxPress[index].\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n :param minPipeLength: desired minimum length of a pipe in [m], note: not always possible to achieve.\n :type minPipeLength: positive number\n\n :param maxPipeLength: determines the maximal length of a pipe in [m].\n :type maxPipeLength: positive number\n\n :return: distances_new - pipeline distances in m\n :rtype: pandas series\n\n :return: dic_node_minPress_new - dictionary that contains for every node of the network its lower pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n\n :return: dic_node_maxPress_new - dictionary that contains for every node of the network its upper pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n\n :return: gdfNodes - GeoDataFrame with the nodes of the network and their names\n :rtype: geopandas GeoDataFrame\n\n :return: gdfEdges - GeoDataFrame with the edges of the network and the names of their start and end nodes\n :rtype: geopandas GeoDataFrame\n \"\"\"\n # type and value check\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n utils.isString(regColumn1), utils.isString(regColumn2)\n utils.isStrictlyPositiveNumber(maxPipeLength)\n utils.isStrictlyPositiveNumber(minPipeLength)\n\n # Read shape file with linestrings connecting the entry/ exit nodes of the gas\n gdf=gpd.read_file(shapeFilePath)\n if not (gdf.geometry.type == 'LineString').all():\n raise ValueError(\"Geometries of the shape file have to be LineStrings\")\n\n print('Number of edges before segmentation:', len(gdf))\n originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2])\n print('Number of nodes before segmentation:', len(originalNodesSet))\n\n # Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates\n coordNames, coords = [], []\n pMin, pMax = [], []\n lines = []\n\n # Break linestrings into linear pieces\n for i, row in gdf.iterrows():\n # Simplify linestring (to increase the minimum length of pipeline connections wherever possible)\n line = row.geometry.simplify(minPipeLength) \n lines.append(line)\n row.geometry = line\n\n # Get new nodes\n coords_ = [i for i in line.coords]\n coords.extend(coords_)\n\n coordNames_ = [row[regColumn1]]\n coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j)\n for j in range(len(coords_)-2)])\n coordNames_.append(row[regColumn2])\n coordNames.extend(coordNames_)\n\n # Get averaged lower and upper pressure levels \n 
pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) +\n dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])\n \n pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) +\n dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])\n\n gdf['geometry'] = lines\n\n # Create DataFrame of old and new nodes and drop duplicates \n dfNodes = pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T\n dfNodes = dfNodes.drop_duplicates(subset='lon_lat')\n dfNodes = dfNodes.drop_duplicates(subset='nodeName')\n\n # Obtain edges from shape file, assign names to them, delete duplicates\n nodesIn_nodesOut = []\n nodesIn = []\n nodesOut = []\n lineStrings = []\n\n for i, row in gdf.iterrows():\n coords_ = [i for i in row.geometry.coords]\n for j in range(len(coords_)-1):\n nodeIn = dfNodes.loc[dfNodes['lon_lat'] == coords_[j],'nodeName'].iloc[0]\n nodeOut = dfNodes.loc[dfNodes['lon_lat'] == coords_[j+1],'nodeName'].iloc[0]\n nodesIn.append(nodeIn), nodesOut.append(nodeOut)\n nodes = [nodeIn,nodeOut]\n nodes.sort()\n nodesIn_nodesOut.append('edge_' + nodes[0] + '_' + nodes[1])\n lineStrings.append(shp.geometry.LineString([coords_[j],coords_[j+1]]))\n \n dfEdges = pd.DataFrame([nodesIn, nodesOut, nodesIn_nodesOut, lineStrings],\n index=['nodeIn', 'nodeOut','edgeName','geometry']).T\n dfEdges = dfEdges.drop_duplicates(subset='edgeName')\n gdfEdges = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})\n\n print('Number of edges after 1. segmentation:', len(gdfEdges))\n print('Number of nodes after 1. segmentation:', len(dfNodes))\n\n # Add nodes when line distances are too long\n newNodes, newLines, newNodesName, newLinesName = [], [], [], []\n nodesIn, nodesOut, coords = [], [], []\n pMin, pMax = [], []\n\n for i, row in gdfEdges.iterrows():\n # If lines are two long, segment them\n if np.round(row['geometry'].length,2) > maxPipeLength:\n nbNewNodes = int(np.floor(row['geometry'].length/maxPipeLength))\n line = row.geometry \n newNodes_, newLines_, newNodesName_, newLinesName_ = [], [], [], []\n nodesIn_, nodesOut_, coords_ = [], [], []\n pMin_, pMax_ = [], []\n nodeStart, nodeEnd = line.interpolate(0), line.interpolate(line.length)\n nodeStartName = row['nodeIn']\n \n pMinIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMin'].iloc[0]\n pMinOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMin'].iloc[0]\n pMaxIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMax'].iloc[0]\n pMaxOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMax'].iloc[0]\n \n spacing = row['geometry'].length/(nbNewNodes+1)\n for j in range(1,nbNewNodes+1):\n newNode = line.interpolate(j*spacing)\n newNodes_.append(newNode)\n coords_.append((newNode.x, newNode.y))\n \n newNodeName = row['nodeIn'] + '_' + row['nodeOut'] + '_a_' + str(j)\n newNodesName_.append(newNodeName)\n \n newLine = shp.geometry.LineString([nodeStart,newNode])\n newLines_.append(newLine)\n newLinesName_.append('temp'), nodesIn_.append(nodeStartName), nodesOut_.append(newNodeName)\n\n pMin_.append((pMinIn*(nbNewNodes-j+1) + pMinOut*j)/(nbNewNodes+1)) \n pMax_.append((pMaxIn*(nbNewNodes-j+1) + pMaxOut*j)/(nbNewNodes+1))\n \n nodeStart, nodeStartName = newNode, newNodeName\n \n newLines_.append(shp.geometry.LineString([newNode,nodeEnd]))\n newLinesName_.append('temp')\n nodesIn_.append(newNodeName), nodesOut_.append(row['nodeOut'])\n \n newNodes.extend(newNodes_), newLines.extend(newLines_), 
newNodesName.extend(newNodesName_)\n newLinesName.extend(newLinesName_), pMin.extend(pMin_), pMax.extend(pMax_)\n nodesIn.extend(nodesIn_), nodesOut.extend(nodesOut_), coords.extend(coords_) \n\n if len(newNodes) > 0:\n dfNodes = dfNodes.append(pd.DataFrame([newNodesName, pMin, pMax, coords],\n index=['nodeName','pMin','pMax','lon_lat']).T)\n\n dfEdges = pd.DataFrame([nodesIn, nodesOut, newLinesName, newLines],\n index=['nodeIn', 'nodeOut','edgeName','geometry']).T\n gdfEdgesNew = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})\n gdfEdges = gdfEdges.append(gdfEdgesNew)\n gdfEdges = gdfEdges[gdfEdges.geometry.length.round(2) <= maxPipeLength]\n\n del gdfEdges['edgeName']\n\n renameDict = {name: 'auxNode' + str(i) for i, name in enumerate(dfNodes.nodeName.values)\n if name not in originalNodesSet}\n\n for node in originalNodesSet:\n renameDict.update({node:node})\n\n gdfEdges['nodeIn'] = gdfEdges.apply(lambda x: renameDict[x['nodeIn']], axis=1)\n gdfEdges['nodeOut'] = gdfEdges.apply(lambda x: renameDict[x['nodeOut']], axis=1)\n\n gdfEdges['distances'] = gdfEdges['geometry'].length\n\n print('Number of edges after 2. segmentation:', len(gdfEdges))\n\n dfNodes['nodeName'] = dfNodes.apply(lambda x: renameDict[x['nodeName']], axis=1)\n dfNodes['geometry'] = dfNodes.apply(lambda x: shp.geometry.Point(x['lon_lat']), axis=1)\n\n del dfNodes['lon_lat']\n\n gdfNodes = gpd.GeoDataFrame(dfNodes,crs=gdf.crs).to_crs({'init': 'epsg:3035'})\n print('Number of nodes after 2. segmentation:', len(gdfNodes))\n\n print('Minimum length [m]:', gdfEdges.distances.min(), 'Maximum length [m]:', gdfEdges.distances.max())\n\n distances_new = pd.Series(gdfEdges['distances'].values,\n index = [(n1, n2) for n1, n2 in zip(gdfEdges['nodeIn'],gdfEdges['nodeOut'])])\n \n dic_node_minPress_new = {n:pMin for n, pMin in zip(gdfNodes['nodeName'], gdfNodes['pMin'])}\n dic_node_maxPress_new = {n:pMax for n, pMax in zip(gdfNodes['nodeName'], gdfNodes['pMax'])}\n\n return distances_new, dic_node_minPress_new, dic_node_maxPress_new, gdfNodes, gdfEdges\n\n\n\ndef createNetwork(distances):\n \"\"\"\n Creates undirected network/graph from given distances; updates distances such that\n either (u,v) or (v,u) are contained\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :return: graph of the network corresponding to the distances\n :rtype: graph object of networkx\n\n :return: pipeline distances in the length unit specified in the esM object\n :rtype: pandas series\n \"\"\"\n # type and value check\n isPandasSeriesPositiveNumber(distances)\n for index in distances.index:\n if not isinstance(index, tuple):\n raise TypeError(\"Index of pandas series has to be a tuple\")\n\n # first check if distances are consistent, i.e. 
if (u,v) and (v,u) are in distances they have to have the same\n # length and we will delete one of them\n # tmp list for reversed edges that we will be delete\n tmp_edges = []\n for edge in distances.index:\n if (edge[1], edge[0]) in distances.index and (edge[1], edge[0]) not in tmp_edges:\n assert (distances[edge] == distances[(edge[1], edge[0])])\n tmp_edges.append(edge)\n # delete tmp_edges because reversed edges are already contained and we consider an undirected graph\n distances = distances.drop(tmp_edges)\n\n # get edges for graph\n edges = distances.index\n # create empty graph\n G = nx.Graph()\n # create graph from given edges and add length as edge attribute\n for edge in edges:\n G.add_edge(edge[0], edge[1], length=distances[edge])\n return G, distances\n\n\ndef createSteinerTree(graph, distances, inner_nodes):\n \"\"\"\n Computes a steiner tree with minimal sum of pipeline lengths;\n updates distances such that only arcs of the spanning tree are contained with corresponding length\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :return spanning tree with sum of lengths of pipelines is minimal\n :rtype: graph object of networkx\n \"\"\"\n from networkx.algorithms import approximation\n \n # type and value check\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n\n # compute spanning tree with minimal sum of pipeline lengths\n S = approximation.steiner_tree(graph, terminal_nodes=inner_nodes, weight='length')\n # TODO check why function fails when MST function is not called here\n S = nx.minimum_spanning_tree(S, weight='length')\n # delete edges that are in graph but not in the tree from the distance matrix\n edgesToDelete = []\n for edge in distances.index:\n # check if edge or its reversed edge are contained in the tree\n # you have to check both directions because we have an undirected graph\n if edge not in S.edges and (edge[1], edge[0]) not in S.edges:\n edgesToDelete.append(edge)\n distances = distances.drop(edgesToDelete)\n\n return S, distances\n\n\ndef _generateRobustScenarios(startNode_endNode, **kwargs):\n startNode = startNode_endNode[0]\n endNode = startNode_endNode[1]\n return startNode_endNode, computeSingleSpecialScenario(startNode=startNode, endNode=endNode, **kwargs)\n\n\ndef generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress,\n solver='glpk', threads=1, verbose=0):\n \"\"\"\n Compute for every node combination a special robust scenario according to Robinius et. al. (2019)\n and Labbé et. al. 
(2019)\n\n :param injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while\n injections are negative) for every time step and node; unit [kg/s]\n :type: pandas dataframe\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :param threads: number of threads used for parallelization\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n :return dictionary that contains for every node pair a dictionary containing all arc flows of the corresponding\n special scenario\n :rtype: dictionary key: (node1,node2), value: dictionary: key: arc, value: arc flow in [kg/s]\n\n :return list of entry node\n :rtype: list of strings\n\n :return list of exit node\n :rtype: list of strings\n \"\"\"\n # Type and value checks\n isPandasDataFrameNumber(injectionWithdrawalRates)\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n\n # get for every entry/exit node the minimal and maximal injection rate and save it in a\n # dictionary: key: node, value: min Rate; respectively max Rate in [kg/s]\n # we note that inner nodes a handled separately in the computation of the special scenario\n dic_nodes_MinCapacity = {}\n dic_nodes_MaxCapacity = {}\n # list of entry nodes and exit nodes; note node can be in both for example storages\n entries = []\n exits = []\n inners = []\n for node in list(injectionWithdrawalRates.columns.values):\n minRate = injectionWithdrawalRates[node].min()\n maxRate = injectionWithdrawalRates[node].max()\n assert (minRate <= maxRate)\n dic_nodes_MinCapacity[node] = minRate\n dic_nodes_MaxCapacity[node] = maxRate\n # if minRate is negative, then node is an entry; if maxRate is positive, then node is an exit\n if minRate < 0.0:\n entries.append(node)\n if maxRate > 0.0:\n exits.append(node)\n elif maxRate > 0:\n exits.append(node)\n else:\n inners.append(node)\n\n maxPressuresAreEqual = True if len(set(dic_node_maxPress.values())) == 1 else False\n\n p_exits = [dic_node_minPress[exit] for exit in exits]\n p_entries_inners = [dic_node_minPress[node] for node in entries]\n p_inners = [dic_node_minPress[node] for node in inners]\n p_entries_inners.extend(p_inners)\n minPressureExitsIsLarger = True if min(p_exits) >= max(p_entries_inners) else False\n\n # compute special scenario for each node combination; see Paper Robinius et. al.(2019); Labbé et. al. 
(2019)\n # save arc flows of special scenarios for each node combination;\n # dictionary: key: node pair, value: dictionary: key: arc, value: arc flow\n dic_nodePair_flows = {}\n\n if maxPressuresAreEqual and minPressureExitsIsLarger:\n if verbose == 0:\n print('Reduced robust scenario set can be generated' +\n ' (pMax is equal at all nodes & pMin at exits is >= at inner and entry nodes).')\n nodes = [(startNode, endNode) for startNode in entries for endNode in exits if startNode != endNode]\n else:\n nodes = [(startNode, endNode) for startNode in graph.nodes for endNode in graph.nodes if startNode != endNode]\n\n pool = Pool(threads)\n for i, values in enumerate(pool.imap(partial(_generateRobustScenarios, graph=graph, distances=distances,\n entries=entries, exits=exits, dic_nodes_MinCapacity=dic_nodes_MinCapacity,\n dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, solver=solver),\n nodes), 1):\n if verbose == 0:\n sys.stderr.write('\\rPercentage simulated: {:d}%'.format(int(i / len(nodes) * 100)))\n dic_nodePair_flows[values[0]] = values[1]\n pool.close()\n pool.join()\n\n return dic_nodePair_flows, entries, exits\n\n\ndef computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity,\n dic_nodes_MaxCapacity, specialScenario=True, solver='glpk'):\n \"\"\"\n Compute special robust scenario for given node combination according to Robinius et. al. (2019)\n and Labbé et. al. (2019)\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :param entries: list of entry nodes of the network\n :type entries: list of strings\n\n :param exits: list of exit nodes of the network\n :type exits: list of strings\n\n :param startNode: node of the network (starting node of the special scenario)\n :type startNode: string\n\n :param endNode: node of the network (end node of special scenario)\n :type endNode: string\n\n :param dic_nodes_MinCapacity: dictionary containing minimal capacity for each node\n :type dic_nodes_MinCapacity: dictionary: key: node of the network, value: float\n\n :param dic_nodes_MaxCapacity: dictionary containing maximal capacity for each node\n :type dic_nodes_MaxCapacity: dictionary: key: node of the network, value: float\n\n :param specialScenario: bool: True if we compute special robust scenario; False if we compute scenario for fixed\n demand vector, e.g., for scenario of a time step\n :type specialScenario: bool\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :return dictionary that contains for every arc the corresponding arc flows of the (special) scenario\n :rtype: dictionary key: arc, value: arc flow\n \"\"\"\n # Type and value check\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n isListOfStrings(entries)\n isListOfStrings(exits)\n utils.isString(startNode)\n utils.isString(endNode)\n if isinstance(dic_nodes_MinCapacity, dict) and isinstance(dic_nodes_MaxCapacity, dict):\n if not (dic_nodes_MinCapacity.keys() == dic_nodes_MaxCapacity.keys()):\n raise TypeError(\"Dictionaries for min and max capacity need same keys\")\n for node in dic_nodes_MinCapacity.keys():\n if not (isinstance(dic_nodes_MinCapacity[node], float) or isinstance(dic_nodes_MinCapacity[node], int)):\n raise TypeError(\"The input argument has to be an number\")\n if not 
(isinstance(dic_nodes_MaxCapacity[node], float) or isinstance(dic_nodes_MaxCapacity[node], int)):\n raise TypeError(\"The input argument has to be an number\")\n if dic_nodes_MaxCapacity[node] < dic_nodes_MinCapacity[node]:\n raise ValueError(\"minimal node capacity has to be equal or smaller than maximal node capacity\")\n else:\n raise TypeError(\"dic_nodes_MinCapacity and dic_nodes_MinCapacity have to be dictionaries\")\n isBool(specialScenario)\n\n # we build concrete Pyomo Model\n model = py.ConcreteModel()\n\n # Description model: we have a simple directed graph. We allow negative flows because a pipe can be used in both\n # directions by the flows\n model.Nodes = py.Set(initialize=graph.nodes)\n # important to use distances.keys() instead of graph.edges such that we do not have key errors later on because\n # the edges in graph are undirected and in distances.keys() directed\n model.Arcs = py.Set(initialize=distances.keys(), dimen=2)\n\n # create demand variables for every node;\n # if specialScenario is true, then we compute special scenario, i.e. entry/exit demand variables are bounded by\n # min(0,minimal_capacity) <= demandVariable <= max(0, maximal_capacity)\n # demand variables for inner nodes are set to zero\n # if specialScenario is false, the demand variable is just bounded by the minimal and maximal capacity\n if specialScenario:\n def demandCapacities(model, node):\n if node in entries or node in exits:\n return min(0, dic_nodes_MinCapacity[node]), max(0, dic_nodes_MaxCapacity[node])\n else:\n return 0, 0\n\n model.Demand = py.Var(model.Nodes, bounds=demandCapacities)\n else:\n # we do not compute special scenarios; we just compute flows for given, possibly fixed, demands\n def demandCapacities(model, node):\n return dic_nodes_MinCapacity[node], dic_nodes_MaxCapacity[node]\n\n model.Demand = py.Var(model.Nodes, bounds=demandCapacities)\n\n # create arc flow variables for every arc of the network\n model.Flow = py.Var(model.Arcs)\n\n # compute NodesOut, i.e., set of nodes that are connected to considered node by outgoing arc\n def nodes_out_init(model, node):\n retval = []\n for (i, j) in model.Arcs:\n if i == node:\n retval.append(j)\n return retval\n\n model.NodesOut = py.Set(model.Nodes, initialize=nodes_out_init)\n\n # compute NodesIn, i.e., set of nodes connected to considered node by ingoing arc\n def nodes_in_init(model, node):\n retval = []\n for (i, j) in model.Arcs:\n if j == node:\n retval.append(i)\n return retval\n\n model.NodesIn = py.Set(model.Nodes, initialize=nodes_in_init)\n\n # add flow balance constraints corresponding to the node demands\n def flow_balance_rule(model, node):\n return sum(model.Flow[i, node] for i in model.NodesIn[node]) \\\n - sum(model.Flow[node, j] for j in model.NodesOut[node]) \\\n == model.Demand[node]\n\n model.FlowBalance_cons = py.Constraint(model.Nodes, rule=flow_balance_rule)\n\n # compute unique flow-path P(startNode,endNode) from entry to exit; given by list of nodes of the path\n pathNodes = nx.shortest_path(graph, source=startNode, target=endNode)\n # non zero coefficients of objective function\n dic_arc_coef = {}\n # determine coefficients for objective function\n # if for an arc (u,v), u, respectively v, are not in pathNodes, then the coefficient is 0\n # if arc (u,v) of pathNodes satisfies P(startNode, u) subset P(startNode,v), then coefficient is 1, otherwise -1\n for index in range(0, len(pathNodes) - 1):\n # check which direction of the arc is contained in the graph\n if (pathNodes[index], pathNodes[index + 1]) in 
model.Arcs:\n dic_arc_coef[(pathNodes[index], pathNodes[index + 1])] = 1\n else:\n dic_arc_coef[(pathNodes[index + 1], pathNodes[index])] = -1\n\n # we set objective\n def obj_rule(model):\n return sum(dic_arc_coef[arc] * model.Flow[arc] for arc in dic_arc_coef.keys())\n\n model.Obj = py.Objective(rule=obj_rule, sense=py.maximize)\n\n # Create a solver\n opt = SolverFactory(solver)\n # Solve optimization model\n results = opt.solve(model)\n # status of solver\n status = results.solver.status\n # termination condition\n termCondition = results.solver.termination_condition\n\n # save the solution of the flows in a dictionary key: arcs, values: flow\n dic_scenario_flow = {}\n\n if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:\n utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +\n '. No output is generated.', 0, 0)\n elif termCondition == TerminationCondition.infeasibleOrUnbounded or \\\n termCondition == TerminationCondition.infeasible or \\\n termCondition == TerminationCondition.unbounded:\n utils.output('Optimization problem is ' + str(termCondition) +\n '. No output is generated.', 0, 0)\n else:\n # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown\n # status), show a warning message.\n if not termCondition == TerminationCondition.optimal:\n warnings.warn('Output is generated for a non-optimal solution.')\n\n # dic_arcScenario has key (v,w,scenario) and value flow will be needed for MIP\n for arc in model.Arcs:\n dic_scenario_flow[arc] = model.Flow[arc].value\n\n return dic_scenario_flow\n\n\ndef computeLargeMergedDiameters(dic_subSetDiam_costs, nDigits=6):\n \"\"\"\n Compute merged diameters, i.e. compute equivalent single diameter for two looped pipes.\n\n :param dic_subSetDiam_costs: dictionary containing diameters in [m] and costs in [Euro/m]\n :type: dictionary: key: diameter, value: costs\n\n :param nDigits: number of digits used in the round function\n |br| * the default value is 6\n :type nDigits: positive int\n\n :return dic_newDiam_costs: dictionary containing merged diameters in [m] and costs in [Euro/m]\n :rtype: dictionary: key: diameter, value: costs\n\n :return dic_newDiam_oldDiam: dictionary matching new diameters to old diameters\n :rtype: dictionary: key: new diameter, value: corresponding old diameter, which will be used in the looped pipe\n\n \"\"\"\n # Type and value check\n if isinstance(dic_subSetDiam_costs, dict):\n for diam in dic_subSetDiam_costs.keys():\n utils.isStrictlyPositiveNumber(diam)\n utils.isStrictlyPositiveNumber(dic_subSetDiam_costs[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n utils.isStrictlyPositiveInt(nDigits)\n\n dic_newDiam_costs = {}\n dic_newDiam_oldDiam = {}\n\n for diam in dic_subSetDiam_costs.keys():\n # compute new diameter in [m] and its costs in [Euro/m]\n # for Formula see (1) in Paper Reuß et. 
al.\n # since at current state we consider the diameter for a looped pipe the above is\n # equivalent to 2^(2/5) * diam and thus, we do not have to transform diam from [m] to [mm]\n newDiam = ((diam ** (5 / 2) + diam ** (5 / 2)) ** (2 / 5)).__round__(nDigits)\n # costs are two times costs of diam because newDiam represents two looped pipe with diameter diam\n newCosts = 2 * dic_subSetDiam_costs[diam]\n dic_newDiam_costs[newDiam] = newCosts\n dic_newDiam_oldDiam[newDiam] = diam\n\n return dic_newDiam_costs, dic_newDiam_oldDiam\n\n\ndef determinePressureDropCoef(dic_scenario_flows, distances, dic_node_minPress, dic_node_maxPress,\n diameters, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965, nDigits=6):\n \"\"\"\n Compute for each scenario, diameter, and each arc the corresponding pressure drop\n\n :param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all\n arc flows in [kg/s] of the corresponding (special) scenario\n :type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param diameters: list of diameters in [m]\n :type: list of strictly positive numbers\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float; optional\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float; optional\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float; optional\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float; optional\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float; optional\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float; optional\n\n :param nDigits: number of digits used in the round function\n |br| * the default value is 6\n :type nDigits: positive int; optional\n\n :return dictionary that contains for every scenario and diameter the corresponding pressure drops\n :rtype: dictionary key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop\n \"\"\"\n # check type and value\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n isPandasSeriesPositiveNumber(distances)\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n 
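# checkLowerUpperBoundsOfDicts below additionally ensures that\n    # dic_node_minPress[node] <= dic_node_maxPress[node] holds for every node.\n    # Hedged sketch of the expected input shapes (purely illustrative, assumed values):\n    #   dic_scenario_flows = {('N1', 'N2'): {('N1', 'N2'): 10.5}}  # arc flow in [kg/s] per scenario\n    #   dic_node_minPress  = {'N1': 60.0, 'N2': 50.0}              # [bar]\n    #   dic_node_maxPress  = {'N1': 100.0, 'N2': 100.0}            # [bar]\n    #   diameters          = [0.3, 0.4, 0.5]                       # [m]\n    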
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n if isinstance(diameters, list):\n for diam in diameters:\n utils.isPositiveNumber(diam)\n else:\n raise TypeError(\"Diameters has to be a list\")\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n utils.isStrictlyPositiveInt(nDigits)\n\n # compute for each diameter, scenario, and arc its pressure drop\n # save results in dic: key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop\n dic_pressureDropCoef = {}\n for diameter in diameters:\n for nodePair in dic_scenario_flows.keys():\n # initialize dictionary\n dic_pressureDropCoef[(diameter, nodePair)] = {}\n # compute cross section of considered pipe and diameter\n tmpvalue_A = 0.25 * np.pi * diameter ** 2\n for arc in dic_scenario_flows[nodePair].keys():\n # check if flow is unequal to zero\n if dic_scenario_flows[nodePair][arc] != 0.0:\n # Compute approximation of average pressure flow in pipe (u,v) by\n # if flow((u,v)) is positive then set p_min to lower pressure bound of v and p_max to\n # upper pressure bound u\n # if flow((u,v)) is negative then set p_min to lower pressure bound of u and p_max to\n # upper pressure bound v\n if dic_scenario_flows[nodePair][arc] > 0:\n p_min = dic_node_minPress[arc[1]]\n p_max = dic_node_maxPress[arc[0]]\n else:\n p_min = dic_node_minPress[arc[0]]\n p_max = dic_node_maxPress[arc[1]]\n # compute approximation of average pressure\n p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min))\n # approximation for density\n rho = 0.11922 * p_m ** 0.91192 - 0.17264\n # approximation of the realgasfactor\n Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050\n K_m = Z_m / Z_n\n # approximation of the dynamic viscosity\n eta = 1.04298 * 10 ** (-10) * p_m ** 1.53560 + 8.79987 * 10 ** (-6)\n nue = eta / rho\n # compute velocity\n tmpvalue_w = (abs(dic_scenario_flows[nodePair][arc]) / rho) / tmpvalue_A\n # compute reynolds number\n tmpvalue_Re = tmpvalue_w * (diameter / nue)\n tmpvalue_alpha = np.exp(-np.exp(6.75 - 0.0025 * tmpvalue_Re))\n tmpvalue_Lambda = (64 / tmpvalue_Re) * (1 - tmpvalue_alpha) + tmpvalue_alpha * (\n -2 * np.log10(2.7 * (np.log10(tmpvalue_Re) ** 1.2 / tmpvalue_Re) + ir / (3.71 * 1000 *\n diameter))) ** (-2)\n # note p_n is in [bar] instead of [PA], thus we divide tmpvalue_C by 10**5\n # explanation: we have p_i^2-p_j^2=C. 
If p_i is in [PA] and we want p_i in [bar] then this leads to\n # (p_i/10^5)^2-(p_j/10^5)^2=C/10^10\n # but we changed p_n in computation C from [PA] to [bar] hence we only divide C by 10^5\n tmpvalue_C_bar = tmpvalue_Lambda * 16 * rho_n * T_m * p_n * K_m / (np.pi ** 2 * T_n * 10 ** 5)\n # compute final pressure drop coefficient depending on the flow\n tmp_value_C_coef = (distances[arc] / rho_n ** 2) * \\\n (tmpvalue_C_bar * dic_scenario_flows[nodePair][arc] *\n abs(dic_scenario_flows[nodePair][arc]) / diameter ** 5)\n # save pressure drop for considered diameter, scenario, and arc\n dic_pressureDropCoef[(diameter, nodePair)][arc] = tmp_value_C_coef\n else:\n dic_pressureDropCoef[(diameter, nodePair)][arc] = 0\n\n return dic_pressureDropCoef\n\n\ndef determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureDropCoef, specialScenarioNames,\n dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True,\n solver='glpk', threads=4, verbose=0):\n \"\"\"\n Model of optimal pipeline sizing (diameter selection) w.r.t. to the given scenarios\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_pressureDropCoef: dictionary that contains for every scenario and diameter the\n corresponding pressure drops in [bar]\n :type dic_pressureDropCoef: dictionary: keys: scenarioName; value: dict: key: arc, value: pressure drop in [bar]\n\n :param specialScenarioNames: list of names of scenarios. In robust case tuples (startNode, endNode).\n :type specialScenarioNames: list of tuples in the robust case, otherwise list of time Steps\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param dic_diam_costs: dictionary that contains for every diameter in [m] its costs [Euro/m]\n :type dic_diam_costs: dictionary key: diameter, value: non-negative float\n\n :param robust: Bool that is true, if we optimize w.r.t. 
robust scenarios, otherwise False.\n :type robust: bool\n\n :return dictionary that contains for every arc the optimal diameter in [m]\n :rtype dictionary: key: arc, value: optimal diameter\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :param threads: number of threads used for optimization (if gurobi is used)\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n :return dictionary that contains for every scenario the corresponding pressure levels\n :rtype dictionary: key: scenarioName, value: dict: key: node, value: pressure level of node\n \"\"\"\n # type and value checks\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n if not isinstance(dic_pressureDropCoef, dict):\n raise TypeError(\"The input has to be a dictionary\")\n\n if isinstance(specialScenarioNames, list):\n if robust:\n for scenario in specialScenarioNames:\n isinstance(scenario, tuple)\n else:\n raise TypeError(\"The input argument has to be a list\")\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n if isinstance(dic_diam_costs, dict):\n for diam in dic_diam_costs.keys():\n utils.isStrictlyPositiveNumber(diam)\n utils.isStrictlyPositiveNumber(dic_diam_costs[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n if not isinstance(robust, bool):\n raise TypeError(\"The input has to be a bool\")\n utils.isString(solver)\n utils.isPositiveNumber(verbose)\n\n # set list of available diameters\n diameters = dic_diam_costs.keys()\n\n # build concrete pyomo model\n model = py.ConcreteModel()\n\n # sets for nodes, arcs, diameters, scenarios\n model.nodes = py.Set(initialize=graph.nodes)\n model.arcs = py.Set(initialize=list(distances.keys()), dimen=2)\n # diameters assuming that each pipe has the same diameter options\n model.diameters = py.Set(initialize=diameters)\n # if we have special scenarios, scenario names are tuples, otherwise not\n if robust:\n # set indices for each scenario by its nodePair = (startnode, endnode)\n model.scenarios = py.Set(initialize=specialScenarioNames, dimen=2)\n else:\n # set indices for each timeStep number\n model.scenarios = py.Set(initialize=specialScenarioNames, dimen=1)\n\n # create variables binaries x are the same for each scenario\n # pressure variables are different for each scenario\n model.x = py.Var(model.arcs, model.diameters, domain=py.Binary)\n if robust:\n def pressureBounds(model, node, startnode, endnode):\n return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2\n\n model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)\n else:\n def pressureBounds(model, node, timeStep):\n return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2\n\n model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)\n\n # objective: minimize the costs\n def obj_rule(model):\n return sum(\n sum(dic_diam_costs[diam] * distances[arc] * model.x[arc, diam] for diam in model.diameters)\n for arc in model.arcs)\n\n model.Obj = py.Objective(rule=obj_rule)\n\n # pressure drop for each cons and each scenario\n if robust:\n def pressure_drop(model, arc0, arc1, scenarioStart, scenarioEnd):\n return model.pi[arc1, (scenarioStart, scenarioEnd)] - model.pi[arc0, (scenarioStart, scenarioEnd)] == \\\n -sum(dic_pressureDropCoef[(diam, (scenarioStart, scenarioEnd))][(arc0, arc1)] *\n model.x[arc0, arc1, 
diam] for diam in model.diameters)\n\n model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_drop)\n else:\n def pressure_dropNotRobust(model, arc0, arc1, timeStep):\n return model.pi[arc1, timeStep] - model.pi[arc0, timeStep] == \\\n -sum(dic_pressureDropCoef[(diam, timeStep)][(arc0, arc1)] *\n model.x[arc0, arc1, diam] for diam in model.diameters)\n\n model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_dropNotRobust)\n\n # ensure that a single diameter per arc is chosen\n def selection_diameter(model, arc0, arc1):\n return sum(model.x[arc0, arc1, diam] for diam in model.diameters) == 1\n\n model.SelectionDiameter_cons = py.Constraint(model.arcs, rule=selection_diameter)\n\n # Create a solver\n\n opt = SolverFactory(solver)\n # Set the specified solver options\n # Solve optimization problem. The optimization solve time is stored and the solver information is printed.\n if (verbose == 2) & (solver == 'gurobi'):\n optimizationSpecs = ' LogToConsole=0'\n opt.set_options('Threads=' + str(threads) + optimizationSpecs)\n results = opt.solve(model, tee=True, keepfiles=False)\n else:\n results = opt.solve(model, tee=True, report_timing=True, keepfiles=False)\n\n # status of solver\n status = results.solver.status\n # termination condition\n termCondition = results.solver.termination_condition\n # write diameter solution to dictionary: key: arc, value: optimal diameter\n # write pressure solutions to dictionary; key: scenarioName, value: dict: key: node, value: pressure level in [bar]\n dic_arc_diam = {}\n dic_scen_node_press = {}\n\n if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:\n utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +\n '. No output is generated.', 0, 0)\n elif termCondition == TerminationCondition.infeasibleOrUnbounded or \\\n termCondition == TerminationCondition.infeasible or \\\n termCondition == TerminationCondition.unbounded:\n utils.output('Optimization problem is ' + str(termCondition) +\n '. 
No output is generated.', 0, 0)\n else:\n # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown\n # status), show a warning message.\n if not termCondition == TerminationCondition.optimal:\n warnings.warn('Output is generated for a non-optimal solution.')\n\n # initialize dict with empty dict\n for scenario in specialScenarioNames:\n dic_scen_node_press[scenario] = {}\n\n for v in model.component_objects(py.Var, active=True):\n varobject = getattr(model, str(v))\n for index in varobject:\n # round because sometimes we are nearly one\n if str(varobject) == 'x' and round(varobject[index].value) == 1:\n dic_arc_diam.update({(index[0], index[1]): index[2]})\n elif str(varobject) == 'pi':\n if robust:\n # need sqrt() because in model pressure is quadratic because of the transformation\n dic_scen_node_press[(index[1], index[2])].update({index[0]: np.sqrt(varobject[index].value)})\n else:\n # need sqrt() because in model pressure is quadratic because of the transformation\n dic_scen_node_press[(index[1])].update({index[0]: np.sqrt(varobject[index].value)})\n\n return dic_arc_diam, dic_scen_node_press\n\n\ndef _postprocessing(scenario, dic_scenario_flows, graph, **kwargs):\n dic_scen_PressLevel = {}\n dic_scen_MaxViolPress = math.inf\n # copy a list of nodes\n tmp_nodes = copy.deepcopy(list(graph.nodes))\n # we now set iteratively the pressure level of a single node to its upper pressure bound and then compute the\n # unique pressure levels until we find valid pressure levels or have tested all nodes\n while tmp_nodes:\n # we have not found valid pressure levels for this scenario\n # temporary pressure levels\n dic_tmp_pressure = {}\n for node in list(graph.nodes):\n dic_tmp_pressure[node] = None\n # choose the node which pressure level is fixed to the upper pressure bound\n current_node = tmp_nodes[0]\n validation, tmp_viol = computePressureAtNode(graph=graph, node=current_node, nodeUpperBound=current_node,\n dic_scenario_flows=dic_scenario_flows[scenario], dic_node_pressure=dic_tmp_pressure, **kwargs)\n # if validation true, then we have feasible pressure levels; empty list of nodes that have to be\n # considered\n if validation:\n tmp_nodes = []\n # we have feasible pressure level and save them\n dic_scen_PressLevel = dic_tmp_pressure\n dic_scen_MaxViolPress = tmp_viol\n else:\n # remove considered entry from list of nodes that will be considered for fixing the pressure level\n tmp_nodes.remove(tmp_nodes[0])\n # we update the maximal pressure level violation\n if tmp_viol < dic_scen_MaxViolPress:\n # save currently best pressure levels\n dic_scen_PressLevel = copy.deepcopy(dic_tmp_pressure)\n dic_scen_MaxViolPress = tmp_viol\n\n return scenario, dic_scen_PressLevel, dic_scen_MaxViolPress\n\n\ndef postprocessing(graph, distances, dic_arc_diam, dic_scenario_flows, dic_node_minPress, dic_node_maxPress,\n threads=1, verbose=0):\n \"\"\"\"\n Compute \"more\" accurate pressure levels for the considered scenarios in the network with optimal diameters\n Apply postprocessing of Master's thesis with adaption that we possibly consider every node for fixing its\n pressure level to the upper pressure bound.\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_arc_diam: dictionary containing for each arc the optimal 
diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all\n arc flows in [kg/s] of the corresponding (special) scenario\n :type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n :param threads: number of threads used for parallelization\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :return: dictionary that contains for every scenario the corresponding pressure levels in [bar]\n :rtype: dictionary key: scenarioName, value: dic: key: arc, value pressure level\n\n :return: dictionary that contains for every scenario the maximal pressure bound violation in [bar]\n :rtype: dictionary key: scenarioName, value: float = maximal pressure bound violation\n \"\"\"\n # Type and value check\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n\n # best found pressure levels for scenarios; dic key: scenario, value: dic: key: node, value: pressure level in [bar]\n dic_scen_PressLevel = {}\n # maximal violation of pressure bounds; zero if no violation exists; dic: key: scenario, value: pressure violation\n dic_scen_MaxViolPress = {}\n # we compute \"precise\" pressure levels for every scenarios\n\n pool = Pool(threads)\n scenarios = [scenario for scenario in dic_scenario_flows.keys()]\n\n for i, values in enumerate(pool.imap(partial(_postprocessing, validation=True, graph=graph, dic_arc_diam=dic_arc_diam,\n distances=distances, dic_node_minPress=dic_node_minPress, dic_node_maxPress=dic_node_maxPress, tmp_violation=0,\n dic_scenario_flows=dic_scenario_flows), scenarios), 1):\n if verbose == 0:\n sys.stderr.write('\\rPercentage simulated: {:d}%'.format(int(i / len(scenarios) * 100))) \n dic_scen_PressLevel[values[0]] = values[1]\n dic_scen_MaxViolPress[values[0]] = values[2]\n pool.close()\n pool.join()\n\n return dic_scen_PressLevel, dic_scen_MaxViolPress\n\n\ndef computePressureAtNode(validation, node, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows,\n dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure,\n ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965, nDigits=6):\n \"\"\"\"\n Compute pressure levels recursive for given scenario and node that is fixed to its upper pressure level\n\n :param validation: boolean that is False, if the computed pressure levels are infeasible\n :rtype validation: bool\n\n :param node: node of the network 
for which we currently consider for computing the pressure levels\n :type node: str\n\n :param nodeUpperBound: node which pressure level is fixed to the upper bound\n :type node: str\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]\n :type: dictionary: key: arc, value: arc flow\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param tmp_violation: violation of the current pressure bounds in [bar]\n :type tmp_violation: float\n\n :param dic_node_pressure: dictionary that contains node pressure levels in [bar]\n :type dic_node_pressure: dictionary key: node of the network, value: non-negative float\n\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n :param nDigits: number of digits used in the pandas round function. Is applied to the\n specified or determined injection and withdrawal rates.\n |br| * the default value is 6\n :type nDigits: positive int\n\n :return validation: boolean that is true, if the computed pressure levels are feasible\n :rtype: bool\n\n :return maximal violation of the pressure bounds w.r.t. 
the computed pressure levels in [bar]\n :rtype: float\n \"\"\"\n # Type and value check\n isBool(validation)\n utils.isString(node)\n utils.isString(nodeUpperBound)\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n utils.isPositiveNumber(tmp_violation)\n if not isinstance(dic_node_pressure, dict):\n raise TypeError(\"The Input has to a dictionary\")\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n utils.isStrictlyPositiveInt(nDigits)\n\n # if node is equal to nodeUpperBound, we fix its pressure level to the upper bound; base case in recursion\n if node == nodeUpperBound:\n dic_node_pressure[node] = dic_node_maxPress[node]\n # list of arcs\n arcs = list(distances.keys())\n # we now compute the neighbors of the considered node\n neighbors = graph.neighbors(node)\n # compute pressure levels for neighbor nodes\n for neighbor in neighbors:\n # check if pressure is already computed\n if dic_node_pressure[neighbor] is None:\n # check if (node,neighbor) or (neighbor,node) is in graph\n if (node, neighbor) in arcs:\n # check flow direction for arc (node,neighbor)\n if dic_scenario_flows[(node, neighbor)] >= 0.0:\n # we know pressure level of beginning node of arc; compute pressure level for end node of arc\n dic_node_pressure[neighbor] = computePressureEndnodeArc((node, neighbor), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam, distances,\n ir, rho_n, T_m, T_n, p_n, Z_n)\n else:\n # we know pressure level of endnode\n dic_node_pressure[neighbor] = computePressureStartnodeArc((node, neighbor), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam,\n distances,\n ir, rho_n, T_m, T_n, p_n, Z_n,\n tol=10 ** (- nDigits))\n else:\n # we know that arc (neighbor,node) is contained in the graph\n # check flow direction\n if dic_scenario_flows[(neighbor, node)] <= 0.0:\n # we know pressure of start node\n dic_node_pressure[neighbor] = computePressureEndnodeArc((neighbor, node), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam, distances,\n ir, rho_n, T_m, T_n, p_n, Z_n)\n else:\n # we know pressure level of end node\n dic_node_pressure[neighbor] = computePressureStartnodeArc((neighbor, node), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam,\n distances,\n ir, rho_n, T_m, T_n, p_n, Z_n,\n tol=10 ** (- nDigits))\n # check if new computed pressure level is feasible\n if dic_node_pressure[neighbor] == - math.inf:\n # pressure violation is really high\n tmp_violation = math.inf\n return False, tmp_violation\n # check if we violate pressure bounds for neighbor node\n if dic_node_pressure[neighbor] < dic_node_minPress[neighbor] \\\n or dic_node_pressure[neighbor] > dic_node_maxPress[neighbor]:\n # pressure level is not valid\n validation = False\n # update pressure bound violation\n if dic_node_pressure[neighbor] < 
dic_node_minPress[neighbor]:\n # update violation and violation node if it is bigger\n if tmp_violation is None or \\\n abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) > tmp_violation:\n tmp_violation = abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor])\n else:\n if tmp_violation is None or \\\n abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) > tmp_violation:\n tmp_violation = abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor])\n\n # compute value for neighbor of tmp\n validation, tmp_violation = computePressureAtNode(validation, neighbor, nodeUpperBound, graph, dic_arc_diam,\n distances,\n dic_scenario_flows, dic_node_minPress, dic_node_maxPress,\n tmp_violation, dic_node_pressure)\n\n return validation, tmp_violation\n\n\ndef computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2,\n rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965, tol=10 ** (-4)):\n \"\"\"\"\n For given arc and pressure level of endNode compute the pressure of the startNode by solving the corresponding\n equation system\n\n :param arc: arc of the network for which we know the pressure at the endNode, i.e. the node which receives gas\n :type arc: tuple\n\n :param pressureEndNode: pressure level of endNode\n :type pressureEndNode: non-negative float\n\n :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]; note arc flow of arc has to be\n positive\n :type: dictionary: key: arc, value: arc flow\n\n :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n :param tol: tolerance to which accuracy we solve the equation system\n |br| * the default value is 10^-4\n :type tol: non-negative float\n\n :return: pressure level of startNode in [bar]\n :rtype: float\n \"\"\"\n # Type and Value check\n if not isinstance(arc, tuple):\n raise TypeError(\"The input has to be a tuple\")\n utils.isStrictlyPositiveNumber(pressureEndNode)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n isPandasSeriesPositiveNumber(distances)\n utils.isStrictlyPositiveNumber(ir)\n 
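# Note on the computation further below (hedged sketch, no additional functionality):\n    # for a known end pressure p_e and arc flow q, the unknown start pressure p_s satisfies\n    #   p_s**2 - p_e**2 = C(p_s),\n    #   C(p_s) = Lambda * 16 * L * T_m * p_n * K_m / (pi**2 * T_n * rho_n * 1e5 * d**5) * q**2,\n    # where the friction factor Lambda and the compressibility ratio K_m themselves depend on\n    # p_s; this is why the root of f is found numerically with fsolve instead of in closed form.\n    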
utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n utils.isStrictlyPositiveNumber(tol)\n\n if dic_scenario_flows[arc] == 0.0:\n return pressureEndNode\n\n # define function of nonlinear equation system f(x) = pressure_start^2-pressure_end^2-C\n # because then root is our valid pressure level solution, because we know pressure_end\n\n def f(pressure_start):\n d = dic_arc_diam[arc]\n A = 0.25 * math.pi * d ** 2\n rho_in = 0.11922 * pressure_start ** 0.91192 - 0.17264\n V_in = abs(dic_scenario_flows[arc]) / rho_in\n w_in = V_in / A\n eta_in = 1.04298 * 10 ** (-10) * pressure_start ** 1.53560 + 8.79987 * 10 ** (-6)\n nue_in = eta_in / rho_in\n Re_in = w_in * (d / nue_in)\n alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))\n Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(\n (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +\n ir / (3.71 * 1000 * d))) ** (-2)\n C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)\n # note pressure_start is in bar\n p_m = pressure_start - C_tilde / 10 ** 5\n if p_m < 0.0:\n # pressure drop too large no valid pressure assignment possible\n return -math.inf\n Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050\n K_m = Z_m / Z_n\n # note flow direction is given by startnode endnode so we square the arcflow\n C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (\n math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * dic_scenario_flows[arc] ** 2\n return pressure_start ** 2 - pressureEndNode ** 2 - C\n\n # find root of f, start value pressure_end + 0.5(bar)\n # x = fsolve(f, pressureEndNode + 0.5)\n # pressureEndnode + guess for solution depending on flow; you can replace this guess by the approximation of the\n # pressure drop of the MIP to probably achieve better results\n x = fsolve(f, pressureEndNode + 0.5 * (dic_scenario_flows[arc] ** 2) / (dic_arc_diam[arc] ** 5))\n # check if tolerance is ok\n assert isinstance(tol, float)\n # check tolerance of first solution\n if f(x[0]) <= tol:\n # value is ok\n # because x is an array return first entry, we only have one solution for the nonlinear equation system\n return x[0]\n else:\n print('nonlinear equation system failed')\n # this warning means we could not solve the system, this could be the case if the pressure drop is too large\n # or when the start value for the nonlinear equation solver is too far away from the solution\n print(\"Nonlinear equation system in Postprocessing failed. Try another node which pressure level is\"\n \" set to the upper bound\")\n return -math.inf\n\n\ndef computePressureEndnodeArc(arc, pressureStartNode, dic_scenario_flows, dic_arc_diam, distances,\n ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965):\n \"\"\"\"\n For given arc and pressure level of startNode compute the pressure of the endNode\n\n :param arc: arc of the network for which we know the pressure at the endNode, i.e. 
the node which receives gas\n :type arc: tuple\n\n :param pressureStartNode: pressure level of endNode\n :type pressureStartNode: non-negative float\n\n :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]\n :type: dictionary: key: arc, value: arc flow\n\n :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n :return: pressure level of endNode in [bar]\n :rtype: float\n \"\"\"\n # Type and Value check\n if not isinstance(arc, tuple):\n raise TypeError(\"The input has to be a tuple\")\n utils.isStrictlyPositiveNumber(pressureStartNode)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n isPandasSeriesPositiveNumber(distances)\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n\n arcFlow = dic_scenario_flows[arc]\n if arcFlow != 0:\n d = dic_arc_diam[arc]\n A = 0.25 * math.pi * d ** 2\n rho_in = 0.11922 * pressureStartNode ** 0.91192 - 0.17264\n V_in = abs(arcFlow) / rho_in\n w_in = V_in / A\n eta_in = 1.04298 * 10 ** (-10) * pressureStartNode ** 1.53560 + 8.79987 * 10 ** (-6)\n nue_in = eta_in / rho_in\n Re_in = w_in * (d / nue_in)\n alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))\n Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(\n (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +\n ir / (3.71 * 1000 * d))) ** (-2)\n C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)\n # note pressure_start is in bar\n p_m = pressureStartNode - C_tilde / 10 ** 5\n if p_m < 0.0:\n # pressure drop too large no valid pressure assignment possible\n return -math.inf\n Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050\n K_m = Z_m / Z_n\n # note flow direction is given by startnode endnode so we square the arcflow\n C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (math.pi ** 2 * T_n * rho_n * 10 ** 5 *\n dic_arc_diam[arc] ** 5) * arcFlow ** 2\n else:\n # flow is zero therefore pressure 
drop is zero\n C = 0\n\n if pressureStartNode ** 2 - C >= 0:\n return math.sqrt(pressureStartNode ** 2 - C)\n else:\n # pressure drop is too big return negative value, which is a invalid pressure value\n return -math.inf\n\ndef _computeTimeStepFlows(index, injectionWithdrawalRates, graph, **kwargs):\n # compute flows corresponding to demand by fixing demand for every node to given value and then compute\n # flows by LP\n dic_nodes_MinCapacity = {}\n dic_nodes_MaxCapacity = {}\n activeNodes = injectionWithdrawalRates.columns\n\n for node in graph.nodes:\n if node in activeNodes:\n dic_nodes_MinCapacity[node] = injectionWithdrawalRates.at[index, node]\n dic_nodes_MaxCapacity[node] = injectionWithdrawalRates.at[index, node]\n else:\n dic_nodes_MinCapacity[node] = 0\n dic_nodes_MaxCapacity[node] = 0\n # compute flows\n return index, computeSingleSpecialScenario(dic_nodes_MinCapacity=dic_nodes_MinCapacity,\n dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, graph=graph, **kwargs)\n\n\ndef computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, threads=1, verbose=0, solver='glpk'):\n \"\"\"\"\n Compute for each timeStep and demands given by injectionWithdrawalRates the corresponding flow values\n\n :param: injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while\n injections are negative) in [kg^3/s]\n :type injectionWithdrawalRates: pandas DataFrame\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param entries: list of entry nodes of the network\n :type entries: list of str\n\n :param exits: list of exit nodes of the network\n :type exits: list of str\n\n :param threads: number of threads used for parallelization\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :return: dictionary that contains for every time step the corresponding flows in [kg/s]\n :rtype: dictionary key: timeStep, value: dict: key: arc, value: arc flow\n \"\"\"\n # Type and value check\n isPandasDataFrameNumber(injectionWithdrawalRates)\n isPandasSeriesPositiveNumber(distances)\n isNetworkxGraph(graph)\n isListOfStrings(entries)\n isListOfStrings(exits)\n\n # compute for every time step the corresponding flows; dict: key: timeStep, value: dict: key: arc, value: flow\n dic_timeStep_flows = {}\n # nodes with nonzero demand are given by columns of dataframe\n activeNodes = injectionWithdrawalRates.columns\n pool = Pool(threads)\n\n indexList = list(injectionWithdrawalRates.index)\n\n for i, values in enumerate(pool.imap(partial(_computeTimeStepFlows, graph=graph, distances=distances,\n entries=entries, exits=exits, startNode=activeNodes[0],\n endNode=activeNodes[1], specialScenario=False,\n injectionWithdrawalRates=injectionWithdrawalRates,\n solver=solver),\n indexList), 1):\n if verbose == 0:\n sys.stderr.write('\\rPercentage simulated: {:d}%'.format(int(i / len(indexList) * 100)))\n dic_timeStep_flows[values[0]] = values[1]\n pool.close()\n pool.join()\n\n return dic_timeStep_flows\n\n\ndef networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress):\n \"\"\"\n If a pipe is longer than maxPipeLength than it will be split into 
several pipes with equidistant length,\n i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1\n # TODO this function is only used for testing\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :param maxPipeLength: determines the maximal length of a pipe in [m].\n :type maxPipeLength: positive number\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :return: graph of the network corresponding to the distances\n :rtype: graph object of networkx\n\n :return: pipeline distances in the length unit specified in the esM object\n :rtype: pandas series\n\n :return: dic_node_minPress dictionary that contains for every node of the network its lower pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n\n :return dic_node_maxPress dictionary that contains for every node of the network its upper pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n \"\"\"\n # type and value check\n isPandasSeriesPositiveNumber(distances)\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n if maxPipeLength is not None:\n utils.isStrictlyPositiveNumber(maxPipeLength)\n\n # if maximal pipeline length is a positive number we apply the refinement\n if maxPipeLength is not None:\n # we have to check if pipes satisfy maximal pipeline length\n # list of new arcs that will be added\n newPipes = []\n # list of lengths of new added pipes\n newPipesLengths = []\n # list of split original pipes\n splitEdges = []\n for edge in distances.index:\n # get length of pipeline\n pipeLength = distances[edge]\n if pipeLength > maxPipeLength:\n # compute number of necessary artificial nodes\n nArtificialNodes = math.ceil(pipeLength / maxPipeLength) - 1\n # compute length of new pipelines\n newPipeLength = float(pipeLength / (math.ceil(pipeLength / maxPipeLength)))\n # lower and upper pressure bound for new nodes computed by average of nodes of original edge\n lowPress = (dic_node_minPress[edge[0]] + dic_node_minPress[edge[1]]) / 2\n maxPress = (dic_node_maxPress[edge[0]] + dic_node_maxPress[edge[1]]) / 2\n # add first new pipe and its length\n newPipes.append((edge[0], \"v\" + str(1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])))\n # add length of first new pipe\n newPipesLengths.append(newPipeLength)\n # add lower and upper bound for new artificial node\n dic_node_minPress[\"v\" + str(1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = lowPress\n dic_node_maxPress[\"v\" + str(1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = maxPress\n # add intermediate artificial pipes, its length, and lower/upper pressure bounds\n for index in range(1, nArtificialNodes):\n newPipes.append((\"v\" + str(index) + \"_\" + str(edge[0]) + \"_\" + str(edge[1]),\n \"v\" + str(index + 1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])))\n newPipesLengths.append(newPipeLength)\n dic_node_minPress[\"v\" + 
str(index + 1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = lowPress\n dic_node_maxPress[\"v\" + str(index + 1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = maxPress\n # add last new pipe and its length\n newPipes.append((\"v\" + str(nArtificialNodes) + \"_\" + str(edge[0]) + \"_\" + str(edge[1]),\n edge[1]))\n newPipesLengths.append(newPipeLength)\n # add edge to split edges\n splitEdges.append(edge)\n\n # Now delete edges that have been split\n distances = distances.drop(splitEdges)\n # Add new edges\n distances = distances.append(pd.Series(newPipesLengths, index=newPipes))\n\n # get edges for graph\n edges = distances.index\n # create empty graph\n G = nx.Graph()\n # create graph from given edges and add length as edge attribute\n for edge in edges:\n G.add_edge(edge[0], edge[1], length=distances[edge])\n\n return G, distances, dic_node_minPress, dic_node_maxPress\n\n\ndef determineDiscretePipelineDesign(robust, injectionWithdrawalRates, distances, dic_node_minPress, dic_node_maxPress,\n dic_diameter_costs=None, dic_candidateMergedDiam_costs=None,\n gdfEdges=None, regColumn1='nodeIn', regColumn2='nodeOut', solver='glpk',\n opexForDiameters=None, economicLifetime=30, interestRate=0.08, costUnit='€', ir=0.2,\n rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965,\n originalFluidFlows=None, nDigits=6, verbose=0, threads=1):\n \"\"\"\n We compute a robust (depending on parameter robust) optimal pipeline design,\n i.e. for a given network, we compute a minimal spanning tree w.r.t. its total length.\n Afterward, we compute our robust (special) scenarios, see Robinius et. al..\n Also we compute for every timeStep of injectionWithdrawalRates the corresponding flows.\n We compute merged diameters according to list candidatesMergedDiameter, i.e. we compute a equivalent single diameter\n for two parallel pipes with the same diameter\n If robust is True, then we compute the corresponding pressure drops for every diameter and robust scenario.\n If robust is False, then we compute for every timeStep the corresponding pressure drops for every diameter and\n timeStep.\n If robust is True, then we compute optimal diameters by a MIP for the robust scenarios.\n If robust is False, then we compute optimal diameters by a MIP for the timeStep scenarios. Not Robust Version!\n In a postprocessing step, we compute \"precise\" pressure levels for the robust scenarios and the timeStep scenarios.\n\n Note that if robust is False, then the network may be infeasible for robust scenarios\n which can occur in the network!\n\n :param robust: Bool that is true, we build a robust pipeline network, otherwise not\n :type robust: bool\n\n :param injectionWithdrawalRates: the argument is a pandas DataFrame with the index column\n denoting the timesteps and the index row denoting the name of the network's nodes.\n Injection are denoted with negative floats and withdrawal with positive floats\n in [kg/s]. Example:\n\n node1 node2 node3\n 0 -4 2 2\n 1 3 -1.5 -1.5\n ... ... ... ...\n 8759 0 -1 1.\n\n :type injectionWithdrawalRates: pandas DataFrame with floats\n\n :param distances: the parameter is a pandas Series with the indices being tuples of the\n network's nodes and the values being the lengths of the pipelines in [m]. 
Example:\n\n (node1, node2) 1000\n (node2, node3) 50000\n (node2, node1) 1000\n (node3, node2) 50000\n\n :type distances: pandas Series\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param dic_diameter_costs: dictionary that contains all diameters in [m] as keys and the values are the\n corresponding costs in [Euro/m]. Default Value is a preselection of diameters and its costs.\n if None, then we chose the following preselection of diameters and costs\n dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,\n 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,\n 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,\n 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,\n 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}\n :type dic_diameter_costs: dict with keys: diameters, values: cost for pipeline; optional\n\n :param dic_candidateMergedDiam_costs: dictionary that contains a set of diameters in [m] as keys and\n the values are the corresponding costs in [Euro/m]. This diameters are then used to compute a single equivalent\n diameter for two looped (parallel) pipes with the considered diameter.\n |br| * the default value is empty dictionary {}\n :type dic_candidateMergedDiam_costs: dict with keys: diameters, values: cost for pipeline; optional\n\n :param gdfEdges: GeoDataFrame with the edges of the network and the names of their start and end nodes.\n Required for geo-referenced result visualization. Should be obtained from the getRefinedShapeFile\n function.\n :type gdfEdges: GeoDataFrame or None: optional, default is None\n\n :param regColumn1: name of the column in gdfEdges which holds the name of the injection/ withdrawal node\n at the beginning of the line. Required if gdfEdges is specified.\n :type regColumn1: string, optional, default is 'nodeIn'\n\n :param regColumn2: name of the column in gdfEdges which holds the name of the injection/ withdrawal node\n at the end of the line. 
Required if gdfEdges is specified.\n :type regColumn2: string, optional, default is 'nodeOut'\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n # TODO @Juelich where to use\n param originalFluidFlows: string that specifies the considered fluid\n |br| * the default value is None\n :type originalFluidFlows: str; optional\n\n :param nDigits: number of digits used in the round function\n |br| * the default value is 6\n :type nDigits: positive int\n\n :param verbose: defines how verbose the console logging is:\\n\n - 0: general model logging, warnings and optimization solver logging are displayed.\n - 1: warnings are displayed.\n - 2: no general model logging or warnings are displayed, the optimization solver logging is set to a\n minimum.\\n\n Note: if required, the optimization solver logging can be separately enabled in the optimizationSpecs\n of the optimize function.\n |br| * the default value is 0\n :type verbose: integer (0, 1 or 2)\n\n\n :return: tuple (dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels,\n dic_timeStep_MaxViolPress, gdfEdges), with:\n - dic_arc_optimalDiameters dictionary\n - pressure levels of postprocessing of robust scenarios dic_scen_PressLevels\n - violation of pressure bounds of robust scenarios in optimized network determined by postprocessing\n - dic_scen_MaxViolPress: maximum pressure violation in robust scenarios\n - pressure levels of postprocessing of timeSteps dic_timeStep_PressLevels\n - violation of pressure bounds of timeStep scenarios in optimized network determined by postprocessing\n - dic_timeStep_MaxViolPress: maximum pressure violation in timestep scenarios\n - geopandas GeoDataFrame (information about diameters in 'diam' column and number of pipelines in\n 'nbPipes'); None if kwarg gdfEdges was specified as being Node\n :rtype: return types:\n - dic_arc_optimalDiameters: dictionary, key: arcs, values: (numberOfPipes, diameter) note usually numberOfPipes\n is 1, but if we have chosen a merged diameter, then we have two parallel pipes with the same diameter,\n i.e. 
numberOfPipes is 2.\n - dic_scen_PressLevels: dictionary, key: nodePair, value: dict: key: arc, value: pressure level in [bar]\n - dic_scen_MaxViolPress: dictionary, key: nodePair, value: dict: key: arc, value: non-negative number\n (zero means no pressure violation)\n - dic_timeStep_PressLevels: dictionary, key: timeStep, value: dict: key: arc, value: pressure level in [bar]\n - dic_timeStep_MaxViolPress: dictionary, key: nodePair, value: dict: key: arc, value: non-negative number\n (zero means no pressure violation)\n - gdfEdges: geopandas geodataframe; None if kwarg gdfEdges was specified as being Node\n \"\"\"\n # Do type and value check of input data:\n isBool(robust)\n isPandasDataFrameNumber(injectionWithdrawalRates)\n isPandasSeriesPositiveNumber(distances)\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n # extract diameters for the optimization\n if dic_diameter_costs is not None:\n if isinstance(dic_diameter_costs, dict):\n diameters = list(dic_diameter_costs.keys())\n if isinstance(diameters, list):\n for diam in diameters:\n utils.isStrictlyPositiveNumber(diam)\n else:\n raise TypeError(\"The input argument has to be a list\")\n isDictionaryPositiveNumber(dic_diameter_costs)\n if dic_candidateMergedDiam_costs is not None:\n if isinstance(dic_candidateMergedDiam_costs, dict):\n for diam in dic_candidateMergedDiam_costs.keys():\n utils.isStrictlyPositiveNumber(diam)\n utils.isPositiveNumber(dic_candidateMergedDiam_costs[diam])\n else:\n raise TypeError(\"The input argument has to be a list\")\n utils.isString(regColumn1), utils.isString(regColumn2)\n if gdfEdges is not None:\n if isinstance(gdfEdges, gpd.GeoDataFrame):\n if (not regColumn1 in gdfEdges.columns) | (not regColumn2 in gdfEdges.columns):\n raise ValueError(\"regColumn1 or regColumn2 not in columns of gdfEdges\")\n else:\n gdfEdges['nodes'] = gdfEdges.apply(lambda x: (x['nodeIn'], x['nodeOut']), axis=1)\n else:\n raise TypeError(\"gdfEdges has to be a geopandas GeoDataFrame.\")\n if opexForDiameters is not None:\n if isinstance(opexForDiameters, list):\n for opex in opexForDiameters:\n utils.isPositiveNumber(opex)\n else:\n raise TypeError(\"The input argument has to be a list\")\n utils.isPositiveNumber(interestRate)\n utils.isStrictlyPositiveNumber(economicLifetime)\n utils.isString(costUnit)\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n if originalFluidFlows is not None:\n utils.isString(originalFluidFlows)\n utils.isStrictlyPositiveInt(nDigits)\n\n if dic_diameter_costs is None:\n print(\"There are no diameters to choose in the optimization. 
Thus, we consider the diameters and costs:\")\n dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,\n 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,\n 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,\n 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,\n 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}\n print(dic_diameter_costs)\n\n # create graph with respect to distances\n utils.output('Creating graph with respect to given distances', verbose, 0)\n graph, distances = createNetwork(distances)\n # plot graph\n if verbose < 1:\n if gdfEdges is not None:\n gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]\n fig, ax = plt.subplots(figsize=(4,4))\n gdfEdges.plot(ax=ax, color='k'), ax.axis('off')\n else:\n utils.output(\"Original Network Graph:\", verbose, 0)\n nx.draw(graph, with_labels=True)\n plt.show()\n\n # Create a minimum spanning tree of the network with a reasonable logic\n utils.output('Creating a Steiner treee', verbose, 0)\n inner_nodes = list(injectionWithdrawalRates.columns)\n graph, distances = createSteinerTree(graph, distances, inner_nodes)\n\n utils.output(\"Steiner tree:\", verbose, 0)\n if verbose < 1:\n if gdfEdges is not None:\n gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]\n fig, ax = plt.subplots(figsize=(4,4))\n gdfEdges.plot(ax=ax, color='k'), ax.axis('off')\n else:\n nx.draw(graph, with_labels=True)\n plt.show()\n\n # Compute robust scenarios for spanning tree network\n utils.output(\"Compute robust scenario set for tree network (based on \" +\n str(len(graph.nodes)*len(graph.nodes)-len(graph.nodes)) +\n ' node combinations). Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_nodePair_flows, entries, exits = generateRobustScenarios(injectionWithdrawalRates, graph, distances,\n dic_node_minPress, dic_node_maxPress, solver=solver, threads=threads, verbose=verbose)\n utils.output(\"Number of robust scenarios: \" + str(len(dic_nodePair_flows.keys())) , verbose, 0) \n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n\n # Compute scenarios for timeSteps\n utils.output(\"Compute scenarios for each timestep. Number of timestep scenarios: \"\n + str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_timeStep_flows = computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits,\n solver=solver, threads=threads, verbose=verbose)\n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n\n # Compute equivalent single diameters for looped (parallel) pipes\n utils.output(\"Compute equivalent single diameters for looped (parallel) pipes\", verbose, 0)\n # dic_LoopedDiam_costs contains the new computed diameters and its costs\n dic_LoopedDiam_costs = None\n # dic_newDiam_oldDiam merges new and old diameters\n dic_newDiam_oldDiam = None\n if dic_candidateMergedDiam_costs is not None:\n dic_LoopedDiam_costs, dic_newDiam_oldDiam = computeLargeMergedDiameters(dic_candidateMergedDiam_costs)\n\n # merge all diameters to one dictionary for the optimization model\n dic_diameter_costs.update(dic_LoopedDiam_costs)\n\n # Compute pressure drops for each scenario and diameter and the compute optimal diameters\n # depending on robust, we do this w.r.t. 
robust scenarios or every timeStep\n # dictionary for the pressure coefficients\n dic_pressureCoef = {}\n # dictionary for the optimal diameters\n dic_arc_diam = {}\n if robust:\n # we compute the pressure drops for the robust scenarios\n utils.output(\"Pressure drop coefficients for diameters with respect to robust scenarios\", verbose, 0)\n dic_pressureCoef = determinePressureDropCoef(dic_nodePair_flows, distances, dic_node_minPress,\n dic_node_maxPress, list(dic_diameter_costs.keys()))\n specialScenarionames = list(dic_nodePair_flows.keys())\n\n # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the robust scenarios\n utils.output('Determining optimal robust pipeline design under the consideration of pressure ' +\n 'losses and robust scenarios', verbose, 0)\n # returns dict: key: arc, value: optimal diameter\n # returns dict: key: nodePair, value: dic: key: node, value: pressure level\n dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,\n specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, robust, verbose=verbose,\n solver=solver, threads=threads)\n else:\n # we compute pressure drops for every timeStep scenario. Not robust version!\n # we compute the pressure drops for the robust scenarios and optimize\n utils.output(\"Pressure drop coefficients for diameters with respect to robust scenarios\", verbose, 0)\n dic_pressureCoef = determinePressureDropCoef(dic_timeStep_flows, distances, dic_node_minPress,\n dic_node_maxPress, list(dic_diameter_costs.keys()))\n timeSteps = list(dic_timeStep_flows.keys())\n\n # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the timeStep scenarios\n utils.output('Determining optimal pipeline design under the consideration of pressure losses and every time step',\n verbose, 0)\n utils.output('This network design is necessarily robust!', verbose, 0)\n # returns dict: key: arc, value: optimal diameter\n # returns dict: key: timeStep, value: dic: key: node, value: pressure level\n dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,\n timeSteps, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, False, verbose=verbose,\n solver=solver, threads=threads)\n\n if not dic_arc_diam:\n utils.output(\"No feasible diameter selections exits\", verbose, 0)\n return None\n\n # Do postprocessing: Use a \"more\" accurate pressure model and apply Postprocessing of master's thesis:\n # first do postprocessing for special scenarios\n utils.output(\"Do postprocessing for robust (special) scenarios. Number of scenarios: \" + str(len(dic_nodePair_flows)) +\n '. Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_scen_PressLevels, dic_scen_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_nodePair_flows,\n dic_node_minPress, dic_node_maxPress,\n threads=threads, verbose=verbose)\n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n # print if some of these scenarios are not feasible for the \"more\" precise pressure model\n for scenario in dic_scen_MaxViolPress.keys():\n if dic_scen_MaxViolPress[scenario] > 0:\n utils.output(\"Robust Scenario \" + str(scenario) + \" violates pressure bounds by \" +\n str(dic_scen_MaxViolPress[scenario]), verbose, 0)\n\n # compute pressure levels for each time step\n utils.output(\"Do postprocessing for each timestep scenarios. 
Number of scenarios: \" +\n str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_timeStep_PressLevels, dic_timeStep_MaxViolPress = postprocessing(graph, distances, dic_arc_diam,\n dic_timeStep_flows, dic_node_minPress,\n dic_node_maxPress,\n threads=threads, verbose=verbose)\n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n for timeStep in dic_timeStep_MaxViolPress.keys():\n if dic_timeStep_MaxViolPress[timeStep] > 0:\n utils.output(\"Time Step \" + str(timeStep) + \" violates pressure bounds by \" +\n str(dic_timeStep_MaxViolPress[timeStep]), verbose, 0)\n\n # now determine final output, i.e. dictionary: key: arcs, values: (numberOfPipes, diameter)\n # note usually numberOfPipes is 1, but if we have chosen a merged diameter, then we have two parallel pipes with\n # the same diameter, i.e. numberOfPipes is 2.\n dic_arc_optimalDiameters = {}\n for arc in dic_arc_diam.keys():\n if dic_LoopedDiam_costs is not None:\n if dic_arc_diam[arc] in dic_LoopedDiam_costs.keys():\n dic_arc_optimalDiameters[arc] = (2, dic_newDiam_oldDiam[dic_arc_diam[arc]])\n else:\n dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])\n else:\n dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])\n\n if verbose < 1:\n if gdfEdges is not None:\n gdfEdges = gdfEdges[gdfEdges.nodes.isin(dic_arc_optimalDiameters)]\n gdfEdges['diam'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][1], axis=1)\n gdfEdges['nbPipes'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][0], axis=1)\n\n plotOptimizedNetwork(gdfEdges)\n\n else:\n # plot network with new diameters\n utils.output(\"Network with optimized diameters, looped pipes are indicated by two colored edges, \" +\n \"Thicker edge means larger diameter\", verbose, 0)\n finalG = nx.MultiGraph()\n\n for arc in dic_arc_optimalDiameters.keys():\n if dic_arc_optimalDiameters[arc][0] == 1:\n # we have a single not looped pipe\n finalG.add_edge(arc[0], arc[1], color='black', weight=5 * dic_arc_optimalDiameters[arc][1])\n else:\n # we have a looped pipe\n finalG.add_edge(arc[0], arc[1], color='r',\n weight=10 * dic_arc_optimalDiameters[arc][1])\n finalG.add_edge(arc[0], arc[1], color='b',\n weight=5 * dic_arc_optimalDiameters[arc][1])\n # pos = nx.circular_layout(finalG)\n\n edges = finalG.edges()\n\n colors = []\n weight = []\n\n for (u, v, attrib_dict) in list(finalG.edges.data()):\n colors.append(attrib_dict['color'])\n weight.append(attrib_dict['weight'])\n\n nx.draw(finalG, edges=edges, edge_color=colors, width=weight, with_labels=True)\n \n plt.show()\n\n # Add some output which somehow quantifies the difference between the original and the new\n # pipeline design (for this additional input argument are required)\n # TODO @ Juelich just compare original solution to solution dic_arc_optimalDiameters\n\n return dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, \\\n dic_timeStep_MaxViolPress, gdfEdges\n\n\ndef plotOptimizedNetwork(gdf_pipes, figsize=(4,4), nodesColumn='nodes', diamColumn='diam',\n nbPipesColumn='nbPipes', line_scaling=1, gdf_regions=None, pressureLevels=None, pMin=50, pMax=100,\n cmap='Spectral_r', cbxShift=0.32, cbyShift=0.08, cbWidth=0.4, fontsize=10, cbTitle='Pressure [bar]'):\n \"\"\"Plot optimized network, visualizing chosen pipe diameters and, if selected, pressure levels of\n a scenario.\n \n :param gdf_pipes: GeoDataFrame, containing information about the diameters, number of pipes 
and\n routes of the pipeline network \n :type gdf_pipes: geopandas GeoDataFrame\n\n :param figsize: figure size, defaults to (4,4)\n :type figsize: tuple, optional\n\n :param nodesColumn: name of the column in gdf_pipes containing a tuple (startNode, endNode) with the\n name of the nodes being strings, defaults to 'nodes'\n :type nodesColumn: str, optional\n\n :param diamColumn: name of the column in gdf_pipes containing the diameters of the pipelines in m,\n defaults to 'diam'\n :type diamColumn: str, optional\n\n :param nbPipesColumn: name of the column in gdf_pipes containing the number of parallel pipes along\n a connection (maximum parallel pipes: 2),\n defaults to 'nbPipes'\n :type nbPipesColumn: str, optional\n\n :param line_scaling: scaling factor for line width, defaults to 1\n :type line_scaling: int, optional\n\n :param gdf_regions: GeoDataFrame for background plotting, defaults to None\n :type gdf_regions: geopandas GeoDataFrame, optional\n\n :param pressureLevels: pressure levels at each node for one scenario/ timestep, defaults to None\n :type pressureLevels: dictionary or series with keys/ indices being the nodes of the network, optional\n\n :param pMin: minimum pressure of colorbar, defaults to 50\n :type pMin: int, optional\n\n :param pMax: maximum pressure of colorbar, defaults to 100\n :type pMax: int, optional\n\n :param cmap: colormap name, defaults to 'Spectral_r'\n :type cmap: str, optional\n\n :param cbxShift: colorbar x shift, defaults to 0.32\n :type cbxShift: float, optional\n\n :param cbyShift: colorbar y shift, defaults to 0.08\n :type cbyShift: float, optional\n\n :param cbWidth: colorbar width, defaults to 0.4\n :type cbWidth: float, optional\n\n :param fontsize: fontsize of legend and colorbar, defaults to 10\n :type fontsize: int, optional\n\n :param cbTitle: colorbar title, defaults to 'Pressure [bar]'\n :type cbTitle: str, optional\n\n :return: tuple (fig, ax)\n :rtype:\n - fig: matplotlib figure\n - ax: matplotlib axis\n \"\"\"\n\n fig, ax = plt.subplots(figsize=figsize)\n cmap = mpl.cm.get_cmap(cmap)\n\n if gdf_regions is not None:\n gdf_regions.plot(ax=ax, facecolor='lightgrey', edgecolor='lightgrey')\n diamMin = gdf_pipes[gdf_pipes[diamColumn] > 0][diamColumn].min()\n for i, row in gdf_pipes.iterrows():\n lw = row[diamColumn]/diamMin*line_scaling\n if pressureLevels is not None:\n p = (pressureLevels[row[nodesColumn][0]] + pressureLevels[row[nodesColumn][1]])/2\n color = cmap((p-pMin)/(pMax-pMin))\n else:\n color='k'\n if (row[nbPipesColumn] == 1):\n gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw, capstyle='round')\n else:\n gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw*3, capstyle='round')\n gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color='white', linewidth=lw)\n ax.axis('off') \n\n lines = []\n for diam in sorted(gdf_pipes[diamColumn].unique()):\n line = plt.Line2D(range(1), range(1), linewidth=diam/diamMin*line_scaling, color='k', marker='_',\n label=\"{:>1.5}\".format(str(diam)) + ' m')\n lines.append(line)\n\n leg = ax.legend(handles=lines, prop={'size': fontsize}, loc=6, bbox_to_anchor=(1,0.5), title='Diameters')\n leg.get_frame().set_edgecolor('white')\n\n\n if pressureLevels is not None:\n sm1 = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=pMin, vmax=pMax))\n sm1._A = []\n cax = fig.add_axes([cbxShift, cbyShift, cbWidth, 0.03])\n cb1 = fig.colorbar(sm1, cax=cax, pad=0.05, aspect=7, fraction=0.07, orientation='horizontal')\n cax.tick_params(labelsize=fontsize)\n 
cax.set_xlabel(cbTitle, size=fontsize)\n cb1.ax.xaxis.set_label_position('top') \n\n plt.show()\n\n return fig, ax\n"
] | [
[
"pandas.concat",
"scipy.optimize.fsolve",
"pandas.Series",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.Normalize",
"numpy.round",
"numpy.log10",
"numpy.floor",
"matplotlib.cm.get_cmap",
"numpy.exp",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
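A minimal, hedged sketch of the root-finding step that the sample above performs in computePressureStartnodeArc: the start-node pressure is taken as the root of f(p_start) = p_start^2 - p_end^2 - C, solved with scipy.optimize.fsolve (one of the APIs listed for this record). The numeric values pressure_end and C_FIXED are illustrative assumptions, and C is held constant here rather than recomputed from density, Reynolds number and friction factor as in the original.

from scipy.optimize import fsolve

pressure_end = 60.0   # [bar], assumed end-node pressure (illustrative value)
C_FIXED = 150.0       # [bar^2], hypothetical constant pressure-drop term

def residual(pressure_start):
    # the original recomputes C from the flow at pressure_start;
    # it is kept constant here for brevity
    return pressure_start ** 2 - pressure_end ** 2 - C_FIXED

# start value mirrors the original call: end pressure plus a small offset
pressure_start = fsolve(residual, pressure_end + 0.5)[0]
print(round(pressure_start, 4))              # sqrt(60**2 + 150) ~= 61.2372
assert abs(residual(pressure_start)) < 1e-6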
ydai94/tdqn | [
"83c66263cb47016414dbe47ad3b252bb9e681ca8"
] | [
"drrn/drrn.py"
] | [
"import pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom os.path import join as pjoin\nfrom memory import ReplayMemory, Transition, State\nfrom model import DRRN\nfrom util import *\nimport logger\nimport sentencepiece as spm\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass DRRN_Agent:\n def __init__(self, args):\n self.gamma = args.gamma\n self.batch_size = args.batch_size\n self.sp = spm.SentencePieceProcessor()\n self.sp.Load(args.spm_path)\n self.network = DRRN(len(self.sp), args.embedding_dim, args.hidden_dim).to(device)\n self.memory = ReplayMemory(args.memory_size)\n self.save_path = args.output_dir\n self.clip = args.clip\n self.optimizer = torch.optim.Adam(self.network.parameters(),\n lr=args.learning_rate)\n\n\n def observe(self, state, act, rew, next_state, next_acts, done):\n self.memory.push(state, act, rew, next_state, next_acts, done)\n\n\n def build_state(self, obs, infos):\n \"\"\" Returns a state representation built from various info sources. \"\"\"\n obs_ids = [self.sp.EncodeAsIds(o) for o in obs]\n look_ids = [self.sp.EncodeAsIds(info['look']) for info in infos]\n inv_ids = [self.sp.EncodeAsIds(info['inv']) for info in infos]\n return [State(ob, lk, inv) for ob, lk, inv in zip(obs_ids, look_ids, inv_ids)]\n\n\n def encode(self, obs_list):\n \"\"\" Encode a list of observations \"\"\"\n return [self.sp.EncodeAsIds(o) for o in obs_list]\n\n\n def act(self, states, poss_acts, sample=True):\n \"\"\" Returns a string action from poss_acts. \"\"\"\n idxs, values = self.network.act(states, poss_acts, sample)\n act_ids = [poss_acts[batch][idx] for batch, idx in enumerate(idxs)]\n return act_ids, idxs, values\n\n\n def update(self):\n if len(self.memory) < self.batch_size:\n return\n\n transitions = self.memory.sample(self.batch_size)\n batch = Transition(*zip(*transitions))\n\n # Compute Q(s', a') for all a'\n # TODO: Use a target network???\n next_qvals = self.network(batch.next_state, batch.next_acts)\n # Take the max over next q-values\n next_qvals = torch.tensor([vals.max() for vals in next_qvals], device=device)\n # Zero all the next_qvals that are done\n next_qvals = next_qvals * (1-torch.tensor(batch.done, dtype=torch.float, device=device))\n targets = torch.tensor(batch.reward, dtype=torch.float, device=device) + self.gamma * next_qvals\n\n # Next compute Q(s, a)\n # Nest each action in a list - so that it becomes the only admissible cmd\n nested_acts = tuple([[a] for a in batch.act])\n qvals = self.network(batch.state, nested_acts)\n # Combine the qvals: Maybe just do a greedy max for generality\n qvals = torch.cat(qvals)\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(qvals, targets.detach())\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.network.parameters(), self.clip)\n self.optimizer.step()\n return loss.item()\n\n\n def load(self):\n try:\n self.memory = pickle.load(open(pjoin(self.save_path, 'memory.pkl'), 'rb'))\n self.network = torch.load(pjoin(self.save_path, 'model.pt'))\n except Exception as e:\n print(\"Error saving model.\")\n logging.error(traceback.format_exc())\n\n\n def save(self):\n try:\n pickle.dump(self.memory, open(pjoin(self.save_path, 'memory.pkl'), 'wb'))\n torch.save(self.network, pjoin(self.save_path, 'model.pt'))\n except Exception as e:\n print(\"Error saving model.\")\n logging.error(traceback.format_exc())\n"
] | [
[
"torch.tensor",
"torch.cuda.is_available",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
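A hedged sketch of the temporal-difference target computed in DRRN_Agent.update() in the sample above: targets = reward + gamma * max_a' Q(s', a'), masked by the done flag, compared against Q(s, a) with a smooth-L1 (Huber) loss. The tensors below are toy values, not outputs of the agent or its replay memory.

import torch
import torch.nn.functional as F

gamma = 0.9
reward = torch.tensor([1.0, 0.0])
done = torch.tensor([0.0, 1.0])          # second transition is terminal
next_q_max = torch.tensor([2.0, 5.0])    # max over admissible next actions
q_selected = torch.tensor([2.5, 0.3], requires_grad=True)

targets = reward + gamma * next_q_max * (1.0 - done)   # -> [2.8, 0.0]
loss = F.smooth_l1_loss(q_selected, targets.detach())
loss.backward()                          # gradients reach only q_selected
print(loss.item(), q_selected.grad)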
dustalov/mnogoznal | [
"bacea1576d31e0d2ad5456159a57950899a116f6"
] | [
"mnogoznal/wsd.py"
] | [
"import abc\nimport csv\nfrom collections import namedtuple, defaultdict, OrderedDict, Counter\n\nimport numpy as np\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity as sim\nfrom sklearn.pipeline import Pipeline\n\nSTOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'}\n\nSynset = namedtuple('Synset', 'id synonyms hypernyms bag')\n\n\nclass Inventory(object):\n \"\"\"Sense inventory representation and loader.\"\"\"\n\n synsets = {}\n index = defaultdict(list)\n\n def __init__(self, inventory_path):\n \"\"\"\n During the construction, BaseWSD parses the given sense inventory file.\n \"\"\"\n\n def field_to_bag(field):\n return {word: freq for record in field.split(', ')\n for word, freq in (self.lexeme(record),)\n if record}\n\n with open(inventory_path, 'r', encoding='utf-8', newline='') as f:\n reader = csv.reader(f, delimiter='\\t', quoting=csv.QUOTE_NONE)\n\n for row in reader:\n id = row[0]\n\n synonyms = field_to_bag(row[2])\n hypernyms = field_to_bag(row[4])\n\n self.synsets[id] = Synset(\n id=id,\n synonyms=synonyms,\n hypernyms=hypernyms,\n bag={**synonyms, **hypernyms}\n )\n\n for word in self.synsets[id].bag:\n self.index[word].append(id)\n\n def lexeme(self, record):\n \"\"\"\n Parse the sense representations like 'word#sid:freq'.\n Actually, we do not care about the sid field because\n we use synset identifiers instead.\n \"\"\"\n if '#' in record:\n word, tail = record.split('#', 1)\n else:\n word, tail = record, None\n\n if tail:\n if ':' in tail:\n sid, tail = tail.split(':', 1)\n else:\n sid, tail = tail, None\n\n if tail:\n freq = float(tail)\n else:\n freq = 1\n\n return word, freq\n\n\nSpan = namedtuple('Span', 'token pos lemma index')\n\n\nclass BaseWSD(object):\n \"\"\"\n Base class for word sense disambiguation routines. Should not be used.\n Descendant classes must implement the disambiguate_word() method.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, inventory):\n self.inventory = inventory\n\n def lemmatize(self, sentence):\n \"\"\"\n This method transforms the given sentence into the dict that\n maps the word indices to their lemmas. It also excludes those\n words which part of speech is in the stop list.\n \"\"\"\n return {i: lemma for i, (_, lemma, pos) in enumerate(sentence)\n if pos not in STOP_POS}\n\n @abc.abstractmethod\n def disambiguate_word(self, sentence, index):\n \"\"\"\n Return word sense identifier for the given word in the sentence.\n \"\"\"\n if not sentence or not isinstance(sentence, list):\n raise ValueError('sentence should be a list')\n\n if not isinstance(index, int) or index < 0 or index >= len(sentence):\n raise ValueError('index should be in [0...%d]' % len(sentence))\n\n def disambiguate(self, sentence):\n \"\"\"\n Return word sense identifiers corresponding to the words\n in the given sentence.\n \"\"\"\n result = OrderedDict()\n\n for index, span in enumerate(sentence):\n # here, span is (token, pos, lemma), but we also need index\n span = Span(*span, index)\n\n result[span] = self.disambiguate_word(sentence, index)\n\n return result\n\n\nclass OneBaseline(BaseWSD):\n \"\"\"\n A simple baseline that treats every word as monosemeous. 
Not thread-safe.\n \"\"\"\n\n counter = {}\n\n def __init__(self):\n super().__init__(None)\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n word, _, _ = sentence[index]\n\n if word not in self.counter:\n self.counter[word] = len(self.counter)\n\n return str(self.counter[word])\n\n\nclass SingletonsBaseline(BaseWSD):\n \"\"\"\n A simple baseline that puts every instance into a different cluster. Not thread-safe.\n \"\"\"\n\n counter = 0\n\n def __init__(self):\n super().__init__(None)\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n self.counter += 1\n\n return str(self.counter)\n\n\nclass SparseWSD(BaseWSD):\n \"\"\"\n A simple sparse word sense disambiguation.\n \"\"\"\n\n sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])\n\n def __init__(self, inventory):\n super().__init__(inventory)\n self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()])\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n lemmas = self.lemmatize(sentence)\n\n if index not in lemmas:\n return\n\n svector = self.sparse.transform(Counter(lemmas.values())) # sentence vector\n\n def search(query):\n \"\"\"\n Map synset identifiers to the cosine similarity value.\n This function calls the function query(id) that retrieves\n the corresponding dict of words.\n \"\"\"\n return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0)\n for id in self.inventory.index[lemmas[index]]})\n\n candidates = search(lambda id: self.inventory.synsets[id].synonyms)\n\n # give the hypernyms a chance if nothing is found\n if not candidates:\n candidates = search(lambda id: self.inventory.synsets[id].bag)\n\n if not candidates:\n return\n\n for id, _ in candidates.most_common(1):\n return id\n\n\nclass DenseWSD(BaseWSD):\n \"\"\"\n A word sense disambiguation approach that is based on SenseGram.\n \"\"\"\n\n class densedict(dict):\n \"\"\"\n A handy dict that transforms a synset into its dense representation.\n \"\"\"\n\n def __init__(self, synsets, sensegram):\n self.synsets = synsets\n self.sensegram = sensegram\n\n def __missing__(self, id):\n value = self[id] = self.sensegram(self.synsets[id].bag.keys())\n return value\n\n def __init__(self, inventory, wv):\n super().__init__(inventory)\n self.wv = wv\n self.dense = self.densedict(self.inventory.synsets, self.sensegram)\n\n def sensegram(self, words):\n \"\"\"\n This is a simple implementation of SenseGram.\n It just averages the embeddings corresponding to the given words.\n \"\"\"\n vectors = self.words_vec(set(words))\n\n if not vectors:\n return\n\n return np.mean(np.vstack(tuple(vectors.values())), axis=0).reshape(1, -1)\n\n def words_vec(self, words, use_norm=False):\n \"\"\"\n Return a dict that maps the given words to their embeddings.\n \"\"\"\n if callable(getattr(self.wv, 'words_vec', None)):\n return self.wv.words_vec(words, use_norm)\n\n return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv}\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n lemmas = self.lemmatize(sentence)\n\n if index not in lemmas:\n return\n\n svector = self.sensegram(lemmas.values()) # sentence vector\n\n if svector is None:\n return\n\n # map synset identifiers to the cosine similarity value\n candidates = Counter({id: sim(svector, self.dense[id]).item(0)\n for id in self.inventory.index[lemmas[index]]\n if 
self.dense[id] is not None})\n\n if not candidates:\n return\n\n for id, _ in candidates.most_common(1):\n return id\n\n\nclass LeskWSD(BaseWSD):\n \"\"\"\n A word sense disambiguation approach that is based on Lesk method. \n \"\"\"\n\n def __init__(self, inventory):\n super().__init__(inventory)\n\n def disambiguate_word(self, sentence, word_index):\n super().disambiguate_word(sentence, word_index)\n\n lemmas = self.lemmatize(sentence)\n\n if word_index not in lemmas:\n return\n\n mentions_dict = dict()\n for synset_number in self.inventory.index[lemmas[word_index]]:\n mentions_dict[synset_number] = 0\n for context_word in lemmas.values():\n if context_word != lemmas[word_index]:\n if context_word in self.inventory.synsets[synset_number].synonyms:\n mentions_dict[synset_number] = mentions_dict[synset_number] + 1\n elif context_word in self.inventory.synsets[synset_number].hypernyms:\n mentions_dict[synset_number] = mentions_dict[synset_number] + \\\n self.inventory.synsets[synset_number].hypernyms[context_word]\n\n if len(mentions_dict) > 0:\n return max(mentions_dict, key=mentions_dict.get)\n else:\n return\n"
] | [
[
"sklearn.feature_extraction.DictVectorizer",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.metrics.pairwise.cosine_similarity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
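A hedged sketch of the SenseGram averaging used by DenseWSD in the sample above: a sense (or sentence) vector is the mean of its word embeddings, and the predicted sense is the one most cosine-similar to the sentence vector. The 3-dimensional embeddings and the two 'bank' senses below are invented for illustration and are not part of the inventory format.

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

embeddings = {
    'bank': np.array([0.9, 0.1, 0.0]),
    'river': np.array([0.8, 0.2, 0.1]),
    'money': np.array([0.0, 0.9, 0.3]),
    'deposit': np.array([0.1, 0.8, 0.4]),
}

def sensegram(words):
    # average the embeddings of the words in a bag, as DenseWSD.sensegram does
    return np.mean(np.vstack([embeddings[w] for w in words]), axis=0).reshape(1, -1)

sentence_vec = sensegram(['bank', 'river'])
senses = {'bank#1': ['bank', 'river'], 'bank#2': ['bank', 'money', 'deposit']}
scores = {sid: cosine_similarity(sentence_vec, sensegram(bag)).item(0)
          for sid, bag in senses.items()}
print(max(scores, key=scores.get))       # -> 'bank#1'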
HabibMrad/uncertainty | [
"1646a9b07d1179045dd0375149250d5ac7501004",
"1646a9b07d1179045dd0375149250d5ac7501004"
] | [
"project/systems/ecgresnet_ensemble_auxout.py",
"project/systems/ecgresnet_ssensemble.py"
] | [
"import sys\nimport os\nimport torch\nimport pandas as pd\nimport datetime\nfrom argparse import ArgumentParser\nimport numpy as np\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, random_split\nfrom icecream import ic\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics import functional as FM\n\nfrom network.ecgresnet_auxout import ECGResNet_AuxOut\nfrom utils.helpers import create_results_directory\nfrom utils.focalloss_weights import FocalLoss\n\nclass ECGResNetEnsemble_AuxOutSystem(pl.LightningModule):\n \"\"\"\n This class implements the ECGResNet with ensemble and auxiliary output in PyTorch Lightning.\n It can estimate the epistemic and aleatoric uncertainty of its predictions.\n \"\"\"\n\n def __init__(self, in_channels, n_grps, N, \n num_classes, dropout, first_width, stride, \n dilation, learning_rate, ensemble_size, n_logit_samples, loss_weights=None, \n **kwargs):\n \"\"\"\n Initializes the ECGResNetEnsemble_AuxOutSystem\n\n Args:\n in_channels: number of channels of input\n n_grps: number of ResNet groups\n N: number of blocks per groups\n num_classes: number of classes of the classification problem\n dropout: probability of an argument to get zeroed in the dropout layer\n first_width: width of the first input\n stride: tuple with stride value per block per group\n dilation: spacing between the kernel points of the convolutional layers\n learning_rate: the learning rate of the model\n ensemble_size: the number of models that make up the ensemble\n n_logit_samples: number of logit samples of the auxiliary output\n loss_weights: array of weights for the loss term\n \"\"\"\n\n super().__init__()\n self.save_hyperparameters()\n self.learning_rate = learning_rate\n self.num_classes = num_classes\n self.ensemble_size = ensemble_size\n self.n_logit_samples = n_logit_samples\n\n self.IDs = torch.empty(0).type(torch.LongTensor)\n self.predicted_labels = torch.empty(0).type(torch.LongTensor)\n self.correct_predictions = torch.empty(0).type(torch.BoolTensor)\n self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)\n self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor)\n self.total_uncertainty = torch.empty(0).type(torch.FloatTensor)\n\n self.models = []\n self.optimizers = []\n for i in range(self.ensemble_size):\n self.models.append(ECGResNet_AuxOut(in_channels, \n n_grps, N, num_classes, \n dropout, first_width, \n stride, dilation)\n )\n\n if loss_weights is not None:\n weights = torch.tensor(loss_weights, dtype = torch.float)\n else:\n weights = loss_weights\n\n self.loss = FocalLoss(gamma=1, weights = weights)\n\n def forward(self, x, model_idx):\n \"\"\"Performs a forward through a single ensemble member.\n\n Args:\n x (tensor): Input data.\n model_idx (int): Index of the ensemble member.\n\n Returns:\n output1: Output at the auxiliary point of the ensemble member\n output2: Output at the end of the ensemble member\n output2_log_var: The log variance of the ensemble_member\n \"\"\"\n\n output1, output2_mean, output2_log_var = self.models[model_idx](x)\n \n return output1, output2_mean, output2_log_var\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n \"\"\"Performs a training step for all ensemble members.\n\n Args:\n batch (dict): Output of the dataloader.\n batch_idx (int): Index no. 
of this batch.\n\n Returns:\n tensor: Total loss for this step.\n \"\"\"\n data, target = batch['waveform'], batch['label']\n\n losses = []\n for model_idx in range(self.ensemble_size):\n # Make prediction\n output1, output2_mean, output2_log_var = self(data, model_idx)\n\n # Sample from logits, returning a vector x_i\n x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)\n\n train_loss1 = self.loss(output1, target)\n train_loss2 = self.loss(x_i, target)\n total_train_loss = (0.3 * train_loss1) + train_loss2\n\n # Update weights for each model using individual optimizers\n self.manual_backward(total_train_loss, self.optimizers[model_idx])\n self.optimizers[model_idx].step()\n self.optimizers[model_idx].zero_grad()\n losses.append(total_train_loss.item())\n\n self.log('model_{}_train_loss'.format(model_idx), total_train_loss)\n\n average_train_loss = np.mean(losses)\n self.log('average_train_loss', average_train_loss)\n\n return {'loss': average_train_loss}\n\n def validation_step(self, batch, batch_idx):\n prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n\n data, target = batch['waveform'], batch['label']\n\n # Predict for each model\n for model_idx in range(self.ensemble_size):\n # Make prediction\n _, output2_mean, output2_log_var = self(data, model_idx)\n\n # Sample from logits, returning avector x_i\n x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)\n\n prediction_individual[:, model_idx] = x_i\n \n # Calculate mean over predictions from individual ensemble members\n prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)\n \n val_loss = self.loss(prediction_ensemble_mean, target)\n acc = FM.accuracy(prediction_ensemble_mean, target)\n\n # loss is tensor. 
The Checkpoint Callback is monitoring 'checkpoint_on'\n metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}\n self.log('val_acc', acc.item())\n self.log('val_loss', val_loss.item())\n return metrics\n\n def test_step(self, batch, batch_idx, save_to_csv=False):\n\n prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n data, target = batch['waveform'], batch['label']\n\n # Predict for each model\n for model_idx, model in enumerate(self.models):\n\n # Make prediction\n _, output2_mean, output2_log_var = self(data, model_idx)\n\n # Sample from logits, returning a vector x_i\n x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)\n\n prediction_individual[:, model_idx] = x_i.data\n\n # Take exponent to get the variance\n output2_var = output2_log_var.exp()\n aleatoric_var[:, model_idx] = output2_var.data\n \n # Calculate mean and variance over predictions from individual ensemble members\n prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)\n prediction_ensemble_var = torch.var(prediction_individual, dim=1)\n\n # Get the average aleatoric uncertainty for each prediction\n prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1)\n\n # Select the predicted labels\n predicted_labels = prediction_ensemble_mean.argmax(dim=1)\n\n test_loss = self.loss(prediction_ensemble_mean, target)\n acc = FM.accuracy(prediction_ensemble_mean, target)\n\n # Get the epistemic variance of the predicted labels by selecting the variance of\n # the labels with highest average Softmax value\n predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()\n\n # Get the aleatoric variance of the predicted labels by selecting the variance of\n # the labels with highest average Softmax value\n predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()\n\n total_var = predicted_labels_var + predicted_labels_aleatoric_var\n \n # Log and save metrics\n self.log('test_acc', acc.item())\n self.log('test_loss', test_loss.item())\n\n self.IDs = torch.cat((self.IDs, batch['id']), 0)\n self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)\n self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)\n self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)\n self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0)\n self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)\n\n return {'test_loss': test_loss.item(), 'test_acc': acc.item(), 'test_loss': test_loss.item()}\n\n def configure_optimizers(self):\n \"\"\"\n Initialize an optimizer for each model in the ensemble\n \"\"\"\n for i in range(self.ensemble_size):\n self.optimizers.append(optim.Adam(self.models[i].parameters(), lr=self.learning_rate))\n \n return self.optimizers\n\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--model_name', type=str, default='ensemble_none')\n parser.add_argument('--ensemble_size', type=int, default=5)\n parser.add_argument('--ensembling_method', type=bool, default=True)\n 
parser.add_argument('--n_logit_samples', type=int, default=100)\n return parser\n\n def save_results(self):\n \"\"\"\n Combine results into single dataframe and save to disk as .csv file\n \"\"\"\n results = pd.concat([\n pd.DataFrame(self.IDs.numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)\n",
"import sys\nimport os\nimport torch\nimport pandas as pd\nimport datetime\nfrom argparse import ArgumentParser\nimport numpy as np\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, random_split\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics import functional as FM\n\nfrom network.ecgresnet import ECGResNet\nfrom utils.helpers import create_results_directory, create_weights_directory\nfrom utils.focalloss_weights import FocalLoss\n\nclass ECGResNetSnapshotEnsembleSystem(pl.LightningModule):\n \"\"\"\n This class implements an snapshot ensemble of ECGResNets in PyTorch Lightning.\n It can estimate the epistemic uncertainty of its predictions.\n \"\"\"\n def __init__(self, in_channels, n_grps, N, \n num_classes, dropout, first_width, stride, \n dilation, learning_rate, ensemble_size, max_epochs, initial_lr, cyclical_learning_rate_type, loss_weights=None, \n **kwargs):\n \"\"\"\n Initializes the ECGResNetSnapshotEnsembleSystem\n\n Args:\n in_channels: number of channels of input\n n_grps: number of ResNet groups\n N: number of blocks per groups\n num_classes: number of classes of the classification problem\n dropout: probability of an argument to get zeroed in the dropout layer\n first_width: width of the first input\n stride: tuple with stride value per block per group\n dilation: spacing between the kernel points of the convolutional layers\n learning_rate: the learning rate of the model\n ensemble_size: the number of models that make up the ensemble\n max_epochs: total number of epochs to train for\n initial_lr: the initial learning rate at the start of a learning cycle\n cyclical_learning_rate_type: the type of learning rate cycling to apply\n loss_weights: array of weights for the loss term\n \"\"\"\n super().__init__()\n self.save_hyperparameters()\n self.learning_rate = learning_rate\n self.num_classes = num_classes\n self.ensemble_size = ensemble_size\n self.max_epochs = max_epochs\n self.initial_lr = initial_lr\n self.cyclical_learning_rate_type = cyclical_learning_rate_type\n\n self.IDs = torch.empty(0).type(torch.LongTensor)\n self.predicted_labels = torch.empty(0).type(torch.LongTensor)\n self.correct_predictions = torch.empty(0).type(torch.BoolTensor)\n self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)\n\n self.models = []\n self.optimizers = []\n \n # Initialize a single model during training\n self.models.append(ECGResNet(in_channels, \n n_grps, N, num_classes, \n dropout, first_width, \n stride, dilation))\n\n if loss_weights is not None:\n weights = torch.tensor(loss_weights, dtype = torch.float)\n else:\n weights = loss_weights\n\n self.loss = FocalLoss(gamma=1, weights = weights)\n create_weights_directory()\n\n def forward(self, x, model_idx):\n \"\"\"\n Performs a forward through a single ensemble member.\n\n Args:\n x (tensor): Input data.\n model_idx (int): Index of the ensemble member.\n\n Returns:\n Output1: Output at the auxiliary point of the ensemble member\n Output2: Output at the end of the ensemble member\n \"\"\"\n output1, output2 = self.models[model_idx](x)\n \n return output1, output2\n\n def on_train_epoch_start(self):\n \"\"\"\n Set the cyclical learning rate for the current epoch\n \"\"\"\n learning_rate = self.get_learning_rate(self.current_epoch, self.ensemble_size, self.max_epochs, self.initial_lr, self.cyclical_learning_rate_type)\n self.set_learning_rate(self.optimizers[0], learning_rate)\n self.log('Learning rate', learning_rate)\n print('Epoch: {} learning 
rate: {}'.format(self.current_epoch, learning_rate))\n\n def training_step(self, batch, batch_idx):\n \"\"\"Performs a training step for all ensemble members.\n\n Args:\n batch (dict): Output of the dataloader.\n batch_idx (int): Index no. of this batch.\n\n Returns:\n tensor: Total loss for this step.\n \"\"\"\n data, target = batch['waveform'], batch['label']\n i = 0\n\n output1, output2 = self(data, 0)\n train_loss1 = self.loss(output1.squeeze(), target)\n train_loss2 = self.loss(output2.squeeze(), target)\n\n # Calculate the loss for single model\n total_train_loss = (0.3 * train_loss1) + train_loss2\n\n # Update weights for single model using optimizer\n self.manual_backward(total_train_loss, self.optimizers[i])\n self.optimizers[i].step()\n self.optimizers[i].zero_grad()\n\n self.log('model_{}_train_loss'.format(i), total_train_loss)\n\n return {'loss': total_train_loss}\n\n def on_train_epoch_end(self, outputs):\n \"\"\"\n Save the model after each learning-rate cycle\n \"\"\"\n if self.cyclical_learning_rate_type == 'cosine-annealing':\n epochs_per_cycle = self.max_epochs/self.ensemble_size\n\n # Check if we are at the end of a learning-rate cycle\n if (self.current_epoch +1) % epochs_per_cycle == 0:\n model_idx = int((self.current_epoch+1 )/ epochs_per_cycle)\n\n # Save current model \n print('\\nSaving model: {}/{}'.format(model_idx, self.ensemble_size))\n torch.save({\n 'epoch': self.current_epoch,\n 'model_state_dict': self.models[0].state_dict(),\n 'optimizer_state_dict': self.optimizers[0].state_dict(),\n }, \"weights/ssensemble_model{}.pt\".format(model_idx))\n # self.trainer.save_checkpoint(\"weights/ssensemble_model{}.ckpt\".format(model_idx))\n\n def validation_step(self, batch, batch_idx):\n data, target = batch['waveform'], batch['label']\n\n # Always check the single model during validation\n i = 0\n\n # Predict using single model \n output1, output2 = self(data, i)\n\n val_loss = self.loss(output2, target)\n acc = FM.accuracy(output2, target)\n\n # Log metrics\n metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}\n self.log('val_acc', acc.item())\n self.log('val_loss', val_loss.item())\n return metrics\n\n def on_test_epoch_start(self):\n \"\"\"\n Initialize ensemble members from saved checkpoints\n \"\"\"\n print('\\nInitializing ensemble members from checkpoints')\n\n # Remove first model from self.models\n self.models.clear()\n \n for i in range(self.ensemble_size):\n\n # Initialize ensemble members from different epochs in the training stage of the original model\n self.models.append(ECGResNet(self.hparams.in_channels, \n self.hparams.n_grps, self.hparams.N, self.hparams.num_classes, \n self.hparams.dropout, self.hparams.first_width, \n self.hparams.stride, self.hparams.dilation))\n\n model_path = 'weights/ssensemble_model{}.pt'.format(i+1)\n checkpoint = torch.load(model_path)\n self.models[i].load_state_dict(checkpoint['model_state_dict'])\n self.models[i].eval()\n\n print('Model {}/{} initialized\\n'.format(i+1, self.ensemble_size))\n\n def test_step(self, batch, batch_idx, save_to_csv=False):\n prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n data, target = batch['waveform'], batch['label']\n\n # Predict for each model in the ensemble\n for i, model in enumerate(self.models):\n\n output1, output2 = self(data, i)\n prediction_individual[:, i] = output2.data\n \n # Calculate mean and variance over predictions from individual ensemble members\n prediction_ensemble_mean = 
F.softmax(torch.mean(prediction_individual, dim=1), dim=1)\n prediction_ensemble_var = torch.var(prediction_individual, dim=1)\n \n test_loss = self.loss(prediction_ensemble_mean, target)\n acc = FM.accuracy(prediction_ensemble_mean, target)\n\n # Get the variance of the predicted labels by selecting the variance of\n # the labels with highest average Softmax value\n predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()\n predicted_labels = prediction_ensemble_mean.argmax(dim=1)\n \n # Log and save metrics\n self.log('test_acc', acc.item())\n self.log('test_loss', test_loss.item())\n\n self.IDs = torch.cat((self.IDs, batch['id']), 0)\n self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)\n self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)\n self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)\n\n return {'test_loss': test_loss.item(), 'test_acc': acc.item(), 'test_loss': test_loss.item()}\n\n def configure_optimizers(self):\n \"\"\"\n Initialize the optimizer, during training only a single model is used\n \"\"\"\n\n model_idx = 0\n self.optimizers.append(optim.SGD(self.models[model_idx].parameters(), lr=self.initial_lr))\n \n return self.optimizers\n\n def get_learning_rate(self, epoch_idx, n_models, total_epochs, initial_lr, cyclical_learning_rate_type):\n \"\"\"\n Returns the learning rate for the current epoch.\n\n Args:\n epoch_idx: index of the current epoch\n n_models: total number of ensemble members\n total_epochs: total number of epochs to train for\n initial_lr: the initial learning rate at the start of a learning cycle\n cyclical_learning_rate_type: the type of learning rate cycling to apply\n \"\"\"\n if cyclical_learning_rate_type == 'cosine-annealing':\n \"\"\"\n Apply a cosine-annealing cyclical learning rate as proposed by\n Loshchilov et al. 
in: \"SGDR: Stochastic Gradient Descent with Warm Restarts\"\n \"\"\"\n epochs_per_cycle = total_epochs/n_models \n learning_rate = initial_lr * (np.cos(np.pi * (epoch_idx % epochs_per_cycle) / epochs_per_cycle) + 1) / 2\n return learning_rate\n else:\n return learning_rate\n \n def set_learning_rate(self, optimizer, learning_rate):\n \"\"\"\n Sets the learning rate for an optimizer\n\n Args:\n optimizer: optimizer to apply learning rate to\n learning_rate: learning rate to set\n \"\"\"\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--model_name', type=str, default='ssensemble_none')\n parser.add_argument('--ensemble_size', type=int, default=2)\n parser.add_argument('--ensembling_method', type=bool, default=True)\n parser.add_argument('--initial_lr', type=float, default=0.1)\n parser.add_argument('--cyclical_learning_rate_type', type=str, default='cosine-annealing', choices=['cosine-annealing', 'none'])\n return parser\n\n # Combine results into single dataframe and save to disk\n def save_results(self):\n \"\"\"\n Combine results into single dataframe and save to disk as .csv file\n \"\"\"\n results = pd.concat([\n pd.DataFrame(self.IDs.numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']), \n ], axis=1)\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)\n"
] | [
[
"torch.mean",
"torch.empty",
"torch.cat",
"torch.tensor",
"numpy.mean",
"torch.var"
],
[
"torch.mean",
"torch.empty",
"torch.load",
"torch.cat",
"numpy.cos",
"torch.tensor",
"torch.var"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
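The ECGResNetSnapshotEnsembleSystem record above trains one network under a cyclical learning rate, saves a snapshot at the end of each cosine-annealing cycle, and at test time averages the snapshots' softmax outputs while taking their variance as epistemic uncertainty. A minimal, self-contained sketch of that learning-rate schedule, lifted from get_learning_rate (the epoch and snapshot counts below are illustrative assumptions, not values from the record):

import numpy as np

def snapshot_cosine_lr(epoch_idx, n_models, total_epochs, initial_lr):
    # Cosine-annealing cyclical schedule (Loshchilov et al., SGDR):
    # the rate restarts at initial_lr at the start of every cycle and
    # decays towards zero at the end of the cycle, where a snapshot is saved.
    epochs_per_cycle = total_epochs / n_models
    return initial_lr * (np.cos(np.pi * (epoch_idx % epochs_per_cycle) / epochs_per_cycle) + 1) / 2

# Assumed example: 2 snapshots over 10 epochs, initial learning rate 0.1.
for epoch in range(10):
    print(epoch, round(snapshot_cosine_lr(epoch, n_models=2, total_epochs=10, initial_lr=0.1), 5))

With these assumed numbers the rate falls from 0.1 towards zero over epochs 0-4, jumps back to 0.1 at epoch 5, and the model saved at each cycle boundary becomes one ensemble member.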
jacobhjkim/ray | [
"936cb5929c455102d5638ff5d59c80c4ae94770f"
] | [
"python/ray/tune/tests/test_function_api.py"
] | [
"import json\nimport os\nimport sys\nimport shutil\nimport tempfile\nimport unittest\n\nimport ray\nimport ray.cloudpickle as cloudpickle\nfrom ray.rllib import _register_all\n\nfrom ray import tune\nfrom ray.tune.logger import NoopLogger\nfrom ray.tune.utils.trainable import TrainableUtil\nfrom ray.tune.function_runner import with_parameters, wrap_function, \\\n FuncCheckpointUtil\nfrom ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION\n\n\ndef creator_generator(logdir):\n def logger_creator(config):\n return NoopLogger(config, logdir)\n\n return logger_creator\n\n\nclass FuncCheckpointUtilTest(unittest.TestCase):\n def setUp(self):\n self.logdir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.logdir)\n\n def testEmptyCheckpoint(self):\n checkpoint_dir = FuncCheckpointUtil.mk_null_checkpoint_dir(self.logdir)\n assert FuncCheckpointUtil.is_null_checkpoint(checkpoint_dir)\n\n def testTempCheckpointDir(self):\n checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)\n assert FuncCheckpointUtil.is_temp_checkpoint_dir(checkpoint_dir)\n\n def testConvertTempToPermanent(self):\n checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)\n new_checkpoint_dir = FuncCheckpointUtil.create_perm_checkpoint(\n checkpoint_dir, self.logdir, step=4)\n assert new_checkpoint_dir == TrainableUtil.find_checkpoint_dir(\n new_checkpoint_dir)\n assert os.path.exists(new_checkpoint_dir)\n assert not FuncCheckpointUtil.is_temp_checkpoint_dir(\n new_checkpoint_dir)\n\n tmp_checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(\n self.logdir)\n assert tmp_checkpoint_dir != new_checkpoint_dir\n\n\nclass FunctionCheckpointingTest(unittest.TestCase):\n def setUp(self):\n self.logdir = tempfile.mkdtemp()\n self.logger_creator = creator_generator(self.logdir)\n\n def tearDown(self):\n shutil.rmtree(self.logdir)\n\n def testCheckpointReuse(self):\n \"\"\"Test that repeated save/restore never reuses same checkpoint dir.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n count = sum(\"checkpoint-\" in path\n for path in os.listdir(checkpoint_dir))\n assert count == 1, os.listdir(checkpoint_dir)\n\n for step in range(20):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"a\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore(checkpoint)\n for i in range(2):\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n assert result[TRAINING_ITERATION] == 10\n\n def testCheckpointReuseObject(self):\n \"\"\"Test that repeated save/restore never reuses same checkpoint dir.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n count = sum(\"checkpoint-\" in path\n for path in os.listdir(checkpoint_dir))\n assert count == 1, os.listdir(checkpoint_dir)\n\n for step in range(20):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"a\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore_from_object(checkpoint)\n for i in range(2):\n result = new_trainable.train()\n checkpoint = 
new_trainable.save_to_object()\n new_trainable.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 10)\n\n def testCheckpointReuseObjectWithoutTraining(self):\n \"\"\"Test that repeated save/restore never reuses same checkpoint dir.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n count = sum(\"checkpoint-\" in path\n for path in os.listdir(checkpoint_dir))\n assert count == 1, os.listdir(checkpoint_dir)\n\n for step in range(20):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"a\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n new_trainable = wrapped(logger_creator=self.logger_creator)\n for i in range(2):\n result = new_trainable.train()\n checkpoint = new_trainable.save_to_object()\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore_from_object(checkpoint)\n new_trainable2.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore_from_object(checkpoint)\n result = new_trainable2.train()\n new_trainable2.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 3)\n\n def testReuseNullCheckpoint(self):\n def train(config, checkpoint_dir=None):\n assert not checkpoint_dir\n for step in range(10):\n tune.report(test=step)\n\n # Create checkpoint\n wrapped = wrap_function(train)\n checkpoint = None\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n\n # Use the checkpoint a couple of times\n for i in range(3):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.restore(checkpoint)\n new_trainable.stop()\n\n # Make sure the result is still good\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.restore(checkpoint)\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 1)\n\n def testMultipleNullCheckpoints(self):\n def train(config, checkpoint_dir=None):\n assert not checkpoint_dir\n for step in range(10):\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore(checkpoint)\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 1)\n\n def testMultipleNullMemoryCheckpoints(self):\n def train(config, checkpoint_dir=None):\n assert not checkpoint_dir\n for step in range(10):\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore_from_object(checkpoint)\n result = new_trainable.train()\n checkpoint = new_trainable.save_to_object()\n new_trainable.stop()\n assert result[TRAINING_ITERATION] == 1\n\n def testFunctionNoCheckpointing(self):\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n assert os.path.exists(checkpoint_dir)\n for step in range(10):\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n\n new_trainable = wrapped(logger_creator=self.logger_creator)\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n 
new_trainable2.restore(checkpoint)\n result = new_trainable2.train()\n self.assertEquals(result[TRAINING_ITERATION], 1)\n checkpoint = new_trainable2.save()\n new_trainable2.stop()\n\n def testFunctionRecurringSave(self):\n \"\"\"This tests that save and restore are commutative.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n assert os.path.exists(checkpoint_dir)\n for step in range(10):\n if step % 3 == 0:\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n with open(path, \"w\") as f:\n f.write(json.dumps({\"step\": step}))\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.train()\n checkpoint_obj = new_trainable.save_to_object()\n new_trainable.restore_from_object(checkpoint_obj)\n checkpoint = new_trainable.save()\n\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore(checkpoint)\n new_trainable2.train()\n new_trainable2.stop()\n\n def testFunctionImmediateSave(self):\n \"\"\"This tests that save and restore are commutative.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n assert os.path.exists(checkpoint_dir)\n for step in range(10):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n print(checkpoint_dir)\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"w\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.train()\n new_trainable.train()\n checkpoint_obj = new_trainable.save_to_object()\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore_from_object(checkpoint_obj)\n checkpoint_obj = new_trainable2.save_to_object()\n new_trainable2.train()\n result = new_trainable2.train()\n assert sum(\"tmp\" in path for path in os.listdir(self.logdir)) == 1\n new_trainable2.stop()\n assert sum(\"tmp\" in path for path in os.listdir(self.logdir)) == 0\n assert result[TRAINING_ITERATION] == 4\n\n\nclass FunctionApiTest(unittest.TestCase):\n def setUp(self):\n ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024)\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def testCheckpointError(self):\n def train(config, checkpoint_dir=False):\n pass\n\n with self.assertRaises(ValueError):\n tune.run(train, checkpoint_freq=1)\n with self.assertRaises(ValueError):\n tune.run(train, checkpoint_at_end=True)\n\n def testCheckpointFunctionAtEnd(self):\n def train(config, checkpoint_dir=False):\n for i in range(10):\n tune.report(test=i)\n with tune.checkpoint_dir(step=10) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"hello\")\n\n [trial] = tune.run(train).trials\n assert os.path.exists(os.path.join(trial.checkpoint.value, \"ckpt.log\"))\n\n def testCheckpointFunctionAtEndContext(self):\n def train(config, checkpoint_dir=False):\n for i in range(10):\n tune.report(test=i)\n with tune.checkpoint_dir(step=10) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"hello\")\n\n [trial] = tune.run(train).trials\n assert os.path.exists(os.path.join(trial.checkpoint.value, \"ckpt.log\"))\n\n def 
testVariousCheckpointFunctionAtEnd(self):\n def train(config, checkpoint_dir=False):\n for i in range(10):\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"hello\")\n tune.report(test=i)\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log2\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"goodbye\")\n\n [trial] = tune.run(train, keep_checkpoints_num=3).trials\n assert os.path.exists(\n os.path.join(trial.checkpoint.value, \"ckpt.log2\"))\n\n def testReuseCheckpoint(self):\n def train(config, checkpoint_dir=None):\n itr = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"ckpt.log\"), \"r\") as f:\n itr = int(f.read()) + 1\n\n for i in range(itr, config[\"max_iter\"]):\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(str(i))\n tune.report(test=i, training_iteration=i)\n\n [trial] = tune.run(\n train,\n config={\n \"max_iter\": 5\n },\n ).trials\n last_ckpt = trial.checkpoint.value\n assert os.path.exists(os.path.join(trial.checkpoint.value, \"ckpt.log\"))\n analysis = tune.run(train, config={\"max_iter\": 10}, restore=last_ckpt)\n trial_dfs = list(analysis.trial_dataframes.values())\n assert len(trial_dfs[0][\"training_iteration\"]) == 5\n\n def testRetry(self):\n def train(config, checkpoint_dir=None):\n restored = bool(checkpoint_dir)\n itr = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"ckpt.log\"), \"r\") as f:\n itr = int(f.read()) + 1\n\n for i in range(itr, 10):\n if i == 5 and not restored:\n raise Exception(\"try to fail me\")\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(str(i))\n tune.report(test=i, training_iteration=i)\n\n analysis = tune.run(train, max_failures=3)\n last_ckpt = analysis.trials[0].checkpoint.value\n assert os.path.exists(os.path.join(last_ckpt, \"ckpt.log\"))\n trial_dfs = list(analysis.trial_dataframes.values())\n assert len(trial_dfs[0][\"training_iteration\"]) == 10\n\n def testEnabled(self):\n def train(config, checkpoint_dir=None):\n is_active = tune.is_session_enabled()\n if is_active:\n tune.report(active=is_active)\n return is_active\n\n assert train({}) is False\n analysis = tune.run(train)\n t = analysis.trials[0]\n assert t.last_result[\"active\"]\n\n def testBlankCheckpoint(self):\n def train(config, checkpoint_dir=None):\n restored = bool(checkpoint_dir)\n itr = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"ckpt.log\"), \"r\") as f:\n itr = int(f.read()) + 1\n\n for i in range(itr, 10):\n if i == 5 and not restored:\n raise Exception(\"try to fail me\")\n with tune.checkpoint_dir(step=itr) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(str(i))\n tune.report(test=i, training_iteration=i)\n\n analysis = tune.run(train, max_failures=3)\n trial_dfs = list(analysis.trial_dataframes.values())\n assert len(trial_dfs[0][\"training_iteration\"]) == 10\n\n def testWithParameters(self):\n class Data:\n def __init__(self):\n self.data = [0] * 500_000\n\n data = Data()\n data.data[100] = 1\n\n def train(config, data=None):\n data.data[101] = 2 # Changes are local\n 
tune.report(metric=len(data.data), hundred=data.data[100])\n\n trial_1, trial_2 = tune.run(\n with_parameters(train, data=data), num_samples=2).trials\n\n self.assertEquals(data.data[101], 0)\n self.assertEquals(trial_1.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_1.last_result[\"hundred\"], 1)\n self.assertEquals(trial_2.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_2.last_result[\"hundred\"], 1)\n self.assertTrue(str(trial_1).startswith(\"train_\"))\n\n # With checkpoint dir parameter\n def train(config, checkpoint_dir=\"DIR\", data=None):\n data.data[101] = 2 # Changes are local\n tune.report(metric=len(data.data), cp=checkpoint_dir)\n\n trial_1, trial_2 = tune.run(\n with_parameters(train, data=data), num_samples=2).trials\n\n self.assertEquals(data.data[101], 0)\n self.assertEquals(trial_1.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_1.last_result[\"cp\"], \"DIR\")\n self.assertEquals(trial_2.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_2.last_result[\"cp\"], \"DIR\")\n self.assertTrue(str(trial_1).startswith(\"train_\"))\n\n def testWithParameters2(self):\n class Data:\n def __init__(self):\n import numpy as np\n self.data = np.random.rand((2 * 1024 * 1024))\n\n def train(config, data=None):\n tune.report(metric=len(data.data))\n\n trainable = tune.with_parameters(train, data=Data())\n dumped = cloudpickle.dumps(trainable)\n assert sys.getsizeof(dumped) < 100 * 1024\n\n def testReturnAnonymous(self):\n def train(config):\n return config[\"a\"]\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4)\n self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8)\n\n def testReturnSpecific(self):\n def train(config):\n return {\"m\": config[\"a\"]}\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[\"m\"], 4)\n self.assertEquals(trial_2.last_result[\"m\"], 8)\n\n def testYieldAnonymous(self):\n def train(config):\n for i in range(10):\n yield config[\"a\"] + i\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4 + 9)\n self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8 + 9)\n\n def testYieldSpecific(self):\n def train(config):\n for i in range(10):\n yield {\"m\": config[\"a\"] + i}\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[\"m\"], 4 + 9)\n self.assertEquals(trial_2.last_result[\"m\"], 8 + 9)\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
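The test_function_api.py record above exercises Ray Tune's function-trainable API: a train(config, checkpoint_dir=None) function writes checkpoints inside a tune.checkpoint_dir(step=...) context and reports metrics with tune.report(...). A minimal sketch of that pattern outside the test harness, using only the calls the tests themselves rely on (the metric name, step count, and resume logic are illustrative, and the API shown is the one in the Ray version pinned by this record):

import json
import os

from ray import tune

def train(config, checkpoint_dir=None):
    # Resume from the last checkpoint if Tune handed one back to us.
    start = 0
    if checkpoint_dir:
        with open(os.path.join(checkpoint_dir, "checkpoint")) as f:
            start = json.load(f)["step"] + 1

    for step in range(start, 10):
        # Ask Tune for a directory in which to store this step's checkpoint.
        with tune.checkpoint_dir(step=step) as ckpt_dir:
            with open(os.path.join(ckpt_dir, "checkpoint"), "w") as f:
                json.dump({"step": step}, f)
        tune.report(test=step)

analysis = tune.run(train)

This mirrors testReuseCheckpoint and testFunctionRecurringSave: save and restore stay commutative because the function only reads the checkpoint_dir it is given and only writes through tune.checkpoint_dir.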
takuto0831/Competition-utils | [
"c738e199c6a771a0c58b9cd237660bb76b4be4fb"
] | [
"pyscript/torch/utils.py"
] | [
"import os\nimport random\nimport subprocess\nimport numpy as np\nimport torch\nimport time\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n XLA = True\nexcept ModuleNotFoundError:\n XLA = False\n\n\ndef freeze_module(module):\n for i, param in enumerate(module.parameters()):\n param.requires_grad = False\n\n\ndef fit_state_dict(state_dict, model):\n '''\n Ignore size mismatch when loading state_dict\n '''\n for name, param in model.named_parameters():\n new_param = state_dict[name]\n if new_param.size() != param.size():\n print(f'Size mismatch in {name}: {new_param.shape} -> {param.shape}')\n state_dict.pop(name)\n\n\ndef get_device(arg):\n if isinstance(arg, torch.device) or \\\n (XLA and isinstance(arg, xm.xla_device)):\n device = arg\n elif arg is None or isinstance(arg, (list, tuple)):\n if XLA:\n device = xm.xla_device()\n else:\n device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu')\n elif isinstance(arg, str):\n if arg == 'xla' and XLA:\n device = xm.xla_device()\n else:\n device = torch.device(arg)\n \n if isinstance(arg, (list, tuple)):\n if isinstance(arg[0], int):\n device_ids = list(arg)\n elif isinstance(arg[0], str) and arg[0].isnumeric():\n device_ids = [ int(a) for a in arg ]\n else:\n raise ValueError(f'Invalid device: {arg}')\n else:\n if device.type == 'cuda':\n assert torch.cuda.is_available()\n if device.index is None:\n device_count = torch.cuda.device_count()\n if device_count > 1:\n device_ids = list(range(device_count))\n else:\n device_ids = [0]\n else:\n device_ids = [device.index]\n else:\n device_ids = [device.index]\n \n return device, device_ids\n\n\ndef seed_everything(random_state=0, deterministic=False):\n random.seed(random_state)\n os.environ['PYTHONHASHSEED'] = str(random_state)\n np.random.seed(random_state)\n torch.manual_seed(random_state)\n torch.cuda.manual_seed(random_state)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else:\n torch.backends.cudnn.deterministic = False\n\n\ndef get_gpu_memory():\n \"\"\"\n Code borrowed from: \n https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4\n\n Get the current gpu usage.\n\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map\n\n\ndef get_time(time_format='%H:%M:%S'):\n return time.strftime(time_format, time.localtime())\n"
] | [
[
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.device_count"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
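The utils.py record above collects small training helpers: seed_everything for reproducible runs, get_device for resolving an XLA/CUDA/CPU device plus device ids, freeze_module for turning off gradients, and an nvidia-smi based get_gpu_memory. A minimal usage sketch, under the assumption that the module is importable from the path listed in the record (pyscript/torch/utils.py):

import torch
from torch import nn

# Assumed import path, taken from the record's file_path field.
from pyscript.torch.utils import seed_everything, get_device, freeze_module

seed_everything(42, deterministic=True)   # seeds python/numpy/torch, sets cuDNN to deterministic
device, device_ids = get_device(None)     # XLA if available, else CUDA, else CPU

model = nn.Linear(8, 2).to(device)
freeze_module(model)                      # requires_grad = False on every parameter
print(device, device_ids, all(not p.requires_grad for p in model.parameters()))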
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs | [
"57195ccab62d23dcbcac1a317f8a9811a9fd6cb5"
] | [
"models/GNN/GIN.py"
] | [
"from dgl import BatchedDGLGraph\nfrom dgl.nn.pytorch.conv import GINConv\nfrom torch import nn\n\nfrom models.GNN.GNNModelBase import GNNModelBase\nfrom models.utils import TypeConditionalLinear\n\n\nclass GIN(GNNModelBase):\n \"\"\"\n Graph Isomorphism Network as described in https://arxiv.org/pdf/1810.00826.pdf\n \"\"\"\n\n def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):\n super().__init__(**kwargs)\n self.layers = nn.ModuleList()\n for _ in range(self.n_layers):\n apply_func_layers = sum(\n [[nn.Linear(self.hidden_dim, self.hidden_dim),\n self.get_act(),\n self.get_norm(self.hidden_dim),\n nn.Dropout(self.p_dropout)] for _ in\n range(n_apply_func_layers)],\n [])\n apply_func = nn.Sequential(*apply_func_layers)\n self.layers.append(GINConv(apply_func=apply_func,\n aggregator_type=aggregator_type,\n init_eps=init_eps,\n learn_eps=learn_eps))\n\n def gnn_forward(self, g: BatchedDGLGraph):\n feats = g.ndata['h']\n for layer in self.layers:\n feats = layer(graph=g, feat=feats)\n readout = self.readout(g, feats)\n out = self.fcout(readout)\n return out\n\n\nclass RelationalGIN(GNNModelBase):\n \"\"\"\n Version of GIN that passes edge-type-conditional messages\n \"\"\"\n\n def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):\n super().__init__(**kwargs)\n self.n_relations = 2 * len(\n self.db_info['edge_type_to_int']) - 1 # there are negative edge types for the reverse edges\n self.layers = nn.ModuleList()\n for _ in range(self.n_layers):\n apply_func_layers = sum(\n [[nn.Linear(self.hidden_dim, self.hidden_dim),\n self.get_act(),\n self.get_norm(self.hidden_dim),\n nn.Dropout(self.p_dropout)] for _ in\n range(n_apply_func_layers)],\n [])\n apply_func = nn.Sequential(*apply_func_layers)\n self.layers.append(RelationalGINConv(apply_func=apply_func,\n activation=self.get_act(),\n aggregator_type=aggregator_type,\n hidden_dim=self.hidden_dim,\n init_eps=init_eps,\n learn_eps=learn_eps,\n num_rels=self.n_relations))\n\n def gnn_forward(self, g: BatchedDGLGraph):\n feats = g.ndata['h']\n etypes = g.edata['edge_types'] + self.n_relations // 2\n for layer in self.layers:\n feats = layer(graph=g, feat=feats, etypes=etypes)\n readout = self.readout(g, feats)\n out = self.fcout(readout)\n return out\n\n\nclass RelationalGINConv(GINConv):\n def __init__(self, apply_func, activation, aggregator_type, hidden_dim, init_eps=0, learn_eps=False, num_rels=0):\n super().__init__(apply_func, aggregator_type, init_eps, learn_eps)\n self.num_rels = num_rels\n self.act = activation\n self.edge_message_layer = TypeConditionalLinear(hidden_dim, hidden_dim, num_rels)\n\n def message_func(self, edges):\n msg = edges.src['h']\n msg = self.edge_message_layer(msg, edges.data['type'])\n msg = self.act(msg)\n return {'msg': msg}\n\n def forward(self, graph, feat, etypes):\n graph = graph.local_var()\n graph.ndata['h'] = feat\n graph.edata['type'] = etypes\n graph.update_all(self.message_func, self._reducer('msg', 'neigh'))\n rst = (1 + self.eps) * feat + graph.ndata['neigh']\n if self.apply_func is not None:\n rst = self.apply_func(rst)\n return rst\n\n\nclass ERGIN(RelationalGIN):\n \"\"\"\n GIN using different linear mappings for each node and edge type\n \"\"\"\n\n def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):\n super().__init__(n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs)\n self.n_node_types = len(self.db_info['node_type_to_int'])\n self.act = self.get_act()\n self.layers = 
nn.ModuleList()\n self.apply_func_blocks = nn.ModuleList()\n for _ in range(self.n_layers):\n self.layers.append(RelationalGINConv(apply_func=None,\n activation=self.get_act(),\n aggregator_type=aggregator_type,\n hidden_dim=self.hidden_dim,\n init_eps=init_eps,\n learn_eps=learn_eps,\n num_rels=self.n_relations))\n self.apply_func_blocks.append(\n nn.ModuleList([nn.ModuleDict({'tcl': TypeConditionalLinear(self.hidden_dim,\n self.hidden_dim,\n self.n_node_types),\n 'act': self.get_act(),\n 'norm': self.get_norm(self.hidden_dim),\n 'do': nn.Dropout(self.p_dropout)\n })\n for _ in range(n_apply_func_layers)])\n )\n\n def gnn_forward(self, g: BatchedDGLGraph):\n feats = g.ndata['h']\n ntypes = g.ndata['node_types']\n etypes = g.edata['edge_types'] + self.n_relations // 2\n for layer, apply_func_blocks in zip(self.layers, self.apply_func_blocks):\n feats = layer(graph=g, feat=feats, etypes=etypes)\n for block in apply_func_blocks:\n feats = block['tcl'](feats, ntypes)\n feats = block['act'](feats)\n feats = block['norm'](feats)\n feats = block['do'](feats)\n readout = self.readout(g, feats)\n out = self.fcout(readout)\n return out\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torch.nn.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
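The GIN.py record above stacks GINConv layers whose apply_func is a small MLP (Linear, activation, norm, dropout blocks), plus relational variants that condition messages on edge type. A minimal construction sketch of one plain layer using the same GINConv signature imported in that file; the hidden width, activation, norm choice, and hyperparameters below are illustrative assumptions, and the DGL release is assumed to be the pre-0.5 one that still exposes BatchedDGLGraph:

from dgl.nn.pytorch.conv import GINConv
from torch import nn

hidden_dim, p_dropout = 64, 0.1  # illustrative values, not taken from the record

# Mirrors the per-layer MLP assembled in GIN.__init__:
# Linear -> activation -> norm -> dropout, repeated n_apply_func_layers times.
apply_func = nn.Sequential(
    nn.Linear(hidden_dim, hidden_dim),
    nn.ReLU(),
    nn.BatchNorm1d(hidden_dim),
    nn.Dropout(p_dropout),
)

layer = GINConv(apply_func=apply_func,
                aggregator_type='sum',
                init_eps=0.0,
                learn_eps=True)

# Forward use follows GIN.gnn_forward: feats = layer(graph=g, feat=g.ndata['h'])
# for a (batched) DGLGraph g, followed by a readout and the output head.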
jlvdb/the-wizz | [
"21e88888472d2598a0db861aef31076078628b8e",
"21e88888472d2598a0db861aef31076078628b8e",
"21e88888472d2598a0db861aef31076078628b8e"
] | [
"pdf_maker.py",
"the_wizz/pdf_maker_utils.py",
"the_wizz/kdtree_utils.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"This code is the main access point for the majority of users of The-wiZZ. It\ntakes an input subselection of a survey catalog, a The-wiZZ HDF5 data file, and\nmatches the two together to create a resultant clustering redshift estimate\nthat can then be turned into a redshift PDF. This code also takes care of any\nweighting of the objects with unknown redshift, redshift binning, bootstrapping\nerrors, and output. See input_flags.py for a list of options or use --help from\nthe command line.\n\"\"\"\n\nimport numpy as np\n\nfrom the_wizz import core_utils\nfrom the_wizz import pdf_maker_utils\nfrom the_wizz import input_flags\n\n\nif __name__ == \"__main__\":\n print(\"\")\n print(\"The-wiZZ has begun conjuring: running pdf maker...\")\n # First we parse the command line for arguments as usual. See\n # input_flags.py for a full list of input arguments.\n args = input_flags.parse_input_pdf_args()\n input_flags.print_args(args)\n # Load the file containing all matched pairs of spectroscopic and\n # photometric objects.\n print(\"Loading unknown data...\")\n unknown_data = core_utils.file_checker_loader(args.unknown_sample_file)\n # Now we figure out what kind of redshift binning we would like to have.\n # This will be one of the largest impacts on the signal to noise of the\n # measurement. Some rules of thumb are:\n # The narrower bins are in redshift the better. You are measuring a\n # correlation, the narrower the bin size in comoving distance the more\n # correlated things will be and thus increase the amplitude. Aka use\n # Groth/Pebbles[sic] scaling to your advantage.\n # For a spectroscopic sample that is selected for a specific redshift\n # range with few galaxies outside that range (eg DEEP2), adaptive binning\n # is recommended. This will keep a equal number spectra per redshift bin.\n # A good rule is to try to have about 100 spectra per redshift bin for max\n # signal to noise.\n # Linear binning is provided as a curtesy and is not nesassarly\n # recommended. It will not give the best signal to noise compared to\n # adaptive and has the same draw backs as adaptive is that the bias could\n # be changing oddly from bin to bin. It is recommended that the user try\n # adaptive and comoving spaced bins for the best results. Comoving returns\n # bins that are of equal comoving distance from the line of sight. We also\n # provide binning in equal ln(1 + z). This is for people who want a\n # comoving like binning but without the dependece on cosmology. It also\n # has the convienent property of giving errors that can be more easlily\n # compared the usual simga/(1 + z) error.\n print(\"Creating bins...\")\n if args.z_binning_type[0] == 'linear':\n z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n elif args.z_binning_type[0] == 'adaptive':\n z_bin_edge_array = pdf_maker_utils._create_adaptive_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins,\n pdf_maker.reference_redshift_array)\n elif args.z_binning_type[0] == 'comoving':\n z_bin_edge_array = pdf_maker_utils._create_comoving_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n elif args.z_binning_type[0] == 'logspace':\n z_bin_edge_array = pdf_maker_utils._create_logspace_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n elif args.z_binning_type[0] == 'file':\n z_bin_edge_array = np.loadtxt(args.z_binning_type[1])[:-1]\n else:\n print(\"Requested binning name invalid. 
Valid types are:\")\n print(\"\\tlinear: linear binning in redshift\")\n print(\"\\tadaptive: constant reference objects per redshift bin\")\n print(\"\\tcomoving: linear binning in comoving distance\")\n print(\"\\tfile: file providing the bin edges\")\n print(\"Returning linear binning...\")\n z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n # This is where the heavy lifting happens. We create our PDF maker object\n # which will hold the pair file for use, calculate the over density per\n # redshift bin, and also store intermediary results for later use.\n # Before we can estimate the PDF, we must mask for the objects we want\n # to estimate the redshit of. These objects can be color selected,\n # photo-z selected, or any other object selection you would like. The code\n # line below turns the array of indices in the hdf5 pair file, into a\n # single density estimate around the reference object.\n print(\"Starting indices matcher...\")\n pdf_maker = pdf_maker_utils.collapse_ids_to_single_estimate(\n args.input_pair_hdf5_file, args.pair_scale_name, unknown_data, args)\n # Before we calculated the pdfs, we want to know what the over densities\n # are in each of the regions calculated on the area we consider.\n print(\"Calculating region densities...\")\n pdf_maker.compute_region_densities(z_bin_edge_array, args.z_max)\n if args.output_region_pickle_file is not None:\n pdf_maker.write_region_densities(args.output_region_pickle_file, args)\n # Now that we've \"collapsed\" the estimate around the reference object we\n # need to bin up the results in redshift and create our final PDF.\n print(\"Calculating pdf...\")\n if args.bootstrap_samples is None:\n pdf_maker.compute_pdf_bootstrap(args.n_bootstrap)\n else:\n bootstrap_region_array = np.loadtxt(args.bootstrap_samples,\n dtype=np.int_)\n pdf_maker._compute_pdf_bootstrap(bootstrap_region_array)\n # Write individual bootstraps to file.\n if args.output_bootstraps_file is not None:\n pdf_maker.write_bootstrap_samples_to_ascii(args.output_bootstraps_file,\n args)\n # Now that we have the results. We just need to write them to file and we\n # are done.\n print(\"Writing...\")\n output_file = core_utils.create_ascii_file(args.output_pdf_file_name,\n args)\n pdf_maker.write_pdf_to_ascii(output_file)\n output_file.close()\n print(\"Done!\")\n",
"\n\"\"\"Utility functions for collapsing the id arrays stored in a the-wizz HDF5\nfile into a clustering redshift estimate given an input sample.\n\"\"\"\n\nimport pickle\n\nimport h5py\nfrom multiprocessing import Pool\nimport numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline as iu_spline\n\nfrom the_wizz import core_utils\n\n\ndef _create_linear_redshift_bin_edges(z_min, z_max, n_bins):\n \"\"\"Simple utility for computing redshift bins that are linearly spaced in\n redshift. Not recommened for use if your concern is maximum signal to\n noise.\n --------------------------------------------------------------------------\n args:\n z_min: float, minimum redshift to bin from\n z_max: float, maximum redshift to bin to\n n_bins: int, number of bins\n returns:\n numpy.array of type float and shape (n_bins,) containing the lower bin\n edges. The n_bin + 1 edge is equal to z_max.\n \"\"\"\n return np.linspace(z_min, z_max, n_bins + 1)[:-1]\n\n\ndef _create_adaptive_redshift_bin_edges(z_min, z_max, n_bins, redshift_array):\n \"\"\"Simple utility for computing redshift bins that delivers a consistent\n number of spectroscopic objects per bin.\n ----------------------------------------------------------------------------\n args:\n z_min: float, minimum redshift to bin from\n z_max: float, maximum redshift to bin to\n n_bins: int, number of bins\n redshift_array: numpy.array float of the spectroscopic redshifts.\n returns:\n numpy.array of type float and shape (n_bins,) containing the lower bin\n edges. The n_bin + 1 edge is equal to z_max.\n \"\"\"\n useable_z_array = redshift_array[np.logical_and(redshift_array >= z_min,\n redshift_array < z_max)]\n useable_z_array.sort()\n return useable_z_array[np.arange(0, useable_z_array.shape[0],\n useable_z_array.shape[0]/n_bins,\n dtype=np.int_)]\n\n\ndef _create_logspace_redshift_bin_edges(z_min, z_max, n_bins):\n \"\"\"Simple utility for computing redshift bins that are equally spaced in\n comoving, line of sight distance. This creates bins that have a smoother\n bias versus redshift.\n ----------------------------------------------------------------------------\n args:\n z_min: float, minimum redshift to bin from\n z_max: float, maximum redshift to bin to\n n_bins: int, number of bins\n returns:\n numpy.array of type float and shape (n_bins,) containing the lower bin\n edges. The n_bin + 1 edge is equal to z_max.\n \"\"\"\n log_min = np.log(1 + z_min)\n log_max = np.log(1 + z_max)\n return np.exp(np.linspace(log_min, log_max, n_bins + 1)[:-1]) - 1.0\n\n\ndef _create_comoving_redshift_bin_edges(z_min, z_max, n_bins):\n \"\"\"Simple utility for computing redshift bins that are equally spaced in\n comoving, line of sight distance. This creates bins that have a smoother\n bias versus redshift.\n ----------------------------------------------------------------------------\n args:\n z_min: float, minimum redshift to bin from\n z_max: float, maximum redshift to bin to\n n_bins: int, number of bins\n returns:\n numpy.array of type float and shape (n_bins,) containing the lower bin\n edges. 
The n_bin + 1 edge is equal to z_max.\n \"\"\"\n comov_min = core_utils.WMAP5.comoving_distance(z_min).value\n comov_max = core_utils.WMAP5.comoving_distance(z_max).value\n\n comov_dist_to_redsihft_spline = _make_redshift_spline(z_min, z_max)\n\n return comov_dist_to_redsihft_spline(\n np.linspace(comov_min, comov_max, n_bins + 1)[:-1])\n\n\ndef _make_redshift_spline(z_min, z_max):\n \"\"\"Utility function for creating a spline for comoving distance to\n redshift.\n \"\"\"\n redshift_array = np.linspace(\n np.min((z_min - 1e-8, 0.0)), z_max + 1e-8, 1000)\n comov_array = core_utils.WMAP5.comoving_distance(redshift_array)\n comov_dist_to_redshift_spline = iu_spline(comov_array, redshift_array)\n return comov_dist_to_redshift_spline\n\n\ndef collapse_ids_to_single_estimate(hdf5_data_file_name, scale_name,\n unknown_data, args):\n \"\"\"This is the heart of the-wizz. It enables the matching of a set of\n catalog ids to the ids stored as pairs to the spectroscopic objects. The\n result of this calculation is a intermediary data product containing the\n density of unknown objects around each reference object stored in the\n PDFMaker data structure class.\n --------------------------------------------------------------------------\n Args:\n hdf5_pairs_group: hdf5 group object containing the pair ids for a fixed\n annulus.\n unknown_data: open fits data containing object ids and relivent weights\n args: ArgumentParser.parse_args object returned from\n input_flags.parse_input_pdf_args\n Returns:\n a pdf_maker class object\n \"\"\"\n # First we load the the ids from the input fits data using the columns\n # names the user has provided and scale the randoms to the correct ammount.\n # Object ids are also sorted in increasing id for later binary search.\n\n open_hdf5_file = core_utils.file_checker_loader(hdf5_data_file_name)\n hdf5_data_grp = open_hdf5_file['data']\n\n # Prime our output array.\n n_reference = len(hdf5_data_grp)\n reference_unknown_array = np.empty(n_reference, dtype=np.float32)\n\n key_array = list(hdf5_data_grp.keys())\n pdf_maker_obj = PDFMaker(key_array, args)\n\n # Initialize the workers.\n loader_pool = Pool(1)\n matcher_pool = Pool(np.min((args.n_processes - 1, 1)))\n\n print(\"\\tPre-loading reference data...\")\n loader_result = loader_pool.imap(\n _load_pair_data,\n [(args.input_pair_hdf5_file, scale_name,\n key_array[start_idx:start_idx + args.n_reference_load_size])\n for start_idx in range(0, len(key_array),\n args.n_reference_load_size)])\n\n print(\"\\tPre-loading unknown data...\")\n id_array, rand_ratio, weight_array, ave_weight = \\\n _compute_region_densities_and_weights(\n unknown_data, hdf5_data_grp, args)\n\n # Close the hdf5 file\n open_hdf5_file.close()\n\n print(\"\\tLoading reference data and starting matching loop...\")\n for loader_idx, pair_data in enumerate(loader_result):\n\n start_idx = loader_idx * args.n_reference_load_size\n end_idx = np.min(((loader_idx + 1) * args.n_reference_load_size,\n len(key_array)))\n\n print(\"\\t\\tmatching pairs: starting references %i-%i...\" %\n (start_idx, end_idx))\n\n if args.unknown_stomp_region_name is not None:\n matcher_pool_iter = matcher_pool.imap(\n _collapse_multiplex,\n [(data_set,\n id_array[data_set['region']],\n weight_array[data_set['region']],\n args.use_inverse_weighting)\n for pair_idx, data_set in enumerate(pair_data)],\n chunksize=np.int(np.where(args.n_processes > 1,\n np.sqrt(len(pair_data)), 1)))\n else:\n matcher_pool_iter = matcher_pool.imap(\n _collapse_multiplex,\n [(data_set, id_array, 
weight_array,\n args.use_inverse_weighting)\n for pair_idx, data_set in enumerate(pair_data)],\n chunksize=np.int(np.where(args.n_processes > 1,\n np.sqrt(len(pair_data)), 1)))\n\n print('\\t\\tStoring reference data...')\n for ref_idx, ref_data in zip(range(start_idx, end_idx), pair_data):\n pdf_maker_obj.sef_reference_obj_data(ref_idx, ref_data)\n\n print(\"\\t\\tComputing/storing pair count...\")\n for pair_idx, reference_value in enumerate(matcher_pool_iter):\n reference_unknown_array[start_idx + pair_idx] = reference_value\n # Clean up a bit.\n del pair_data\n del matcher_pool_iter\n print(\"\\t\\t\\tWaiting for next load...\")\n\n # Close the workers.\n del loader_result\n loader_pool.close()\n matcher_pool.close()\n loader_pool.join()\n matcher_pool.join()\n # Store the results in our PDFMaker class object.\n pdf_maker_obj.initialize_regions_and_densities()\n pdf_maker_obj.set_reference_unknown_array(reference_unknown_array)\n pdf_maker_obj.scale_random_points(rand_ratio, ave_weight)\n\n return pdf_maker_obj\n\n\ndef _compute_region_densities_and_weights(unknown_data, hdf5_data_grp, args):\n \"\"\" Function for grabbing the ids and catalog weights from the unknown data\n and putting them into a format for use in pdf_maker. Also computes the\n scaling for the random points per region.\n \"\"\"\n\n if args.unknown_weight_name is not None:\n unknown_data = unknown_data[\n unknown_data[args.unknown_weight_name] != 0]\n id_array = unknown_data[args.unknown_index_name]\n id_args_array = id_array.argsort()\n id_array = id_array[id_args_array]\n rand_ratio = (unknown_data.shape[0] /\n hdf5_data_grp.attrs['n_random_points'])\n # If the user specifies the name of a STOMP region column, break up the\n # data into the individual regions.\n if args.unknown_stomp_region_name is not None:\n id_array = [id_array[\n unknown_data[args.unknown_stomp_region_name][id_args_array] ==\n reg_idx]\n for reg_idx in range(hdf5_data_grp.attrs['n_region'])]\n tmp_n_region = np.array(\n [id_array[reg_idx].shape[0]\n for reg_idx in range(hdf5_data_grp.attrs['n_region'])],\n dtype=np.int_)\n rand_ratio = (\n (tmp_n_region / hdf5_data_grp.attrs['n_random_points']) *\n (hdf5_data_grp.attrs['area'] /\n hdf5_data_grp.attrs['region_area']))\n # Now that we loaded the ids we can load unknown object weights in the same\n # way.\n ave_weight = 1.0\n weight_array = np.ones(unknown_data.shape[0], dtype=np.float32)\n if args.unknown_weight_name is not None:\n weight_array = unknown_data[args.unknown_weight_name][id_args_array]\n ave_weight = np.mean(weight_array)\n if args.unknown_stomp_region_name is not None:\n weight_array = [weight_array[\n unknown_data[args.unknown_stomp_region_name][id_args_array] ==\n reg_idx]\n for reg_idx in range(hdf5_data_grp.attrs['n_region'])]\n ave_weight = np.array(\n [weight_array[reg_idx].mean()\n for reg_idx in range(hdf5_data_grp.attrs['n_region'])],\n dtype=np.float_)\n\n return id_array, rand_ratio, weight_array, ave_weight\n\n\ndef _load_pair_data(input_tuple):\n \"\"\"Functions for loading individual reference objects from the HDF5 file.\n ----------------------------------------------------------------------------\n Args:\n hdf5_group: an h5py group object\n key_start: int index of starting object to load\n n_load: int number of objects to load\n Returns:\n list of numpy arrays\n \"\"\"\n (hdf5_file_name, scale_name, key_list) = input_tuple\n open_hdf5_file = h5py.File(hdf5_file_name)\n output_list = []\n for key in key_list:\n ref_data_grp = open_hdf5_file['data/%s' % key]\n scale_grp 
= ref_data_grp[scale_name]\n output_list.append(\n {'redshift': ref_data_grp.attrs['redshift'],\n 'region': ref_data_grp.attrs['region'],\n 'area': scale_grp.attrs['area'],\n 'ref_ref_n_points': scale_grp.attrs['ref_ref_n_points'],\n 'bin_resolution': scale_grp.attrs['bin_resolution'],\n 'rand_dist_weight': scale_grp.attrs['rand_dist_weight'],\n 'n_random': scale_grp.attrs['n_random'],\n 'ids': scale_grp['ids'][...],\n 'dist_weights': scale_grp['dist_weights'][...]},)\n del ref_data_grp\n del scale_grp\n open_hdf5_file.close()\n del open_hdf5_file\n\n return output_list\n\n\ndef _collapse_multiplex(input_tuple):\n \"\"\"Function for matching indices and calculating the over densities of a\n specific set of unknown objects around the reference object. This specific\n function is meant to be used within the context of python multiprocessing.\n ----------------------------------------------------------------------------\n Args:\n input_tuple: tuple of arrays and values specifying the current data to\n consider for the reference object.\n Returns:\n float n_points\n \"\"\"\n (data_set, id_array, weight_array,\n use_inverse_weighting) = input_tuple\n\n id_data_set = data_set['ids']\n dist_weight_data_set = data_set['dist_weights']\n if id_data_set.shape[0] == 0 or id_array.shape[0] == 0:\n return 0.0\n # Since the ids around the reference are partially localized spatially a\n # cut in id is also a cut spatially. Here we take advantage of this.\n start_idx = np.searchsorted(id_array, id_data_set[0])\n end_idx = np.searchsorted(id_array, id_data_set[-1], side='right')\n if start_idx == end_idx:\n return 0.0\n if start_idx < 0:\n start_idx = 0\n if end_idx > id_array.shape[0]:\n end_idx = id_array.shape[0]\n sub_id_array = id_array[start_idx:end_idx]\n sub_weight_array = weight_array[start_idx:end_idx]\n\n # We can trim a bit of fat by doing the same for the data set.\n start_idx = np.searchsorted(id_data_set, sub_id_array[0])\n end_idx = np.searchsorted(id_data_set, sub_id_array[-1], side='right')\n if start_idx == end_idx:\n return 0.0\n if start_idx < 0:\n start_idx = 0\n if end_idx > id_data_set.shape[0]:\n end_idx = id_data_set.shape[0]\n sub_id_data_set = id_data_set[start_idx:end_idx]\n sub_dist_weight_data_set = dist_weight_data_set[start_idx:end_idx]\n\n tmp_n_points = 0.0\n # We test to see which array is longer, the hdf5 id array or our unknown\n # array. We loop over the shorter of the two. 
This can yield a significant\n # speed boost for small scales or sparse samples.\n if sub_id_array.shape[0] <= sub_id_data_set.shape[0]:\n sort_idx_array = np.searchsorted(\n sub_id_data_set, sub_id_array)\n sort_idx_mask = np.where(np.logical_and(\n sort_idx_array < sub_id_data_set.shape[0],\n sort_idx_array >= 0))\n if use_inverse_weighting:\n tmp_n_points += np.where(\n np.equal(sub_id_data_set[sort_idx_array[sort_idx_mask]],\n sub_id_array[sort_idx_mask]),\n sub_dist_weight_data_set[sort_idx_array[sort_idx_mask]] *\n sub_weight_array[sort_idx_mask], 0.0).sum()\n else:\n tmp_n_points += np.where(\n np.equal(sub_id_data_set[sort_idx_array[sort_idx_mask]],\n sub_id_array[sort_idx_mask]),\n sub_weight_array[sort_idx_mask], 0.0).sum()\n else:\n sort_idx_array = np.searchsorted(\n sub_id_array, sub_id_data_set)\n sort_idx_mask = np.where(np.logical_and(\n sort_idx_array < sub_id_array.shape[0],\n sort_idx_array >= 0))\n if use_inverse_weighting:\n tmp_n_points += np.where(\n np.equal(sub_id_data_set[sort_idx_mask],\n sub_id_array[sort_idx_array[sort_idx_mask]]),\n sub_dist_weight_data_set[sort_idx_mask] *\n sub_weight_array[sort_idx_array[sort_idx_mask]], 0.0).sum()\n else:\n tmp_n_points += np.where(\n np.equal(sub_id_data_set[sort_idx_mask],\n sub_id_array[sort_idx_array[sort_idx_mask]]),\n sub_weight_array[sort_idx_array[sort_idx_mask]], 0.0).sum()\n return tmp_n_points\n\n\ndef collapse_full_sample(hdf5_pairs_grp, pdf_maker_obj, unknown_data, args):\n \"\"\"Convience function for collapsing the full sample of ids into a single\n estimate.\n ----------------------------------------------------------------------------\n Args:\n hdf5_pairs_group: hdf5 group object containing the pair ids for a fixed\n annulus.\n unknown_data: open fits data containing object ids and relivent weights\n args: ArgumentParser.parse_args object returned from\n input_flags.parse_input_pdf_args\n Returns:\n None\n \"\"\"\n print(\"\\tpre-loading unknown data...\")\n if args.unknown_weight_name is not None:\n unknown_data = unknown_data[\n unknown_data[args.unknown_weight_name] != 0]\n rand_ratio = (unknown_data.shape[0] /\n hdf5_pairs_grp.attrs['n_random_points'])\n if args.unknown_stomp_region_name is not None:\n tmp_n_region = np.array(\n [unknown_data[unknown_data[args.unknown_stomp_region_name] ==\n reg_idx].shape[0]\n for reg_idx in range(hdf5_pairs_grp.attrs['n_region'])],\n dtype=np.int_)\n rand_ratio = ((tmp_n_region /\n hdf5_pairs_grp.attrs['n_random_points']) *\n (hdf5_pairs_grp.attrs['area'] /\n hdf5_pairs_grp.attrs['region_area']))\n id_array = unknown_data[args.unknown_index_name]\n id_args_array = id_array.argsort()\n id_array = id_array[id_args_array]\n n_reference = len(hdf5_pairs_grp)\n reference_unknown_array = np.empty(n_reference, dtype=np.float32)\n print(\"\\t\\tcomputing/storing pair count...\")\n for reference_idx, key_name in enumerate(hdf5_pairs_grp.keys()):\n reference_grp = hdf5_pairs_group[\n '%s/%s' % (key_name, )]\n if args.use_inverse_weighting:\n reference_unknown_array[reference_idx] = np.sum(\n reference_grp['dist_weight'][...])\n else:\n reference_unknown_array[reference_idx] = (\n 1. 
* reference_grp['ids'][...]).shape[0]\n pdf_maker_obj.set_reference_unknown_array(reference_unknown_array)\n pdf_maker_obj.scale_random_points(rand_ratio, 1.0)\n\n return None\n\n\nclass PDFMaker(object):\n \"\"\"Main class for the heavy lifting of matching an array of object indices\n to the pair hdf5 data file, masking the used/un-used objects, summing the\n data into the spec-z bins, and outputting the posterier redshift\n distribution.\n \"\"\"\n def __init__(self, hdf5_data_keys, args):\n \"\"\"Init function for the PDF maker. The init class is a container for\n arrays of single point estimaties around reference, known redshift\n objects. The class also computes the estimates of clustering redshift\n recovery in spatial regions and the collapsed single PDF.\n ----------------------------------------------------------------------\n Args:\n hdf5_pair_group: HDF5 group object containing the reference object\n pairs\n args: ArgumentParser.parse_args object from\n input_flags.parse_input_pdf_args\n \"\"\"\n self.reference_idx_dict = {}\n for ref_idx, key in enumerate(hdf5_data_keys):\n self.reference_idx_dict[key] = ref_idx\n\n self.reference_redshift_array = np.empty(len(hdf5_data_keys),\n dtype=np.float32)\n self.reference_region_array = np.empty(len(hdf5_data_keys),\n dtype=np.uint32)\n self.reference_area_array = np.empty(len(hdf5_data_keys),\n dtype=np.float32)\n\n self.reference_unknown_array = np.empty(len(hdf5_data_keys),\n dtype=np.float32)\n self.reference_density_array = np.empty(len(hdf5_data_keys),\n dtype=np.float32)\n self.reference_hold_rand_array = np.empty(len(hdf5_data_keys),\n dtype=np.float32)\n self.reference_resolution_array = np.empty(len(hdf5_data_keys),\n dtype=np.uint32)\n self._use_reference_densities = args.use_reference_cleaning\n self._use_inverse_weighting = args.use_inverse_weighting\n self._reference_unknown_array_set = False\n self._computed_region_densities = False\n self._computed_pdf = False\n self._computed_bootstraps = False\n\n def reset_pairs(self):\n self.reference_unknown_array = np.zeros_like(\n self.reference_redshift_array)\n self.reference_rand_array = np.zeros_like(\n self.reference_redshift_array)\n self._reference_unknown_array_set = False\n self._computed_region_densities = False\n self._computed_pdf = False\n self._computed_bootstraps = False\n\n return None\n\n def sef_reference_obj_data(self, ref_idx, ref_data):\n\n self.reference_redshift_array[ref_idx] = (\n ref_data['redshift'])\n self.reference_region_array[ref_idx] = \\\n ref_data['region']\n\n self.reference_area_array[ref_idx] = ref_data['area']\n self.reference_density_array[ref_idx] = (\n ref_data['ref_ref_n_points'] /\n self.reference_area_array[ref_idx])\n self.reference_resolution_array[ref_idx] = (\n ref_data['bin_resolution'])\n if self._use_inverse_weighting:\n self.reference_hold_rand_array[ref_idx] = (\n ref_data['rand_dist_weight'])\n else:\n self.reference_hold_rand_array[ref_idx] = (\n ref_data['n_random'])\n\n return None\n\n def initialize_regions_and_densities(self):\n\n has_density_mask = np.logical_and(\n self.reference_density_array > 0,\n np.isfinite(self.reference_density_array))\n min_reference_density = \\\n self.reference_density_array[has_density_mask].min()\n self.reference_density_array[\n np.logical_not(has_density_mask)] = min_reference_density\n\n max_n_regions = self.reference_region_array.max() + 1\n region_list = []\n for region_idx in range(max_n_regions):\n if np.any(region_idx == self.reference_region_array):\n region_list.append(region_idx)\n 
self.region_array = np.array(region_list, dtype=np.uint32)\n self.region_dict = {}\n for array_idx, region_idx in enumerate(self.region_array):\n self.region_dict[region_idx] = array_idx\n\n def _load_data_from_hdf5(self, hdf5_data_grp, args):\n \"\"\"Internal function for loading in non-pair search variables such as\n the reference redshift, area, region, etc.\n ------------------------------------------------------------------------\n Args:\n hdf5_pair_group: HDF5 group object containing the reference object\n pairs\n args: ArgumentParser.parse_args object from\n input_flags.parse_input_pdf_args\n Returns:\n None\n \"\"\"\n for reference_idx, key_name in enumerate(hdf5_data_grp.keys()):\n reference_grp = hdf5_data_grp[key_name]\n scale_grp = reference_grp[args.pair_scale_name]\n\n self.reference_redshift_array[reference_idx] = (\n reference_grp.attrs['redshift'])\n self.reference_region_array[reference_idx] = \\\n reference_grp.attrs['region']\n\n self.reference_area_array[reference_idx] = scale_grp.attrs['area']\n self.reference_density_array[reference_idx] = (\n scale_grp.attrs['ref_ref_n_points'] /\n self.reference_area_array[reference_idx])\n self.reference_resolution_array[reference_idx] = (\n scale_grp.attrs['bin_resolution'])\n if self._use_inverse_weighting:\n self.reference_hold_rand_array[reference_idx] = (\n scale_grp.attrs['rand_dist_weight'])\n else:\n self.reference_hold_rand_array[reference_idx] = (\n scale_grp.attrs['n_random'])\n self.initialize_regions_and_densities()\n\n return None\n\n def set_reference_unknown_array(self, unknown_array):\n \"\"\"Function for setting the values of the unknown object density. This\n is done externally rather than internally as Python classes don't play\n to well with the multiprocessing or numba modules.\n ------------------------------------------------------------------------\n Args:\n unknown_array: float array of values defining the number of\n (un)weighted points around a reference objet.\n Returns:\n None\n \"\"\"\n self.reference_unknown_array = unknown_array\n self._reference_unknown_array_set = True\n return None\n\n def scale_random_points(self, rand_ratio, ave_weight):\n \"\"\"Method for setting the scaling relative to the real for the randoms.\n ------------------------------------------------------------------------\n Args:\n rand_ratio: float ratio between the data and randoms\n (# data / # randoms)\n ave_weight: float average value of the weights applied to the\n unknown sample\n Returns:\n None\n \"\"\"\n # TODO:\n # Figure out a way to make this more stable.\n try:\n tmp_rand_ratio = rand_ratio[self.reference_region_array]\n except IndexError:\n tmp_rand_ratio = rand_ratio\n except TypeError:\n tmp_rand_ratio = rand_ratio\n try:\n tmp_ave_weight = ave_weight[self.reference_region_array]\n except IndexError:\n tmp_ave_weight = ave_weight\n except TypeError:\n tmp_ave_weight = ave_weight\n self.reference_rand_array = (\n self.reference_hold_rand_array * tmp_rand_ratio * tmp_ave_weight)\n return None\n\n def write_reference_n_points(self, hdf5_file):\n \"\"\"Method for writing the intermediate products of the over-density of\n the requested sample per known, reference object. This must be run\n after a call to self.colapse_ids_to_single_estimate.\n ----------------------------------------------------------------------\n Args:\n hdf5_file: an open hdf5 object from the return of h5py.File\n Returns:\n None\n \"\"\"\n if not self._reference_unknown_array_set:\n print(\"PDFMaker.set_reference_unknown_array not set. 
Exiting \"\n \"method.\")\n return None\n # TODO\n pass\n\n def compute_region_densities(self, z_bin_edge_array, z_max):\n \"\"\"Method for computing the over-density of the unknown sample against\n the reference sample binned in reference redshift in each of the\n spatial regions of the considered geometry. This allows for spatial\n bootstrapping of the final, resultant PDF. Will not run if\n set_reference_unknown_array was not set first.\n ------------------------------------------------------------------------\n Args:\n z_bin_edge_array: float array of the lower bin edge of the redshift\n bins.\n z_max: float maximum redshift of the redshift binning.\n Returns:\n None\n \"\"\"\n if not self._reference_unknown_array_set:\n print(\"PDFMaker.set_reference_unknown_array not set. Exiting \"\n \"method.\")\n return None\n # Initilize arrays.\n self._redshift_reg_array = np.zeros(\n (z_bin_edge_array.shape[0], self.region_array.shape[0]),\n dtype=np.float32)\n self._n_reference_reg_array = np.zeros(\n (z_bin_edge_array.shape[0], self.region_array.shape[0]),\n dtype=np.uint32)\n self._unknown_reg_array = np.zeros(\n (z_bin_edge_array.shape[0], self.region_array.shape[0]),\n dtype=np.float32)\n self._rand_reg_array = np.zeros(\n (z_bin_edge_array.shape[0], self.region_array.shape[0]),\n dtype=np.float32)\n self._area_reg_array = np.zeros(\n (z_bin_edge_array.shape[0], self.region_array.shape[0]),\n dtype=np.float32)\n self._resolution_reg_array = np.zeros(\n (z_bin_edge_array.shape[0], self.region_array.shape[0]),\n dtype=np.uint)\n # Loop over reference objects\n for reference_idx, redshift in enumerate(\n self.reference_redshift_array):\n # If the reference object is out of the redshift range continue.\n if redshift < z_bin_edge_array[0] or redshift >= z_max:\n continue\n # Grap the reference object region.\n region_idx = self.region_dict[\n self.reference_region_array[reference_idx]]\n # Find the redshift bin this object belongs to.\n bin_idx = np.searchsorted(z_bin_edge_array, redshift, 'right') - 1\n # Store object properties.\n self._redshift_reg_array[bin_idx, region_idx] += redshift\n self._n_reference_reg_array[bin_idx, region_idx] += 1\n if self._use_reference_densities:\n self._unknown_reg_array[bin_idx, region_idx] += (\n self.reference_unknown_array[reference_idx] /\n self.reference_density_array[reference_idx])\n self._rand_reg_array[bin_idx, region_idx] += (\n self.reference_rand_array[reference_idx] /\n self.reference_density_array[reference_idx])\n else:\n self._unknown_reg_array[bin_idx, region_idx] += (\n self.reference_unknown_array[reference_idx])\n self._rand_reg_array[bin_idx, region_idx] += (\n self.reference_rand_array[reference_idx])\n self._area_reg_array[bin_idx, region_idx] += (\n self.reference_area_array[reference_idx])\n self._resolution_reg_array[bin_idx, region_idx] += (\n self.reference_resolution_array[reference_idx])\n self._computed_region_densities = True\n return None\n\n def write_region_densities(self, output_pickle_file, args):\n \"\"\"Method to write all internal variables describing the over-density\n per spatial region to a pickle file. The data is pickled as a Python\n dictionary.\n ------------------------------------------------------------------------\n Args:\n output_pickle_file: string name of the pickle file to to write out\n to.\n Returns:\n None\n \"\"\"\n if not self._computed_region_densities:\n print(\"PDFMaker.compute_region_densities not run. 
Exiting method.\")\n return None\n output_file = open(output_pickle_file, 'wb')\n output_dict = {\"input_flags\": args,\n \"n_regions\": self.region_array.shape[0],\n \"redshift\": self._redshift_reg_array,\n \"n_reference\": self._n_reference_reg_array,\n \"unknown\": self._unknown_reg_array,\n \"rand\": self._rand_reg_array,\n \"area\": self._area_reg_array,\n \"resolution\": self._resolution_reg_array}\n pickle.dump(output_dict, output_file)\n output_file.close()\n return None\n\n def compute_pdf(self):\n \"\"\"Method for estimating the redshit posterior distribution of the\n unknown sample without considering the spatial regions. The returned\n over-density vs redshift is calculated using the natural estimator of\n over-density. (DD / DR - 1). Errors are simple Poisson.\n ------------------------------------------------------------------------\n Args:\n None\n Returns:\n None\n \"\"\"\n if not self._computed_region_densities:\n print(\"PDFMaker.compute_region_densities not run. Exiting method.\")\n return None\n self.redshift_array = (self._redshift_reg_array.sum(axis=1) /\n self._n_reference_reg_array.sum(axis=1))\n self.density_array = (self._unknown_reg_array.sum(axis=1) /\n self._rand_reg_array.sum(axis=1) - 1.0)\n self.density_err_array = (\n np.sqrt(self._unknown_reg_array.sum(axis=1)) /\n self._rand_reg_array.sum(axis=1))\n self.n_reference_array = self._n_reference_reg_array.sum(axis=1)\n self.unknown_array = self._unknown_reg_array.sum(axis=1)\n self.rand_array = self._rand_reg_array.sum(axis=1)\n self.area_array = self._area_reg_array.sum(axis=1)\n self.resolution_array = (self._resolution_reg_array.sum(axis=1) /\n self._n_reference_reg_array.sum(axis=1))\n self._computed_pdf = True\n return None\n\n def compute_pdf_bootstrap(self, n_bootstraps):\n \"\"\"Similar to compute_pdf but now the region information is used to\n spatially bootstrap the results in order to estimate errors.\n ------------------------------------------------------------------------\n Args:\n n_bootstraps: int number of spatial bootstraps to sample from the\n regions.\n Returns:\n None\n \"\"\"\n if not self._computed_region_densities:\n print(\"PDFMaker.compute_region_densities not run. Exiting method.\")\n return None\n self.bootstrap_regions = np.random.randint(\n self.region_array.shape[0],\n size=(n_bootstraps, self.region_array.shape[0]))\n self._compute_pdf_bootstrap(self.bootstrap_regions)\n return None\n\n def _compute_pdf_bootstrap(self, boot_region_array):\n \"\"\"Work horse method for computing the bootstrap errors. This method\n takes in an array of bootstrap samples specified by row-wise arrays of\n region ids. Allows for computation of bootstrap errors using the same\n fixed bootstrap samples.\n ------------------------------------------------------------------------\n Args:\n boot_region_array: array of integer region ids\n Returns:\n None\n \"\"\"\n if not self._computed_region_densities:\n print(\"PDFMaker.compute_region_densities not run. 
Exiting method.\")\n return None\n self.bootstrap_array = np.empty((self._redshift_reg_array.shape[0],\n boot_region_array.shape[0]))\n for boot_idx, boot_regions in enumerate(boot_region_array):\n self.bootstrap_array[:, boot_idx] = np.where(\n self._rand_reg_array[:, boot_regions].sum(axis=1) > 0,\n self._unknown_reg_array[:, boot_regions].sum(axis=1) /\n self._rand_reg_array[:, boot_regions].sum(axis=1) - 1.0,\n 0.0)\n self.redshift_array = (self._redshift_reg_array.sum(axis=1) /\n self._n_reference_reg_array.sum(axis=1))\n self.density_array = np.nanmean(self.bootstrap_array, axis=1)\n self.density_err_array = np.nanstd(self.bootstrap_array, axis=1)\n self.n_reference_array = self._n_reference_reg_array.sum(axis=1)\n self.unknown_array = self._unknown_reg_array.sum(axis=1)\n self.rand_array = self._rand_reg_array.sum(axis=1)\n self.area_array = self._area_reg_array.sum(axis=1)\n self.resolution_array = (self._resolution_reg_array.sum(axis=1) /\n self._n_reference_reg_array.sum(axis=1))\n self._computed_pdf = True\n self._computed_bootstraps = True\n return None\n\n def write_bootstrap_samples_to_ascii(self, output_name, args):\n \"\"\"Method for writing the individual bootstrap samples to ascii.\n ------------------------------------------------------------------------\n Args:\n output_name: string specifying the name of the ascii file to write\n the pdf/density results to. By default any existing file will\n be overwritten.\n args: ArgumentParser.parse_args object returned from\n input_flags.parse_input_pdf_args\n Returns:\n None\n \"\"\"\n output_header = '# input_flags:\\n'\n for arg in vars(args):\n output_header += '#\\t%s : %s\\n' % (arg, getattr(args, arg))\n np.savetxt(output_name, self.bootstrap_array, fmt='%.8f',\n header=output_header)\n return None\n\n def write_pdf_to_ascii(self, output_file):\n \"\"\"Method for writing the results of the different compute pdf methods\n to ascii.\n ------------------------------------------------------------------------\n Args:\n output_name: Python file object specifying the ascii file to write\n the pdf/density results to. By default any existing file will\n be overwritten.\n Returns:\n None\n \"\"\"\n if not self._computed_pdf:\n print(\"PDFMaker.compute_pdf or PDFMaker.compute_pdf_bootstrap not \"\n \"run. Exiting method.\")\n return None\n output_file.writelines('#type1 = redshift\\n')\n output_file.writelines('#type2 = over_density\\n')\n output_file.writelines('#type3 = over_density_err\\n')\n output_file.writelines('#type4 = n_points\\n')\n output_file.writelines('#type5 = n_random\\n')\n output_file.writelines('#type6 = area\\n')\n output_file.writelines('#type7 = ave resolution\\n')\n for bin_idx in range(self.redshift_array.shape[0]):\n output_file.writelines(\n '%.8e %.8e %.8e %.8e %.8e %.8e %.8e\\n' %\n (self.redshift_array[bin_idx], self.density_array[bin_idx],\n self.density_err_array[bin_idx], self.unknown_array[bin_idx],\n self.rand_array[bin_idx], self.area_array[bin_idx],\n self.resolution_array[bin_idx]))\n return None\n",
"\n\"\"\"Utility functions for computing single galaxy clustering redshfits using\na k-dimensional tree in galaxy parameter space.\n\"\"\"\n\nfrom multiprocessing import Pool\nimport numpy as np\nfrom scipy.spatial import cKDTree\n\nfrom the_wizz.pdf_maker_utils import _collapse_multiplex\n\n# TODO:\n# Add option to pickle and loaded pickled CatalogKDtree objects.\n\n\ndef collapse_ids_to_single_estimate(hdf5_pairs_group, pair_data, pdf_maker_obj,\n unknown_data, args):\n \"\"\"This is the heart of the-wizz. It enables the matching of a set of\n catalog ids to the ids stored as pairs to the spectroscopic\n objects. The result of this calculation is a intermediary data product\n containing the density of unknown objects around each reference object\n stored in the PDFMaker data structure class. This specific version is\n for when all the spectra have been pre-loaded in anticipation of running\n a large number of sub-samples as is the case with kdtree recovery.\n ----------------------------------------------------------------------------\n Args:\n hdf5_pairs_group: hdf5 group object containing the pair ids for a fixed\n annulus.\n unknown_data: open fits data containing object ids and relivent weights\n args: ArgumentParser.parse_args object returned from\n input_flags.parse_input_pdf_args\n Returns:\n None\n \"\"\"\n print(\"\\tpre-loading unknown data...\")\n if args.unknown_weight_name is not None:\n unknown_data = unknown_data[\n unknown_data[args.unknown_weight_name] != 0]\n id_array = unknown_data[args.unknown_index_name]\n id_args_array = id_array.argsort()\n id_array = id_array[id_args_array]\n rand_ratio = (\n unknown_data.shape[0]/(1.*hdf5_pairs_group.attrs['n_random_points']))\n if args.unknown_stomp_region_name is not None:\n id_array = [id_array[\n unknown_data[args.unknown_stomp_region_name][id_args_array] ==\n reg_idx]\n for reg_idx in range(hdf5_pairs_group.attrs['n_region'])]\n tmp_n_region = np.array(\n [id_array[reg_idx].shape[0]\n for reg_idx in range(hdf5_pairs_group.attrs['n_region'])],\n dtype=np.int_)\n rand_ratio = ((tmp_n_region /\n (1.*hdf5_pairs_group.attrs['n_random_points'])) *\n (hdf5_pairs_group.attrs['area'] /\n hdf5_pairs_group.attrs['region_area']))\n ave_weight = 1.0\n weight_array = np.ones(unknown_data.shape[0], dtype=np.float32)\n if args.unknown_weight_name is not None:\n weight_array = unknown_data[args.unknown_weight_name][id_args_array]\n ave_weight = np.mean(weight_array)\n if args.unknown_stomp_region_name is not None:\n weight_array = [weight_array[\n unknown_data[args.unknown_stomp_region_name][id_args_array] ==\n reg_idx]\n for reg_idx in range(hdf5_pairs_group.attrs['n_region'])]\n ave_weight = np.array(\n [weight_array[reg_idx].mean()\n for reg_idx in range(hdf5_pairs_group.attrs['n_region'])],\n dtype=np.float_)\n n_reference = len(hdf5_pairs_group)\n reference_unknown_array = np.empty(n_reference, dtype=np.float32)\n pool = Pool(args.n_processes)\n if args.unknown_stomp_region_name is not None:\n pool_iter = pool.imap(\n _collapse_multiplex,\n [(data_set,\n id_array[pdf_maker_obj.reference_region_array[pair_idx]],\n weight_array[pdf_maker_obj.reference_region_array[pair_idx]],\n args.use_inverse_weighting)\n for pair_idx, data_set in enumerate(pair_data)],\n chunksize=np.int(np.where(args.n_processes > 1,\n np.log(len(pair_data)), 1)))\n else:\n pool_iter = pool.imap(\n _collapse_multiplex,\n [(data_set, id_array, weight_array, args.use_inverse_weighting)\n for pair_idx, data_set in enumerate(pair_data)],\n 
chunksize=np.int(np.where(args.n_processes > 1,\n np.log(len(pair_data)), 1)))\n print(\"\\t\\tcomputing/storing pair count...\")\n for pair_idx, reference_value in enumerate(pool_iter):\n reference_unknown_array[pair_idx] = reference_value\n pool.close()\n pool.join()\n pdf_maker_obj.set_reference_unknown_array(reference_unknown_array)\n pdf_maker_obj.scale_random_points(rand_ratio, ave_weight)\n return None\n\n\ndef create_match_data(input_catalog, mag_name_list, other_name_list,\n use_as_colors):\n kdtree_data_array = np.empty((input_catalog.shape[0], len(mag_name_list)))\n for mag_idx, mag_name in enumerate(mag_name_list):\n kdtree_data_array[:, mag_idx] = input_catalog[mag_name]\n if mag_idx > 0 and use_as_colors:\n kdtree_data_array[:, mag_idx - 1] -= kdtree_data_array[:, mag_idx]\n if use_as_colors:\n kdtree_data_array = np.delete(kdtree_data_array, -1, 1)\n if len(other_name_list) > 0:\n other_data_array = np.empty((input_catalog.shape[0],\n len(other_name_list)))\n for other_idx, other_name in enumerate(other_name_list):\n other_data_array[:, other_idx] = input_catalog[other_name]\n kdtree_data_array = np.concatenate(\n (kdtree_data_array, other_data_array), axis=1)\n return kdtree_data_array\n\n\nclass CatalogKDTree(object):\n \"\"\"Convience class for creating a dataset suitable for a KDTree search, and\n wrapping the scipy KDTree object.\n \"\"\"\n def __init__(self, input_array):\n \"\"\"__init__ method preps the internal data storage and creates the\n KDTree.\n ------------------------------------------------------------------------\n Args:\n input_catalog: astropy.io.fits catalog object containing the\n columns of interest\n column_name_list: list of string names of catalog columns to\n consider for the KDTree\n id_column_name: string name of the column containing the indices\n \"\"\"\n self._internal_array = input_array\n self._normalize_data()\n self._initialize_tree()\n\n def __call__(self, input_array, k):\n \"\"\"Given input properties of an object, return the KDTree, array indices\n of the k nearest neighbors.\n ------------------------------------------------------------------------\n Args:\n input_array: float array of object properties (eg fluxes in survey\n bands)\n k: int number of nearest neighbors to return.\n Returns:\n tuple;\n array of integer array indices of objects\n list of quartile and max distances\n \"\"\"\n tmp_array = (input_array - self._mean_array)/self._std_array\n d, i = self._kd_tree.query(tmp_array, k)\n return i, d[[int(k/4.), int(k/2.), int(3.*k/4.), -1]]\n\n def k_nearest_ball_point(self, input_array, max_dist):\n \"\"\"Method to return the KDTree indicies from all points within a fixed\n distance of the point requested. The distance is expressed in sigma of\n the stored data array, i.e. 
a value of 1 returns all points within 1\n sigma.\n ------------------------------------------------------------------------\n Args:\n input_array: float array of object properties (eg fluxes in survey\n bands)\n max_dist: Maximum radial distance to search from the input point.\n Returns:\n int array of array indices\n \"\"\"\n tmp_array = (input_array - self._mean_array) / self._std_array\n return self._kd_tree.query_ball_point(tmp_array, max_dist)\n\n def _initialize_tree(self):\n \"\"\"Internal method for intilizing the KDTree object.\n -----------------------------------------------------------------------\n Args:\n self\n Returns:\n None\n \"\"\"\n self._kd_tree = cKDTree(self._internal_array)\n return None\n\n def _normalize_data(self):\n \"\"\"Internal method for scaling the data columns stored to a standard\n normal distribution of mean zero and standard deviation of 1.\n ------------------------------------------------------------------------\n Args:\n self\n Returns:\n None\n \"\"\"\n self._mean_array = self._internal_array.mean(axis=0)\n self._std_array = self._internal_array.std(axis=0)\n for col_idx in range(self._internal_array.shape[1]):\n self._internal_array[:, col_idx] = (\n (self._internal_array[:, col_idx] -\n self._mean_array[col_idx]) / self._std_array[col_idx])\n return None\n\n def get_mean_array(self):\n return self._mean_array\n\n def get_std_array(self):\n return self._std_array\n"
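`CatalogKDTree` above standardizes each catalog column to zero mean and unit standard deviation before building a `scipy.spatial.cKDTree`, so that neighbour distances are measured in sigma of the stored sample. A short stand-alone sketch of the same pattern, with a toy catalog whose values are made up:

```python
import numpy as np
from scipy.spatial import cKDTree

# Toy catalog (invented): 1000 objects with 4 magnitude-like columns.
rng = np.random.default_rng(2)
catalog = rng.normal(loc=22.0, scale=1.5, size=(1000, 4))

# Standardize each column, as CatalogKDTree._normalize_data does,
# so tree distances are expressed in units of sigma.
mean = catalog.mean(axis=0)
std = catalog.std(axis=0)
scaled = (catalog - mean) / std
tree = cKDTree(scaled)

# Query the 16 nearest neighbours of one (also standardized) target object.
target = (rng.normal(22.0, 1.5, size=4) - mean) / std
dist, idx = tree.query(target, k=16)

# All points within 1 sigma of the target, cf. k_nearest_ball_point.
ball_idx = tree.query_ball_point(target, r=1.0)
```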
] | [
[
"numpy.loadtxt"
],
[
"numpy.linspace",
"numpy.mean",
"numpy.zeros_like",
"numpy.searchsorted",
"numpy.nanmean",
"numpy.any",
"numpy.nanstd",
"numpy.random.randint",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.arange",
"numpy.zeros",
"numpy.logical_not",
"numpy.log",
"numpy.min",
"numpy.equal",
"numpy.savetxt",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.isfinite",
"numpy.ones",
"numpy.empty"
],
[
"numpy.ones",
"numpy.concatenate",
"numpy.delete",
"numpy.mean",
"scipy.spatial.cKDTree",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Agyey/fsdl-text-recognizer-2021-labs | [
"4bd85042ab9f6decd78849bb655c197cc13ffc11"
] | [
"lab4/text_recognizer/models/line_cnn.py"
] | [
"from typing import Any, Dict\nimport argparse\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nCONV_DIM = 64\nFC_DIM = 128\nWINDOW_WIDTH = 28\nWINDOW_STRIDE = 28\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU.\n \"\"\"\n\n def __init__(self, input_channels: int, output_channels: int, kernel_size: int = 3, stride: int = 1) -> None:\n super().__init__()\n self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=1)\n self.relu = nn.ReLU()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n of dimensions (B, C, H, W)\n\n Returns\n -------\n torch.Tensor\n of dimensions (B, C, H, W)\n \"\"\"\n c = self.conv(x)\n r = self.relu(c)\n return r\n\n\nclass LineCNN(nn.Module):\n \"\"\"\n Model that uses a simple CNN to process an image of a line of characters with a window, outputting a sequence of logits.\n \"\"\"\n\n def __init__(\n self,\n data_config: Dict[str, Any],\n args: argparse.Namespace = None,\n ) -> None:\n super().__init__()\n self.data_config = data_config\n self.args = vars(args) if args is not None else {}\n self.num_classes = len(data_config[\"mapping\"])\n self.output_length = data_config[\"output_dims\"][0]\n self.limit_output_length = self.args.get(\"limit_output_length\", False)\n\n _C, H, _W = data_config[\"input_dims\"]\n conv_dim = self.args.get(\"conv_dim\", CONV_DIM)\n fc_dim = self.args.get(\"fc_dim\", FC_DIM)\n self.WW = self.args.get(\"window_width\", WINDOW_WIDTH)\n self.WS = self.args.get(\"window_stride\", WINDOW_STRIDE)\n\n # Input is (1, H, W)\n self.conv1 = ConvBlock(1, conv_dim)\n self.conv2 = ConvBlock(conv_dim, conv_dim)\n self.conv3 = ConvBlock(conv_dim, conv_dim, stride=2)\n # Conv math! 
https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\n # OW = torch.floor((W // 2 - WW // 2) + 1)\n self.conv4 = ConvBlock(conv_dim, fc_dim, kernel_size=(H // 2, self.WW // 2), stride=(H // 2, self.WS // 2))\n self.dropout = nn.Dropout(0.25)\n self.fc1 = nn.Linear(fc_dim, fc_dim)\n self.fc2 = nn.Linear(fc_dim, self.num_classes)\n\n self._init_weights()\n\n def _init_weights(self):\n \"\"\"\n A better weight initialization scheme than PyTorch default.\n\n See https://github.com/pytorch/pytorch/issues/18182\n \"\"\"\n for m in self.modules():\n if type(m) in {\n nn.Conv2d,\n nn.Conv3d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d,\n nn.Linear,\n }:\n nn.init.kaiming_normal_(m.weight.data, a=0, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n _fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n (B, 1, H, W) input image\n\n Returns\n -------\n torch.Tensor\n (B, C, S) logits, where S is the length of the sequence and C is the number of classes\n S can be computed from W and self.window_width\n C is self.num_classes\n \"\"\"\n _B, _C, _H, W = x.shape\n x = self.conv1(x) # -> (B, CONV_DIM, H, W)\n x = self.conv2(x) # -> (B, CONV_DIM, H, W)\n x = self.conv3(x) # -> (B, CONV_DIM, H//2, W//2)\n OW = math.floor((W // 2 + 2 - self.WW // 2) / (self.WS // 2) + 1)\n x = self.conv4(x) # -> (B, FC_DIM, 1, OW)\n assert x.shape[-1] == OW\n x = x.squeeze().permute(0, 2, 1) # -> (B, OW, FC_DIM)\n x = F.relu(self.fc1(x)) # -> (B, OW, FC_DIM)\n x = self.dropout(x)\n x = self.fc2(x) # -> (B, OW, self.C)\n x = x.permute(0, 2, 1) # -> (B, self.C, OW)\n if self.limit_output_length:\n x = x[:, :, : self.output_length]\n return x\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\"--conv_dim\", type=int, default=CONV_DIM)\n parser.add_argument(\"--fc_dim\", type=int, default=FC_DIM)\n parser.add_argument(\n \"--window_width\",\n type=int,\n default=WINDOW_WIDTH,\n help=\"Width of the window that will slide over the input image.\",\n )\n parser.add_argument(\n \"--window_stride\",\n type=int,\n default=WINDOW_STRIDE,\n help=\"Stride of the window that will slide over the input image.\",\n )\n parser.add_argument(\"--limit_output_length\", action=\"store_true\", default=False)\n return parser\n"
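The sequence length `OW` asserted in `LineCNN.forward` follows from the standard `Conv2d` output-size formula applied twice: once for the stride-2 `conv3` (kernel 3, padding 1) and once for the window convolution `conv4` (kernel width `WW // 2`, stride `WS // 2`, padding 1). A small arithmetic check, using a hypothetical 952-pixel-wide line image with the default window settings:

```python
import math

def conv_out(size, kernel, stride, padding=1):
    # Standard Conv2d output-size formula (dilation = 1).
    return math.floor((size + 2 * padding - kernel) / stride) + 1

# Hypothetical input width (made up) and the default window width/stride.
W, WW, WS = 952, 28, 28

w_after_conv3 = conv_out(W, kernel=3, stride=2)                  # 476 == W // 2
seq_len = conv_out(w_after_conv3, kernel=WW // 2, stride=WS // 2)  # 34
print(w_after_conv3, seq_len)
```

For these numbers the result (34) agrees with the in-line expression `math.floor((W // 2 + 2 - WW // 2) / (WS // 2) + 1)` used in `forward`.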
] | [
[
"torch.nn.Dropout",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
addschile/qtps | [
"3220af82d409526463dc4fe9e4ea869d655c0bd8"
] | [
"data/compute_rates.py"
] | [
"import numpy as np\nfrom sys import argv\n\ntobs = int(argv[1])\np0 = np.zeros(10)\np2 = np.zeros(10)\np1 = np.zeros(10)\nZab = np.zeros(10)\nrate = np.zeros(10)\n\nfor i in range(10):\n da = np.loadtxt('tobs%d/reweighted_hist_%d.dat'%(tobs,i))\n p0[i] = np.exp(-da[-2,1])\n p2[i] = np.exp(-da[-1,1])\n p1[i] = np.exp(-da[-3,1])\nZab = p1/(p0+p2)\n\nf = open('tobs%d/path_partition_function_%d.dat'%(tobs,tobs),'w')\nfor i in range(10):\n f.write('%d %.16f\\n'%(i,Zab[i]))\n\nZab_avg = np.sum(Zab[:])/10.\nfor i in range(10):\n Zab[i] -= Zab_avg\nZab *= Zab\nstd_err = np.sqrt(np.sum(Zab[:])/10.)\nf.write('%.16f %.16f\\n'%(Zab_avg,std_err))\nf.close()\n"
] | [
[
"numpy.exp",
"numpy.zeros",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TillBeemelmanns/OpenPCDet | [
"b7553c879d0ba36477931efe07a55adbc39823b9",
"b7553c879d0ba36477931efe07a55adbc39823b9"
] | [
"tools/test.py",
"pcdet/utils/common_utils.py"
] | [
"import os\nimport torch\nfrom tensorboardX import SummaryWriter\nimport time\nimport glob\nimport re\nimport datetime\nimport argparse\nfrom pathlib import Path\nimport torch.distributed as dist\nfrom pcdet.datasets import build_dataloader\nfrom pcdet.models import build_network\nfrom pcdet.utils import common_utils\nfrom pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\nfrom eval_utils import eval_utils\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')\n\n parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')\n parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')\n parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')\n parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')\n parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')\n parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')\n parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')\n parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')\n parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')\n parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,\n help='set extra config keys if needed')\n\n parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')\n parser.add_argument('--start_epoch', type=int, default=0, help='')\n parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')\n parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')\n parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')\n parser.add_argument('--save_to_file', action='store_true', default=False, help='')\n\n args = parser.parse_args()\n\n cfg_from_yaml_file(args.cfg_file, cfg)\n cfg.TAG = Path(args.cfg_file).stem\n cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs, cfg)\n\n return args, cfg\n\n\ndef eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):\n # load checkpoint\n model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)\n model.cuda()\n\n # start evaluation\n eval_utils.eval_one_epoch(\n cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,\n result_dir=eval_output_dir, save_to_file=args.save_to_file\n )\n\n\ndef get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):\n ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))\n ckpt_list.sort(key=os.path.getmtime)\n evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]\n\n for cur_ckpt in ckpt_list:\n num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)\n if num_list.__len__() == 0:\n continue\n\n epoch_id = num_list[-1]\n if 'optim' in epoch_id:\n continue\n if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:\n return epoch_id, cur_ckpt\n 
return -1, None\n\n\ndef repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):\n # evaluated ckpt record\n ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])\n with open(ckpt_record_file, 'a'):\n pass\n\n # tensorboard log\n if cfg.LOCAL_RANK == 0:\n tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))\n total_time = 0\n first_eval = True\n\n while True:\n # check whether there is checkpoint which is not evaluated\n cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)\n if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:\n wait_second = 30\n if cfg.LOCAL_RANK == 0:\n print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \\r'\n % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)\n time.sleep(wait_second)\n total_time += 30\n if total_time > args.max_waiting_mins * 60 and (first_eval is False):\n break\n continue\n\n total_time = 0\n first_eval = False\n\n model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)\n model.cuda()\n\n # start evaluation\n cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']\n tb_dict = eval_utils.eval_one_epoch(\n cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,\n result_dir=cur_result_dir, save_to_file=args.save_to_file\n )\n\n if cfg.LOCAL_RANK == 0:\n for key, val in tb_dict.items():\n tb_log.add_scalar(key, val, cur_epoch_id)\n\n # record this epoch which has been evaluated\n with open(ckpt_record_file, 'a') as f:\n print('%s' % cur_epoch_id, file=f)\n logger.info('Epoch %s has been evaluated' % cur_epoch_id)\n\n\ndef main():\n args, cfg = parse_config()\n if args.launcher == 'none':\n dist_test = False\n else:\n args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(\n args.batch_size, args.tcp_port, args.local_rank, backend='nccl'\n )\n dist_test = True\n\n output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag\n output_dir.mkdir(parents=True, exist_ok=True)\n\n eval_output_dir = output_dir / 'eval'\n\n if not args.eval_all:\n num_list = re.findall(r'\\d+', args.ckpt) if args.ckpt is not None else []\n epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'\n eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']\n else:\n eval_output_dir = eval_output_dir / 'eval_all_default'\n\n if args.eval_tag is not None:\n eval_output_dir = eval_output_dir / args.eval_tag\n\n eval_output_dir.mkdir(parents=True, exist_ok=True)\n log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))\n logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)\n\n # log to file\n logger.info('**********************Start logging**********************')\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'\n logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)\n\n if dist_test:\n total_gpus = dist.get_world_size()\n logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))\n for key, val in vars(args).items():\n logger.info('{:16} {}'.format(key, val))\n log_config_to_file(cfg, logger=logger)\n\n ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'\n\n test_set, test_loader, sampler = build_dataloader(\n 
dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_test, workers=args.workers, logger=logger, training=False\n )\n\n model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)\n with torch.no_grad():\n if args.eval_all:\n repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)\n else:\n eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)\n\n\nif __name__ == '__main__':\n main()\n\n",
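`get_no_evaluated_ckpt` in the file above decides which checkpoint to evaluate next by pulling the epoch id out of each file name with a regular expression and comparing it against the record file. A minimal sketch of just that parsing step, with invented file names and an in-memory record set instead of the text file:

```python
import re

# Invented checkpoint paths and an already-evaluated record.
ckpt_list = [
    "output/ckpt/checkpoint_epoch_30.pth",
    "output/ckpt/checkpoint_epoch_40_optim.pth",   # optimizer state, skipped
    "output/ckpt/checkpoint_epoch_50.pth",
]
already_evaluated = {30.0}
start_epoch = 0

for ckpt in ckpt_list:
    num_list = re.findall(r"checkpoint_epoch_(.*)\.pth", ckpt)
    if not num_list:
        continue
    epoch_id = num_list[-1]
    if "optim" in epoch_id:
        continue
    if float(epoch_id) not in already_evaluated and int(float(epoch_id)) >= start_epoch:
        print("next checkpoint to evaluate:", epoch_id, ckpt)   # epoch 50
```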
"import numpy as np\nimport torch\nimport random\nimport logging\nimport os\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nimport subprocess\nimport pickle\nimport shutil\n\n\ndef check_numpy_to_torch(x):\n if isinstance(x, np.ndarray):\n return torch.from_numpy(x).float(), True\n return x, False\n\n\ndef limit_period(val, offset=0.5, period=np.pi):\n val, is_numpy = check_numpy_to_torch(val)\n ans = val - torch.floor(val / period + offset) * period\n return ans.numpy() if is_numpy else ans\n\n\ndef drop_info_with_name(info, name):\n ret_info = {}\n keep_indices = [i for i, x in enumerate(info['name']) if x != name]\n for key in info.keys():\n ret_info[key] = info[key][keep_indices]\n return ret_info\n\n\ndef rotate_points_along_z(points, angle):\n \"\"\"\n Args:\n points: (B, N, 3 + C)\n angle: (B), angle along z-axis, angle increases x ==> y\n Returns:\n\n \"\"\"\n points, is_numpy = check_numpy_to_torch(points)\n angle, _ = check_numpy_to_torch(angle)\n\n cosa = torch.cos(angle)\n sina = torch.sin(angle)\n zeros = angle.new_zeros(points.shape[0])\n ones = angle.new_ones(points.shape[0])\n rot_matrix = torch.stack((\n cosa, sina, zeros,\n -sina, cosa, zeros,\n zeros, zeros, ones\n ), dim=1).view(-1, 3, 3).float()\n points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)\n points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)\n return points_rot.numpy() if is_numpy else points_rot\n\n\ndef mask_points_by_range(points, limit_range):\n mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \\\n & (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])\n return mask\n\n\ndef get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):\n \"\"\"\n Args:\n voxel_coords: (N, 3)\n downsample_times:\n voxel_size:\n point_cloud_range:\n\n Returns:\n\n \"\"\"\n assert voxel_coords.shape[1] == 3\n voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz)\n voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times\n pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float()\n voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range\n return voxel_centers\n\n\ndef create_logger(log_file=None, rank=0, log_level=logging.INFO):\n logger = logging.getLogger(__name__)\n logger.setLevel(log_level if rank == 0 else 'ERROR')\n formatter = logging.Formatter('%(asctime)s %(levelname)5s %(message)s')\n console = logging.StreamHandler()\n console.setLevel(log_level if rank == 0 else 'ERROR')\n console.setFormatter(formatter)\n logger.addHandler(console)\n if log_file is not None:\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setLevel(log_level if rank == 0 else 'ERROR')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef keep_arrays_by_name(gt_names, used_classes):\n inds = [i for i, x in enumerate(gt_names) if x in used_classes]\n inds = np.array(inds, dtype=np.int64)\n return inds\n\n\ndef init_dist_slurm(batch_size, tcp_port, local_rank, backend='nccl'):\n \"\"\"\n modified from https://github.com/open-mmlab/mmdetection\n Args:\n batch_size:\n tcp_port:\n backend:\n\n Returns:\n\n \"\"\"\n proc_id = int(os.environ['SLURM_PROCID'])\n ntasks = int(os.environ['SLURM_NTASKS'])\n node_list = 
os.environ['SLURM_NODELIST']\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(proc_id % num_gpus)\n addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))\n os.environ['MASTER_PORT'] = str(tcp_port)\n os.environ['MASTER_ADDR'] = addr\n os.environ['WORLD_SIZE'] = str(ntasks)\n os.environ['RANK'] = str(proc_id)\n dist.init_process_group(backend=backend)\n\n total_gpus = dist.get_world_size()\n assert batch_size % total_gpus == 0, 'Batch size should be matched with GPUS: (%d, %d)' % (batch_size, total_gpus)\n batch_size_each_gpu = batch_size // total_gpus\n rank = dist.get_rank()\n return batch_size_each_gpu, rank\n\n\ndef init_dist_pytorch(batch_size, tcp_port, local_rank, backend='nccl'):\n if mp.get_start_method(allow_none=True) is None:\n mp.set_start_method('spawn')\n\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(local_rank % num_gpus)\n dist.init_process_group(\n backend=backend,\n init_method='tcp://127.0.0.1:%d' % tcp_port,\n rank=local_rank,\n world_size=num_gpus\n )\n assert batch_size % num_gpus == 0, 'Batch size should be matched with GPUS: (%d, %d)' % (batch_size, num_gpus)\n batch_size_each_gpu = batch_size // num_gpus\n rank = dist.get_rank()\n return batch_size_each_gpu, rank\n\ndef get_dist_info():\n if torch.__version__ < '1.0':\n initialized = dist._initialized\n else:\n if dist.is_available():\n initialized = dist.is_initialized()\n else:\n initialized = False\n if initialized:\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size\n\ndef merge_results_dist(result_part, size, tmpdir):\n rank, world_size = get_dist_info()\n os.makedirs(tmpdir, exist_ok=True)\n\n dist.barrier()\n pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))\n dist.barrier()\n \n if rank != 0:\n return None\n \n part_list = []\n for i in range(world_size):\n part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))\n part_list.append(pickle.load(open(part_file, 'rb')))\n\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res)) \n ordered_results = ordered_results[:size]\n shutil.rmtree(tmpdir)\n return ordered_results\n"
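`rotate_points_along_z` above builds one 3x3 rotation matrix per batch element and applies it to the xyz columns with a batched matmul, leaving any extra feature columns untouched; the sign convention is that a positive angle rotates +x towards +y. A NumPy-only sketch of the same operation with a one-point sanity check (the test point is made up):

```python
import numpy as np

def rotate_points_along_z_np(points, angle):
    """NumPy sketch of rotate_points_along_z: points is (B, N, 3 + C),
    angle is (B,); rotation about z, positive angle takes +x towards +y."""
    cosa, sina = np.cos(angle), np.sin(angle)
    zeros, ones = np.zeros_like(angle), np.ones_like(angle)
    rot = np.stack([cosa,  sina, zeros,
                    -sina, cosa, zeros,
                    zeros, zeros, ones], axis=1).reshape(-1, 3, 3)
    xyz_rot = np.matmul(points[:, :, 0:3], rot)
    # Extra feature columns (intensity etc.) are passed through unchanged.
    return np.concatenate([xyz_rot, points[:, :, 3:]], axis=-1)

# Toy check: a point on +x rotated by +90 degrees should land on +y.
pts = np.array([[[1.0, 0.0, 0.0, 7.0]]])          # one batch, one point, one feature
out = rotate_points_along_z_np(pts, np.array([np.pi / 2]))
print(np.round(out, 6))                            # [[[0. 1. 0. 7.]]]
```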
] | [
[
"torch.distributed.get_world_size",
"torch.no_grad"
],
[
"torch.cat",
"torch.sin",
"torch.multiprocessing.get_start_method",
"torch.distributed.get_rank",
"torch.multiprocessing.set_start_method",
"torch.distributed.init_process_group",
"torch.from_numpy",
"torch.distributed.barrier",
"torch.tensor",
"torch.cos",
"torch.floor",
"torch.distributed.is_initialized",
"torch.distributed.is_available",
"torch.stack",
"torch.cuda.device_count",
"numpy.array",
"torch.distributed.get_world_size",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anthowen/duplify | [
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e"
] | [
"env/lib/python3.6/site-packages/pandas/core/panel.py",
"env/lib/python3.6/site-packages/scipy/optimize/tests/test__spectral.py",
"env/lib/python3.6/site-packages/pandas/stats/fama_macbeth.py",
"env/lib/python3.6/site-packages/matplotlib/tests/test_tightlayout.py",
"env/lib/python3.6/site-packages/numpy/linalg/linalg.py",
"env/lib/python3.6/site-packages/mpl_toolkits/tests/test_axes_grid1.py",
"env/lib/python3.6/site-packages/pandas/tests/test_msgpack/test_except.py",
"env/lib/python3.6/site-packages/scipy/optimize/_lsq/least_squares.py",
"env/lib/python3.6/site-packages/scipy/stats/setup.py",
"env/lib/python3.6/site-packages/pandas/io/tests/test_pytables.py",
"env/lib/python3.6/site-packages/scipy/interpolate/tests/test_bsplines.py",
"env/lib/python3.6/site-packages/scipy/linalg/_expm_frechet.py",
"env/lib/python3.6/site-packages/matplotlib/backends/_backend_tk.py",
"env/lib/python3.6/site-packages/scipy/special/tests/test_precompute_gammainc.py",
"env/lib/python3.6/site-packages/matplotlib/tests/test_patches.py",
"env/lib/python3.6/site-packages/matplotlib/markers.py",
"env/lib/python3.6/site-packages/scipy/special/tests/test_sf_error.py",
"env/lib/python3.6/site-packages/pandas/computation/tests/test_eval.py",
"env/lib/python3.6/site-packages/scipy/signal/tests/test_signaltools.py",
"env/lib/python3.6/site-packages/pandas/computation/eval.py",
"env/lib/python3.6/site-packages/pandas/io/sas/sasreader.py",
"env/lib/python3.6/site-packages/numpy/fft/helper.py",
"env/lib/python3.6/site-packages/pandas/tests/series/test_operators.py",
"env/lib/python3.6/site-packages/pandas/tests/frame/test_misc_api.py",
"env/lib/python3.6/site-packages/matplotlib/tri/trifinder.py",
"env/lib/python3.6/site-packages/numpy/polynomial/tests/test_hermite.py",
"env/lib/python3.6/site-packages/pandas/tseries/tests/test_bin_groupby.py",
"env/lib/python3.6/site-packages/numpy/ma/tests/test_mrecords.py",
"env/lib/python3.6/site-packages/numpy/core/tests/test_memmap.py"
] | [
"\"\"\"\nContains data structures designed for manipulating panel (3-dimensional) data\n\"\"\"\n# pylint: disable=E1103,W0231,W0212,W0621\nfrom __future__ import division\n\nimport warnings\n\nimport numpy as np\n\nfrom pandas.types.cast import (_infer_dtype_from_scalar,\n _possibly_cast_item)\nfrom pandas.types.common import (is_integer, is_list_like,\n is_string_like, is_scalar)\nfrom pandas.types.missing import notnull\n\nimport pandas.computation.expressions as expressions\nimport pandas.core.common as com\nimport pandas.core.ops as ops\nimport pandas.core.missing as missing\nfrom pandas import compat\nfrom pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.core.common import PandasError, _try_sort, _default_index\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.index import (Index, MultiIndex, _ensure_index,\n _get_combined_index)\nfrom pandas.formats.printing import pprint_thing\nfrom pandas.core.indexing import maybe_droplevels\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.ops import _op_descriptions\nfrom pandas.core.series import Series\nfrom pandas.tools.util import cartesian_product\nfrom pandas.util.decorators import (deprecate, Appender)\n\n_shared_doc_kwargs = dict(\n axes='items, major_axis, minor_axis',\n klass=\"Panel\",\n axes_single_arg=\"{0, 1, 2, 'items', 'major_axis', 'minor_axis'}\")\n_shared_doc_kwargs['args_transpose'] = (\"three positional arguments: each one\"\n \"of\\n%s\" %\n _shared_doc_kwargs['axes_single_arg'])\n\n\ndef _ensure_like_indices(time, panels):\n \"\"\"\n Makes sure that time and panels are conformable\n \"\"\"\n n_time = len(time)\n n_panel = len(panels)\n u_panels = np.unique(panels) # this sorts!\n u_time = np.unique(time)\n if len(u_time) == n_time:\n time = np.tile(u_time, len(u_panels))\n if len(u_panels) == n_panel:\n panels = np.repeat(u_panels, len(u_time))\n return time, panels\n\n\ndef panel_index(time, panels, names=None):\n \"\"\"\n Returns a multi-index suitable for a panel-like DataFrame\n\n Parameters\n ----------\n time : array-like\n Time index, does not have to repeat\n panels : array-like\n Panel index, does not have to repeat\n names : list, optional\n List containing the names of the indices\n\n Returns\n -------\n multi_index : MultiIndex\n Time index is the first level, the panels are the second level.\n\n Examples\n --------\n >>> years = range(1960,1963)\n >>> panels = ['A', 'B', 'C']\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),\n (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),\n (1962, 'C')], dtype=object)\n\n or\n\n >>> import numpy as np\n >>> years = np.repeat(range(1960,1963), 3)\n >>> panels = np.tile(['A', 'B', 'C'], 3)\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),\n (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),\n (1962, 'C')], dtype=object)\n \"\"\"\n if names is None:\n names = ['time', 'panel']\n time, panels = _ensure_like_indices(time, panels)\n return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)\n\n\nclass Panel(NDFrame):\n \"\"\"\n Represents wide format panel data, stored as 3-dimensional array\n\n Parameters\n ----------\n data : ndarray (items x major x minor), or dict of 
DataFrames\n items : Index or array-like\n axis=0\n major_axis : Index or array-like\n axis=1\n minor_axis : Index or array-like\n axis=2\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n \"\"\"\n\n @property\n def _constructor(self):\n return type(self)\n\n _constructor_sliced = DataFrame\n\n def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,\n copy=False, dtype=None):\n self._init_data(data=data, items=items, major_axis=major_axis,\n minor_axis=minor_axis, copy=copy, dtype=dtype)\n\n def _init_data(self, data, copy, dtype, **kwargs):\n \"\"\"\n Generate ND initialization; axes are passed\n as required objects to __init__\n \"\"\"\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]\n\n if kwargs:\n raise TypeError('_init_data() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n axes = None\n if isinstance(data, BlockManager):\n if any(x is not None for x in passed_axes):\n axes = [x if x is not None else y\n for x, y in zip(passed_axes, data.axes)]\n mgr = data\n elif isinstance(data, dict):\n mgr = self._init_dict(data, passed_axes, dtype=dtype)\n copy = False\n dtype = None\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)\n copy = False\n dtype = None\n elif is_scalar(data) and all(x is not None for x in passed_axes):\n if dtype is None:\n dtype, data = _infer_dtype_from_scalar(data)\n values = np.empty([len(x) for x in passed_axes], dtype=dtype)\n values.fill(data)\n mgr = self._init_matrix(values, passed_axes, dtype=dtype,\n copy=False)\n copy = False\n else: # pragma: no cover\n raise PandasError('Panel constructor not properly called!')\n\n NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)\n\n def _init_dict(self, data, axes, dtype=None):\n haxis = axes.pop(self._info_axis_number)\n\n # prefilter if haxis passed\n if haxis is not None:\n haxis = _ensure_index(haxis)\n data = OrderedDict((k, v)\n for k, v in compat.iteritems(data)\n if k in haxis)\n else:\n ks = list(data.keys())\n if not isinstance(data, OrderedDict):\n ks = _try_sort(ks)\n haxis = Index(ks)\n\n for k, v in compat.iteritems(data):\n if isinstance(v, dict):\n data[k] = self._constructor_sliced(v)\n\n # extract axis for remaining axes & create the slicemap\n raxes = [self._extract_axis(self, data, axis=i) if a is None else a\n for i, a in enumerate(axes)]\n raxes_sm = self._extract_axes_for_slice(self, raxes)\n\n # shallow copy\n arrays = []\n haxis_shape = [len(a) for a in raxes]\n for h in haxis:\n v = values = data.get(h)\n if v is None:\n values = np.empty(haxis_shape, dtype=dtype)\n values.fill(np.nan)\n elif isinstance(v, self._constructor_sliced):\n d = raxes_sm.copy()\n d['copy'] = False\n v = v.reindex(**d)\n if dtype is not None:\n v = v.astype(dtype)\n values = v.values\n arrays.append(values)\n\n return self._init_arrays(arrays, haxis, [haxis] + raxes)\n\n def _init_arrays(self, arrays, arr_names, axes):\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n @classmethod\n def from_dict(cls, data, intersect=False, orient='items', dtype=None):\n \"\"\"\n Construct Panel from dict of DataFrame objects\n\n Parameters\n ----------\n data : dict\n {field : DataFrame}\n intersect : boolean\n Intersect indexes of input DataFrames\n orient : 
{'items', 'minor'}, default 'items'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the items of the result panel, pass 'items'\n (default). Otherwise if the columns of the values of the passed\n DataFrame objects should be the items (which in the case of\n mixed-dtype data you should do), instead pass 'minor'\n dtype : dtype, default None\n Data type to force, otherwise infer\n\n Returns\n -------\n Panel\n \"\"\"\n orient = orient.lower()\n if orient == 'minor':\n new_data = OrderedDefaultdict(dict)\n for col, df in compat.iteritems(data):\n for item, s in compat.iteritems(df):\n new_data[item][col] = s\n data = new_data\n elif orient != 'items': # pragma: no cover\n raise ValueError('Orientation must be one of {items, minor}.')\n\n d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)\n ks = list(d['data'].keys())\n if not isinstance(d['data'], OrderedDict):\n ks = list(sorted(ks))\n d[cls._info_axis_name] = Index(ks)\n return cls(**d)\n\n def __getitem__(self, key):\n key = com._apply_if_callable(key, self)\n\n if isinstance(self._info_axis, MultiIndex):\n return self._getitem_multilevel(key)\n if not (is_list_like(key) or isinstance(key, slice)):\n return super(Panel, self).__getitem__(key)\n return self.ix[key]\n\n def _getitem_multilevel(self, key):\n info = self._info_axis\n loc = info.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_index = info[loc]\n result_index = maybe_droplevels(new_index, key)\n slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]\n new_values = self.values[slices]\n\n d = self._construct_axes_dict(self._AXIS_ORDERS[1:])\n d[self._info_axis_name] = result_index\n result = self._constructor(new_values, **d)\n return result\n else:\n return self._get_item_cache(key)\n\n def _init_matrix(self, data, axes, dtype=None, copy=False):\n values = self._prep_ndarray(self, data, copy=copy)\n\n if dtype is not None:\n try:\n values = values.astype(dtype)\n except Exception:\n raise ValueError('failed to cast to %s' % dtype)\n\n shape = values.shape\n fixed_axes = []\n for i, ax in enumerate(axes):\n if ax is None:\n ax = _default_index(shape[i])\n else:\n ax = _ensure_index(ax)\n fixed_axes.append(ax)\n\n return create_block_manager_from_blocks([values], fixed_axes)\n\n # ----------------------------------------------------------------------\n # Comparison methods\n\n def _compare_constructor(self, other, func):\n if not self._indexed_same(other):\n raise Exception('Can only compare identically-labeled '\n 'same type objects')\n\n new_data = {}\n for col in self._info_axis:\n new_data[col] = func(self[col], other[col])\n\n d = self._construct_axes_dict(copy=False)\n return self._constructor(data=new_data, **d)\n\n # ----------------------------------------------------------------------\n # Magic methods\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular Panel\n\n Invoked by unicode(df) in py2 only.\n Yields a Unicode String in both py2/py3.\n \"\"\"\n\n class_name = str(self.__class__)\n\n shape = self.shape\n dims = u('Dimensions: %s') % ' x '.join(\n [\"%d (%s)\" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])\n\n def axis_pretty(a):\n v = getattr(self, a)\n if len(v) > 0:\n return u('%s axis: %s to %s') % (a.capitalize(),\n pprint_thing(v[0]),\n pprint_thing(v[-1]))\n else:\n return u('%s axis: None') % a.capitalize()\n\n output = '\\n'.join(\n [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])\n return output\n\n def 
_get_plane_axes_index(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes indexes\n \"\"\"\n axis_name = self._get_axis_name(axis)\n\n if axis_name == 'major_axis':\n index = 'minor_axis'\n columns = 'items'\n if axis_name == 'minor_axis':\n index = 'major_axis'\n columns = 'items'\n elif axis_name == 'items':\n index = 'major_axis'\n columns = 'minor_axis'\n\n return index, columns\n\n def _get_plane_axes(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes\n \"\"\"\n return [self._get_axis(axi)\n for axi in self._get_plane_axes_index(axis)]\n\n fromDict = from_dict\n\n def to_sparse(self, *args, **kwargs):\n \"\"\"\n NOT IMPLEMENTED: do not call this method, as sparsifying is not\n supported for Panel objects and will raise an error.\n\n Convert to SparsePanel\n \"\"\"\n raise NotImplementedError(\"sparsifying is not supported \"\n \"for Panel objects\")\n\n def to_excel(self, path, na_rep='', engine=None, **kwargs):\n \"\"\"\n Write each DataFrame in Panel to a separate excel sheet\n\n Parameters\n ----------\n path : string or ExcelWriter object\n File path or existing ExcelWriter\n na_rep : string, default ''\n Missing data representation\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n\n Other Parameters\n ----------------\n float_format : string, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n\n Notes\n -----\n Keyword arguments (and na_rep) are passed to the ``to_excel`` method\n for each DataFrame written.\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n if isinstance(path, compat.string_types):\n writer = ExcelWriter(path, engine=engine)\n else:\n writer = path\n kwargs['na_rep'] = na_rep\n\n for item, df in self.iteritems():\n name = str(item)\n df.to_excel(writer, name, **kwargs)\n writer.save()\n\n def as_matrix(self):\n self._consolidate_inplace()\n return self._data.as_matrix()\n\n # ----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, *args, **kwargs):\n \"\"\"\n Quickly retrieve single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n nargs = len(args)\n nreq = self._AXIS_LEN\n\n # require an arg for each axis\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis, you gave'\n ' {0} args, but {1} are required'.format(nargs,\n nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('get_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n return lower.get_value(*args[1:], takeable=takeable)\n\n def set_value(self, *args, **kwargs):\n \"\"\"\n Quickly set single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n value : scalar\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n panel : Panel\n If label combo is contained, will be reference to calling Panel,\n otherwise a new object\n \"\"\"\n # require an arg for each axis and the value\n nargs = len(args)\n nreq = self._AXIS_LEN + 1\n\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis plus the '\n 'value provided, you gave {0} args, but {1} are '\n 'required'.format(nargs, nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('set_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n try:\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n lower.set_value(*args[1:], takeable=takeable)\n return self\n except KeyError:\n axes = self._expand_axes(args)\n d = self._construct_axes_dict_from(self, axes, copy=False)\n result = self.reindex(**d)\n args = list(args)\n likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])\n made_bigger = not np.array_equal(axes[0], self._info_axis)\n # how to make this logic simpler?\n if made_bigger:\n _possibly_cast_item(result, args[0], likely_dtype)\n\n return result.set_value(*args)\n\n def _box_item_values(self, key, values):\n if self.ndim == values.ndim:\n result = self._constructor(values)\n\n # a dup selection will yield a full ndim\n if result._get_axis(0).is_unique:\n result = result[key]\n\n return result\n\n d = 
self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])\n return self._constructor_sliced(values, **d)\n\n def __setitem__(self, key, value):\n key = com._apply_if_callable(key, self)\n shape = tuple(self.shape)\n if isinstance(value, self._constructor_sliced):\n value = value.reindex(\n **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))\n mat = value.values\n elif isinstance(value, np.ndarray):\n if value.shape != shape[1:]:\n raise ValueError('shape of value must be {0}, shape of given '\n 'object was {1}'.format(\n shape[1:], tuple(map(int, value.shape))))\n mat = np.asarray(value)\n elif is_scalar(value):\n dtype, value = _infer_dtype_from_scalar(value)\n mat = np.empty(shape[1:], dtype=dtype)\n mat.fill(value)\n else:\n raise TypeError('Cannot set item of type: %s' % str(type(value)))\n\n mat = mat.reshape(tuple([1]) + shape[1:])\n NDFrame._set_item(self, key, mat)\n\n def _unpickle_panel_compat(self, state): # pragma: no cover\n \"Unpickle the panel\"\n _unpickle = com._unpickle_array\n vals, items, major, minor = state\n\n items = _unpickle(items)\n major = _unpickle(major)\n minor = _unpickle(minor)\n values = _unpickle(vals)\n wp = Panel(values, items, major, minor)\n self._data = wp._data\n\n def conform(self, frame, axis='items'):\n \"\"\"\n Conform input DataFrame to align with chosen axis pair.\n\n Parameters\n ----------\n frame : DataFrame\n axis : {'items', 'major', 'minor'}\n\n Axis the input corresponds to. E.g., if axis='major', then\n the frame's columns would be items, and the index would be\n values of the minor axis\n\n Returns\n -------\n DataFrame\n \"\"\"\n axes = self._get_plane_axes(axis)\n return frame.reindex(**self._extract_axes_for_slice(self, axes))\n\n def head(self, n=5):\n raise NotImplementedError\n\n def tail(self, n=5):\n raise NotImplementedError\n\n def round(self, decimals=0, *args, **kwargs):\n \"\"\"\n Round each value in Panel to a specified number of decimal places.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n decimals : int\n Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of\n positions to the left of the decimal point.\n\n Returns\n -------\n Panel object\n\n See Also\n --------\n numpy.around\n \"\"\"\n nv.validate_round(args, kwargs)\n\n if is_integer(decimals):\n result = np.apply_along_axis(np.round, 0, self.values)\n return self._wrap_result(result, axis=0)\n raise TypeError(\"decimals must be an integer\")\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" don't allow a multi reindex on Panel or above ndim \"\"\"\n return False\n\n def align(self, other, **kwargs):\n raise NotImplementedError\n\n def dropna(self, axis=0, how='any', inplace=False):\n \"\"\"\n Drop 2D from panel, holding passed axis constant\n\n Parameters\n ----------\n axis : int, default 0\n Axis to hold constant. E.g. axis=1 will drop major_axis entries\n having a certain amount of NA data\n how : {'all', 'any'}, default 'any'\n 'any': one or more values are NA in the DataFrame along the\n axis. 
For 'all' they all must be.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : Panel\n \"\"\"\n axis = self._get_axis_number(axis)\n\n values = self.values\n mask = notnull(values)\n\n for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):\n mask = mask.sum(ax)\n\n per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])\n\n if how == 'all':\n cond = mask > 0\n else:\n cond = mask == per_slice\n\n new_ax = self._get_axis(axis)[cond]\n result = self.reindex_axis(new_ax, axis=axis)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _combine(self, other, func, axis=0):\n if isinstance(other, Panel):\n return self._combine_panel(other, func)\n elif isinstance(other, DataFrame):\n return self._combine_frame(other, func, axis=axis)\n elif is_scalar(other):\n return self._combine_const(other, func)\n else:\n raise NotImplementedError(\"%s is not supported in combine \"\n \"operation with %s\" %\n (str(type(other)), str(type(self))))\n\n def _combine_const(self, other, func):\n with np.errstate(all='ignore'):\n new_values = func(self.values, other)\n d = self._construct_axes_dict()\n return self._constructor(new_values, **d)\n\n def _combine_frame(self, other, func, axis=0):\n index, columns = self._get_plane_axes(axis)\n axis = self._get_axis_number(axis)\n\n other = other.reindex(index=index, columns=columns)\n\n with np.errstate(all='ignore'):\n if axis == 0:\n new_values = func(self.values, other.values)\n elif axis == 1:\n new_values = func(self.values.swapaxes(0, 1), other.values.T)\n new_values = new_values.swapaxes(0, 1)\n elif axis == 2:\n new_values = func(self.values.swapaxes(0, 2), other.values)\n new_values = new_values.swapaxes(0, 2)\n\n return self._constructor(new_values, self.items, self.major_axis,\n self.minor_axis)\n\n def _combine_panel(self, other, func):\n items = self.items.union(other.items)\n major = self.major_axis.union(other.major_axis)\n minor = self.minor_axis.union(other.minor_axis)\n\n # could check that everything's the same size, but forget it\n this = self.reindex(items=items, major=major, minor=minor)\n other = other.reindex(items=items, major=major, minor=minor)\n\n with np.errstate(all='ignore'):\n result_values = func(this.values, other.values)\n\n return self._constructor(result_values, items, major, minor)\n\n def major_xs(self, key):\n \"\"\"\n Return slice of panel along major axis\n\n Parameters\n ----------\n key : object\n Major axis label\n\n Returns\n -------\n y : DataFrame\n index -> minor axis, columns -> items\n\n Notes\n -----\n major_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of major_xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n return self.xs(key, axis=self._AXIS_LEN - 2)\n\n def minor_xs(self, key):\n \"\"\"\n Return slice of panel along minor axis\n\n Parameters\n ----------\n key : object\n Minor axis label\n\n Returns\n -------\n y : DataFrame\n index -> major axis, columns -> items\n\n Notes\n -----\n minor_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of minor_xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n return self.xs(key, axis=self._AXIS_LEN - 1)\n\n def xs(self, key, axis=1):\n \"\"\"\n Return slice of panel along selected axis\n\n Parameters\n 
----------\n key : object\n Label\n axis : {'items', 'major', 'minor}, default 1/'major'\n\n Returns\n -------\n y : ndim(self)-1\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n axis = self._get_axis_number(axis)\n if axis == 0:\n return self[key]\n\n self._consolidate_inplace()\n axis_number = self._get_axis_number(axis)\n new_data = self._data.xs(key, axis=axis_number, copy=False)\n result = self._construct_return_type(new_data)\n copy = new_data.is_mixed_type\n result._set_is_copy(self, copy=copy)\n return result\n\n _xs = xs\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n ax = self._get_axis(axis)\n key = ax[i]\n\n # xs cannot handle a non-scalar key, so just reindex here\n # if we have a multi-index and a single tuple, then its a reduction\n # (GH 7516)\n if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):\n if is_list_like(key):\n indexer = {self._get_axis_name(axis): key}\n return self.reindex(**indexer)\n\n # a reduction\n if axis == 0:\n values = self._data.iget(i)\n return self._box_item_values(key, values)\n\n # xs by position\n self._consolidate_inplace()\n new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)\n return self._construct_return_type(new_data)\n\n def groupby(self, function, axis='major'):\n \"\"\"\n Group data on given axis, returning GroupBy object\n\n Parameters\n ----------\n function : callable\n Mapping function for chosen access\n axis : {'major', 'minor', 'items'}, default 'major'\n\n Returns\n -------\n grouped : PanelGroupBy\n \"\"\"\n from pandas.core.groupby import PanelGroupBy\n axis = self._get_axis_number(axis)\n return PanelGroupBy(self, function, axis=axis)\n\n def to_frame(self, filter_observations=True):\n \"\"\"\n Transform wide format into long (stacked) format as DataFrame whose\n columns are the Panel's items and whose index is a MultiIndex formed\n of the Panel's major and minor axes.\n\n Parameters\n ----------\n filter_observations : boolean, default True\n Drop (major, minor) pairs without a complete set of observations\n across all the items\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n _, N, K = self.shape\n\n if filter_observations:\n # shaped like the return DataFrame\n mask = notnull(self.values).all(axis=0)\n # size = mask.sum()\n selector = mask.ravel()\n else:\n # size = N * K\n selector = slice(None, None)\n\n data = {}\n for item in self.items:\n data[item] = self[item].values.ravel()[selector]\n\n def construct_multi_parts(idx, n_repeat, n_shuffle=1):\n axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)\n labels = [x[selector] for x in axis_idx.labels]\n levels = axis_idx.levels\n names = axis_idx.names\n return labels, levels, names\n\n def construct_index_parts(idx, major=True):\n levels = [idx]\n if major:\n labels = [np.arange(N).repeat(K)[selector]]\n names = idx.name or 'major'\n else:\n labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]\n labels = [labels.ravel()[selector]]\n names = idx.name or 'minor'\n names = [names]\n return labels, levels, names\n\n if isinstance(self.major_axis, MultiIndex):\n major_labels, major_levels, major_names = construct_multi_parts(\n self.major_axis, n_repeat=K)\n else:\n major_labels, major_levels, major_names = construct_index_parts(\n self.major_axis)\n\n if isinstance(self.minor_axis, 
MultiIndex):\n minor_labels, minor_levels, minor_names = construct_multi_parts(\n self.minor_axis, n_repeat=N, n_shuffle=K)\n else:\n minor_labels, minor_levels, minor_names = construct_index_parts(\n self.minor_axis, major=False)\n\n levels = major_levels + minor_levels\n labels = major_labels + minor_labels\n names = major_names + minor_names\n\n index = MultiIndex(levels=levels, labels=labels, names=names,\n verify_integrity=False)\n\n return DataFrame(data, index=index, columns=self.items)\n\n to_long = deprecate('to_long', to_frame)\n toLong = deprecate('toLong', to_frame)\n\n def apply(self, func, axis='major', **kwargs):\n \"\"\"\n Applies function along axis (or axes) of the Panel\n\n Parameters\n ----------\n func : function\n Function to apply to each combination of 'other' axes\n e.g. if axis = 'items', the combination of major_axis/minor_axis\n will each be passed as a Series; if axis = ('items', 'major'),\n DataFrames of items & major axis will be passed\n axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two\n axes\n Additional keyword arguments will be passed as keywords to the function\n\n Examples\n --------\n\n Returns a Panel with the square root of each element\n\n >>> p = pd.Panel(np.random.rand(4,3,2))\n >>> p.apply(np.sqrt)\n\n Equivalent to p.sum(1), returning a DataFrame\n\n >>> p.apply(lambda x: x.sum(), axis=1)\n\n Equivalent to previous:\n\n >>> p.apply(lambda x: x.sum(), axis='minor')\n\n Return the shapes of each DataFrame over axis 2 (i.e the shapes of\n items x major), as a Series\n\n >>> p.apply(lambda x: x.shape, axis=(0,1))\n\n Returns\n -------\n result : Panel, DataFrame, or Series\n \"\"\"\n\n if kwargs and not isinstance(func, np.ufunc):\n f = lambda x: func(x, **kwargs)\n else:\n f = func\n\n # 2d-slabs\n if isinstance(axis, (tuple, list)) and len(axis) == 2:\n return self._apply_2d(f, axis=axis)\n\n axis = self._get_axis_number(axis)\n\n # try ufunc like\n if isinstance(f, np.ufunc):\n try:\n with np.errstate(all='ignore'):\n result = np.apply_along_axis(func, axis, self.values)\n return self._wrap_result(result, axis=axis)\n except (AttributeError):\n pass\n\n # 1d\n return self._apply_1d(f, axis=axis)\n\n def _apply_1d(self, func, axis):\n\n axis_name = self._get_axis_name(axis)\n ndim = self.ndim\n values = self.values\n\n # iter thru the axes\n slice_axis = self._get_axis(axis)\n slice_indexer = [0] * (ndim - 1)\n indexer = np.zeros(ndim, 'O')\n indlist = list(range(ndim))\n indlist.remove(axis)\n indexer[axis] = slice(None, None)\n indexer.put(indlist, slice_indexer)\n planes = [self._get_axis(axi) for axi in indlist]\n shape = np.array(self.shape).take(indlist)\n\n # all the iteration points\n points = cartesian_product(planes)\n\n results = []\n for i in range(np.prod(shape)):\n\n # construct the object\n pts = tuple([p[i] for p in points])\n indexer.put(indlist, slice_indexer)\n\n obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)\n result = func(obj)\n\n results.append(result)\n\n # increment the indexer\n slice_indexer[-1] += 1\n n = -1\n while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):\n slice_indexer[n - 1] += 1\n slice_indexer[n] = 0\n n -= 1\n\n # empty object\n if not len(results):\n return self._constructor(**self._construct_axes_dict())\n\n # same ndim as current\n if isinstance(results[0], Series):\n arr = np.vstack([r.values for r in results])\n arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))\n tranp = np.array([axis] + indlist).argsort()\n arr = 
arr.transpose(tuple(list(tranp)))\n return self._constructor(arr, **self._construct_axes_dict())\n\n # ndim-1 shape\n results = np.array(results).reshape(shape)\n if results.ndim == 2 and axis_name != self._info_axis_name:\n results = results.T\n planes = planes[::-1]\n return self._construct_return_type(results, planes)\n\n def _apply_2d(self, func, axis):\n \"\"\" handle 2-d slices, equiv to iterating over the other axis \"\"\"\n\n ndim = self.ndim\n axis = [self._get_axis_number(a) for a in axis]\n\n # construct slabs, in 2-d this is a DataFrame result\n indexer_axis = list(range(ndim))\n for a in axis:\n indexer_axis.remove(a)\n indexer_axis = indexer_axis[0]\n\n slicer = [slice(None, None)] * ndim\n ax = self._get_axis(indexer_axis)\n\n results = []\n for i, e in enumerate(ax):\n slicer[indexer_axis] = i\n sliced = self.iloc[tuple(slicer)]\n\n obj = func(sliced)\n results.append((e, obj))\n\n return self._construct_return_type(dict(results))\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n if numeric_only:\n raise NotImplementedError('Panel.{0} does not implement '\n 'numeric_only.'.format(name))\n\n axis_name = self._get_axis_name(axis)\n axis_number = self._get_axis_number(axis_name)\n f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)\n\n with np.errstate(all='ignore'):\n result = f(self.values)\n\n axes = self._get_plane_axes(axis_name)\n if result.ndim == 2 and axis_name != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n def _construct_return_type(self, result, axes=None):\n \"\"\" return the type for the ndim of the result \"\"\"\n ndim = getattr(result, 'ndim', None)\n\n # need to assume they are the same\n if ndim is None:\n if isinstance(result, dict):\n ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)\n\n # have a dict, so top-level is +1 dim\n if ndim != 0:\n ndim += 1\n\n # scalar\n if ndim == 0:\n return Series(result)\n\n # same as self\n elif self.ndim == ndim:\n # return the construction dictionary for these axes\n if axes is None:\n return self._constructor(result)\n return self._constructor(result, **self._construct_axes_dict())\n\n # sliced\n elif self.ndim == ndim + 1:\n if axes is None:\n return self._constructor_sliced(result)\n return self._constructor_sliced(\n result, **self._extract_axes_for_slice(self, axes))\n\n raise PandasError('invalid _construct_return_type [self->%s] '\n '[result->%s]' % (self, result))\n\n def _wrap_result(self, result, axis):\n axis = self._get_axis_name(axis)\n axes = self._get_plane_axes(axis)\n if result.ndim == 2 and axis != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None else\n kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None else\n kwargs.pop('minor', None))\n return super(Panel, self).reindex(items=items, major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None else\n kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None else\n kwargs.pop('minor', None))\n return super(Panel, self).rename(items=items, major_axis=major_axis,\n 
minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(Panel, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['transpose'] % _shared_doc_kwargs)\n def transpose(self, *args, **kwargs):\n # check if a list of axes was passed in instead as a\n # single *args element\n if (len(args) == 1 and hasattr(args[0], '__iter__') and\n not is_string_like(args[0])):\n axes = args[0]\n else:\n axes = args\n\n if 'axes' in kwargs and axes:\n raise TypeError(\"transpose() got multiple values for \"\n \"keyword argument 'axes'\")\n elif not axes:\n axes = kwargs.pop('axes', ())\n\n return super(Panel, self).transpose(*axes, **kwargs)\n\n @Appender(_shared_docs['fillna'] % _shared_doc_kwargs)\n def fillna(self, value=None, method=None, axis=None, inplace=False,\n limit=None, downcast=None, **kwargs):\n return super(Panel, self).fillna(value=value, method=method, axis=axis,\n inplace=inplace, limit=limit,\n downcast=downcast, **kwargs)\n\n def count(self, axis='major'):\n \"\"\"\n Return number of observations over requested axis.\n\n Parameters\n ----------\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n count : DataFrame\n \"\"\"\n i = self._get_axis_number(axis)\n\n values = self.values\n mask = np.isfinite(values)\n result = mask.sum(axis=i, dtype='int64')\n\n return self._wrap_result(result, axis)\n\n def shift(self, periods=1, freq=None, axis='major'):\n \"\"\"\n Shift index by desired number of periods with an optional time freq.\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original. This is different\n from the behavior of DataFrame.shift()\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, optional\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n shifted : Panel\n \"\"\"\n if freq:\n return self.tshift(periods, freq, axis=axis)\n\n return super(Panel, self).slice_shift(periods, axis=axis)\n\n def tshift(self, periods=1, freq=None, axis='major'):\n return super(Panel, self).tshift(periods, freq, axis)\n\n def join(self, other, how='left', lsuffix='', rsuffix=''):\n \"\"\"\n Join items with other Panel either on major and minor axes column\n\n Parameters\n ----------\n other : Panel or list of Panels\n Index should be similar to one of the columns in this one\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. 
Default: 'left'\n for joining on index, None otherwise\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n\n Returns\n -------\n joined : Panel\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(other, Panel):\n join_major, join_minor = self._get_join_index(other, how)\n this = self.reindex(major=join_major, minor=join_minor)\n other = other.reindex(major=join_major, minor=join_minor)\n merged_data = this._data.merge(other._data, lsuffix, rsuffix)\n return self._constructor(merged_data)\n else:\n if lsuffix or rsuffix:\n raise ValueError('Suffixes not supported when passing '\n 'multiple panels')\n\n if how == 'left':\n how = 'outer'\n join_axes = [self.major_axis, self.minor_axis]\n elif how == 'right':\n raise ValueError('Right join not supported with multiple '\n 'panels')\n else:\n join_axes = None\n\n return concat([self] + list(other), axis=0, join=how,\n join_axes=join_axes, verify_integrity=True)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify Panel in place using non-NA values from passed\n Panel, or object coercible to Panel. Aligns on items\n\n Parameters\n ----------\n other : Panel, or object coercible to Panel\n join : How to join individual DataFrames\n {'left', 'right', 'outer', 'inner'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling panel\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. 
Return True for values\n that should be updated\n raise_conflict : bool\n If True, will raise an error if a DataFrame and other both\n contain data in the same place.\n \"\"\"\n\n if not isinstance(other, self._constructor):\n other = self._constructor(other)\n\n axis_name = self._info_axis_name\n axis_values = self._info_axis\n other = other.reindex(**{axis_name: axis_values})\n\n for frame in axis_values:\n self[frame].update(other[frame], join, overwrite, filter_func,\n raise_conflict)\n\n def _get_join_index(self, other, how):\n if how == 'left':\n join_major, join_minor = self.major_axis, self.minor_axis\n elif how == 'right':\n join_major, join_minor = other.major_axis, other.minor_axis\n elif how == 'inner':\n join_major = self.major_axis.intersection(other.major_axis)\n join_minor = self.minor_axis.intersection(other.minor_axis)\n elif how == 'outer':\n join_major = self.major_axis.union(other.major_axis)\n join_minor = self.minor_axis.union(other.minor_axis)\n return join_major, join_minor\n\n # miscellaneous data creation\n @staticmethod\n def _extract_axes(self, data, axes, **kwargs):\n \"\"\" return a list of the axis indicies \"\"\"\n return [self._extract_axis(self, data, axis=i, **kwargs)\n for i, a in enumerate(axes)]\n\n @staticmethod\n def _extract_axes_for_slice(self, axes):\n \"\"\" return the slice dictionary for these axes \"\"\"\n return dict([(self._AXIS_SLICEMAP[i], a)\n for i, a in zip(\n self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],\n axes)])\n\n @staticmethod\n def _prep_ndarray(self, values, copy=True):\n if not isinstance(values, np.ndarray):\n values = np.asarray(values)\n # NumPy strings are a pain, convert to object\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype=object, copy=True)\n else:\n if copy:\n values = values.copy()\n if values.ndim != self._AXIS_LEN:\n raise ValueError(\"The number of dimensions required is {0}, \"\n \"but the number of dimensions of the \"\n \"ndarray given was {1}\".format(self._AXIS_LEN,\n values.ndim))\n return values\n\n @staticmethod\n def _homogenize_dict(self, frames, intersect=True, dtype=None):\n \"\"\"\n Conform set of _constructor_sliced-like objects to either\n an intersection of indices / columns or a union.\n\n Parameters\n ----------\n frames : dict\n intersect : boolean, default True\n\n Returns\n -------\n dict of aligned results & indicies\n \"\"\"\n\n result = dict()\n # caller differs dict/ODict, presered type\n if isinstance(frames, OrderedDict):\n result = OrderedDict()\n\n adj_frames = OrderedDict()\n for k, v in compat.iteritems(frames):\n if isinstance(v, dict):\n adj_frames[k] = self._constructor_sliced(v)\n else:\n adj_frames[k] = v\n\n axes = self._AXIS_ORDERS[1:]\n axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(\n self, adj_frames, axes, intersect=intersect))])\n\n reindex_dict = dict(\n [(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])\n reindex_dict['copy'] = False\n for key, frame in compat.iteritems(adj_frames):\n if frame is not None:\n result[key] = frame.reindex(**reindex_dict)\n else:\n result[key] = None\n\n axes_dict['data'] = result\n axes_dict['dtype'] = dtype\n return axes_dict\n\n @staticmethod\n def _extract_axis(self, data, axis=0, intersect=False):\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_frames = False\n\n for v in data.values():\n if isinstance(v, self._constructor_sliced):\n have_frames = True\n 
indexes.append(v._get_axis(axis))\n elif v is not None:\n have_raw_arrays = True\n raw_lengths.append(v.shape[axis])\n\n if have_frames:\n index = _get_combined_index(indexes, intersect=intersect)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('ndarrays must match shape on axis %d' % axis)\n\n if have_frames:\n if lengths[0] != len(index):\n raise AssertionError('Length of data and index must match')\n else:\n index = Index(np.arange(lengths[0]))\n\n if index is None:\n index = Index([])\n\n return _ensure_index(index)\n\n @classmethod\n def _add_aggregate_operations(cls, use_numexpr=True):\n \"\"\" add the operations to the cls; evaluate the doc strings again \"\"\"\n\n # doc strings substitors\n _agg_doc = \"\"\"\nWrapper method for %%s\n\nParameters\n----------\nother : %s or %s\"\"\" % (cls._constructor_sliced.__name__, cls.__name__) + \"\"\"\naxis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\n Axis to broadcast over\n\nReturns\n-------\n\"\"\" + cls.__name__ + \"\\n\"\n\n def _panel_arith_method(op, name, str_rep=None, default_axis=None,\n fill_zeros=None, **eval_kwargs):\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True,\n **eval_kwargs)\n except TypeError:\n result = op(x, y)\n\n # handles discrepancy between numpy and numexpr on division/mod\n # by 0 though, given that these are generally (always?)\n # non-scalars, I'm not sure whether it's worth it at the moment\n result = missing.fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n if name in _op_descriptions:\n op_name = name.replace('__', '')\n op_desc = _op_descriptions[op_name]\n if op_desc['reversed']:\n equiv = 'other ' + op_desc['op'] + ' panel'\n else:\n equiv = 'panel ' + op_desc['op'] + ' other'\n\n _op_doc = \"\"\"\n %%s of series and other, element-wise (binary operator `%%s`).\n Equivalent to ``%%s``.\n\n Parameters\n ----------\n other : %s or %s\"\"\" % (cls._constructor_sliced.__name__,\n cls.__name__) + \"\"\"\n axis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\n Axis to broadcast over\n\n Returns\n -------\n \"\"\" + cls.__name__ + \"\"\"\n\n See also\n --------\n \"\"\" + cls.__name__ + \".%s\\n\"\n doc = _op_doc % (op_desc['desc'], op_name, equiv,\n op_desc['reverse'])\n else:\n doc = _agg_doc % name\n\n @Appender(doc)\n def f(self, other, axis=0):\n return self._combine(other, na_op, axis=axis)\n\n f.__name__ = name\n return f\n\n # add `div`, `mul`, `pow`, etc..\n ops.add_flex_arithmetic_methods(\n cls, _panel_arith_method, use_numexpr=use_numexpr,\n flex_comp_method=ops._comp_method_PANEL)\n\n\nPanel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,\n stat_axis=1, aliases={'major': 'major_axis',\n 'minor': 'minor_axis'},\n slicers={'major_axis': 'index',\n 'minor_axis': 'columns'})\n\nops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)\nPanel._add_aggregate_operations()\nPanel._add_numeric_operations()\n\n\n# legacy\nclass WidePanel(Panel):\n def __init__(self, *args, **kwargs):\n # deprecation, #10892\n warnings.warn(\"WidePanel is deprecated. Please use Panel\",\n FutureWarning, stacklevel=2)\n\n super(WidePanel, self).__init__(*args, **kwargs)\n\n\nclass LongPanel(DataFrame):\n def __init__(self, *args, **kwargs):\n # deprecation, #10892\n warnings.warn(\"LongPanel is deprecated. Please use DataFrame\",\n FutureWarning, stacklevel=2)\n\n super(LongPanel, self).__init__(*args, **kwargs)\n",
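A brief, hedged usage sketch of the Panel paths dumped above (from_dict alignment, item selection, to_frame stacking, apply); it assumes a pandas version that still ships Panel (it was removed in pandas 0.25), and the random frames are illustrative only.

import numpy as np
import pandas as pd

idx = pd.date_range('2000-01-01', periods=4)
frames = {
    'a': pd.DataFrame(np.random.randn(4, 3), index=idx, columns=['x', 'y', 'z']),
    'b': pd.DataFrame(np.random.randn(4, 3), index=idx, columns=['x', 'y', 'z']),
}

# from_dict aligns the frames via _homogenize_dict / _extract_axes
wp = pd.Panel.from_dict(frames, orient='items')

# item selection goes through __getitem__ and returns a DataFrame
df_a = wp['a']

# to_frame stacks major/minor axes into a MultiIndex, one column per item
long_df = wp.to_frame(filter_observations=True)

# apply maps the callable over 1-d slices taken along the chosen axis
sums = wp.apply(lambda s: s.sum(), axis='minor')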
"from __future__ import division, absolute_import, print_function\n\nimport itertools\n\nimport numpy as np\nfrom numpy import exp\nfrom numpy.testing import assert_, assert_equal\n\nfrom scipy.optimize import root\n\n\ndef test_performance():\n # Compare performance results to those listed in\n # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]\n # and\n # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].\n # and those produced by dfsane.f from M. Raydan's website.\n #\n # Where the results disagree, the largest limits are taken.\n\n e_a = 1e-5\n e_r = 1e-4\n\n table_1 = [\n dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),\n dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),\n dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),\n dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),\n # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors\n dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3\n dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?\n dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?\n dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?\n dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),\n dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12\n ]\n\n # Check also scaling invariance\n for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],\n ['cruz', 'cheng']):\n for problem in table_1:\n n = problem['n']\n func = lambda x, n: yscale*problem['F'](x/xscale, n)\n args = (n,)\n x0 = problem['x0'](n) * xscale\n\n fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))\n\n sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)\n sigma_0 = xscale/yscale\n\n with np.errstate(over='ignore'):\n sol = root(func, x0, args=args,\n options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,\n sigma_0=sigma_0, sigma_eps=sigma_eps,\n line_search=line_search),\n method='DF-SANE')\n\n err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),\n fatol, sol.success, sol.nit, sol.nfev])\n assert_(sol.success, err_msg)\n assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval\n assert_(sol.nit <= problem['nit'], err_msg)\n assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)\n\n\ndef test_complex():\n def func(z):\n return z**2 - 1 + 2j\n x0 = 2.0j\n\n ftol = 1e-4\n sol = root(func, x0, tol=ftol, method='DF-SANE')\n\n assert_(sol.success)\n\n f0 = np.linalg.norm(func(x0))\n fx = np.linalg.norm(func(sol.x))\n assert_(fx <= ftol*f0)\n\n\ndef test_linear_definite():\n # The DF-SANE paper proves convergence for \"strongly isolated\"\n # solutions.\n #\n # For linear systems F(x) = A x - b = 0, with A positive or\n # negative definite, the solution is strongly isolated.\n\n def check_solvability(A, b, line_search='cruz'):\n func = lambda x: A.dot(x) - b\n xp = np.linalg.solve(A, b)\n eps = np.linalg.norm(func(xp)) * 1e3\n sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),\n method='DF-SANE')\n assert_(sol.success)\n assert_(np.linalg.norm(func(sol.x)) <= eps)\n\n n = 90\n\n # Test linear pos.def. 
system\n np.random.seed(1234)\n A = np.arange(n*n).reshape(n, n)\n A = A + n*n * np.diag(1 + np.arange(n))\n assert_(np.linalg.eigvals(A).min() > 0)\n b = np.arange(n) * 1.0\n check_solvability(A, b, 'cruz')\n check_solvability(A, b, 'cheng')\n\n # Test linear neg.def. system\n check_solvability(-A, b, 'cruz')\n check_solvability(-A, b, 'cheng')\n\n\ndef test_shape():\n def f(x, arg):\n return x - arg\n\n for dt in [float, complex]:\n x = np.zeros([2,2])\n arg = np.ones([2,2], dtype=dt)\n\n sol = root(f, x, args=(arg,), method='DF-SANE')\n assert_(sol.success)\n assert_equal(sol.x.shape, x.shape)\n\n\n# Some of the test functions and initial guesses listed in\n# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]\n\ndef F_1(x, n):\n g = np.zeros([n])\n i = np.arange(2, n+1)\n g[0] = exp(x[0] - 1) - 1\n g[1:] = i*(exp(x[1:] - 1) - x[1:])\n return g\n\ndef x0_1(n):\n x0 = np.empty([n])\n x0.fill(n/(n-1))\n return x0\n\ndef F_2(x, n):\n g = np.zeros([n])\n i = np.arange(2, n+1)\n g[0] = exp(x[0]) - 1\n g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)\n return g\n\ndef x0_2(n):\n x0 = np.empty([n])\n x0.fill(1/n**2)\n return x0\n\ndef F_4(x, n):\n assert_equal(n % 3, 0)\n g = np.zeros([n])\n # Note: the first line is typoed in some of the references;\n # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]\n g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8\n g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16\n g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3\n return g\n\ndef x0_4(n):\n assert_equal(n % 3, 0)\n x0 = np.array([-1, 1/2, -1] * (n//3))\n return x0\n\ndef F_6(x, n):\n c = 0.9\n mu = (np.arange(1, n+1) - 0.5)/n\n return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))\n\ndef x0_6(n):\n return np.ones([n])\n\ndef F_7(x, n):\n assert_equal(n % 3, 0)\n\n def phi(t):\n v = 0.5*t - 2\n v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]\n v[t >= 2] = (0.5*t + 2)[t >= 2]\n return v\n g = np.zeros([n])\n g[::3] = 1e4 * x[1::3]**2 - 1\n g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001\n g[2::3] = phi(x[2::3])\n return g\n\ndef x0_7(n):\n assert_equal(n % 3, 0)\n return np.array([1e-3, 18, 1] * (n//3))\n\ndef F_9(x, n):\n g = np.zeros([n])\n i = np.arange(2, n)\n g[0] = x[0]**3/3 + x[1]**2/2\n g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2\n g[-1] = -x[-1]**2/2 + n*x[-1]**3/3\n return g\n\ndef x0_9(n):\n return np.ones([n])\n\ndef F_10(x, n):\n return np.log(1 + x) - x/n\n\ndef x0_10(n):\n return np.ones([n])\n",
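For orientation, a minimal stand-alone call to the DF-SANE solver these tests exercise; the small quadratic/cubic system below is illustrative and not one of the La Cruz–Raydan benchmark problems.

import numpy as np
from scipy.optimize import root

def F(x):
    # simple smooth system with a root at (1, 1)
    return np.array([x[0]**2 - 1.0, x[1]**3 - 1.0])

x0 = np.array([2.0, 2.0])
ftol = 1e-6
sol = root(F, x0, tol=ftol, method='DF-SANE')

assert sol.success
# same convergence check as test_complex above: residual shrinks by at least ftol
assert np.linalg.norm(F(sol.x)) <= ftol * np.linalg.norm(F(x0))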
"from pandas.core.base import StringMixin\nfrom pandas.compat import StringIO, range\n\nimport numpy as np\n\nfrom pandas.core.api import Series, DataFrame\nimport pandas.stats.common as common\nfrom pandas.util.decorators import cache_readonly\n\n# flake8: noqa\n\ndef fama_macbeth(**kwargs):\n \"\"\"Runs Fama-MacBeth regression.\n\n Parameters\n ----------\n Takes the same arguments as a panel OLS, in addition to:\n\n nw_lags_beta: int\n Newey-West adjusts the betas by the given lags\n \"\"\"\n window_type = kwargs.get('window_type')\n if window_type is None:\n klass = FamaMacBeth\n else:\n klass = MovingFamaMacBeth\n\n return klass(**kwargs)\n\n\nclass FamaMacBeth(StringMixin):\n\n def __init__(self, y, x, intercept=True, nw_lags=None,\n nw_lags_beta=None,\n entity_effects=False, time_effects=False, x_effects=None,\n cluster=None, dropped_dummies=None, verbose=False):\n import warnings\n warnings.warn(\"The pandas.stats.fama_macbeth module is deprecated and will be \"\n \"removed in a future version. We refer to external packages \"\n \"like statsmodels, see here: \"\n \"http://www.statsmodels.org/stable/index.html\",\n FutureWarning, stacklevel=4)\n\n if dropped_dummies is None:\n dropped_dummies = {}\n self._nw_lags_beta = nw_lags_beta\n\n from pandas.stats.plm import MovingPanelOLS\n self._ols_result = MovingPanelOLS(\n y=y, x=x, window_type='rolling', window=1,\n intercept=intercept,\n nw_lags=nw_lags, entity_effects=entity_effects,\n time_effects=time_effects, x_effects=x_effects, cluster=cluster,\n dropped_dummies=dropped_dummies, verbose=verbose)\n\n self._cols = self._ols_result._x.columns\n\n @cache_readonly\n def _beta_raw(self):\n return self._ols_result._beta_raw\n\n @cache_readonly\n def _stats(self):\n return _calc_t_stat(self._beta_raw, self._nw_lags_beta)\n\n @cache_readonly\n def _mean_beta_raw(self):\n return self._stats[0]\n\n @cache_readonly\n def _std_beta_raw(self):\n return self._stats[1]\n\n @cache_readonly\n def _t_stat_raw(self):\n return self._stats[2]\n\n def _make_result(self, result):\n return Series(result, index=self._cols)\n\n @cache_readonly\n def mean_beta(self):\n return self._make_result(self._mean_beta_raw)\n\n @cache_readonly\n def std_beta(self):\n return self._make_result(self._std_beta_raw)\n\n @cache_readonly\n def t_stat(self):\n return self._make_result(self._t_stat_raw)\n\n @cache_readonly\n def _results(self):\n return {\n 'mean_beta': self._mean_beta_raw,\n 'std_beta': self._std_beta_raw,\n 't_stat': self._t_stat_raw,\n }\n\n @cache_readonly\n def _coef_table(self):\n buffer = StringIO()\n buffer.write('%13s %13s %13s %13s %13s %13s\\n' %\n ('Variable', 'Beta', 'Std Err', 't-stat', 'CI 2.5%', 'CI 97.5%'))\n template = '%13s %13.4f %13.4f %13.2f %13.4f %13.4f\\n'\n\n for i, name in enumerate(self._cols):\n if i and not (i % 5):\n buffer.write('\\n' + common.banner(''))\n\n mean_beta = self._results['mean_beta'][i]\n std_beta = self._results['std_beta'][i]\n t_stat = self._results['t_stat'][i]\n ci1 = mean_beta - 1.96 * std_beta\n ci2 = mean_beta + 1.96 * std_beta\n\n values = '(%s)' % name, mean_beta, std_beta, t_stat, ci1, ci2\n\n buffer.write(template % values)\n\n if self._nw_lags_beta is not None:\n buffer.write('\\n')\n buffer.write('*** The Std Err, t-stat are Newey-West '\n 'adjusted with Lags %5d\\n' % self._nw_lags_beta)\n\n return buffer.getvalue()\n\n def __unicode__(self):\n return self.summary\n\n @cache_readonly\n def summary(self):\n template = \"\"\"\n----------------------Summary of Fama-MacBeth 
Analysis-------------------------\n\nFormula: Y ~ %(formulaRHS)s\n# betas : %(nu)3d\n\n----------------------Summary of Estimated Coefficients------------------------\n%(coefTable)s\n--------------------------------End of Summary---------------------------------\n\"\"\"\n params = {\n 'formulaRHS': ' + '.join(self._cols),\n 'nu': len(self._beta_raw),\n 'coefTable': self._coef_table,\n }\n\n return template % params\n\n\nclass MovingFamaMacBeth(FamaMacBeth):\n\n def __init__(self, y, x, window_type='rolling', window=10,\n intercept=True, nw_lags=None, nw_lags_beta=None,\n entity_effects=False, time_effects=False, x_effects=None,\n cluster=None, dropped_dummies=None, verbose=False):\n if dropped_dummies is None:\n dropped_dummies = {}\n self._window_type = common._get_window_type(window_type)\n self._window = window\n\n FamaMacBeth.__init__(\n self, y=y, x=x, intercept=intercept,\n nw_lags=nw_lags, nw_lags_beta=nw_lags_beta,\n entity_effects=entity_effects, time_effects=time_effects,\n x_effects=x_effects, cluster=cluster,\n dropped_dummies=dropped_dummies, verbose=verbose)\n\n self._index = self._ols_result._index\n self._T = len(self._index)\n\n @property\n def _is_rolling(self):\n return self._window_type == 'rolling'\n\n def _calc_stats(self):\n mean_betas = []\n std_betas = []\n t_stats = []\n\n # XXX\n\n mask = self._ols_result._rolling_ols_call[2]\n obs_total = mask.astype(int).cumsum()\n\n start = self._window - 1\n betas = self._beta_raw\n for i in range(start, self._T):\n if self._is_rolling:\n begin = i - start\n else:\n begin = 0\n\n B = betas[max(obs_total[begin] - 1, 0): obs_total[i]]\n mean_beta, std_beta, t_stat = _calc_t_stat(B, self._nw_lags_beta)\n mean_betas.append(mean_beta)\n std_betas.append(std_beta)\n t_stats.append(t_stat)\n\n return np.array([mean_betas, std_betas, t_stats])\n\n _stats = cache_readonly(_calc_stats)\n\n def _make_result(self, result):\n return DataFrame(result, index=self._result_index, columns=self._cols)\n\n @cache_readonly\n def _result_index(self):\n mask = self._ols_result._rolling_ols_call[2]\n # HACK XXX\n return self._index[mask.cumsum() >= self._window]\n\n @cache_readonly\n def _results(self):\n return {\n 'mean_beta': self._mean_beta_raw[-1],\n 'std_beta': self._std_beta_raw[-1],\n 't_stat': self._t_stat_raw[-1],\n }\n\n\ndef _calc_t_stat(beta, nw_lags_beta):\n N = len(beta)\n B = beta - beta.mean(0)\n C = np.dot(B.T, B) / N\n\n if nw_lags_beta is not None:\n for i in range(nw_lags_beta + 1):\n\n cov = np.dot(B[i:].T, B[:(N - i)]) / N\n weight = i / (nw_lags_beta + 1)\n C += 2 * (1 - weight) * cov\n\n mean_beta = beta.mean(0)\n std_beta = np.sqrt(np.diag(C)) / np.sqrt(N)\n t_stat = mean_beta / std_beta\n\n return mean_beta, std_beta, t_stat\n",
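Since pandas.stats.fama_macbeth is deprecated, here is a hedged numpy-only sketch of the second-stage computation that _calc_t_stat performs: average the per-period betas and form t-statistics, with an optional Newey-West adjustment written in the standard lag-weighting form. The random betas are illustrative, not real first-stage regression output.

import numpy as np

def fama_macbeth_tstats(beta, nw_lags=None):
    # beta: (T, K) array of per-period coefficient estimates
    T = len(beta)
    B = beta - beta.mean(0)
    C = np.dot(B.T, B) / T
    if nw_lags is not None:
        # Bartlett-weighted autocovariances of the per-period betas
        for lag in range(1, nw_lags + 1):
            cov = np.dot(B[lag:].T, B[:T - lag]) / T
            weight = 1.0 - lag / (nw_lags + 1.0)
            C += 2.0 * weight * cov
    mean_beta = beta.mean(0)
    std_beta = np.sqrt(np.diag(C)) / np.sqrt(T)
    return mean_beta, std_beta, mean_beta / std_beta

betas = 0.1 * np.random.randn(250, 3) + np.array([0.5, 0.0, -0.2])
mean_beta, std_beta, t_stat = fama_macbeth_tstats(betas, nw_lags=4)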
"from __future__ import absolute_import, division, print_function\n\nimport six\nimport warnings\n\nimport numpy as np\n\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea\nfrom matplotlib.patches import Rectangle\n\n\ndef example_plot(ax, fontsize=12):\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n\n\n@image_comparison(baseline_images=['tight_layout1'])\ndef test_tight_layout1():\n 'Test tight_layout for a single subplot'\n fig = plt.figure()\n ax = fig.add_subplot(111)\n example_plot(ax, fontsize=24)\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout2'])\ndef test_tight_layout2():\n 'Test tight_layout for multiple subplots'\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout3'])\ndef test_tight_layout3():\n 'Test tight_layout for multiple subplots'\n\n fig = plt.figure()\n\n ax1 = plt.subplot(221)\n ax2 = plt.subplot(223)\n ax3 = plt.subplot(122)\n\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout4'],\n freetype_version=('2.5.5', '2.6.1'))\ndef test_tight_layout4():\n 'Test tight_layout for subplot2grid'\n\n fig = plt.figure()\n\n ax1 = plt.subplot2grid((3, 3), (0, 0))\n ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)\n ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)\n ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\n\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout5'])\ndef test_tight_layout5():\n 'Test tight_layout for image'\n\n fig = plt.figure()\n\n ax = plt.subplot(111)\n arr = np.arange(100).reshape((10, 10))\n ax.imshow(arr, interpolation=\"none\")\n\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout6'])\ndef test_tight_layout6():\n 'Test tight_layout for gridspec'\n\n # This raises warnings since tight layout cannot\n # do this fully automatically. 
But the test is\n # correct since the layout is manually edited\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n fig = plt.figure()\n\n import matplotlib.gridspec as gridspec\n\n gs1 = gridspec.GridSpec(2, 1)\n ax1 = fig.add_subplot(gs1[0])\n ax2 = fig.add_subplot(gs1[1])\n\n example_plot(ax1)\n example_plot(ax2)\n\n gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\n gs2 = gridspec.GridSpec(3, 1)\n\n for ss in gs2:\n ax = fig.add_subplot(ss)\n example_plot(ax)\n ax.set_title(\"\")\n ax.set_xlabel(\"\")\n\n ax.set_xlabel(\"x-label\", fontsize=12)\n\n gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)\n\n top = min(gs1.top, gs2.top)\n bottom = max(gs1.bottom, gs2.bottom)\n\n gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),\n 0.5, 1 - (gs1.top-top)])\n gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),\n None, 1 - (gs2.top-top)],\n h_pad=0.45)\n\n\n@image_comparison(baseline_images=['tight_layout7'])\ndef test_tight_layout7():\n # tight layout with left and right titles\n fig = plt.figure()\n fontsize = 24\n ax = fig.add_subplot(111)\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Left Title', loc='left', fontsize=fontsize)\n ax.set_title('Right Title', loc='right', fontsize=fontsize)\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout8'])\ndef test_tight_layout8():\n 'Test automatic use of tight_layout'\n fig = plt.figure()\n fig.set_tight_layout({'pad': .1})\n ax = fig.add_subplot(111)\n example_plot(ax, fontsize=24)\n\n\n@image_comparison(baseline_images=['tight_layout9'])\ndef test_tight_layout9():\n # Test tight_layout for non-visible suplots\n # GH 8244\n f, axarr = plt.subplots(2, 2)\n axarr[1][1].set_visible(False)\n plt.tight_layout()\n\n\n# The following test is misleading when the text is removed.\n@image_comparison(baseline_images=['outward_ticks'], remove_text=False)\ndef test_outward_ticks():\n 'Test automatic use of tight_layout'\n fig = plt.figure()\n ax = fig.add_subplot(221)\n ax.xaxis.set_tick_params(tickdir='out', length=16, width=3)\n ax.yaxis.set_tick_params(tickdir='out', length=16, width=3)\n ax.xaxis.set_tick_params(\n tickdir='out', length=32, width=3, tick1On=True, which='minor')\n ax.yaxis.set_tick_params(\n tickdir='out', length=32, width=3, tick1On=True, which='minor')\n # The following minor ticks are not labelled, and they\n # are drawn over the major ticks and labels--ugly!\n ax.xaxis.set_ticks([0], minor=True)\n ax.yaxis.set_ticks([0], minor=True)\n ax = fig.add_subplot(222)\n ax.xaxis.set_tick_params(tickdir='in', length=32, width=3)\n ax.yaxis.set_tick_params(tickdir='in', length=32, width=3)\n ax = fig.add_subplot(223)\n ax.xaxis.set_tick_params(tickdir='inout', length=32, width=3)\n ax.yaxis.set_tick_params(tickdir='inout', length=32, width=3)\n ax = fig.add_subplot(224)\n ax.xaxis.set_tick_params(tickdir='out', length=32, width=3)\n ax.yaxis.set_tick_params(tickdir='out', length=32, width=3)\n plt.tight_layout()\n\n\ndef add_offsetboxes(ax, size=10, margin=.1, color='black'):\n \"\"\"\n Surround ax with OffsetBoxes\n \"\"\"\n m, mp = margin, 1+margin\n anchor_points = [(-m, -m), (-m, .5), (-m, mp),\n (mp, .5), (.5, mp), (mp, mp),\n (.5, -m), (mp, -m), (.5, -m)]\n for point in anchor_points:\n da = DrawingArea(size, size)\n background = Rectangle((0, 0), width=size,\n height=size,\n facecolor=color,\n edgecolor='None',\n linewidth=0,\n antialiased=False)\n 
da.add_artist(background)\n\n anchored_box = AnchoredOffsetbox(\n loc=10,\n child=da,\n pad=0.,\n frameon=False,\n bbox_to_anchor=point,\n bbox_transform=ax.transAxes,\n borderpad=0.)\n ax.add_artist(anchored_box)\n return anchored_box\n\n\n@image_comparison(baseline_images=['tight_layout_offsetboxes1',\n 'tight_layout_offsetboxes2'])\ndef test_tight_layout_offsetboxes():\n # 1.\n # - Create 4 subplots\n # - Plot a diagonal line on them\n # - Surround each plot with 7 boxes\n # - Use tight_layout\n # - See that the squares are included in the tight_layout\n # and that the squares in the middle do not overlap\n #\n # 2.\n # - Make the squares around the right side axes invisible\n # - See that the invisible squares do not affect the\n # tight_layout\n rows = cols = 2\n colors = ['red', 'blue', 'green', 'yellow']\n x = y = [0, 1]\n\n def _subplots():\n _, axs = plt.subplots(rows, cols)\n axs = axs.flat\n for ax, color in zip(axs, colors):\n ax.plot(x, y, color=color)\n add_offsetboxes(ax, 20, color=color)\n return axs\n\n # 1.\n axs = _subplots()\n plt.tight_layout()\n\n # 2.\n axs = _subplots()\n for ax in (axs[cols-1::rows]):\n for child in ax.get_children():\n if isinstance(child, AnchoredOffsetbox):\n child.set_visible(False)\n\n plt.tight_layout()\n\n\ndef test_empty_layout():\n \"\"\"Tests that tight layout doesn't cause an error when there are\n no axes.\n \"\"\"\n\n fig = plt.gcf()\n fig.tight_layout()\n",
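A short, hedged sketch of the two tight_layout entry points these tests cover: the one-shot figure-level call (also reachable via set_tight_layout for draw-time application) and the GridSpec-scoped variant with a rect. The Agg backend and the output filename are assumptions for a headless run.

import matplotlib
matplotlib.use('Agg')  # assumption: render off-screen
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

# figure-level: adjust subplot parameters once, or on every draw
fig, axs = plt.subplots(2, 2)
for ax in axs.flat:
    ax.plot([1, 2])
    ax.set_xlabel('x-label')
    ax.set_ylabel('y-label')
fig.tight_layout()
fig.set_tight_layout({'pad': 0.1})   # applied automatically at draw time

# GridSpec-level: confine the layout to a sub-rectangle of the figure
fig2 = plt.figure()
gs = gridspec.GridSpec(2, 1)
for ss in gs:
    fig2.add_subplot(ss).plot([1, 2])
gs.tight_layout(fig2, rect=[0, 0, 0.5, 1])
fig2.savefig('tight_layout_sketch.png')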
"\"\"\"Lite version of scipy.linalg.\n\nNotes\n-----\nThis module is a lite version of the linalg.py module in SciPy which\ncontains high-level Python interface to the LAPACK library. The lite\nversion only accesses the following LAPACK functions: dgesv, zgesv,\ndgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,\nzgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n\n__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',\n 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',\n 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',\n 'LinAlgError', 'multi_dot']\n\nimport warnings\n\nfrom numpy.core import (\n array, asarray, zeros, empty, intc, single, double,\n csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,\n add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite, finfo, errstate, geterrobj, moveaxis, amin, amax, product,\n abs,\n atleast_2d, intp, asanyarray, object_, matmul,\n swapaxes, divide, count_nonzero\n)\nfrom numpy.core.multiarray import normalize_axis_index\nfrom numpy.lib import triu\nfrom numpy.linalg import lapack_lite, _umath_linalg\nfrom numpy.matrixlib.defmatrix import matrix_power\n\n# For Python2/3 compatibility\n_N = b'N'\n_V = b'V'\n_A = b'A'\n_S = b'S'\n_L = b'L'\n\nfortran_int = intc\n\n# Error object\nclass LinAlgError(Exception):\n \"\"\"\n Generic Python-exception-derived object raised by linalg functions.\n\n General purpose exception class, derived from Python's exception.Exception\n class, programmatically raised in linalg functions when a Linear\n Algebra-related condition would prevent further correct execution of the\n function.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> LA.inv(np.zeros((2,2)))\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"...linalg.py\", line 350,\n in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))\n File \"...linalg.py\", line 249,\n in solve\n raise LinAlgError('Singular matrix')\n numpy.linalg.LinAlgError: Singular matrix\n\n \"\"\"\n pass\n\n\ndef _determine_error_states():\n errobj = geterrobj()\n bufsize = errobj[0]\n\n with errstate(invalid='call', over='ignore',\n divide='ignore', under='ignore'):\n invalid_call_errmask = geterrobj()[1]\n\n return [bufsize, invalid_call_errmask, None]\n\n# Dealing with errors in _umath_linalg\n_linalg_error_extobj = _determine_error_states()\ndel _determine_error_states\n\ndef _raise_linalgerror_singular(err, flag):\n raise LinAlgError(\"Singular matrix\")\n\ndef _raise_linalgerror_nonposdef(err, flag):\n raise LinAlgError(\"Matrix is not positive definite\")\n\ndef _raise_linalgerror_eigenvalues_nonconvergence(err, flag):\n raise LinAlgError(\"Eigenvalues did not converge\")\n\ndef _raise_linalgerror_svd_nonconvergence(err, flag):\n raise LinAlgError(\"SVD did not converge\")\n\ndef get_linalg_error_extobj(callback):\n extobj = list(_linalg_error_extobj) # make a copy\n extobj[2] = callback\n return extobj\n\ndef _makearray(a):\n new = asarray(a)\n wrap = getattr(a, \"__array_prepare__\", new.__array_wrap__)\n return new, wrap\n\ndef isComplexType(t):\n return issubclass(t, complexfloating)\n\n_real_types_map = {single : single,\n double : double,\n csingle : single,\n cdouble : double}\n\n_complex_types_map = {single : csingle,\n double : cdouble,\n csingle : csingle,\n cdouble : cdouble}\n\ndef _realType(t, default=double):\n 
return _real_types_map.get(t, default)\n\ndef _complexType(t, default=cdouble):\n return _complex_types_map.get(t, default)\n\ndef _linalgRealType(t):\n \"\"\"Cast the type t to either double or cdouble.\"\"\"\n return double\n\n_complex_types_map = {single : csingle,\n double : cdouble,\n csingle : csingle,\n cdouble : cdouble}\n\ndef _commonType(*arrays):\n # in lite version, use higher precision (always double or cdouble)\n result_type = single\n is_complex = False\n for a in arrays:\n if issubclass(a.dtype.type, inexact):\n if isComplexType(a.dtype.type):\n is_complex = True\n rt = _realType(a.dtype.type, default=None)\n if rt is None:\n # unsupported inexact scalar\n raise TypeError(\"array type %s is unsupported in linalg\" %\n (a.dtype.name,))\n else:\n rt = double\n if rt is double:\n result_type = double\n if is_complex:\n t = cdouble\n result_type = _complex_types_map[result_type]\n else:\n t = double\n return t, result_type\n\n\n# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).\n\n_fastCT = fastCopyAndTranspose\n\ndef _to_native_byte_order(*arrays):\n ret = []\n for arr in arrays:\n if arr.dtype.byteorder not in ('=', '|'):\n ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))\n else:\n ret.append(arr)\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\ndef _fastCopyAndTranspose(type, *arrays):\n cast_arrays = ()\n for a in arrays:\n if a.dtype.type is type:\n cast_arrays = cast_arrays + (_fastCT(a),)\n else:\n cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)\n if len(cast_arrays) == 1:\n return cast_arrays[0]\n else:\n return cast_arrays\n\ndef _assertRank2(*arrays):\n for a in arrays:\n if a.ndim != 2:\n raise LinAlgError('%d-dimensional array given. Array must be '\n 'two-dimensional' % a.ndim)\n\ndef _assertRankAtLeast2(*arrays):\n for a in arrays:\n if a.ndim < 2:\n raise LinAlgError('%d-dimensional array given. Array must be '\n 'at least two-dimensional' % a.ndim)\n\ndef _assertSquareness(*arrays):\n for a in arrays:\n if max(a.shape) != min(a.shape):\n raise LinAlgError('Array must be square')\n\ndef _assertNdSquareness(*arrays):\n for a in arrays:\n if max(a.shape[-2:]) != min(a.shape[-2:]):\n raise LinAlgError('Last 2 dimensions of the array must be square')\n\ndef _assertFinite(*arrays):\n for a in arrays:\n if not (isfinite(a).all()):\n raise LinAlgError(\"Array must not contain infs or NaNs\")\n\ndef _isEmpty2d(arr):\n # check size first for efficiency\n return arr.size == 0 and product(arr.shape[-2:]) == 0\n\ndef _assertNoEmpty2d(*arrays):\n for a in arrays:\n if _isEmpty2d(a):\n raise LinAlgError(\"Arrays cannot be empty\")\n\ndef transpose(a):\n \"\"\"\n Transpose each matrix in a stack of matrices.\n\n Unlike np.transpose, this only swaps the last two axes, rather than all of\n them\n\n Parameters\n ----------\n a : (...,M,N) array_like\n\n Returns\n -------\n aT : (...,N,M) ndarray\n \"\"\"\n return swapaxes(a, -1, -2)\n\n# Linear equations\n\ndef tensorsolve(a, b, axes=None):\n \"\"\"\n Solve the tensor equation ``a x = b`` for x.\n\n It is assumed that all indices of `x` are summed over in the product,\n together with the rightmost indices of `a`, as is done in, for example,\n ``tensordot(a, x, axes=b.ndim)``.\n\n Parameters\n ----------\n a : array_like\n Coefficient tensor, of shape ``b.shape + Q``. 
`Q`, a tuple, equals\n the shape of that sub-tensor of `a` consisting of the appropriate\n number of its rightmost indices, and must be such that\n ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be\n 'square').\n b : array_like\n Right-hand tensor, which can be of any shape.\n axes : tuple of ints, optional\n Axes in `a` to reorder to the right, before inversion.\n If None (default), no reordering is done.\n\n Returns\n -------\n x : ndarray, shape Q\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not 'square' (in the above sense).\n\n See Also\n --------\n numpy.tensordot, tensorinv, numpy.einsum\n\n Examples\n --------\n >>> a = np.eye(2*3*4)\n >>> a.shape = (2*3, 4, 2, 3, 4)\n >>> b = np.random.randn(2*3, 4)\n >>> x = np.linalg.tensorsolve(a, b)\n >>> x.shape\n (2, 3, 4)\n >>> np.allclose(np.tensordot(a, x, axes=3), b)\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n b = asarray(b)\n an = a.ndim\n\n if axes is not None:\n allaxes = list(range(0, an))\n for k in axes:\n allaxes.remove(k)\n allaxes.insert(an, k)\n a = a.transpose(allaxes)\n\n oldshape = a.shape[-(an-b.ndim):]\n prod = 1\n for k in oldshape:\n prod *= k\n\n a = a.reshape(-1, prod)\n b = b.ravel()\n res = wrap(solve(a, b))\n res.shape = oldshape\n return res\n\ndef solve(a, b):\n \"\"\"\n Solve a linear matrix equation, or system of linear scalar equations.\n\n Computes the \"exact\" solution, `x`, of the well-determined, i.e., full\n rank, linear matrix equation `ax = b`.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Coefficient matrix.\n b : {(..., M,), (..., M, K)}, array_like\n Ordinate or \"dependent variable\" values.\n\n Returns\n -------\n x : {(..., M,), (..., M, K)} ndarray\n Solution to the system a x = b. Returned shape is identical to `b`.\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not square.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The solutions are computed using LAPACK routine _gesv\n\n `a` must be square and of full-rank, i.e., all rows (or, equivalently,\n columns) must be linearly independent; if either is not true, use\n `lstsq` for the least-squares best \"solution\" of the\n system/equation.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pg. 22.\n\n Examples\n --------\n Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:\n\n >>> a = np.array([[3,1], [1,2]])\n >>> b = np.array([9,8])\n >>> x = np.linalg.solve(a, b)\n >>> x\n array([ 2., 3.])\n\n Check that the solution is correct:\n\n >>> np.allclose(np.dot(a, x), b)\n True\n\n \"\"\"\n a, _ = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n b, wrap = _makearray(b)\n t, result_t = _commonType(a, b)\n\n # We use the b = (..., M,) logic, only if the number of extra dimensions\n # match exactly\n if b.ndim == a.ndim - 1:\n gufunc = _umath_linalg.solve1\n else:\n gufunc = _umath_linalg.solve\n\n signature = 'DD->D' if isComplexType(t) else 'dd->d'\n extobj = get_linalg_error_extobj(_raise_linalgerror_singular)\n r = gufunc(a, b, signature=signature, extobj=extobj)\n\n return wrap(r.astype(result_t, copy=False))\n\n\ndef tensorinv(a, ind=2):\n \"\"\"\n Compute the 'inverse' of an N-dimensional array.\n\n The result is an inverse for `a` relative to the tensordot operation\n ``tensordot(a, b, ind)``, i. 
e., up to floating-point accuracy,\n ``tensordot(tensorinv(a), a, ind)`` is the \"identity\" tensor for the\n tensordot operation.\n\n Parameters\n ----------\n a : array_like\n Tensor to 'invert'. Its shape must be 'square', i. e.,\n ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.\n ind : int, optional\n Number of first indices that are involved in the inverse sum.\n Must be a positive integer, default is 2.\n\n Returns\n -------\n b : ndarray\n `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not 'square' (in the above sense).\n\n See Also\n --------\n numpy.tensordot, tensorsolve\n\n Examples\n --------\n >>> a = np.eye(4*6)\n >>> a.shape = (4, 6, 8, 3)\n >>> ainv = np.linalg.tensorinv(a, ind=2)\n >>> ainv.shape\n (8, 3, 4, 6)\n >>> b = np.random.randn(4, 6)\n >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))\n True\n\n >>> a = np.eye(4*6)\n >>> a.shape = (24, 8, 3)\n >>> ainv = np.linalg.tensorinv(a, ind=1)\n >>> ainv.shape\n (8, 3, 24)\n >>> b = np.random.randn(24)\n >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))\n True\n\n \"\"\"\n a = asarray(a)\n oldshape = a.shape\n prod = 1\n if ind > 0:\n invshape = oldshape[ind:] + oldshape[:ind]\n for k in oldshape[ind:]:\n prod *= k\n else:\n raise ValueError(\"Invalid ind argument.\")\n a = a.reshape(prod, -1)\n ia = inv(a)\n return ia.reshape(*invshape)\n\n\n# Matrix inversion\n\ndef inv(a):\n \"\"\"\n Compute the (multiplicative) inverse of a matrix.\n\n Given a square matrix `a`, return the matrix `ainv` satisfying\n ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Matrix to be inverted.\n\n Returns\n -------\n ainv : (..., M, M) ndarray or matrix\n (Multiplicative) inverse of the matrix `a`.\n\n Raises\n ------\n LinAlgError\n If `a` is not square or inversion fails.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n Examples\n --------\n >>> from numpy.linalg import inv\n >>> a = np.array([[1., 2.], [3., 4.]])\n >>> ainv = inv(a)\n >>> np.allclose(np.dot(a, ainv), np.eye(2))\n True\n >>> np.allclose(np.dot(ainv, a), np.eye(2))\n True\n\n If a is a matrix object, then the return value is a matrix as well:\n\n >>> ainv = inv(np.matrix(a))\n >>> ainv\n matrix([[-2. , 1. ],\n [ 1.5, -0.5]])\n\n Inverses of several matrices can be computed at once:\n\n >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])\n >>> inv(a)\n array([[[-2. , 1. ],\n [ 1.5, -0.5]],\n [[-5. , 2. ],\n [ 3. , -1. ]]])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n\n signature = 'D->D' if isComplexType(t) else 'd->d'\n extobj = get_linalg_error_extobj(_raise_linalgerror_singular)\n ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)\n return wrap(ainv.astype(result_t, copy=False))\n\n\n# Cholesky decomposition\n\ndef cholesky(a):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. 
Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n\n Returns\n -------\n L : (..., M, M) array_like\n Upper or lower-triangular Cholesky factor of `a`. Returns a\n matrix object if `a` is a matrix object.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> A = np.array([[1,-2j],[2j,5]])\n >>> A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = np.linalg.cholesky(A)\n >>> L\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> np.dot(L, L.T.conj()) # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> np.linalg.cholesky(A) # an ndarray object is returned\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> # But a matrix object is returned if A is a matrix object\n >>> LA.cholesky(np.matrix(A))\n matrix([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)\n gufunc = _umath_linalg.cholesky_lo\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n signature = 'D->D' if isComplexType(t) else 'd->d'\n r = gufunc(a, signature=signature, extobj=extobj)\n return wrap(r.astype(result_t, copy=False))\n\n# QR decompostion\n\ndef qr(a, mode='reduced'):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is\n upper-triangular.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be factored.\n mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional\n If K = min(M, N), then\n\n * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)\n * 'complete' : returns q, r with dimensions (M, M), (M, N)\n * 'r' : returns r only with dimensions (K, N)\n * 'raw' : returns h, tau with dimensions (N, M), (K,)\n * 'full' : alias of 'reduced', deprecated\n * 'economic' : returns h from 'raw', deprecated.\n\n The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,\n see the notes for more information. The default is 'reduced', and to\n maintain backward compatibility with earlier versions of numpy both\n it and the old default 'full' can be omitted. Note that array h\n returned in 'raw' mode is transposed for calling Fortran. The\n 'economic' mode is deprecated. The modes 'full' and 'economic' may\n be passed using only the first letter for backwards compatibility,\n but all others must be spelled out. See the Notes for more\n explanation.\n\n\n Returns\n -------\n q : ndarray of float or complex, optional\n A matrix with orthonormal columns. When mode = 'complete' the\n result is an orthogonal/unitary matrix depending on whether or not\n a is real/complex. 
The determinant may be either +/- 1 in that\n case.\n r : ndarray of float or complex, optional\n The upper-triangular matrix.\n (h, tau) : ndarrays of np.double or np.cdouble, optional\n The array h contains the Householder reflectors that generate q\n along with r. The tau array contains scaling factors for the\n reflectors. In the deprecated 'economic' mode only h is returned.\n\n Raises\n ------\n LinAlgError\n If factoring fails.\n\n Notes\n -----\n This is an interface to the LAPACK routines dgeqrf, zgeqrf,\n dorgqr, and zungqr.\n\n For more information on the qr factorization, see for example:\n http://en.wikipedia.org/wiki/QR_factorization\n\n Subclasses of `ndarray` are preserved except for the 'raw' mode. So if\n `a` is of type `matrix`, all the return values will be matrices too.\n\n New 'reduced', 'complete', and 'raw' options for mode were added in\n NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In\n addition the options 'full' and 'economic' were deprecated. Because\n 'full' was the previous default and 'reduced' is the new default,\n backward compatibility can be maintained by letting `mode` default.\n The 'raw' option was added so that LAPACK routines that can multiply\n arrays by q using the Householder reflectors can be used. Note that in\n this case the returned arrays are of type np.double or np.cdouble and\n the h array is transposed to be FORTRAN compatible. No routines using\n the 'raw' return are currently exposed by numpy, but some are available\n in lapack_lite and just await the necessary work.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6)\n >>> dedupe_q, r = np.linalg.qr(a)\n >>> np.allclose(a, np.dot(dedupe_q, r)) # a does equal qr\n True\n >>> r2 = np.linalg.qr(a, mode='r')\n >>> r3 = np.linalg.qr(a, mode='economic')\n >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'\n True\n >>> # But only triu parts are guaranteed equal when mode='economic'\n >>> np.allclose(r, np.triu(r3[:6,:6], k=0))\n True\n\n Example illustrating a common use of `qr`: solving of least squares\n problems\n\n What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for\n the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points\n and you'll see that it should be y0 = 0, m = 1.) The answer is provided\n by solving the over-determined matrix equation ``Ax = b``, where::\n\n A = array([[0, 1], [1, 1], [1, 1], [2, 1]])\n x = array([[y0], [m]])\n b = array([[1], [0], [2], [1]])\n\n If A = qr such that q is orthonormal (which is always possible via\n Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. 
(In numpy practice,\n however, we simply use `lstsq`.)\n\n >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])\n >>> A\n array([[0, 1],\n [1, 1],\n [1, 1],\n [2, 1]])\n >>> b = np.array([1, 0, 2, 1])\n >>> dedupe_q, r = LA.qr(A)\n >>> p = np.dot(q.T, b)\n >>> np.dot(LA.inv(r), p)\n array([ 1.1e-16, 1.0e+00])\n\n \"\"\"\n if mode not in ('reduced', 'complete', 'r', 'raw'):\n if mode in ('f', 'full'):\n # 2013-04-01, 1.8\n msg = \"\".join((\n \"The 'full' option is deprecated in favor of 'reduced'.\\n\",\n \"For backward compatibility let mode default.\"))\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n mode = 'reduced'\n elif mode in ('e', 'economic'):\n # 2013-04-01, 1.8\n msg = \"The 'economic' option is deprecated.\"\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n mode = 'economic'\n else:\n raise ValueError(\"Unrecognized mode '%s'\" % mode)\n\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertNoEmpty2d(a)\n m, n = a.shape\n t, result_t = _commonType(a)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n mn = min(m, n)\n tau = zeros((mn,), t)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgeqrf\n routine_name = 'zgeqrf'\n else:\n lapack_routine = lapack_lite.dgeqrf\n routine_name = 'dgeqrf'\n\n # calculate optimal size of work data 'work'\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, a, m, tau, work, -1, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n # do qr decomposition\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, a, m, tau, work, lwork, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n # handle modes that don't return q\n if mode == 'r':\n r = _fastCopyAndTranspose(result_t, a[:, :mn])\n return wrap(triu(r))\n\n if mode == 'raw':\n return a, tau\n\n if mode == 'economic':\n if t != result_t :\n a = a.astype(result_t, copy=False)\n return wrap(a.T)\n\n # generate q from a\n if mode == 'complete' and m > n:\n mc = m\n q = empty((m, m), t)\n else:\n mc = mn\n q = empty((n, m), t)\n q[:n] = a\n\n if isComplexType(t):\n lapack_routine = lapack_lite.zungqr\n routine_name = 'zungqr'\n else:\n lapack_routine = lapack_lite.dorgqr\n routine_name = 'dorgqr'\n\n # determine optimal lwork\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n # compute q\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n q = _fastCopyAndTranspose(result_t, q[:mc])\n r = _fastCopyAndTranspose(result_t, a[:, :mc])\n\n return wrap(q), wrap(triu(r))\n\n\n# Eigenvalues\n\n\ndef eigvals(a):\n \"\"\"\n Compute the eigenvalues of a general matrix.\n\n Main difference between `eigvals` and `eig`: the eigenvectors aren't\n returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n A complex- or real-valued matrix whose eigenvalues will be computed.\n\n Returns\n -------\n w : (..., M,) ndarray\n The eigenvalues, each repeated according to its multiplicity.\n They are not necessarily ordered, nor are they necessarily\n real for real matrices.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eig : eigenvalues and 
right eigenvectors of general arrays\n eigvalsh : eigenvalues of symmetric or Hermitian arrays.\n eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n This is implemented using the _geev LAPACK routines which compute\n the eigenvalues and eigenvectors of general square arrays.\n\n Examples\n --------\n Illustration, using the fact that the eigenvalues of a diagonal matrix\n are its diagonal elements, that multiplying a matrix on the left\n by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose\n of `Q`), preserves the eigenvalues of the \"middle\" matrix. In other words,\n if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as\n ``A``:\n\n >>> from numpy import linalg as LA\n >>> x = np.random.random()\n >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])\n >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])\n (1.0, 1.0, 0.0)\n\n Now multiply a diagonal matrix by Q on one side and by Q.T on the other:\n\n >>> D = np.diag((-1,1))\n >>> LA.eigvals(D)\n array([-1., 1.])\n >>> A = np.dot(Q, D)\n >>> A = np.dot(A, Q.T)\n >>> LA.eigvals(A)\n array([ 1., -1.])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n _assertFinite(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n signature = 'D->D' if isComplexType(t) else 'd->D'\n w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)\n\n if not isComplexType(t):\n if all(w.imag == 0):\n w = w.real\n result_t = _realType(result_t)\n else:\n result_t = _complexType(result_t)\n\n return w.astype(result_t, copy=False)\n\ndef eigvalsh(a, UPLO='L'):\n \"\"\"\n Compute the eigenvalues of a Hermitian or real symmetric matrix.\n\n Main difference from eigh: the eigenvectors are not computed.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n A complex- or real-valued matrix whose eigenvalues are to be\n computed.\n UPLO : {'L', 'U'}, optional\n Specifies whether the calculation is done with the lower triangular\n part of `a` ('L', default) or the upper triangular part ('U').\n Irrespective of this value only the real parts of the diagonal will\n be considered in the computation to preserve the notion of a Hermitian\n matrix. It therefore follows that the imaginary part of the diagonal\n will always be treated as zero.\n\n Returns\n -------\n w : (..., M,) ndarray\n The eigenvalues in ascending order, each repeated according to\n its multiplicity.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.\n eigvals : eigenvalues of general real or complex arrays.\n eig : eigenvalues and right eigenvectors of general real or complex\n arrays.\n\n Notes\n -----\n\n .. 
versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The eigenvalues are computed using LAPACK routines _syevd, _heevd\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, -2j], [2j, 5]])\n >>> LA.eigvalsh(a)\n array([ 0.17157288, 5.82842712])\n\n >>> # demonstrate the treatment of the imaginary part of the diagonal\n >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])\n >>> a\n array([[ 5.+2.j, 9.-2.j],\n [ 0.+2.j, 2.-1.j]])\n >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()\n >>> # with:\n >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])\n >>> b\n array([[ 5.+0.j, 0.-2.j],\n [ 0.+2.j, 2.+0.j]])\n >>> wa = LA.eigvalsh(a)\n >>> wb = LA.eigvals(b)\n >>> wa; wb\n array([ 1., 6.])\n array([ 6.+0.j, 1.+0.j])\n\n \"\"\"\n UPLO = UPLO.upper()\n if UPLO not in ('L', 'U'):\n raise ValueError(\"UPLO argument must be 'L' or 'U'\")\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n if UPLO == 'L':\n gufunc = _umath_linalg.eigvalsh_lo\n else:\n gufunc = _umath_linalg.eigvalsh_up\n\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n signature = 'D->d' if isComplexType(t) else 'd->d'\n w = gufunc(a, signature=signature, extobj=extobj)\n return w.astype(_realType(result_t), copy=False)\n\ndef _convertarray(a):\n t, result_t = _commonType(a)\n a = _fastCT(a.astype(t))\n return a, t, result_t\n\n\n# Eigenvectors\n\n\ndef eig(a):\n \"\"\"\n Compute the eigenvalues and right eigenvectors of a square array.\n\n Parameters\n ----------\n a : (..., M, M) array\n Matrices for which the eigenvalues and right eigenvectors will\n be computed\n\n Returns\n -------\n w : (..., M) array\n The eigenvalues, each repeated according to its multiplicity.\n The eigenvalues are not necessarily ordered. The resulting\n array will be of complex type, unless the imaginary part is\n zero in which case it will be cast to a real type. When `a`\n is real the resulting eigenvalues will be real (0 imaginary\n part) or occur in conjugate pairs\n\n v : (..., M, M) array\n The normalized (unit \"length\") eigenvectors, such that the\n column ``v[:,i]`` is the eigenvector corresponding to the\n eigenvalue ``w[i]``.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigvals : eigenvalues of a non-symmetric array.\n\n eigh : eigenvalues and eigenvectors of a symmetric or Hermitian\n (conjugate symmetric) array.\n\n eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)\n array.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n This is implemented using the _geev LAPACK routines which compute\n the eigenvalues and eigenvectors of general square arrays.\n\n The number `w` is an eigenvalue of `a` if there exists a vector\n `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and\n `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``\n for :math:`i \\\\in \\\\{0,...,M-1\\\\}`.\n\n The array `v` of eigenvectors may not be of maximum rank, that is, some\n of the columns may be linearly dependent, although round-off error may\n obscure that fact. If the eigenvalues are all different, then theoretically\n the eigenvectors are linearly independent. 
Likewise, the (complex-valued)\n matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,\n if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate\n transpose of `a`.\n\n Finally, it is emphasized that `v` consists of the *right* (as in\n right-hand side) eigenvectors of `a`. A vector `y` satisfying\n ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*\n eigenvector of `a`, and, in general, the left and right eigenvectors\n of a matrix are not necessarily the (perhaps conjugate) transposes\n of each other.\n\n References\n ----------\n G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,\n Academic Press, Inc., 1980, Various pp.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n\n (Almost) trivial example with real e-values and e-vectors.\n\n >>> w, v = LA.eig(np.diag((1, 2, 3)))\n >>> w; v\n array([ 1., 2., 3.])\n array([[ 1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]])\n\n Real matrix possessing complex e-values and e-vectors; note that the\n e-values are complex conjugates of each other.\n\n >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))\n >>> w; v\n array([ 1. + 1.j, 1. - 1.j])\n array([[ 0.70710678+0.j , 0.70710678+0.j ],\n [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])\n\n Complex-valued matrix with real e-values (but complex-valued e-vectors);\n note that a.conj().T = a, i.e., a is Hermitian.\n\n >>> a = np.array([[1, 1j], [-1j, 1]])\n >>> w, v = LA.eig(a)\n >>> w; v\n array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}\n array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],\n [ 0.70710678+0.j , 0.00000000+0.70710678j]])\n\n Be careful about round-off error!\n\n >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])\n >>> # Theor. e-values are 1 +/- 1e-9\n >>> w, v = LA.eig(a)\n >>> w; v\n array([ 1., 1.])\n array([[ 1., 0.],\n [ 0., 1.]])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n _assertFinite(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n signature = 'D->DD' if isComplexType(t) else 'd->DD'\n w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)\n\n if not isComplexType(t) and all(w.imag == 0.0):\n w = w.real\n vt = vt.real\n result_t = _realType(result_t)\n else:\n result_t = _complexType(result_t)\n\n vt = vt.astype(result_t, copy=False)\n return w.astype(result_t, copy=False), wrap(vt)\n\n\ndef eigh(a, UPLO='L'):\n \"\"\"\n Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.\n\n Returns two objects, a 1-D array containing the eigenvalues of `a`, and\n a 2-D square array or matrix (depending on the input type) of the\n corresponding eigenvectors (in columns).\n\n Parameters\n ----------\n a : (..., M, M) array\n Hermitian/Symmetric matrices whose eigenvalues and\n eigenvectors are to be computed.\n UPLO : {'L', 'U'}, optional\n Specifies whether the calculation is done with the lower triangular\n part of `a` ('L', default) or the upper triangular part ('U').\n Irrespective of this value only the real parts of the diagonal will\n be considered in the computation to preserve the notion of a Hermitian\n matrix. 
It therefore follows that the imaginary part of the diagonal\n will always be treated as zero.\n\n Returns\n -------\n w : (..., M) ndarray\n The eigenvalues in ascending order, each repeated according to\n its multiplicity.\n v : {(..., M, M) ndarray, (..., M, M) matrix}\n The column ``v[:, i]`` is the normalized eigenvector corresponding\n to the eigenvalue ``w[i]``. Will return a matrix object if `a` is\n a matrix object.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigvalsh : eigenvalues of symmetric or Hermitian arrays.\n eig : eigenvalues and right eigenvectors for non-symmetric arrays.\n eigvals : eigenvalues of non-symmetric arrays.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,\n _heevd\n\n The eigenvalues of real symmetric or complex Hermitian matrices are\n always real. [1]_ The array `v` of (column) eigenvectors is unitary\n and `a`, `w`, and `v` satisfy the equations\n ``dot(a, v[:, i]) = w[i] * v[:, i]``.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pg. 222.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, -2j], [2j, 5]])\n >>> a\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> w, v = LA.eigh(a)\n >>> w; v\n array([ 0.17157288, 5.82842712])\n array([[-0.92387953+0.j , -0.38268343+0.j ],\n [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])\n\n >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair\n array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])\n >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair\n array([ 0.+0.j, 0.+0.j])\n\n >>> A = np.matrix(a) # what happens if input is a matrix object\n >>> A\n matrix([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> w, v = LA.eigh(A)\n >>> w; v\n array([ 0.17157288, 5.82842712])\n matrix([[-0.92387953+0.j , -0.38268343+0.j ],\n [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])\n\n >>> # demonstrate the treatment of the imaginary part of the diagonal\n >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])\n >>> a\n array([[ 5.+2.j, 9.-2.j],\n [ 0.+2.j, 2.-1.j]])\n >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:\n >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])\n >>> b\n array([[ 5.+0.j, 0.-2.j],\n [ 0.+2.j, 2.+0.j]])\n >>> wa, va = LA.eigh(a)\n >>> wb, vb = LA.eig(b)\n >>> wa; wb\n array([ 1., 6.])\n array([ 6.+0.j, 1.+0.j])\n >>> va; vb\n array([[-0.44721360-0.j , -0.89442719+0.j ],\n [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])\n array([[ 0.89442719+0.j , 0.00000000-0.4472136j],\n [ 0.00000000-0.4472136j, 0.89442719+0.j ]])\n \"\"\"\n UPLO = UPLO.upper()\n if UPLO not in ('L', 'U'):\n raise ValueError(\"UPLO argument must be 'L' or 'U'\")\n\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n if UPLO == 'L':\n gufunc = _umath_linalg.eigh_lo\n else:\n gufunc = _umath_linalg.eigh_up\n\n signature = 'D->dD' if isComplexType(t) else 'd->dd'\n w, vt = gufunc(a, signature=signature, extobj=extobj)\n w = w.astype(_realType(result_t), copy=False)\n vt = vt.astype(result_t, copy=False)\n return w, wrap(vt)\n\n\n# Singular value decomposition\n\ndef svd(a, full_matrices=True, 
compute_uv=True):\n \"\"\"\n Singular Value Decomposition.\n\n When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh\n = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D\n array of `a`'s singular values. When `a` is higher-dimensional, SVD is\n applied in stacked mode as explained below.\n\n Parameters\n ----------\n a : (..., M, N) array_like\n A real or complex array with ``a.ndim >= 2``.\n full_matrices : bool, optional\n If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and\n ``(..., N, N)``, respectively. Otherwise, the shapes are\n ``(..., M, K)`` and ``(..., K, N)``, respectively, where\n ``K = min(M, N)``.\n compute_uv : bool, optional\n Whether or not to compute `u` and `vh` in addition to `s`. True\n by default.\n\n Returns\n -------\n u : { (..., M, M), (..., M, K) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n s : (..., K) array\n Vector(s) with the singular values, within each vector sorted in\n descending order. The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`.\n vh : { (..., N, N), (..., K, N) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n Notes\n -----\n\n .. versionchanged:: 1.8.0\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The decomposition is performed using LAPACK routine ``_gesdd``.\n\n SVD is usually described for the factorization of a 2D matrix :math:`A`.\n The higher-dimensional case will be discussed below. In the 2D case, SVD is\n written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,\n :math:`S= \\\\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`\n contains the singular values of `a` and `u` and `vh` are unitary. The rows\n of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are\n the eigenvectors of :math:`A A^H`. In both cases the corresponding\n (possibly non-zero) eigenvalues are given by ``s**2``.\n\n If `a` has more than two dimensions, then broadcasting rules apply, as\n explained in :ref:`routines.linalg-broadcasting`. This means that SVD is\n working in \"stacked\" mode: it iterates over all indices of the first\n ``a.ndim - 2`` dimensions and for each combination SVD is applied to the\n last two indices. The matrix `a` can be reconstructed from the\n decomposition with either ``(u * s[..., None, :]) @ vh`` or\n ``u @ (s[..., None] * vh)``. 
(The ``@`` operator can be replaced by the\n function ``np.matmul`` for python versions below 3.5.)\n\n If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are\n all the return values.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)\n >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)\n\n Reconstruction based on full SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((9, 9), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u[:, :6] * s, vh))\n True\n >>> smat = np.zeros((9, 6), dtype=complex)\n >>> smat[:6, :6] = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on reduced SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((9, 6), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u * s, vh))\n True\n >>> smat = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on full SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))\n True\n\n Reconstruction based on reduced SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u, s[..., None] * vh))\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertNoEmpty2d(a)\n _assertRankAtLeast2(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)\n\n m = a.shape[-2]\n n = a.shape[-1]\n if compute_uv:\n if full_matrices:\n if m < n:\n gufunc = _umath_linalg.svd_m_f\n else:\n gufunc = _umath_linalg.svd_n_f\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m_s\n else:\n gufunc = _umath_linalg.svd_n_s\n\n signature = 'D->DdD' if isComplexType(t) else 'd->ddd'\n u, s, vh = gufunc(a, signature=signature, extobj=extobj)\n u = u.astype(result_t, copy=False)\n s = s.astype(_realType(result_t), copy=False)\n vh = vh.astype(result_t, copy=False)\n return wrap(u), s, wrap(vh)\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m\n else:\n gufunc = _umath_linalg.svd_n\n\n signature = 'D->d' if isComplexType(t) else 'd->d'\n s = gufunc(a, signature=signature, extobj=extobj)\n s = s.astype(_realType(result_t), copy=False)\n return s\n\n\ndef cond(x, p=None):\n \"\"\"\n Compute the condition number of a matrix.\n\n This function is capable of returning the condition number using\n one of seven different norms, depending on the value of `p` (see\n Parameters below).\n\n Parameters\n ----------\n x : (..., M, N) array_like\n The matrix whose condition number is sought.\n p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional\n Order of the norm:\n\n ===== ============================\n p norm for matrices\n ===== ============================\n None 2-norm, computed directly using the ``SVD``\n 'fro' Frobenius norm\n inf max(sum(abs(x), axis=1))\n -inf min(sum(abs(x), axis=1))\n 1 max(sum(abs(x), axis=0))\n -1 min(sum(abs(x), axis=0))\n 2 2-norm (largest sing. 
value)\n -2 smallest singular value\n ===== ============================\n\n inf means the numpy.inf object, and the Frobenius norm is\n the root-of-sum-of-squares norm.\n\n Returns\n -------\n c : {float, inf}\n The condition number of the matrix. May be infinite.\n\n See Also\n --------\n numpy.linalg.norm\n\n Notes\n -----\n The condition number of `x` is defined as the norm of `x` times the\n norm of the inverse of `x` [1]_; the norm can be the usual L2-norm\n (root-of-sum-of-squares) or one of a number of other matrix norms.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,\n Academic Press, Inc., 1980, pg. 285.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])\n >>> a\n array([[ 1, 0, -1],\n [ 0, 1, 0],\n [ 1, 0, 1]])\n >>> LA.cond(a)\n 1.4142135623730951\n >>> LA.cond(a, 'fro')\n 3.1622776601683795\n >>> LA.cond(a, np.inf)\n 2.0\n >>> LA.cond(a, -np.inf)\n 1.0\n >>> LA.cond(a, 1)\n 2.0\n >>> LA.cond(a, -1)\n 1.0\n >>> LA.cond(a, 2)\n 1.4142135623730951\n >>> LA.cond(a, -2)\n 0.70710678118654746\n >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))\n 0.70710678118654746\n\n \"\"\"\n x = asarray(x) # in case we have a matrix\n if p is None:\n s = svd(x, compute_uv=False)\n return s[..., 0]/s[..., -1]\n else:\n return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))\n\n\ndef matrix_rank(M, tol=None, hermitian=False):\n \"\"\"\n Return matrix rank of array using SVD method\n\n Rank of the array is the number of singular values of the array that are\n greater than `tol`.\n\n .. versionchanged:: 1.14\n Can now operate on stacks of matrices\n\n Parameters\n ----------\n M : {(M,), (..., M, N)} array_like\n input vector or stack of matrices\n tol : (...) array_like, float, optional\n threshold below which SVD values are considered zero. If `tol` is\n None, and ``S`` is an array with singular values for `M`, and\n ``eps`` is the epsilon value for datatype of ``S``, then `tol` is\n set to ``S.max() * max(M.shape) * eps``.\n\n .. versionchanged:: 1.14\n Broadcasted against the stack of matrices\n hermitian : bool, optional\n If True, `M` is assumed to be Hermitian (symmetric if real-valued),\n enabling a more efficient method for finding singular values.\n Defaults to False.\n\n .. versionadded:: 1.14\n\n Notes\n -----\n The default threshold to detect rank deficiency is a test on the magnitude\n of the singular values of `M`. By default, we identify singular values less\n than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with\n the symbols defined above). This is the algorithm MATLAB uses [1]. It also\n appears in *Numerical recipes* in the discussion of SVD solutions for linear\n least squares [2].\n\n This default threshold is designed to detect rank deficiency accounting for\n the numerical errors of the SVD computation. Imagine that there is a column\n in `M` that is an exact (in floating point) linear combination of other\n columns in `M`. Computing the SVD on `M` will not produce a singular value\n exactly equal to 0 in general: any difference of the smallest SVD value from\n 0 will be caused by numerical imprecision in the calculation of the SVD.\n Our threshold for small SVD values takes this numerical imprecision into\n account, and the default threshold will detect such numerical rank\n deficiency. 
The threshold may declare a matrix `M` rank deficient even if\n the linear combination of some columns of `M` is not exactly equal to\n another column of `M` but only numerically very close to another column of\n `M`.\n\n We chose our default threshold because it is in wide use. Other thresholds\n are possible. For example, elsewhere in the 2007 edition of *Numerical\n recipes* there is an alternative threshold of ``S.max() *\n np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe\n this threshold as being based on \"expected roundoff error\" (p 71).\n\n The thresholds above deal with floating point roundoff error in the\n calculation of the SVD. However, you may have more information about the\n sources of error in `M` that would make you consider other tolerance values\n to detect *effective* rank deficiency. The most useful measure of the\n tolerance depends on the operations you intend to use on your matrix. For\n example, if your data come from uncertain measurements with uncertainties\n greater than floating point epsilon, choosing a tolerance near that\n uncertainty may be preferable. The tolerance may be absolute if the\n uncertainties are absolute rather than relative.\n\n References\n ----------\n .. [1] MATLAB reference documention, \"Rank\"\n http://www.mathworks.com/help/techdoc/ref/rank.html\n .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,\n \"Numerical Recipes (3rd edition)\", Cambridge University Press, 2007,\n page 795.\n\n Examples\n --------\n >>> from numpy.linalg import matrix_rank\n >>> matrix_rank(np.eye(4)) # Full rank matrix\n 4\n >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix\n >>> matrix_rank(I)\n 3\n >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0\n 1\n >>> matrix_rank(np.zeros((4,)))\n 0\n \"\"\"\n M = asarray(M)\n if M.ndim < 2:\n return int(not all(M==0))\n if hermitian:\n S = abs(eigvalsh(M))\n else:\n S = svd(M, compute_uv=False)\n if tol is None:\n tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps\n else:\n tol = asarray(tol)[..., newaxis]\n return count_nonzero(S > tol, axis=-1)\n\n\n# Generalized inverse\n\ndef pinv(a, rcond=1e-15 ):\n \"\"\"\n Compute the (Moore-Penrose) pseudo-inverse of a matrix.\n\n Calculate the generalized inverse of a matrix using its\n singular-value decomposition (SVD) and including all\n *large* singular values.\n\n .. versionchanged:: 1.14\n Can now operate on stacks of matrices\n\n Parameters\n ----------\n a : (..., M, N) array_like\n Matrix or stack of matrices to be pseudo-inverted.\n rcond : (...) array_like of float\n Cutoff for small singular values.\n Singular values smaller (in modulus) than\n `rcond` * largest_singular_value (again, in modulus)\n are set to zero. Broadcasts against the stack of matrices\n\n Returns\n -------\n B : (..., N, M) ndarray\n The pseudo-inverse of `a`. 
If `a` is a `matrix` instance, then so\n is `B`.\n\n Raises\n ------\n LinAlgError\n If the SVD computation does not converge.\n\n Notes\n -----\n The pseudo-inverse of a matrix A, denoted :math:`A^+`, is\n defined as: \"the matrix that 'solves' [the least-squares problem]\n :math:`Ax = b`,\" i.e., if :math:`\\\\bar{x}` is said solution, then\n :math:`A^+` is that matrix such that :math:`\\\\bar{x} = A^+b`.\n\n It can be shown that if :math:`Q_1 \\\\Sigma Q_2^T = A` is the singular\n value decomposition of A, then\n :math:`A^+ = Q_2 \\\\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are\n orthogonal matrices, :math:`\\\\Sigma` is a diagonal matrix consisting\n of A's so-called singular values, (followed, typically, by\n zeros), and then :math:`\\\\Sigma^+` is simply the diagonal matrix\n consisting of the reciprocals of A's singular values\n (again, followed by zeros). [1]_\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pp. 139-142.\n\n Examples\n --------\n The following example checks that ``a * a+ * a == a`` and\n ``a+ * a * a+ == a+``:\n\n >>> a = np.random.randn(9, 6)\n >>> B = np.linalg.pinv(a)\n >>> np.allclose(a, np.dot(a, np.dot(B, a)))\n True\n >>> np.allclose(B, np.dot(B, np.dot(a, B)))\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n rcond = asarray(rcond)\n if _isEmpty2d(a):\n res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)\n return wrap(res)\n a = a.conjugate()\n u, s, vt = svd(a, full_matrices=False)\n\n # discard small singular values\n cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)\n large = s > cutoff\n s = divide(1, s, where=large, out=s)\n s[~large] = 0\n\n res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))\n return wrap(res)\n\n# Determinant\n\ndef slogdet(a):\n \"\"\"\n Compute the sign and (natural) logarithm of the determinant of an array.\n\n If an array has a very small or very large determinant, then a call to\n `det` may overflow or underflow. This routine is more robust against such\n issues, because it computes the logarithm of the determinant rather than\n the determinant itself.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Input array, has to be a square 2-D array.\n\n Returns\n -------\n sign : (...) array_like\n A number representing the sign of the determinant. For a real matrix,\n this is 1, 0, or -1. For a complex matrix, this is a complex number\n with absolute value 1 (i.e., it is on the unit circle), or else 0.\n logdet : (...) array_like\n The natural log of the absolute value of the determinant.\n\n If the determinant is zero, then `sign` will be 0 and `logdet` will be\n -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.\n\n See Also\n --------\n det\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n .. 
versionadded:: 1.6.0\n\n The determinant is computed via LU factorization using the LAPACK\n routine z/dgetrf.\n\n\n Examples\n --------\n The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:\n\n >>> a = np.array([[1, 2], [3, 4]])\n >>> (sign, logdet) = np.linalg.slogdet(a)\n >>> (sign, logdet)\n (-1, 0.69314718055994529)\n >>> sign * np.exp(logdet)\n -2.0\n\n Computing log-determinants for a stack of matrices:\n\n >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])\n >>> a.shape\n (3, 2, 2)\n >>> sign, logdet = np.linalg.slogdet(a)\n >>> (sign, logdet)\n (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))\n >>> sign * np.exp(logdet)\n array([-2., -3., -8.])\n\n This routine succeeds where ordinary `det` does not:\n\n >>> np.linalg.det(np.eye(500) * 0.1)\n 0.0\n >>> np.linalg.slogdet(np.eye(500) * 0.1)\n (1, -1151.2925464970228)\n\n \"\"\"\n a = asarray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n real_t = _realType(result_t)\n signature = 'D->Dd' if isComplexType(t) else 'd->dd'\n sign, logdet = _umath_linalg.slogdet(a, signature=signature)\n sign = sign.astype(result_t, copy=False)\n logdet = logdet.astype(real_t, copy=False)\n return sign, logdet\n\ndef det(a):\n \"\"\"\n Compute the determinant of an array.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Input array to compute determinants for.\n\n Returns\n -------\n det : (...) array_like\n Determinant of `a`.\n\n See Also\n --------\n slogdet : Another way to representing the determinant, more suitable\n for large matrices where underflow/overflow may occur.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The determinant is computed via LU factorization using the LAPACK\n routine z/dgetrf.\n\n Examples\n --------\n The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:\n\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.linalg.det(a)\n -2.0\n\n Computing determinants for a stack of matrices:\n\n >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])\n >>> a.shape\n (3, 2, 2)\n >>> np.linalg.det(a)\n array([-2., -3., -8.])\n\n \"\"\"\n a = asarray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n signature = 'D->D' if isComplexType(t) else 'd->d'\n r = _umath_linalg.det(a, signature=signature)\n r = r.astype(result_t, copy=False)\n return r\n\n# Linear Least Squares\n\ndef lstsq(a, b, rcond=\"warn\"):\n \"\"\"\n Return the least-squares solution to a linear matrix equation.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : (M, N) array_like\n \"Coefficient\" matrix.\n b : {(M,), (M, K)} array_like\n Ordinate or \"dependent variable\" values. 
If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n rcond : float, optional\n Cut-off ratio for small singular values of `a`.\n For the purposes of rank determination, singular values are treated\n as zero if they are smaller than `rcond` times the largest singular\n value of `a`.\n\n .. versionchanged:: 1.14.0\n If not set, a FutureWarning is given. The previous default\n of ``-1`` will use the machine precision as `rcond` parameter,\n the new default will use the machine precision times `max(M, N)`.\n To silence the warning and use the new default, use ``rcond=None``,\n to keep using the old behavior, use ``rcond=-1``.\n\n Returns\n -------\n x : {(N,), (N, K)} ndarray\n Least-squares solution. If `b` is two-dimensional,\n the solutions are in the `K` columns of `x`.\n residuals : {(1,), (K,), (0,)} ndarray\n Sums of residuals; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If the rank of `a` is < N or M <= N, this is an empty array.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : int\n Rank of matrix `a`.\n s : (min(M, N),) ndarray\n Singular values of `a`.\n\n Raises\n ------\n LinAlgError\n If computation does not converge.\n\n Notes\n -----\n If `b` is a matrix, then all array results are returned as matrices.\n\n Examples\n --------\n Fit a line, ``y = mx + c``, through some noisy data-points:\n\n >>> x = np.array([0, 1, 2, 3])\n >>> y = np.array([-1, 0.2, 0.9, 2.1])\n\n By examining the coefficients, we see that the line should have a\n gradient of roughly 1 and cut the y-axis at, more or less, -1.\n\n We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``\n and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:\n\n >>> A = np.vstack([x, np.ones(len(x))]).T\n >>> A\n array([[ 0., 1.],\n [ 1., 1.],\n [ 2., 1.],\n [ 3., 1.]])\n\n >>> m, c = np.linalg.lstsq(A, y)[0]\n >>> print(m, c)\n 1.0 -0.95\n\n Plot the data along with the fitted line:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o', label='Original data', markersize=10)\n >>> plt.plot(x, m*x + c, 'r', label='Fitted line')\n >>> plt.legend()\n >>> plt.show()\n\n \"\"\"\n import math\n a, _ = _makearray(a)\n b, wrap = _makearray(b)\n is_1d = b.ndim == 1\n if is_1d:\n b = b[:, newaxis]\n _assertRank2(a, b)\n _assertNoEmpty2d(a, b) # TODO: relax this constraint\n m = a.shape[0]\n n = a.shape[1]\n n_rhs = b.shape[1]\n ldb = max(n, m)\n if m != b.shape[0]:\n raise LinAlgError('Incompatible dimensions')\n\n t, result_t = _commonType(a, b)\n real_t = _linalgRealType(t)\n result_real_t = _realType(result_t)\n\n # Determine default rcond value\n if rcond == \"warn\":\n # 2017-08-19, 1.14.0\n warnings.warn(\"`rcond` parameter will change to the default of \"\n \"machine precision times ``max(M, N)`` where M and N \"\n \"are the input matrix dimensions.\\n\"\n \"To use the future default and silence this warning \"\n \"we advise to pass `rcond=None`, to keep using the old, \"\n \"explicitly pass `rcond=-1`.\",\n FutureWarning, stacklevel=2)\n rcond = -1\n if rcond is None:\n rcond = finfo(t).eps * ldb\n\n bstar = zeros((ldb, n_rhs), t)\n bstar[:m, :n_rhs] = b\n a, bstar = _fastCopyAndTranspose(t, a, bstar)\n a, bstar = _to_native_byte_order(a, bstar)\n s = zeros((min(m, n),), real_t)\n # This line:\n # * is incorrect, according to the LAPACK documentation\n # * raises a ValueError if min(m,n) == 0\n # * should not be calculated here anyway, as LAPACK should calculate\n # `liwork` for us. 
But that only works if our version of lapack does\n # not have this bug:\n # http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html\n # Lapack_lite does have that bug...\n nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )\n iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgelsd\n lwork = 1\n rwork = zeros((lwork,), real_t)\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, -1, rwork, iwork, 0)\n lrwork = int(rwork[0])\n lwork = int(work[0].real)\n work = zeros((lwork,), t)\n rwork = zeros((lrwork,), real_t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, lwork, rwork, iwork, 0)\n else:\n lapack_routine = lapack_lite.dgelsd\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, -1, iwork, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, lwork, iwork, 0)\n if results['info'] > 0:\n raise LinAlgError('SVD did not converge in Linear Least Squares')\n\n # undo transpose imposed by fortran-order arrays\n b_out = bstar.T\n\n # b_out contains both the solution and the components of the residuals\n x = b_out[:n,:]\n r_parts = b_out[n:,:]\n if isComplexType(t):\n resids = sum(abs(r_parts)**2, axis=-2)\n else:\n resids = sum(r_parts**2, axis=-2)\n\n rank = results['rank']\n\n # remove the axis we added\n if is_1d:\n x = x.squeeze(axis=-1)\n # we probably should squeeze resids too, but we can't\n # without breaking compatibility.\n\n # as documented\n if rank != n or m <= n:\n resids = array([], result_real_t)\n\n # coerce output arrays\n s = s.astype(result_real_t, copy=False)\n resids = resids.astype(result_real_t, copy=False)\n x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed\n return wrap(x), wrap(resids), rank, s\n\n\ndef _multi_svd_norm(x, row_axis, col_axis, op):\n \"\"\"Compute a function of the singular values of the 2-D matrices in `x`.\n\n This is a private utility function used by numpy.linalg.norm().\n\n Parameters\n ----------\n x : ndarray\n row_axis, col_axis : int\n The axes of `x` that hold the 2-D matrices.\n op : callable\n This should be either numpy.amin or numpy.amax or numpy.sum.\n\n Returns\n -------\n result : float or ndarray\n If `x` is 2-D, the return values is a float.\n Otherwise, it is an array with ``x.ndim - 2`` dimensions.\n The return values are either the minimum or maximum or sum of the\n singular values of the matrices, depending on whether `op`\n is `numpy.amin` or `numpy.amax` or `numpy.sum`.\n\n \"\"\"\n y = moveaxis(x, (row_axis, col_axis), (-2, -1))\n result = op(svd(y, compute_uv=0), axis=-1)\n return result\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"\n Matrix or vector norm.\n\n This function is able to return one of eight different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like\n Input array. If `axis` is None, `x` must be 1-D or 2-D.\n ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object.\n axis : {int, 2-tuple of ints, None}, optional\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. 
If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned.\n keepdims : bool, optional\n If this is set to True, the axes which are normed over are left in the\n result as dimensions with size one. With this option the result will\n broadcast correctly against the original `x`.\n\n .. versionadded:: 1.10.0\n\n Returns\n -------\n n : float or ndarray\n Norm of the matrix or vector(s).\n\n Notes\n -----\n For values of ``ord <= 0``, the result is, strictly speaking, not a\n mathematical 'norm', but it may still be useful for various numerical\n purposes.\n\n The following norms can be calculated:\n\n ===== ============================ ==========================\n ord norm for matrices norm for vectors\n ===== ============================ ==========================\n None Frobenius norm 2-norm\n 'fro' Frobenius norm --\n 'nuc' nuclear norm --\n inf max(sum(abs(x), axis=1)) max(abs(x))\n -inf min(sum(abs(x), axis=1)) min(abs(x))\n 0 -- sum(x != 0)\n 1 max(sum(abs(x), axis=0)) as below\n -1 min(sum(abs(x), axis=0)) as below\n 2 2-norm (largest sing. value) as below\n -2 smallest singular value as below\n other -- sum(abs(x)**ord)**(1./ord)\n ===== ============================ ==========================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n The nuclear norm is the sum of the singular values.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, -1, 0, 1, 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> LA.norm(a)\n 7.745966692414834\n >>> LA.norm(b)\n 7.745966692414834\n >>> LA.norm(b, 'fro')\n 7.745966692414834\n >>> LA.norm(a, np.inf)\n 4.0\n >>> LA.norm(b, np.inf)\n 9.0\n >>> LA.norm(a, -np.inf)\n 0.0\n >>> LA.norm(b, -np.inf)\n 2.0\n\n >>> LA.norm(a, 1)\n 20.0\n >>> LA.norm(b, 1)\n 7.0\n >>> LA.norm(a, -1)\n -4.6566128774142013e-010\n >>> LA.norm(b, -1)\n 6.0\n >>> LA.norm(a, 2)\n 7.745966692414834\n >>> LA.norm(b, 2)\n 7.3484692283495345\n\n >>> LA.norm(a, -2)\n nan\n >>> LA.norm(b, -2)\n 1.8570331885190563e-016\n >>> LA.norm(a, 3)\n 5.8480354764257312\n >>> LA.norm(a, -3)\n nan\n\n Using the `axis` argument to compute vector norms:\n\n >>> c = np.array([[ 1, 2, 3],\n ... [-1, 1, 4]])\n >>> LA.norm(c, axis=0)\n array([ 1.41421356, 2.23606798, 5. 
])\n >>> LA.norm(c, axis=1)\n array([ 3.74165739, 4.24264069])\n >>> LA.norm(c, ord=1, axis=1)\n array([ 6., 6.])\n\n Using the `axis` argument to compute matrix norms:\n\n >>> m = np.arange(8).reshape(2,2,2)\n >>> LA.norm(m, axis=(1,2))\n array([ 3.74165739, 11.22497216])\n >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])\n (3.7416573867739413, 11.224972160321824)\n\n \"\"\"\n x = asarray(x)\n\n if not issubclass(x.dtype.type, (inexact, object_)):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if ((ord is None) or\n (ord in ('f', 'fro') and ndim == 2) or\n (ord == 2 and ndim == 1)):\n\n x = x.ravel(order='K')\n if isComplexType(x.dtype.type):\n sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)\n else:\n sqnorm = dot(x, x)\n ret = sqrt(sqnorm)\n if keepdims:\n ret = ret.reshape(ndim*[1])\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\"'axis' must be None, an integer or a tuple of integers\")\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)\n elif ord == 1:\n # special case for speedup\n return add.reduce(abs(x), axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n s = (x.conj() * x).real\n return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))\n else:\n try:\n ord + 1\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n absx = abs(x)\n absx **= ord\n ret = add.reduce(absx, axis=axis, keepdims=keepdims)\n ret **= (1 / ord)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n row_axis = normalize_axis_index(row_axis, nd)\n col_axis = normalize_axis_index(col_axis, nd)\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 2:\n ret = _multi_svd_norm(x, row_axis, col_axis, amax)\n elif ord == -2:\n ret = _multi_svd_norm(x, row_axis, col_axis, amin)\n elif ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)\n elif ord == Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)\n elif ord == -Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))\n elif ord == 'nuc':\n ret = _multi_svd_norm(x, row_axis, col_axis, sum)\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n\n# multi_dot\n\ndef multi_dot(arrays):\n \"\"\"\n Compute the dot product of two or more arrays in a single function call,\n while automatically selecting the fastest evaluation order.\n\n `multi_dot` chains `numpy.dot` and uses optimal parenthesization\n of the matrices [1]_ [2]_. 
Depending on the shapes of the matrices,\n this can speed up the multiplication a lot.\n\n If the first argument is 1-D it is treated as a row vector.\n If the last argument is 1-D it is treated as a column vector.\n The other arguments must be 2-D.\n\n Think of `multi_dot` as::\n\n def multi_dot(arrays): return functools.reduce(np.dot, arrays)\n\n\n Parameters\n ----------\n arrays : sequence of array_like\n If the first argument is 1-D it is treated as row vector.\n If the last argument is 1-D it is treated as column vector.\n The other arguments must be 2-D.\n\n Returns\n -------\n output : ndarray\n Returns the dot product of the supplied arrays.\n\n See Also\n --------\n dot : dot multiplication with two arguments.\n\n References\n ----------\n\n .. [1] Cormen, \"Introduction to Algorithms\", Chapter 15.2, p. 370-378\n .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication\n\n Examples\n --------\n `multi_dot` allows you to write::\n\n >>> from numpy.linalg import multi_dot\n >>> # Prepare some data\n >>> A = np.random.random(10000, 100)\n >>> B = np.random.random(100, 1000)\n >>> C = np.random.random(1000, 5)\n >>> D = np.random.random(5, 333)\n >>> # the actual dot multiplication\n >>> multi_dot([A, B, C, D])\n\n instead of::\n\n >>> np.dot(np.dot(np.dot(A, B), C), D)\n >>> # or\n >>> A.dot(B).dot(C).dot(D)\n\n Notes\n -----\n The cost for a matrix multiplication can be calculated with the\n following function::\n\n def cost(A, B):\n return A.shape[0] * A.shape[1] * B.shape[1]\n\n Let's assume we have three matrices\n :math:`A_{10x100}, B_{100x5}, C_{5x50}`.\n\n The costs for the two different parenthesizations are as follows::\n\n cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500\n cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000\n\n \"\"\"\n n = len(arrays)\n # optimization only makes sense for len(arrays) > 2\n if n < 2:\n raise ValueError(\"Expecting at least two arrays.\")\n elif n == 2:\n return dot(arrays[0], arrays[1])\n\n arrays = [asanyarray(a) for a in arrays]\n\n # save original ndim to reshape the result array into the proper form later\n ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim\n # Explicitly convert vectors to 2D arrays to keep the logic of the internal\n # _multi_dot_* functions as simple as possible.\n if arrays[0].ndim == 1:\n arrays[0] = atleast_2d(arrays[0])\n if arrays[-1].ndim == 1:\n arrays[-1] = atleast_2d(arrays[-1]).T\n _assertRank2(*arrays)\n\n # _multi_dot_three is much faster than _multi_dot_matrix_chain_order\n if n == 3:\n result = _multi_dot_three(arrays[0], arrays[1], arrays[2])\n else:\n order = _multi_dot_matrix_chain_order(arrays)\n result = _multi_dot(arrays, order, 0, n - 1)\n\n # return proper shape\n if ndim_first == 1 and ndim_last == 1:\n return result[0, 0] # scalar\n elif ndim_first == 1 or ndim_last == 1:\n return result.ravel() # 1-D\n else:\n return result\n\n\ndef _multi_dot_three(A, B, C):\n \"\"\"\n Find the best order for three arrays and do the multiplication.\n\n For three arguments `_multi_dot_three` is approximately 15 times faster\n than `_multi_dot_matrix_chain_order`\n\n \"\"\"\n a0, a1b0 = A.shape\n b1c0, c1 = C.shape\n # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1\n cost1 = a0 * b1c0 * (a1b0 + c1)\n # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1\n cost2 = a1b0 * c1 * (a0 + b1c0)\n\n if cost1 < cost2:\n return dot(dot(A, B), C)\n else:\n return dot(A, dot(B, C))\n\n\ndef _multi_dot_matrix_chain_order(arrays, return_costs=False):\n \"\"\"\n Return a np.array that encodes the 
optimal order of mutiplications.\n\n The optimal order array is then used by `_multi_dot()` to do the\n multiplication.\n\n Also return the cost matrix if `return_costs` is `True`\n\n The implementation CLOSELY follows Cormen, \"Introduction to Algorithms\",\n Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.\n\n cost[i, j] = min([\n cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)\n for k in range(i, j)])\n\n \"\"\"\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = zeros((n, n), dtype=double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = empty((n, n), dtype=intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = Inf\n for k in range(i, j):\n q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n\n return (s, m) if return_costs else s\n\n\ndef _multi_dot(arrays, order, i, j):\n \"\"\"Actually do the multiplication with the given order.\"\"\"\n if i == j:\n return arrays[i]\n else:\n return dot(_multi_dot(arrays, order, i, order[i, j]),\n _multi_dot(arrays, order, order[i, j] + 1, j))\n",
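The multi_dot docstring in the file above works through the cost of the two parenthesizations of A·B·C. The short sketch below is my own illustration (not part of the source file): it reproduces that cost comparison with concrete shapes and checks that numpy.linalg.multi_dot agrees with the naively chained dot. Note that np.random.random expects a single shape tuple, so the shapes are passed as tuples here.

import numpy as np

# Matrices matching the docstring's cost example: A is 10x100, B is 100x5, C is 5x50.
A = np.random.random((10, 100))
B = np.random.random((100, 5))
C = np.random.random((5, 50))

def cost(p, q):
    # scalar multiplications for a (p.rows x p.cols) @ (q.rows x q.cols) product
    return p.shape[0] * p.shape[1] * q.shape[1]

# cost((AB)C) = 10*100*5 + 10*5*50 = 7500 ; cost(A(BC)) = 100*5*50 + 10*100*50 = 75000
cost_ab_c = cost(A, B) + cost(np.dot(A, B), C)
cost_a_bc = cost(B, C) + cost(A, np.dot(B, C))
print(cost_ab_c, cost_a_bc)  # 7500 75000

# multi_dot picks the cheaper order but returns the same product as chained dot.
assert np.allclose(np.linalg.multi_dot([A, B, C]), A.dot(B).dot(C))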
"from __future__ import absolute_import, division, print_function\n\nimport six\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.testing.decorators import image_comparison\n\nfrom mpl_toolkits.axes_grid1 import host_subplot\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.axes_grid1 import AxesGrid\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset\nfrom mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar\n\nfrom matplotlib.colors import LogNorm\nfrom itertools import product\n\nimport numpy as np\n\n\n@image_comparison(baseline_images=['divider_append_axes'])\ndef test_divider_append_axes():\n\n # the random data\n np.random.seed(0)\n x = np.random.randn(1000)\n y = np.random.randn(1000)\n\n fig, axScatter = plt.subplots()\n\n # the scatter plot:\n axScatter.scatter(x, y)\n\n # create new axes on the right and on the top of the current axes\n # The first argument of the new_vertical(new_horizontal) method is\n # the height (width) of the axes to be created in inches.\n divider = make_axes_locatable(axScatter)\n axHistbot = divider.append_axes(\"bottom\", 1.2, pad=0.1, sharex=axScatter)\n axHistright = divider.append_axes(\"right\", 1.2, pad=0.1, sharey=axScatter)\n axHistleft = divider.append_axes(\"left\", 1.2, pad=0.1, sharey=axScatter)\n axHisttop = divider.append_axes(\"top\", 1.2, pad=0.1, sharex=axScatter)\n\n # now determine nice limits by hand:\n binwidth = 0.25\n xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))\n lim = (int(xymax/binwidth) + 1) * binwidth\n\n bins = np.arange(-lim, lim + binwidth, binwidth)\n axHisttop.hist(x, bins=bins)\n axHistbot.hist(x, bins=bins)\n axHistleft.hist(y, bins=bins, orientation='horizontal')\n axHistright.hist(y, bins=bins, orientation='horizontal')\n\n axHistbot.invert_yaxis()\n axHistleft.invert_xaxis()\n\n axHisttop.xaxis.set_ticklabels(())\n axHistbot.xaxis.set_ticklabels(())\n axHistleft.yaxis.set_ticklabels(())\n axHistright.yaxis.set_ticklabels(())\n\n\n@image_comparison(baseline_images=['twin_axes_empty_and_removed'],\n extensions=[\"png\"], tol=1)\ndef test_twin_axes_empty_and_removed():\n # Purely cosmetic font changes (avoid overlap)\n matplotlib.rcParams.update({\"font.size\": 8})\n matplotlib.rcParams.update({\"xtick.labelsize\": 8})\n matplotlib.rcParams.update({\"ytick.labelsize\": 8})\n generators = [ \"twinx\", \"twiny\", \"twin\" ]\n modifiers = [ \"\", \"host invisible\", \"twin removed\", \"twin invisible\",\n \"twin removed\\nhost invisible\" ]\n # Unmodified host subplot at the beginning for reference\n h = host_subplot(len(modifiers)+1, len(generators), 2)\n h.text(0.5, 0.5, \"host_subplot\", horizontalalignment=\"center\",\n verticalalignment=\"center\")\n # Host subplots with various modifications (twin*, visibility) applied\n for i, (mod, gen) in enumerate(product(modifiers, generators),\n len(generators)+1):\n h = host_subplot(len(modifiers)+1, len(generators), i)\n t = getattr(h, gen)()\n if \"twin invisible\" in mod:\n t.axis[:].set_visible(False)\n if \"twin removed\" in mod:\n t.remove()\n if \"host invisible\" in mod:\n h.axis[:].set_visible(False)\n h.text(0.5, 0.5, gen + (\"\\n\" + mod if mod else \"\"),\n horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.subplots_adjust(wspace=0.5, hspace=1)\n\n\ndef test_axesgrid_colorbar_log_smoketest():\n fig = plt.figure()\n grid = AxesGrid(fig, 111, # modified to be only subplot\n nrows_ncols=(1, 1),\n label_mode=\"L\",\n cbar_location=\"top\",\n 
cbar_mode=\"single\",\n )\n\n Z = 10000 * np.random.rand(10, 10)\n im = grid[0].imshow(Z, interpolation=\"nearest\", norm=LogNorm())\n\n grid.cbar_axes[0].colorbar(im)\n\n\n@image_comparison(\n baseline_images=['inset_locator'], style='default', extensions=['png'],\n remove_text=True)\ndef test_inset_locator():\n def get_demo_image():\n from matplotlib.cbook import get_sample_data\n import numpy as np\n f = get_sample_data(\"axes_grid/bivariate_normal.npy\", asfileobj=False)\n z = np.load(f)\n # z is a numpy array of 15x15\n return z, (-3, 4, -4, 3)\n\n fig, ax = plt.subplots(figsize=[5, 4])\n\n # prepare the demo image\n Z, extent = get_demo_image()\n Z2 = np.zeros([150, 150], dtype=\"d\")\n ny, nx = Z.shape\n Z2[30:30 + ny, 30:30 + nx] = Z\n\n # extent = [-3, 4, -4, 3]\n ax.imshow(Z2, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n\n axins = zoomed_inset_axes(ax, 6, loc=1) # zoom = 6\n axins.imshow(Z2, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n axins.yaxis.get_major_locator().set_params(nbins=7)\n axins.xaxis.get_major_locator().set_params(nbins=7)\n # sub region of the original image\n x1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9\n axins.set_xlim(x1, x2)\n axins.set_ylim(y1, y2)\n\n plt.xticks(visible=False)\n plt.yticks(visible=False)\n\n # draw a bbox of the region of the inset axes in the parent axes and\n # connecting lines between the bbox and the inset axes area\n mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\")\n\n asb = AnchoredSizeBar(ax.transData,\n 0.5,\n '0.5',\n loc=8,\n pad=0.1, borderpad=0.5, sep=5,\n frameon=False)\n ax.add_artist(asb)\n\n\n@image_comparison(baseline_images=['zoomed_axes',\n 'inverted_zoomed_axes'],\n extensions=['png'])\ndef test_zooming_with_inverted_axes():\n fig, ax = plt.subplots()\n ax.plot([1, 2, 3], [1, 2, 3])\n ax.axis([1, 3, 1, 3])\n inset_ax = zoomed_inset_axes(ax, zoom=2.5, loc=4)\n inset_ax.axis([1.1, 1.4, 1.1, 1.4])\n\n fig, ax = plt.subplots()\n ax.plot([1, 2, 3], [1, 2, 3])\n ax.axis([3, 1, 3, 1])\n inset_ax = zoomed_inset_axes(ax, zoom=2.5, loc=4)\n inset_ax.axis([1.4, 1.1, 1.4, 1.1])\n",
"#!/usr/bin/env python\n# coding: utf-8\n\nimport unittest\nfrom pandas.msgpack import packb, unpackb\n\n\nclass DummyException(Exception):\n pass\n\n\nclass TestExceptions(unittest.TestCase):\n\n def test_raise_on_find_unsupported_value(self):\n import datetime\n self.assertRaises(TypeError, packb, datetime.datetime.now())\n\n def test_raise_from_object_hook(self):\n def hook(obj):\n raise DummyException\n\n self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)\n self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}),\n object_hook=hook)\n self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}),\n object_pairs_hook=hook)\n self.assertRaises(DummyException, unpackb,\n packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)\n self.assertRaises(DummyException, unpackb,\n packb({'fizz': {'buzz': 'spam'}}),\n object_pairs_hook=hook)\n\n def test_invalidvalue(self):\n self.assertRaises(ValueError, unpackb, b'\\xd9\\x97#DL_')\n",
"\"\"\"Generic interface for least-square minimization.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom warnings import warn\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.sparse import issparse, csr_matrix\nfrom scipy.sparse.linalg import LinearOperator\nfrom scipy.optimize import _minpack, OptimizeResult\nfrom scipy.optimize._numdiff import approx_derivative, group_columns\nfrom scipy._lib.six import string_types\n\nfrom .trf import trf\nfrom .dogbox import dogbox\nfrom .common import EPS, in_bounds, make_strictly_feasible\n\n\nTERMINATION_MESSAGES = {\n -1: \"Improper input parameters status returned from `leastsq`\",\n 0: \"The maximum number of function evaluations is exceeded.\",\n 1: \"`gtol` termination condition is satisfied.\",\n 2: \"`ftol` termination condition is satisfied.\",\n 3: \"`xtol` termination condition is satisfied.\",\n 4: \"Both `ftol` and `xtol` termination conditions are satisfied.\"\n}\n\n\nFROM_MINPACK_TO_COMMON = {\n 0: -1, # Improper input parameters from MINPACK.\n 1: 2,\n 2: 3,\n 3: 4,\n 4: 1,\n 5: 0\n # There are 6, 7, 8 for too small tolerance parameters,\n # but we guard against it by checking ftol, xtol, gtol beforehand.\n}\n\n\ndef call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):\n n = x0.size\n\n if diff_step is None:\n epsfcn = EPS\n else:\n epsfcn = diff_step**2\n\n # Compute MINPACK's `diag`, which is inverse of our `x_scale` and\n # ``x_scale='jac'`` corresponds to ``diag=None``.\n if isinstance(x_scale, string_types) and x_scale == 'jac':\n diag = None\n else:\n diag = 1 / x_scale\n\n full_output = True\n col_deriv = False\n factor = 100.0\n\n if jac is None:\n if max_nfev is None:\n # n squared to account for Jacobian evaluations.\n max_nfev = 100 * n * (n + 1)\n x, info, status = _minpack._lmdif(\n fun, x0, (), full_output, ftol, xtol, gtol,\n max_nfev, epsfcn, factor, diag)\n else:\n if max_nfev is None:\n max_nfev = 100 * n\n x, info, status = _minpack._lmder(\n fun, jac, x0, (), full_output, col_deriv,\n ftol, xtol, gtol, max_nfev, factor, diag)\n\n f = info['fvec']\n\n if callable(jac):\n J = jac(x)\n else:\n J = np.atleast_2d(approx_derivative(fun, x))\n\n cost = 0.5 * np.dot(f, f)\n g = J.T.dot(f)\n g_norm = norm(g, ord=np.inf)\n\n nfev = info['nfev']\n njev = info.get('njev', None)\n\n status = FROM_MINPACK_TO_COMMON[status]\n active_mask = np.zeros_like(x0, dtype=int)\n\n return OptimizeResult(\n x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,\n active_mask=active_mask, nfev=nfev, njev=njev, status=status)\n\n\ndef prepare_bounds(bounds, n):\n lb, ub = [np.asarray(b, dtype=float) for b in bounds]\n if lb.ndim == 0:\n lb = np.resize(lb, n)\n\n if ub.ndim == 0:\n ub = np.resize(ub, n)\n\n return lb, ub\n\n\ndef check_tolerance(ftol, xtol, gtol):\n message = \"{} is too low, setting to machine epsilon {}.\"\n if ftol < EPS:\n warn(message.format(\"`ftol`\", EPS))\n ftol = EPS\n if xtol < EPS:\n warn(message.format(\"`xtol`\", EPS))\n xtol = EPS\n if gtol < EPS:\n warn(message.format(\"`gtol`\", EPS))\n gtol = EPS\n\n return ftol, xtol, gtol\n\n\ndef check_x_scale(x_scale, x0):\n if isinstance(x_scale, string_types) and x_scale == 'jac':\n return x_scale\n\n try:\n x_scale = np.asarray(x_scale, dtype=float)\n valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)\n except (ValueError, TypeError):\n valid = False\n\n if not valid:\n raise ValueError(\"`x_scale` must be 'jac' or array_like with \"\n \"positive numbers.\")\n\n if x_scale.ndim == 0:\n 
x_scale = np.resize(x_scale, x0.shape)\n\n if x_scale.shape != x0.shape:\n raise ValueError(\"Inconsistent shapes between `x_scale` and `x0`.\")\n\n return x_scale\n\n\ndef check_jac_sparsity(jac_sparsity, m, n):\n if jac_sparsity is None:\n return None\n\n if not issparse(jac_sparsity):\n jac_sparsity = np.atleast_2d(jac_sparsity)\n\n if jac_sparsity.shape != (m, n):\n raise ValueError(\"`jac_sparsity` has wrong shape.\")\n\n return jac_sparsity, group_columns(jac_sparsity)\n\n\n# Loss functions.\n\n\ndef huber(z, rho, cost_only):\n mask = z <= 1\n rho[0, mask] = z[mask]\n rho[0, ~mask] = 2 * z[~mask]**0.5 - 1\n if cost_only:\n return\n rho[1, mask] = 1\n rho[1, ~mask] = z[~mask]**-0.5\n rho[2, mask] = 0\n rho[2, ~mask] = -0.5 * z[~mask]**-1.5\n\n\ndef soft_l1(z, rho, cost_only):\n t = 1 + z\n rho[0] = 2 * (t**0.5 - 1)\n if cost_only:\n return\n rho[1] = t**-0.5\n rho[2] = -0.5 * t**-1.5\n\n\ndef cauchy(z, rho, cost_only):\n rho[0] = np.log1p(z)\n if cost_only:\n return\n t = 1 + z\n rho[1] = 1 / t\n rho[2] = -1 / t**2\n\n\ndef arctan(z, rho, cost_only):\n rho[0] = np.arctan(z)\n if cost_only:\n return\n t = 1 + z**2\n rho[1] = 1 / t\n rho[2] = -2 * z / t**2\n\n\nIMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,\n cauchy=cauchy, arctan=arctan)\n\n\ndef construct_loss_function(m, loss, f_scale):\n if loss == 'linear':\n return None\n\n if not callable(loss):\n loss = IMPLEMENTED_LOSSES[loss]\n rho = np.empty((3, m))\n\n def loss_function(f, cost_only=False):\n z = (f / f_scale) ** 2\n loss(z, rho, cost_only=cost_only)\n if cost_only:\n return 0.5 * f_scale ** 2 * np.sum(rho[0])\n rho[0] *= f_scale ** 2\n rho[2] /= f_scale ** 2\n return rho\n else:\n def loss_function(f, cost_only=False):\n z = (f / f_scale) ** 2\n rho = loss(z)\n if cost_only:\n return 0.5 * f_scale ** 2 * np.sum(rho[0])\n rho[0] *= f_scale ** 2\n rho[2] /= f_scale ** 2\n return rho\n\n return loss_function\n\n\ndef least_squares(\n fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',\n ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',\n f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},\n jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):\n \"\"\"Solve a nonlinear least-squares problem with bounds on the variables.\n\n Given the residuals f(x) (an m-dimensional real function of n real\n variables) and the loss function rho(s) (a scalar function), `least_squares`\n finds a local minimum of the cost function F(x)::\n\n minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)\n subject to lb <= x <= ub\n\n The purpose of the loss function rho(s) is to reduce the influence of\n outliers on the solution.\n\n Parameters\n ----------\n fun : callable\n Function which computes the vector of residuals, with the signature\n ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with\n respect to its first argument. The argument ``x`` passed to this\n function is an ndarray of shape (n,) (never a scalar, even for n=1).\n It must return a 1-d array_like of shape (m,) or a scalar. If the\n argument ``x`` is complex or the function ``fun`` returns complex\n residuals, it must be wrapped in a real function of real arguments,\n as shown at the end of the Examples section.\n x0 : array_like with shape (n,) or float\n Initial guess on independent variables. 
If float, it will be treated\n as a 1-d array with one element.\n jac : {'2-point', '3-point', 'cs', callable}, optional\n Method of computing the Jacobian matrix (an m-by-n matrix, where\n element (i, j) is the partial derivative of f[i] with respect to\n x[j]). The keywords select a finite difference scheme for numerical\n estimation. The scheme '3-point' is more accurate, but requires\n twice as much operations compared to '2-point' (default). The\n scheme 'cs' uses complex steps, and while potentially the most\n accurate, it is applicable only when `fun` correctly handles\n complex inputs and can be analytically continued to the complex\n plane. Method 'lm' always uses the '2-point' scheme. If callable,\n it is used as ``jac(x, *args, **kwargs)`` and should return a\n good approximation (or the exact value) for the Jacobian as an\n array_like (np.atleast_2d is applied), a sparse matrix or a\n `scipy.sparse.linalg.LinearOperator`.\n bounds : 2-tuple of array_like, optional\n Lower and upper bounds on independent variables. Defaults to no bounds.\n Each array must match the size of `x0` or be a scalar, in the latter\n case a bound will be the same for all variables. Use ``np.inf`` with\n an appropriate sign to disable bounds on all or some variables.\n method : {'trf', 'dogbox', 'lm'}, optional\n Algorithm to perform minimization.\n\n * 'trf' : Trust Region Reflective algorithm, particularly suitable\n for large sparse problems with bounds. Generally robust method.\n * 'dogbox' : dogleg algorithm with rectangular trust regions,\n typical use case is small problems with bounds. Not recommended\n for problems with rank-deficient Jacobian.\n * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.\n Doesn't handle bounds and sparse Jacobians. Usually the most\n efficient method for small unconstrained problems.\n\n Default is 'trf'. See Notes for more information.\n ftol : float, optional\n Tolerance for termination by the change of the cost function. Default\n is 1e-8. The optimization process is stopped when ``dF < ftol * F``,\n and there was an adequate agreement between a local quadratic model and\n the true model in the last step.\n xtol : float, optional\n Tolerance for termination by the change of the independent variables.\n Default is 1e-8. The exact condition depends on the `method` used:\n\n * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``\n * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is\n a trust-region radius and ``xs`` is the value of ``x``\n scaled according to `x_scale` parameter (see below).\n\n gtol : float, optional\n Tolerance for termination by the norm of the gradient. Default is 1e-8.\n The exact condition depends on a `method` used:\n\n * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where\n ``g_scaled`` is the value of the gradient scaled to account for\n the presence of the bounds [STIR]_.\n * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where\n ``g_free`` is the gradient with respect to the variables which\n are not in the optimal state on the boundary.\n * For 'lm' : the maximum absolute value of the cosine of angles\n between columns of the Jacobian and the residual vector is less\n than `gtol`, or the residual vector is zero.\n\n x_scale : array_like or 'jac', optional\n Characteristic scale of each variable. 
Setting `x_scale` is equivalent\n to reformulating the problem in scaled variables ``xs = x / x_scale``.\n An alternative view is that the size of a trust region along j-th\n dimension is proportional to ``x_scale[j]``. Improved convergence may\n be achieved by setting `x_scale` such that a step of a given size\n along any of the scaled variables has a similar effect on the cost\n function. If set to 'jac', the scale is iteratively updated using the\n inverse norms of the columns of the Jacobian matrix (as described in\n [JJMore]_).\n loss : str or callable, optional\n Determines the loss function. The following keyword values are allowed:\n\n * 'linear' (default) : ``rho(z) = z``. Gives a standard\n least-squares problem.\n * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth\n approximation of l1 (absolute value) loss. Usually a good\n choice for robust least squares.\n * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works\n similarly to 'soft_l1'.\n * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers\n influence, but may cause difficulties in optimization process.\n * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on\n a single residual, has properties similar to 'cauchy'.\n\n If callable, it must take a 1-d ndarray ``z=f**2`` and return an\n array_like with shape (3, m) where row 0 contains function values,\n row 1 contains first derivatives and row 2 contains second\n derivatives. Method 'lm' supports only 'linear' loss.\n f_scale : float, optional\n Value of soft margin between inlier and outlier residuals, default\n is 1.0. The loss function is evaluated as follows\n ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,\n and ``rho`` is determined by `loss` parameter. This parameter has\n no effect with ``loss='linear'``, but for other `loss` values it is\n of crucial importance.\n max_nfev : None or int, optional\n Maximum number of function evaluations before the termination.\n If None (default), the value is chosen automatically:\n\n * For 'trf' and 'dogbox' : 100 * n.\n * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)\n otherwise (because 'lm' counts function calls in Jacobian\n estimation).\n\n diff_step : None or array_like, optional\n Determines the relative step size for the finite difference\n approximation of the Jacobian. The actual step is computed as\n ``x * diff_step``. If None (default), then `diff_step` is taken to be\n a conventional \"optimal\" power of machine epsilon for the finite\n difference scheme used [NR]_.\n tr_solver : {None, 'exact', 'lsmr'}, optional\n Method for solving trust-region subproblems, relevant only for 'trf'\n and 'dogbox' methods.\n\n * 'exact' is suitable for not very large problems with dense\n Jacobian matrices. The computational complexity per iteration is\n comparable to a singular value decomposition of the Jacobian\n matrix.\n * 'lsmr' is suitable for problems with sparse and large Jacobian\n matrices. 
It uses the iterative procedure\n `scipy.sparse.linalg.lsmr` for finding a solution of a linear\n least-squares problem and only requires matrix-vector product\n evaluations.\n\n If None (default) the solver is chosen based on the type of Jacobian\n returned on the first iteration.\n tr_options : dict, optional\n Keyword options passed to trust-region solver.\n\n * ``tr_solver='exact'``: `tr_options` are ignored.\n * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.\n Additionally ``method='trf'`` supports 'regularize' option\n (bool, default is True) which adds a regularization term to the\n normal equation, which improves convergence if the Jacobian is\n rank-deficient [Byrd]_ (eq. 3.4).\n\n jac_sparsity : {None, array_like, sparse matrix}, optional\n Defines the sparsity structure of the Jacobian matrix for finite\n difference estimation, its shape must be (m, n). If the Jacobian has\n only few non-zero elements in *each* row, providing the sparsity\n structure will greatly speed up the computations [Curtis]_. A zero\n entry means that a corresponding element in the Jacobian is identically\n zero. If provided, forces the use of 'lsmr' trust-region solver.\n If None (default) then dense differencing will be used. Has no effect\n for 'lm' method.\n verbose : {0, 1, 2}, optional\n Level of algorithm's verbosity:\n\n * 0 (default) : work silently.\n * 1 : display a termination report.\n * 2 : display progress during iterations (not supported by 'lm'\n method).\n\n args, kwargs : tuple and dict, optional\n Additional arguments passed to `fun` and `jac`. Both empty by default.\n The calling signature is ``fun(x, *args, **kwargs)`` and the same for\n `jac`.\n\n Returns\n -------\n `OptimizeResult` with the following fields defined:\n x : ndarray, shape (n,)\n Solution found.\n cost : float\n Value of the cost function at the solution.\n fun : ndarray, shape (m,)\n Vector of residuals at the solution.\n jac : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Modified Jacobian matrix at the solution, in the sense that J^T J\n is a Gauss-Newton approximation of the Hessian of the cost function.\n The type is the same as the one used by the algorithm.\n grad : ndarray, shape (m,)\n Gradient of the cost function at the solution.\n optimality : float\n First-order optimality measure. In unconstrained problems, it is always\n the uniform norm of the gradient. In constrained problems, it is the\n quantity which was compared with `gtol` during iterations.\n active_mask : ndarray of int, shape (n,)\n Each component shows whether a corresponding constraint is active\n (that is, whether a variable is at the bound):\n\n * 0 : a constraint is not active.\n * -1 : a lower bound is active.\n * 1 : an upper bound is active.\n\n Might be somewhat arbitrary for 'trf' method as it generates a sequence\n of strictly feasible iterates and `active_mask` is determined within a\n tolerance threshold.\n nfev : int\n Number of function evaluations done. Methods 'trf' and 'dogbox' do not\n count function calls for numerical Jacobian approximation, as opposed\n to 'lm' method.\n njev : int or None\n Number of Jacobian evaluations done. 
If numerical Jacobian\n approximation is used in 'lm' method, it is set to None.\n status : int\n The reason for algorithm termination:\n\n * -1 : improper input parameters status returned from MINPACK.\n * 0 : the maximum number of function evaluations is exceeded.\n * 1 : `gtol` termination condition is satisfied.\n * 2 : `ftol` termination condition is satisfied.\n * 3 : `xtol` termination condition is satisfied.\n * 4 : Both `ftol` and `xtol` termination conditions are satisfied.\n\n message : str\n Verbal description of the termination reason.\n success : bool\n True if one of the convergence criteria is satisfied (`status` > 0).\n\n See Also\n --------\n leastsq : A legacy wrapper for the MINPACK implementation of the\n Levenberg-Marquadt algorithm.\n curve_fit : Least-squares minimization applied to a curve fitting problem.\n\n Notes\n -----\n Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares\n algorithms implemented in MINPACK (lmder, lmdif). It runs the\n Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.\n The implementation is based on paper [JJMore]_, it is very robust and\n efficient with a lot of smart tricks. It should be your first choice\n for unconstrained problems. Note that it doesn't support bounds. Also\n it doesn't work when m < n.\n\n Method 'trf' (Trust Region Reflective) is motivated by the process of\n solving a system of equations, which constitute the first-order optimality\n condition for a bound-constrained minimization problem as formulated in\n [STIR]_. The algorithm iteratively solves trust-region subproblems\n augmented by a special diagonal quadratic term and with trust-region shape\n determined by the distance from the bounds and the direction of the\n gradient. This enhancements help to avoid making steps directly into bounds\n and efficiently explore the whole space of variables. To further improve\n convergence, the algorithm considers search directions reflected from the\n bounds. To obey theoretical requirements, the algorithm keeps iterates\n strictly feasible. With dense Jacobians trust-region subproblems are\n solved by an exact method very similar to the one described in [JJMore]_\n (and implemented in MINPACK). The difference from the MINPACK\n implementation is that a singular value decomposition of a Jacobian\n matrix is done once per iteration, instead of a QR decomposition and series\n of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace\n approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.\n The subspace is spanned by a scaled gradient and an approximate\n Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no\n constraints are imposed the algorithm is very similar to MINPACK and has\n generally comparable performance. The algorithm works quite robust in\n unbounded and bounded problems, thus it is chosen as a default algorithm.\n\n Method 'dogbox' operates in a trust-region framework, but considers\n rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.\n The intersection of a current trust region and initial bounds is again\n rectangular, so on each iteration a quadratic minimization problem subject\n to bound constraints is solved approximately by Powell's dogleg method\n [NumOpt]_. The required Gauss-Newton step can be computed exactly for\n dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large\n sparse Jacobians. 
The algorithm is likely to exhibit slow convergence when\n the rank of Jacobian is less than the number of variables. The algorithm\n often outperforms 'trf' in bounded problems with a small number of\n variables.\n\n Robust loss functions are implemented as described in [BA]_. The idea\n is to modify a residual vector and a Jacobian matrix on each iteration\n such that computed gradient and Gauss-Newton Hessian approximation match\n the true gradient and Hessian approximation of the cost function. Then\n the algorithm proceeds in a normal way, i.e. robust loss functions are\n implemented as a simple wrapper over standard least-squares algorithms.\n\n .. versionadded:: 0.17.0\n\n References\n ----------\n .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 21, Number 1, pp 1-23, 1999.\n .. [NR] William H. Press et. al., \"Numerical Recipes. The Art of Scientific\n Computing. 3rd edition\", Sec. 5.7.\n .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, \"Approximate\n solution of the trust region problem by minimization over\n two-dimensional subspaces\", Math. Programming, 40, pp. 247-263,\n 1988.\n .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, \"On the estimation of\n sparse Jacobian matrices\", Journal of the Institute of\n Mathematics and its Applications, 13, pp. 117-120, 1974.\n .. [JJMore] J. J. More, \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture\n Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n .. [Voglis] C. Voglis and I. E. Lagaris, \"A Rectangular Trust Region\n Dogleg Approach for Unconstrained and Bound Constrained\n Nonlinear Optimization\", WSEAS International Conference on\n Applied Mathematics, Corfu, Greece, 2004.\n .. [NumOpt] J. Nocedal and S. J. Wright, \"Numerical optimization,\n 2nd edition\", Chapter 4.\n .. [BA] B. Triggs et. al., \"Bundle Adjustment - A Modern Synthesis\",\n Proceedings of the International Workshop on Vision Algorithms:\n Theory and Practice, pp. 298-372, 1999.\n\n Examples\n --------\n In this example we find a minimum of the Rosenbrock function without bounds\n on independent variables.\n\n >>> def fun_rosenbrock(x):\n ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])\n\n Notice that we only provide the vector of the residuals. The algorithm\n constructs the cost function as a sum of squares of the residuals, which\n gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.\n\n >>> from scipy.optimize import least_squares\n >>> x0_rosenbrock = np.array([2, 2])\n >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)\n >>> res_1.x\n array([ 1., 1.])\n >>> res_1.cost\n 9.8669242910846867e-30\n >>> res_1.optimality\n 8.8928864934219529e-14\n\n We now constrain the variables, in such a way that the previous solution\n becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and\n ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter\n to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.\n\n We also provide the analytic Jacobian:\n\n >>> def jac_rosenbrock(x):\n ... return np.array([\n ... [-20 * x[0], 10],\n ... [-1, 0]])\n\n Putting this all together, we see that the new solution lies on the bound:\n\n >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,\n ... 
bounds=([-np.inf, 1.5], np.inf))\n >>> res_2.x\n array([ 1.22437075, 1.5 ])\n >>> res_2.cost\n 0.025213093946805685\n >>> res_2.optimality\n 1.5885401433157753e-07\n\n Now we solve a system of equations (i.e., the cost function should be zero\n at a minimum) for a Broyden tridiagonal vector-valued function of 100000\n variables:\n\n >>> def fun_broyden(x):\n ... f = (3 - x) * x + 1\n ... f[1:] -= x[:-1]\n ... f[:-1] -= 2 * x[1:]\n ... return f\n\n The corresponding Jacobian matrix is sparse. We tell the algorithm to\n estimate it by finite differences and provide the sparsity structure of\n Jacobian to significantly speed up this process.\n\n >>> from scipy.sparse import lil_matrix\n >>> def sparsity_broyden(n):\n ... sparsity = lil_matrix((n, n), dtype=int)\n ... i = np.arange(n)\n ... sparsity[i, i] = 1\n ... i = np.arange(1, n)\n ... sparsity[i, i - 1] = 1\n ... i = np.arange(n - 1)\n ... sparsity[i, i + 1] = 1\n ... return sparsity\n ...\n >>> n = 100000\n >>> x0_broyden = -np.ones(n)\n ...\n >>> res_3 = least_squares(fun_broyden, x0_broyden,\n ... jac_sparsity=sparsity_broyden(n))\n >>> res_3.cost\n 4.5687069299604613e-23\n >>> res_3.optimality\n 1.1650454296851518e-11\n\n Let's also solve a curve fitting problem using robust loss function to\n take care of outliers in the data. Define the model function as\n ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an\n observation and a, b, c are parameters to estimate.\n\n First, define the function which generates the data with noise and\n outliers, define the model parameters, and generate data:\n\n >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):\n ... y = a + b * np.exp(t * c)\n ...\n ... rnd = np.random.RandomState(random_state)\n ... error = noise * rnd.randn(t.size)\n ... outliers = rnd.randint(0, t.size, n_outliers)\n ... error[outliers] *= 10\n ...\n ... return y + error\n ...\n >>> a = 0.5\n >>> b = 2.0\n >>> c = -1\n >>> t_min = 0\n >>> t_max = 10\n >>> n_points = 15\n ...\n >>> t_train = np.linspace(t_min, t_max, n_points)\n >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)\n\n Define function for computing residuals and initial estimate of\n parameters.\n\n >>> def fun(x, t, y):\n ... return x[0] + x[1] * np.exp(x[2] * t) - y\n ...\n >>> x0 = np.array([1.0, 1.0, 0.0])\n\n Compute a standard least-squares solution:\n\n >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))\n\n Now compute two solutions with two different robust loss functions. The\n parameter `f_scale` is set to 0.1, meaning that inlier residuals should\n not significantly exceed 0.1 (the noise level used).\n\n >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,\n ... args=(t_train, y_train))\n >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,\n ... args=(t_train, y_train))\n\n And finally plot all the curves. We see that by selecting an appropriate\n `loss` we can get estimates close to optimal even in the presence of\n strong outliers. 
But keep in mind that generally it is recommended to try\n 'soft_l1' or 'huber' losses first (if at all necessary) as the other two\n options may cause difficulties in optimization process.\n\n >>> t_test = np.linspace(t_min, t_max, n_points * 10)\n >>> y_true = gen_data(t_test, a, b, c)\n >>> y_lsq = gen_data(t_test, *res_lsq.x)\n >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)\n >>> y_log = gen_data(t_test, *res_log.x)\n ...\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(t_train, y_train, 'o')\n >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')\n >>> plt.plot(t_test, y_lsq, label='linear loss')\n >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')\n >>> plt.plot(t_test, y_log, label='cauchy loss')\n >>> plt.xlabel(\"t\")\n >>> plt.ylabel(\"y\")\n >>> plt.legend()\n >>> plt.show()\n\n In the next example, we show how complex-valued residual functions of\n complex variables can be optimized with ``least_squares()``. Consider the\n following function:\n\n >>> def f(z):\n ... return z - (0.5 + 0.5j)\n\n We wrap it into a function of real variables that returns real residuals\n by simply handling the real and imaginary parts as independent variables:\n\n >>> def f_wrap(x):\n ... fx = f(x[0] + 1j*x[1])\n ... return np.array([fx.real, fx.imag])\n\n Thus, instead of the original m-dimensional complex function of n complex\n variables we optimize a 2m-dimensional real function of 2n real variables:\n\n >>> from scipy.optimize import least_squares\n >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))\n >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j\n >>> z\n (0.49999999999925893+0.49999999999925893j)\n\n \"\"\"\n if method not in ['trf', 'dogbox', 'lm']:\n raise ValueError(\"`method` must be 'trf', 'dogbox' or 'lm'.\")\n\n if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):\n raise ValueError(\"`jac` must be '2-point', '3-point', 'cs' or \"\n \"callable.\")\n\n if tr_solver not in [None, 'exact', 'lsmr']:\n raise ValueError(\"`tr_solver` must be None, 'exact' or 'lsmr'.\")\n\n if loss not in IMPLEMENTED_LOSSES and not callable(loss):\n raise ValueError(\"`loss` must be one of {0} or a callable.\"\n .format(IMPLEMENTED_LOSSES.keys()))\n\n if method == 'lm' and loss != 'linear':\n raise ValueError(\"method='lm' supports only 'linear' loss function.\")\n\n if verbose not in [0, 1, 2]:\n raise ValueError(\"`verbose` must be in [0, 1, 2].\")\n\n if len(bounds) != 2:\n raise ValueError(\"`bounds` must contain 2 elements.\")\n\n if max_nfev is not None and max_nfev <= 0:\n raise ValueError(\"`max_nfev` must be None or positive integer.\")\n\n if np.iscomplexobj(x0):\n raise ValueError(\"`x0` must be real.\")\n\n x0 = np.atleast_1d(x0).astype(float)\n\n if x0.ndim > 1:\n raise ValueError(\"`x0` must have at most 1 dimension.\")\n\n lb, ub = prepare_bounds(bounds, x0.shape[0])\n\n if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):\n raise ValueError(\"Method 'lm' doesn't support bounds.\")\n\n if lb.shape != x0.shape or ub.shape != x0.shape:\n raise ValueError(\"Inconsistent shapes between bounds and `x0`.\")\n\n if np.any(lb >= ub):\n raise ValueError(\"Each lower bound must be strictly less than each \"\n \"upper bound.\")\n\n if not in_bounds(x0, lb, ub):\n raise ValueError(\"`x0` is infeasible.\")\n\n x_scale = check_x_scale(x_scale, x0)\n\n ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)\n\n def fun_wrapped(x):\n return np.atleast_1d(fun(x, *args, **kwargs))\n\n if method == 'trf':\n x0 = make_strictly_feasible(x0, lb, 
ub)\n\n f0 = fun_wrapped(x0)\n\n if f0.ndim != 1:\n raise ValueError(\"`fun` must return at most 1-d array_like.\")\n\n if not np.all(np.isfinite(f0)):\n raise ValueError(\"Residuals are not finite in the initial point.\")\n\n n = x0.size\n m = f0.size\n\n if method == 'lm' and m < n:\n raise ValueError(\"Method 'lm' doesn't work when the number of \"\n \"residuals is less than the number of variables.\")\n\n loss_function = construct_loss_function(m, loss, f_scale)\n if callable(loss):\n rho = loss_function(f0)\n if rho.shape != (3, m):\n raise ValueError(\"The return value of `loss` callable has wrong \"\n \"shape.\")\n initial_cost = 0.5 * np.sum(rho[0])\n elif loss_function is not None:\n initial_cost = loss_function(f0, cost_only=True)\n else:\n initial_cost = 0.5 * np.dot(f0, f0)\n\n if callable(jac):\n J0 = jac(x0, *args, **kwargs)\n\n if issparse(J0):\n J0 = csr_matrix(J0)\n\n def jac_wrapped(x, _=None):\n return csr_matrix(jac(x, *args, **kwargs))\n\n elif isinstance(J0, LinearOperator):\n def jac_wrapped(x, _=None):\n return jac(x, *args, **kwargs)\n\n else:\n J0 = np.atleast_2d(J0)\n\n def jac_wrapped(x, _=None):\n return np.atleast_2d(jac(x, *args, **kwargs))\n\n else: # Estimate Jacobian by finite differences.\n if method == 'lm':\n if jac_sparsity is not None:\n raise ValueError(\"method='lm' does not support \"\n \"`jac_sparsity`.\")\n\n if jac != '2-point':\n warn(\"jac='{0}' works equivalently to '2-point' \"\n \"for method='lm'.\".format(jac))\n\n J0 = jac_wrapped = None\n else:\n if jac_sparsity is not None and tr_solver == 'exact':\n raise ValueError(\"tr_solver='exact' is incompatible \"\n \"with `jac_sparsity`.\")\n\n jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)\n\n def jac_wrapped(x, f):\n J = approx_derivative(fun, x, rel_step=diff_step, method=jac,\n f0=f, bounds=bounds, args=args,\n kwargs=kwargs, sparsity=jac_sparsity)\n if J.ndim != 2: # J is guaranteed not sparse.\n J = np.atleast_2d(J)\n\n return J\n\n J0 = jac_wrapped(x0, f0)\n\n if J0 is not None:\n if J0.shape != (m, n):\n raise ValueError(\n \"The return value of `jac` has wrong shape: expected {0}, \"\n \"actual {1}.\".format((m, n), J0.shape))\n\n if not isinstance(J0, np.ndarray):\n if method == 'lm':\n raise ValueError(\"method='lm' works only with dense \"\n \"Jacobian matrices.\")\n\n if tr_solver == 'exact':\n raise ValueError(\n \"tr_solver='exact' works only with dense \"\n \"Jacobian matrices.\")\n\n jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'\n if isinstance(J0, LinearOperator) and jac_scale:\n raise ValueError(\"x_scale='jac' can't be used when `jac` \"\n \"returns LinearOperator.\")\n\n if tr_solver is None:\n if isinstance(J0, np.ndarray):\n tr_solver = 'exact'\n else:\n tr_solver = 'lsmr'\n\n if method == 'lm':\n result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,\n max_nfev, x_scale, diff_step)\n\n elif method == 'trf':\n result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,\n gtol, max_nfev, x_scale, loss_function, tr_solver,\n tr_options.copy(), verbose)\n\n elif method == 'dogbox':\n if tr_solver == 'lsmr' and 'regularize' in tr_options:\n warn(\"The keyword 'regularize' in `tr_options` is not relevant \"\n \"for 'dogbox' method.\")\n tr_options = tr_options.copy()\n del tr_options['regularize']\n\n result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,\n xtol, gtol, max_nfev, x_scale, loss_function,\n tr_solver, tr_options, verbose)\n\n result.message = TERMINATION_MESSAGES[result.status]\n result.success = 
result.status > 0\n\n if verbose >= 1:\n print(result.message)\n print(\"Function evaluations {0}, initial cost {1:.4e}, final cost \"\n \"{2:.4e}, first-order optimality {3:.2e}.\"\n .format(result.nfev, initial_cost, result.cost,\n result.optimality))\n\n return result\n",
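The least_squares docstring above ends with a robust curve-fitting example spread over several doctest blocks. The condensed sketch below (my own, using only the public scipy.optimize.least_squares API shown in the file) fits y = a + b*exp(c*t) with and without the 'soft_l1' loss so the effect of f_scale on outliers can be seen at a glance.

import numpy as np
from scipy.optimize import least_squares

# Synthetic data with a few gross outliers, following the docstring's setup.
rng = np.random.RandomState(0)
t = np.linspace(0, 10, 15)
y = 0.5 + 2.0 * np.exp(-1.0 * t) + 0.1 * rng.randn(t.size)
y[[2, 7, 11]] += 3.0

def residuals(params, t, y):
    a, b, c = params
    return a + b * np.exp(c * t) - y

fit_plain = least_squares(residuals, x0=[1.0, 1.0, 0.0], args=(t, y))
fit_robust = least_squares(residuals, x0=[1.0, 1.0, 0.0], loss="soft_l1",
                           f_scale=0.1, args=(t, y))
# The robust fit should land closer to the true parameters (0.5, 2.0, -1.0).
print(fit_plain.x, fit_robust.x)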
"from __future__ import division, print_function, absolute_import\n\nfrom os.path import join\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('stats', parent_package, top_path)\n\n config.add_data_dir('tests')\n\n statlib_src = [join('statlib', '*.f')]\n config.add_library('statlib', sources=statlib_src)\n\n # add statlib module\n config.add_extension('statlib',\n sources=['statlib.pyf'],\n f2py_options=['--no-wrap-functions'],\n libraries=['statlib'],\n depends=statlib_src\n )\n\n # add _stats module\n config.add_extension('_stats',\n sources=['_stats.c'],\n )\n\n # add mvn module\n config.add_extension('mvn',\n sources=['mvn.pyf','mvndst.f'],\n )\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n",
"import nose\nimport sys\nimport os\nimport warnings\nimport tempfile\nfrom contextlib import contextmanager\n\nimport datetime\nimport numpy as np\n\nimport pandas\nimport pandas as pd\nfrom pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,\n RangeIndex, Categorical, bdate_range,\n date_range, timedelta_range, Index, DatetimeIndex,\n isnull)\n\nfrom pandas.compat import is_platform_windows, PY3, PY35\nfrom pandas.formats.printing import pprint_thing\nfrom pandas.io.pytables import _tables, TableIterator\ntry:\n _tables()\nexcept ImportError as e:\n raise nose.SkipTest(e)\n\n\nfrom pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,\n IncompatibilityWarning, PerformanceWarning,\n AttributeConflictWarning, DuplicateWarning,\n PossibleDataLossError, ClosedFileError)\nfrom pandas.io import pytables as pytables\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (assert_panel4d_equal,\n assert_panel_equal,\n assert_frame_equal,\n assert_series_equal,\n assert_produces_warning,\n set_timezone)\nfrom pandas import concat, Timestamp\nfrom pandas import compat\nfrom pandas.compat import range, lrange, u\n\ntry:\n import tables\nexcept ImportError:\n raise nose.SkipTest('no pytables')\n\nfrom distutils.version import LooseVersion\n\n_default_compressor = ('blosc' if LooseVersion(tables.__version__) >= '2.2'\n else 'zlib')\n\n_multiprocess_can_split_ = False\n\n# testing on windows/py3 seems to fault\n# for using compression\nskip_compression = PY3 and is_platform_windows()\n\n# contextmanager to ensure the file cleanup\n\n\ndef safe_remove(path):\n if path is not None:\n try:\n os.remove(path)\n except:\n pass\n\n\ndef safe_close(store):\n try:\n if store is not None:\n store.close()\n except:\n pass\n\n\ndef create_tempfile(path):\n \"\"\" create an unopened named temporary file \"\"\"\n return os.path.join(tempfile.gettempdir(), path)\n\n\n@contextmanager\ndef ensure_clean_store(path, mode='a', complevel=None, complib=None,\n fletcher32=False):\n\n try:\n\n # put in the temporary path if we don't have one already\n if not len(os.path.dirname(path)):\n path = create_tempfile(path)\n\n store = HDFStore(path, mode=mode, complevel=complevel,\n complib=complib, fletcher32=False)\n yield store\n finally:\n safe_close(store)\n if mode == 'w' or mode == 'a':\n safe_remove(path)\n\n\n@contextmanager\ndef ensure_clean_path(path):\n \"\"\"\n return essentially a named temporary file that is not opened\n and deleted on existing; if path is a list, then create and\n return list of filenames\n \"\"\"\n try:\n if isinstance(path, list):\n filenames = [create_tempfile(p) for p in path]\n yield filenames\n else:\n filenames = [create_tempfile(path)]\n yield filenames[0]\n finally:\n for f in filenames:\n safe_remove(f)\n\n\n# set these parameters so we don't have file sharing\ntables.parameters.MAX_NUMEXPR_THREADS = 1\ntables.parameters.MAX_BLOSC_THREADS = 1\ntables.parameters.MAX_THREADS = 1\n\n\ndef _maybe_remove(store, key):\n \"\"\"For tests using tables, try removing the table to be sure there is\n no content from previous tests using the same table name.\"\"\"\n try:\n store.remove(key)\n except:\n pass\n\n\n@contextmanager\ndef compat_assert_produces_warning(w):\n \"\"\" don't produce a warning under PY3 \"\"\"\n if compat.PY3:\n yield\n else:\n with tm.assert_produces_warning(expected_warning=w,\n check_stacklevel=False):\n yield\n\n\nclass Base(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(Base, cls).setUpClass()\n\n # Pytables 3.0.0 deprecates 
lots of things\n tm.reset_testing_mode()\n\n @classmethod\n def tearDownClass(cls):\n super(Base, cls).tearDownClass()\n\n # Pytables 3.0.0 deprecates lots of things\n tm.set_testing_mode()\n\n def setUp(self):\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n self.path = 'tmp.__%s__.h5' % tm.rands(10)\n\n def tearDown(self):\n pass\n\n\nclass TestHDFStore(Base, tm.TestCase):\n\n def test_factory_fun(self):\n path = create_tempfile(self.path)\n try:\n with get_store(path) as tbl:\n raise ValueError('blah')\n except ValueError:\n pass\n finally:\n safe_remove(path)\n\n try:\n with get_store(path) as tbl:\n tbl['a'] = tm.makeDataFrame()\n\n with get_store(path) as tbl:\n self.assertEqual(len(tbl), 1)\n self.assertEqual(type(tbl['a']), DataFrame)\n finally:\n safe_remove(self.path)\n\n def test_context(self):\n path = create_tempfile(self.path)\n try:\n with HDFStore(path) as tbl:\n raise ValueError('blah')\n except ValueError:\n pass\n finally:\n safe_remove(path)\n\n try:\n with HDFStore(path) as tbl:\n tbl['a'] = tm.makeDataFrame()\n\n with HDFStore(path) as tbl:\n self.assertEqual(len(tbl), 1)\n self.assertEqual(type(tbl['a']), DataFrame)\n finally:\n safe_remove(path)\n\n def test_conv_read_write(self):\n path = create_tempfile(self.path)\n try:\n def roundtrip(key, obj, **kwargs):\n obj.to_hdf(path, key, **kwargs)\n return read_hdf(path, key)\n\n o = tm.makeTimeSeries()\n assert_series_equal(o, roundtrip('series', o))\n\n o = tm.makeStringSeries()\n assert_series_equal(o, roundtrip('string_series', o))\n\n o = tm.makeDataFrame()\n assert_frame_equal(o, roundtrip('frame', o))\n\n o = tm.makePanel()\n assert_panel_equal(o, roundtrip('panel', o))\n\n # table\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n df.to_hdf(path, 'table', append=True)\n result = read_hdf(path, 'table', where=['index>2'])\n assert_frame_equal(df[df.index > 2], result)\n\n finally:\n safe_remove(path)\n\n def test_long_strings(self):\n\n # GH6166\n # unconversion of long strings was being chopped in earlier\n # versions of numpy < 1.7.2\n df = DataFrame({'a': tm.rands_array(100, size=10)},\n index=tm.rands_array(100, size=10))\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=['a'])\n\n result = store.select('df')\n assert_frame_equal(df, result)\n\n def test_api(self):\n\n # GH4584\n # API issue when to_hdf doesn't acdept append AND format args\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path, 'df', append=True, format='table')\n df.iloc[10:].to_hdf(path, 'df', append=True, format='table')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n # append to False\n df.iloc[:10].to_hdf(path, 'df', append=False, format='table')\n df.iloc[10:].to_hdf(path, 'df', append=True, format='table')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path, 'df', append=True)\n df.iloc[10:].to_hdf(path, 'df', append=True, format='table')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n # append to False\n df.iloc[:10].to_hdf(path, 'df', append=False, format='table')\n df.iloc[10:].to_hdf(path, 'df', append=True)\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, 'df', append=False, format='fixed')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n df.to_hdf(path, 'df', append=False, format='f')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n 
df.to_hdf(path, 'df', append=False)\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n df.to_hdf(path, 'df')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n with ensure_clean_store(self.path) as store:\n\n path = store._path\n df = tm.makeDataFrame()\n\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=True, format='table')\n store.append('df', df.iloc[10:], append=True, format='table')\n assert_frame_equal(store.select('df'), df)\n\n # append to False\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=False, format='table')\n store.append('df', df.iloc[10:], append=True, format='table')\n assert_frame_equal(store.select('df'), df)\n\n # formats\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=False, format='table')\n store.append('df', df.iloc[10:], append=True, format='table')\n assert_frame_equal(store.select('df'), df)\n\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=False, format='table')\n store.append('df', df.iloc[10:], append=True, format=None)\n assert_frame_equal(store.select('df'), df)\n\n with ensure_clean_path(self.path) as path:\n\n # invalid\n df = tm.makeDataFrame()\n self.assertRaises(ValueError, df.to_hdf, path,\n 'df', append=True, format='f')\n self.assertRaises(ValueError, df.to_hdf, path,\n 'df', append=True, format='fixed')\n\n self.assertRaises(TypeError, df.to_hdf, path,\n 'df', append=True, format='foo')\n self.assertRaises(TypeError, df.to_hdf, path,\n 'df', append=False, format='bar')\n\n # File path doesn't exist\n path = \"\"\n self.assertRaises(compat.FileNotFoundError,\n read_hdf, path, 'df')\n\n def test_api_default_format(self):\n\n # default_format option\n with ensure_clean_store(self.path) as store:\n df = tm.makeDataFrame()\n\n pandas.set_option('io.hdf.default_format', 'fixed')\n _maybe_remove(store, 'df')\n store.put('df', df)\n self.assertFalse(store.get_storer('df').is_table)\n self.assertRaises(ValueError, store.append, 'df2', df)\n\n pandas.set_option('io.hdf.default_format', 'table')\n _maybe_remove(store, 'df')\n store.put('df', df)\n self.assertTrue(store.get_storer('df').is_table)\n _maybe_remove(store, 'df2')\n store.append('df2', df)\n self.assertTrue(store.get_storer('df').is_table)\n\n pandas.set_option('io.hdf.default_format', None)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n\n pandas.set_option('io.hdf.default_format', 'fixed')\n df.to_hdf(path, 'df')\n with get_store(path) as store:\n self.assertFalse(store.get_storer('df').is_table)\n self.assertRaises(ValueError, df.to_hdf, path, 'df2', append=True)\n\n pandas.set_option('io.hdf.default_format', 'table')\n df.to_hdf(path, 'df3')\n with HDFStore(path) as store:\n self.assertTrue(store.get_storer('df3').is_table)\n df.to_hdf(path, 'df4', append=True)\n with HDFStore(path) as store:\n self.assertTrue(store.get_storer('df4').is_table)\n\n pandas.set_option('io.hdf.default_format', None)\n\n def test_keys(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeStringSeries()\n store['c'] = tm.makeDataFrame()\n store['d'] = tm.makePanel()\n store['foo/bar'] = tm.makePanel()\n self.assertEqual(len(store), 5)\n expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])\n self.assertTrue(set(store.keys()) == expected)\n self.assertTrue(set(store) == expected)\n\n def test_iter_empty(self):\n\n with ensure_clean_store(self.path) as store:\n # GH 12221\n self.assertTrue(list(store) == [])\n\n def test_repr(self):\n\n with 
ensure_clean_store(self.path) as store:\n repr(store)\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeStringSeries()\n store['c'] = tm.makeDataFrame()\n store['d'] = tm.makePanel()\n store['foo/bar'] = tm.makePanel()\n store.append('e', tm.makePanel())\n\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.ix[3:6, ['obj1']] = np.nan\n df = df.consolidate()._convert(datetime=True)\n\n warnings.filterwarnings('ignore', category=PerformanceWarning)\n store['df'] = df\n warnings.filterwarnings('always', category=PerformanceWarning)\n\n # make a random group in hdf space\n store._handle.create_group(store._handle.root, 'bah')\n\n repr(store)\n str(store)\n\n # storers\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeDataFrame()\n store.append('df', df)\n\n s = store.get_storer('df')\n repr(s)\n str(s)\n\n def test_contains(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeDataFrame()\n store['foo/bar'] = tm.makeDataFrame()\n self.assertIn('a', store)\n self.assertIn('b', store)\n self.assertNotIn('c', store)\n self.assertIn('foo/bar', store)\n self.assertIn('/foo/bar', store)\n self.assertNotIn('/foo/b', store)\n self.assertNotIn('bar', store)\n\n # GH 2694\n warnings.filterwarnings(\n 'ignore', category=tables.NaturalNameWarning)\n store['node())'] = tm.makeDataFrame()\n self.assertIn('node())', store)\n\n def test_versioning(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeDataFrame()\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n self.assertEqual(store.root.a._v_attrs.pandas_version, '0.15.2')\n self.assertEqual(store.root.b._v_attrs.pandas_version, '0.15.2')\n self.assertEqual(store.root.df1._v_attrs.pandas_version, '0.15.2')\n\n # write a file and wipe its versioning\n _maybe_remove(store, 'df2')\n store.append('df2', df)\n\n # this is an error because its table_type is appendable, but no\n # version info\n store.get_node('df2')._v_attrs.pandas_version = None\n self.assertRaises(Exception, store.select, 'df2')\n\n def test_mode(self):\n\n df = tm.makeTimeDataFrame()\n\n def check(mode):\n\n with ensure_clean_path(self.path) as path:\n\n # constructor\n if mode in ['r', 'r+']:\n self.assertRaises(IOError, HDFStore, path, mode=mode)\n\n else:\n store = HDFStore(path, mode=mode)\n self.assertEqual(store._handle.mode, mode)\n store.close()\n\n with ensure_clean_path(self.path) as path:\n\n # context\n if mode in ['r', 'r+']:\n def f():\n with HDFStore(path, mode=mode) as store: # noqa\n pass\n self.assertRaises(IOError, f)\n else:\n with HDFStore(path, mode=mode) as store:\n self.assertEqual(store._handle.mode, mode)\n\n with ensure_clean_path(self.path) as path:\n\n # conv write\n if mode in ['r', 'r+']:\n self.assertRaises(IOError, df.to_hdf,\n path, 'df', mode=mode)\n df.to_hdf(path, 'df', mode='w')\n else:\n df.to_hdf(path, 'df', mode=mode)\n\n # conv read\n if mode in ['w']:\n self.assertRaises(ValueError, read_hdf,\n path, 'df', mode=mode)\n else:\n result = read_hdf(path, 'df', mode=mode)\n assert_frame_equal(result, df)\n\n def 
check_default_mode():\n\n # read_hdf uses default mode\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='w')\n result = read_hdf(path, 'df')\n assert_frame_equal(result, df)\n\n check('r')\n check('r+')\n check('a')\n check('w')\n check_default_mode()\n\n def test_reopen_handle(self):\n\n with ensure_clean_path(self.path) as path:\n\n store = HDFStore(path, mode='a')\n store['a'] = tm.makeTimeSeries()\n\n # invalid mode change\n self.assertRaises(PossibleDataLossError, store.open, 'w')\n store.close()\n self.assertFalse(store.is_open)\n\n # truncation ok here\n store.open('w')\n self.assertTrue(store.is_open)\n self.assertEqual(len(store), 0)\n store.close()\n self.assertFalse(store.is_open)\n\n store = HDFStore(path, mode='a')\n store['a'] = tm.makeTimeSeries()\n\n # reopen as read\n store.open('r')\n self.assertTrue(store.is_open)\n self.assertEqual(len(store), 1)\n self.assertEqual(store._mode, 'r')\n store.close()\n self.assertFalse(store.is_open)\n\n # reopen as append\n store.open('a')\n self.assertTrue(store.is_open)\n self.assertEqual(len(store), 1)\n self.assertEqual(store._mode, 'a')\n store.close()\n self.assertFalse(store.is_open)\n\n # reopen as append (again)\n store.open('a')\n self.assertTrue(store.is_open)\n self.assertEqual(len(store), 1)\n self.assertEqual(store._mode, 'a')\n store.close()\n self.assertFalse(store.is_open)\n\n def test_open_args(self):\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n\n # create an in memory store\n store = HDFStore(path, mode='a', driver='H5FD_CORE',\n driver_core_backing_store=0)\n store['df'] = df\n store.append('df2', df)\n\n tm.assert_frame_equal(store['df'], df)\n tm.assert_frame_equal(store['df2'], df)\n\n store.close()\n\n # the file should not have actually been written\n self.assertFalse(os.path.exists(path))\n\n def test_flush(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store.flush()\n store.flush(fsync=True)\n\n def test_get(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n left = store.get('a')\n right = store['a']\n tm.assert_series_equal(left, right)\n\n left = store.get('/a')\n right = store['/a']\n tm.assert_series_equal(left, right)\n\n self.assertRaises(KeyError, store.get, 'b')\n\n def test_getattr(self):\n\n with ensure_clean_store(self.path) as store:\n\n s = tm.makeTimeSeries()\n store['a'] = s\n\n # test attribute access\n result = store.a\n tm.assert_series_equal(result, s)\n result = getattr(store, 'a')\n tm.assert_series_equal(result, s)\n\n df = tm.makeTimeDataFrame()\n store['df'] = df\n result = store.df\n tm.assert_frame_equal(result, df)\n\n # errors\n self.assertRaises(AttributeError, getattr, store, 'd')\n\n for x in ['mode', 'path', 'handle', 'complib']:\n self.assertRaises(AttributeError, getattr, store, x)\n\n # not stores\n for x in ['mode', 'path', 'handle', 'complib']:\n getattr(store, \"_%s\" % x)\n\n def test_put(self):\n\n with ensure_clean_store(self.path) as store:\n\n ts = tm.makeTimeSeries()\n df = tm.makeTimeDataFrame()\n store['a'] = ts\n store['b'] = df[:10]\n store['foo/bar/bah'] = df[:10]\n store['foo'] = df[:10]\n store['/foo'] = df[:10]\n store.put('c', df[:10], format='table')\n\n # not OK, not a table\n self.assertRaises(\n ValueError, store.put, 'b', df[10:], append=True)\n\n # node does not currently exist, test _is_table_type returns False\n # in this case\n # _maybe_remove(store, 'f')\n # self.assertRaises(ValueError, store.put, 'f', 
df[10:],\n # append=True)\n\n # can't put to a table (use append instead)\n self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)\n\n # overwrite table\n store.put('c', df[:10], format='table', append=False)\n tm.assert_frame_equal(df[:10], store['c'])\n\n def test_put_string_index(self):\n\n with ensure_clean_store(self.path) as store:\n\n index = Index(\n [\"I am a very long string index: %s\" % i for i in range(20)])\n s = Series(np.arange(20), index=index)\n df = DataFrame({'A': s, 'B': s})\n\n store['a'] = s\n tm.assert_series_equal(store['a'], s)\n\n store['b'] = df\n tm.assert_frame_equal(store['b'], df)\n\n # mixed length\n index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +\n [\"I am a very long string index: %s\" % i\n for i in range(20)])\n s = Series(np.arange(21), index=index)\n df = DataFrame({'A': s, 'B': s})\n store['a'] = s\n tm.assert_series_equal(store['a'], s)\n\n store['b'] = df\n tm.assert_frame_equal(store['b'], df)\n\n def test_put_compression(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n\n store.put('c', df, format='table', complib='zlib')\n tm.assert_frame_equal(store['c'], df)\n\n # can't compress if format='fixed'\n self.assertRaises(ValueError, store.put, 'b', df,\n format='fixed', complib='zlib')\n\n def test_put_compression_blosc(self):\n tm.skip_if_no_package('tables', '2.2', app='blosc support')\n if skip_compression:\n raise nose.SkipTest(\"skipping on windows/PY3\")\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n\n # can't compress if format='fixed'\n self.assertRaises(ValueError, store.put, 'b', df,\n format='fixed', complib='blosc')\n\n store.put('c', df, format='table', complib='blosc')\n tm.assert_frame_equal(store['c'], df)\n\n def test_put_integer(self):\n # non-date, non-string index\n df = DataFrame(np.random.randn(50, 100))\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n def test_put_mixed_type(self):\n df = tm.makeTimeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.ix[3:6, ['obj1']] = np.nan\n df = df.consolidate()._convert(datetime=True)\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n\n # cannot use assert_produces_warning here for some reason\n # a PendingDeprecationWarning is also raised?\n warnings.filterwarnings('ignore', category=PerformanceWarning)\n store.put('df', df)\n warnings.filterwarnings('always', category=PerformanceWarning)\n\n expected = store.get('df')\n tm.assert_frame_equal(expected, df)\n\n def test_append(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n tm.assert_frame_equal(store['df1'], df)\n\n _maybe_remove(store, 'df2')\n store.put('df2', df[:10], format='table')\n store.append('df2', df[10:])\n tm.assert_frame_equal(store['df2'], df)\n\n _maybe_remove(store, 'df3')\n store.append('/df3', df[:10])\n store.append('/df3', df[10:])\n tm.assert_frame_equal(store['df3'], df)\n\n # this is allowed by almost always don't want to do it\n with tm.assert_produces_warning(\n expected_warning=tables.NaturalNameWarning):\n _maybe_remove(store, '/df3 foo')\n 
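# the space in '/df3 foo' is not a valid PyTables natural name, hence the\n                # NaturalNameWarning asserted above; the append/select round-trip\n                # below still works.\n                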
store.append('/df3 foo', df[:10])\n store.append('/df3 foo', df[10:])\n tm.assert_frame_equal(store['df3 foo'], df)\n\n # panel\n wp = tm.makePanel()\n _maybe_remove(store, 'wp1')\n store.append('wp1', wp.ix[:, :10, :])\n store.append('wp1', wp.ix[:, 10:, :])\n assert_panel_equal(store['wp1'], wp)\n\n # ndim\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n p4d = tm.makePanel4D()\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :])\n store.append('p4d', p4d.ix[:, :, 10:, :])\n assert_panel4d_equal(store['p4d'], p4d)\n\n # test using axis labels\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :], axes=[\n 'items', 'major_axis', 'minor_axis'])\n store.append('p4d', p4d.ix[:, :, 10:, :], axes=[\n 'items', 'major_axis', 'minor_axis'])\n assert_panel4d_equal(store['p4d'], p4d)\n\n # test using differnt number of items on each axis\n p4d2 = p4d.copy()\n p4d2['l4'] = p4d['l1']\n p4d2['l5'] = p4d['l1']\n _maybe_remove(store, 'p4d2')\n store.append(\n 'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])\n assert_panel4d_equal(store['p4d2'], p4d2)\n\n # test using differt order of items on the non-index axes\n _maybe_remove(store, 'wp1')\n wp_append1 = wp.ix[:, :10, :]\n store.append('wp1', wp_append1)\n wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])\n store.append('wp1', wp_append2)\n assert_panel_equal(store['wp1'], wp)\n\n # dtype issues - mizxed type in a single object column\n df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])\n df['mixed_column'] = 'testing'\n df.ix[2, 'mixed_column'] = np.nan\n _maybe_remove(store, 'df')\n store.append('df', df)\n tm.assert_frame_equal(store['df'], df)\n\n # uints - test storage of uints\n uint_data = DataFrame({\n 'u08': Series(np.random.randint(0, high=255, size=5),\n dtype=np.uint8),\n 'u16': Series(np.random.randint(0, high=65535, size=5),\n dtype=np.uint16),\n 'u32': Series(np.random.randint(0, high=2**30, size=5),\n dtype=np.uint32),\n 'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],\n dtype=np.uint64)}, index=np.arange(5))\n _maybe_remove(store, 'uints')\n store.append('uints', uint_data)\n tm.assert_frame_equal(store['uints'], uint_data)\n\n # uints - test storage of uints in indexable columns\n _maybe_remove(store, 'uints')\n # 64-bit indices not yet supported\n store.append('uints', uint_data, data_columns=[\n 'u08', 'u16', 'u32'])\n tm.assert_frame_equal(store['uints'], uint_data)\n\n def test_append_series(self):\n\n with ensure_clean_store(self.path) as store:\n\n # basic\n ss = tm.makeStringSeries()\n ts = tm.makeTimeSeries()\n ns = Series(np.arange(100))\n\n store.append('ss', ss)\n result = store['ss']\n tm.assert_series_equal(result, ss)\n self.assertIsNone(result.name)\n\n store.append('ts', ts)\n result = store['ts']\n tm.assert_series_equal(result, ts)\n self.assertIsNone(result.name)\n\n ns.name = 'foo'\n store.append('ns', ns)\n result = store['ns']\n tm.assert_series_equal(result, ns)\n self.assertEqual(result.name, ns.name)\n\n # select on the values\n expected = ns[ns > 60]\n result = store.select('ns', Term('foo>60'))\n tm.assert_series_equal(result, expected)\n\n # select on the index and values\n expected = ns[(ns > 70) & (ns.index < 90)]\n result = store.select('ns', [Term('foo>70'), Term('index<90')])\n tm.assert_series_equal(result, expected)\n\n # multi-index\n mi = DataFrame(np.random.randn(5, 1), columns=['A'])\n mi['B'] = np.arange(len(mi))\n mi['C'] = 'foo'\n mi.loc[3:5, 'C'] = 'bar'\n mi.set_index(['C', 'B'], inplace=True)\n s = 
mi.stack()\n s.index = s.index.droplevel(2)\n store.append('mi', s)\n tm.assert_series_equal(store['mi'], s)\n\n def test_store_index_types(self):\n # GH5386\n # test storing various index types\n\n with ensure_clean_store(self.path) as store:\n\n def check(format, index):\n df = DataFrame(np.random.randn(10, 2), columns=list('AB'))\n df.index = index(len(df))\n\n _maybe_remove(store, 'df')\n store.put('df', df, format=format)\n assert_frame_equal(df, store['df'])\n\n for index in [tm.makeFloatIndex, tm.makeStringIndex,\n tm.makeIntIndex, tm.makeDateIndex]:\n\n check('table', index)\n check('fixed', index)\n\n # period index currently broken for table\n # seee GH7796 FIXME\n check('fixed', tm.makePeriodIndex)\n # check('table',tm.makePeriodIndex)\n\n # unicode\n index = tm.makeUnicodeIndex\n if compat.PY3:\n check('table', index)\n check('fixed', index)\n else:\n\n # only support for fixed types (and they have a perf warning)\n self.assertRaises(TypeError, check, 'table', index)\n with tm.assert_produces_warning(\n expected_warning=PerformanceWarning):\n check('fixed', index)\n\n def test_encoding(self):\n\n if sys.byteorder != 'little':\n raise nose.SkipTest('system byteorder is not little')\n\n with ensure_clean_store(self.path) as store:\n df = DataFrame(dict(A='foo', B='bar'), index=range(5))\n df.loc[2, 'A'] = np.nan\n df.loc[3, 'B'] = np.nan\n _maybe_remove(store, 'df')\n store.append('df', df, encoding='ascii')\n tm.assert_frame_equal(store['df'], df)\n\n expected = df.reindex(columns=['A'])\n result = store.select('df', Term('columns=A', encoding='ascii'))\n tm.assert_frame_equal(result, expected)\n\n def test_latin_encoding(self):\n\n if compat.PY2:\n self.assertRaisesRegexp(\n TypeError, r'\\[unicode\\] is not implemented as a table column')\n return\n\n values = [[b'E\\xc9, 17', b'', b'a', b'b', b'c'],\n [b'E\\xc9, 17', b'a', b'b', b'c'],\n [b'EE, 17', b'', b'a', b'b', b'c'],\n [b'E\\xc9, 17', b'\\xf8\\xfc', b'a', b'b', b'c'],\n [b'', b'a', b'b', b'c'],\n [b'\\xf8\\xfc', b'a', b'b', b'c'],\n [b'A\\xf8\\xfc', b'', b'a', b'b', b'c'],\n [np.nan, b'', b'b', b'c'],\n [b'A\\xf8\\xfc', np.nan, b'', b'b', b'c']]\n\n def _try_decode(x, encoding='latin-1'):\n try:\n return x.decode(encoding)\n except AttributeError:\n return x\n # not sure how to remove latin-1 from code in python 2 and 3\n values = [[_try_decode(x) for x in y] for y in values]\n\n examples = []\n for dtype in ['category', object]:\n for val in values:\n examples.append(pandas.Series(val, dtype=dtype))\n\n def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):\n with ensure_clean_path(self.path) as store:\n s.to_hdf(store, key, format='table', encoding=encoding,\n nan_rep=nan_rep)\n retr = read_hdf(store, key)\n s_nan = s.replace(nan_rep, np.nan)\n assert_series_equal(s_nan, retr, check_categorical=False)\n\n for s in examples:\n roundtrip(s)\n\n # fails:\n # for x in examples:\n # roundtrip(s, nan_rep=b'\\xf8\\xfc')\n\n def test_append_some_nans(self):\n\n with ensure_clean_store(self.path) as store:\n df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),\n 'A1': np.random.randn(20),\n 'A2': np.random.randn(20),\n 'B': 'foo', 'C': 'bar',\n 'D': Timestamp(\"20010101\"),\n 'E': datetime.datetime(2001, 1, 2, 0, 0)},\n index=np.arange(20))\n # some nans\n _maybe_remove(store, 'df1')\n df.ix[0:15, ['A1', 'B', 'D', 'E']] = np.nan\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n tm.assert_frame_equal(store['df1'], df)\n\n # first column\n df1 = df.copy()\n df1.ix[:, 'A1'] = np.nan\n 
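# the chunked append must still round-trip when an entire float column is\n            # NaN; the '2nd column' and 'datetimes' blocks below repeat the check\n            # for A2 and for the datetime column E.\n            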
_maybe_remove(store, 'df1')\n store.append('df1', df1[:10])\n store.append('df1', df1[10:])\n tm.assert_frame_equal(store['df1'], df1)\n\n # 2nd column\n df2 = df.copy()\n df2.ix[:, 'A2'] = np.nan\n _maybe_remove(store, 'df2')\n store.append('df2', df2[:10])\n store.append('df2', df2[10:])\n tm.assert_frame_equal(store['df2'], df2)\n\n # datetimes\n df3 = df.copy()\n df3.ix[:, 'E'] = np.nan\n _maybe_remove(store, 'df3')\n store.append('df3', df3[:10])\n store.append('df3', df3[10:])\n tm.assert_frame_equal(store['df3'], df3)\n\n def test_append_all_nans(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame({'A1': np.random.randn(20),\n 'A2': np.random.randn(20)},\n index=np.arange(20))\n df.ix[0:15, :] = np.nan\n\n # nan some entire rows (dropna=True)\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df[-4:])\n\n # nan some entire rows (dropna=False)\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # tests the option io.hdf.dropna_table\n pandas.set_option('io.hdf.dropna_table', False)\n _maybe_remove(store, 'df3')\n store.append('df3', df[:10])\n store.append('df3', df[10:])\n tm.assert_frame_equal(store['df3'], df)\n\n pandas.set_option('io.hdf.dropna_table', True)\n _maybe_remove(store, 'df4')\n store.append('df4', df[:10])\n store.append('df4', df[10:])\n tm.assert_frame_equal(store['df4'], df[-4:])\n\n # nan some entire rows (string are still written!)\n df = DataFrame({'A1': np.random.randn(20),\n 'A2': np.random.randn(20),\n 'B': 'foo', 'C': 'bar'},\n index=np.arange(20))\n\n df.ix[0:15, :] = np.nan\n\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df)\n\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # nan some entire rows (but since we have dates they are still\n # written!)\n df = DataFrame({'A1': np.random.randn(20),\n 'A2': np.random.randn(20),\n 'B': 'foo', 'C': 'bar',\n 'D': Timestamp(\"20010101\"),\n 'E': datetime.datetime(2001, 1, 2, 0, 0)},\n index=np.arange(20))\n\n df.ix[0:15, :] = np.nan\n\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df)\n\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # Test to make sure defaults are to not drop.\n # Corresponding to Issue 9382\n df_with_missing = DataFrame(\n {'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})\n\n with ensure_clean_path(self.path) as path:\n df_with_missing.to_hdf(path, 'df_with_missing', format='table')\n reloaded = read_hdf(path, 'df_with_missing')\n tm.assert_frame_equal(df_with_missing, reloaded)\n\n matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]],\n [[np.nan, np.nan, np.nan], [np.nan, 5, 6]],\n [[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]\n\n panel_with_missing = Panel(matrix, items=['Item1', 'Item2', 'Item3'],\n major_axis=[1, 2],\n minor_axis=['A', 'B', 'C'])\n\n with ensure_clean_path(self.path) as path:\n panel_with_missing.to_hdf(\n path, 'panel_with_missing', format='table')\n reloaded_panel = read_hdf(path, 
'panel_with_missing')\n tm.assert_panel_equal(panel_with_missing, reloaded_panel)\n\n def test_append_frame_column_oriented(self):\n\n with ensure_clean_store(self.path) as store:\n\n # column oriented\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df.ix[:, :2], axes=['columns'])\n store.append('df1', df.ix[:, 2:])\n tm.assert_frame_equal(store['df1'], df)\n\n result = store.select('df1', 'columns=A')\n expected = df.reindex(columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n # selection on the non-indexable\n result = store.select(\n 'df1', ('columns=A', Term('index=df.index[0:4]')))\n expected = df.reindex(columns=['A'], index=df.index[0:4])\n tm.assert_frame_equal(expected, result)\n\n # this isn't supported\n self.assertRaises(TypeError, store.select, 'df1', (\n 'columns=A', Term('index>df.index[4]')))\n\n def test_append_with_different_block_ordering(self):\n\n # GH 4096; using same frames, but different block orderings\n with ensure_clean_store(self.path) as store:\n\n for i in range(10):\n\n df = DataFrame(np.random.randn(10, 2), columns=list('AB'))\n df['index'] = range(10)\n df['index'] += i * 10\n df['int64'] = Series([1] * len(df), dtype='int64')\n df['int16'] = Series([1] * len(df), dtype='int16')\n\n if i % 2 == 0:\n del df['int64']\n df['int64'] = Series([1] * len(df), dtype='int64')\n if i % 3 == 0:\n a = df.pop('A')\n df['A'] = a\n\n df.set_index('index', inplace=True)\n\n store.append('df', df)\n\n # test a different ordering but with more fields (like invalid\n # combinate)\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(np.random.randn(10, 2),\n columns=list('AB'), dtype='float64')\n df['int64'] = Series([1] * len(df), dtype='int64')\n df['int16'] = Series([1] * len(df), dtype='int16')\n store.append('df', df)\n\n # store additonal fields in different blocks\n df['int16_2'] = Series([1] * len(df), dtype='int16')\n self.assertRaises(ValueError, store.append, 'df', df)\n\n # store multile additonal fields in different blocks\n df['float_3'] = Series([1.] 
* len(df), dtype='float64')\n self.assertRaises(ValueError, store.append, 'df', df)\n\n def test_ndim_indexables(self):\n # test using ndim tables in new ways\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n with ensure_clean_store(self.path) as store:\n\n p4d = tm.makePanel4D()\n\n def check_indexers(key, indexers):\n for i, idx in enumerate(indexers):\n descr = getattr(store.root, key).table.description\n self.assertTrue(getattr(descr, idx)._v_pos == i)\n\n # append then change (will take existing schema)\n indexers = ['items', 'major_axis', 'minor_axis']\n\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n store.append('p4d', p4d.ix[:, :, 10:, :])\n assert_panel4d_equal(store.select('p4d'), p4d)\n check_indexers('p4d', indexers)\n\n # same as above, but try to append with differnt axes\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n store.append('p4d', p4d.ix[:, :, 10:, :], axes=[\n 'labels', 'items', 'major_axis'])\n assert_panel4d_equal(store.select('p4d'), p4d)\n check_indexers('p4d', indexers)\n\n # pass incorrect number of axes\n _maybe_remove(store, 'p4d')\n self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[\n :, :, :10, :], axes=['major_axis', 'minor_axis'])\n\n # different than default indexables #1\n indexers = ['labels', 'major_axis', 'minor_axis']\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n store.append('p4d', p4d.ix[:, :, 10:, :])\n assert_panel4d_equal(store['p4d'], p4d)\n check_indexers('p4d', indexers)\n\n # different than default indexables #2\n indexers = ['major_axis', 'labels', 'minor_axis']\n _maybe_remove(store, 'p4d')\n store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)\n store.append('p4d', p4d.ix[:, :, 10:, :])\n assert_panel4d_equal(store['p4d'], p4d)\n check_indexers('p4d', indexers)\n\n # partial selection\n result = store.select('p4d', ['labels=l1'])\n expected = p4d.reindex(labels=['l1'])\n assert_panel4d_equal(result, expected)\n\n # partial selection2\n result = store.select('p4d', [Term(\n 'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])\n expected = p4d.reindex(\n labels=['l1'], items=['ItemA'], minor_axis=['B'])\n assert_panel4d_equal(result, expected)\n\n # non-existant partial selection\n result = store.select('p4d', [Term(\n 'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])\n expected = p4d.reindex(labels=['l1'], items=[],\n minor_axis=['B'])\n assert_panel4d_equal(result, expected)\n\n def test_append_with_strings(self):\n\n with ensure_clean_store(self.path) as store:\n wp = tm.makePanel()\n wp2 = wp.rename_axis(\n dict([(x, \"%s_extra\" % x) for x in wp.minor_axis]), axis=2)\n\n def check_col(key, name, size):\n self.assertEqual(getattr(store.get_storer(\n key).table.description, name).itemsize, size)\n\n store.append('s1', wp, min_itemsize=20)\n store.append('s1', wp2)\n expected = concat([wp, wp2], axis=2)\n expected = expected.reindex(minor_axis=sorted(expected.minor_axis))\n assert_panel_equal(store['s1'], expected)\n check_col('s1', 'minor_axis', 20)\n\n # test dict format\n store.append('s2', wp, min_itemsize={'minor_axis': 20})\n store.append('s2', wp2)\n expected = concat([wp, wp2], axis=2)\n expected = expected.reindex(minor_axis=sorted(expected.minor_axis))\n assert_panel_equal(store['s2'], expected)\n check_col('s2', 'minor_axis', 20)\n\n # apply the wrong field (similar to #1)\n store.append('s3', wp, min_itemsize={'major_axis': 20})\n 
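# min_itemsize was keyed on the wrong axis, so the minor_axis strings keep\n            # their inferred width and appending wp2 (whose labels carry the longer\n            # '_extra' suffix) must raise.\n            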
self.assertRaises(ValueError, store.append, 's3', wp2)\n\n # test truncation of bigger strings\n store.append('s4', wp)\n self.assertRaises(ValueError, store.append, 's4', wp2)\n\n # avoid truncation on elements\n df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])\n store.append('df_big', df)\n tm.assert_frame_equal(store.select('df_big'), df)\n check_col('df_big', 'values_block_1', 15)\n\n # appending smaller string ok\n df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])\n store.append('df_big', df2)\n expected = concat([df, df2])\n tm.assert_frame_equal(store.select('df_big'), expected)\n check_col('df_big', 'values_block_1', 15)\n\n # avoid truncation on elements\n df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])\n store.append('df_big2', df, min_itemsize={'values': 50})\n tm.assert_frame_equal(store.select('df_big2'), df)\n check_col('df_big2', 'values_block_1', 50)\n\n # bigger string on next append\n store.append('df_new', df)\n df_new = DataFrame(\n [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])\n self.assertRaises(ValueError, store.append, 'df_new', df_new)\n\n # min_itemsize on Series index (GH 11412)\n df = tm.makeMixedDataFrame().set_index('C')\n store.append('ss', df['B'], min_itemsize={'index': 4})\n tm.assert_series_equal(store.select('ss'), df['B'])\n\n # same as above, with data_columns=True\n store.append('ss2', df['B'], data_columns=True,\n min_itemsize={'index': 4})\n tm.assert_series_equal(store.select('ss2'), df['B'])\n\n # min_itemsize in index without appending (GH 10381)\n store.put('ss3', df, format='table',\n min_itemsize={'index': 6})\n # just make sure there is a longer string:\n df2 = df.copy().reset_index().assign(C='longer').set_index('C')\n store.append('ss3', df2)\n tm.assert_frame_equal(store.select('ss3'),\n pd.concat([df, df2]))\n\n # same as above, with a Series\n store.put('ss4', df['B'], format='table',\n min_itemsize={'index': 6})\n store.append('ss4', df2['B'])\n tm.assert_series_equal(store.select('ss4'),\n pd.concat([df['B'], df2['B']]))\n\n # with nans\n _maybe_remove(store, 'df')\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.ix[1:4, 'string'] = np.nan\n df['string2'] = 'bar'\n df.ix[4:8, 'string2'] = np.nan\n df['string3'] = 'bah'\n df.ix[1:, 'string3'] = np.nan\n store.append('df', df)\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n with ensure_clean_store(self.path) as store:\n\n def check_col(key, name, size):\n self.assertEqual(getattr(store.get_storer(\n key).table.description, name).itemsize, size)\n\n df = DataFrame(dict(A='foo', B='bar'), index=range(10))\n\n # a min_itemsize that creates a data_column\n _maybe_remove(store, 'df')\n store.append('df', df, min_itemsize={'A': 200})\n check_col('df', 'A', 200)\n self.assertEqual(store.get_storer('df').data_columns, ['A'])\n\n # a min_itemsize that creates a data_column2\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})\n check_col('df', 'A', 200)\n self.assertEqual(store.get_storer('df').data_columns, ['B', 'A'])\n\n # a min_itemsize that creates a data_column2\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=[\n 'B'], min_itemsize={'values': 200})\n check_col('df', 'B', 200)\n check_col('df', 'values_block_0', 200)\n self.assertEqual(store.get_storer('df').data_columns, ['B'])\n\n # infer the .typ on subsequent appends\n _maybe_remove(store, 'df')\n store.append('df', df[:5], min_itemsize=200)\n store.append('df', df[5:], min_itemsize=200)\n 
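# the 200-byte string columns created by the first chunk define the table\n            # schema that the second append is validated against, so the full frame\n            # round-trips unchanged.\n            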
tm.assert_frame_equal(store['df'], df)\n\n # invalid min_itemsize keys\n df = DataFrame(['foo', 'foo', 'foo', 'barh',\n 'barh', 'barh'], columns=['A'])\n _maybe_remove(store, 'df')\n self.assertRaises(ValueError, store.append, 'df',\n df, min_itemsize={'foo': 20, 'foobar': 20})\n\n def test_to_hdf_with_min_itemsize(self):\n\n with ensure_clean_path(self.path) as path:\n\n # min_itemsize in index with to_hdf (GH 10381)\n df = tm.makeMixedDataFrame().set_index('C')\n df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})\n # just make sure there is a longer string:\n df2 = df.copy().reset_index().assign(C='longer').set_index('C')\n df2.to_hdf(path, 'ss3', append=True, format='table')\n tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),\n pd.concat([df, df2]))\n\n # same as above, with a Series\n df['B'].to_hdf(path, 'ss4', format='table',\n min_itemsize={'index': 6})\n df2['B'].to_hdf(path, 'ss4', append=True, format='table')\n tm.assert_series_equal(pd.read_hdf(path, 'ss4'),\n pd.concat([df['B'], df2['B']]))\n\n def test_append_with_data_columns(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n df.loc[:, 'B'].iloc[0] = 1.\n _maybe_remove(store, 'df')\n store.append('df', df[:2], data_columns=['B'])\n store.append('df', df[2:])\n tm.assert_frame_equal(store['df'], df)\n\n # check that we have indicies created\n assert(store._handle.root.df.table.cols.index.is_indexed is True)\n assert(store._handle.root.df.table.cols.B.is_indexed is True)\n\n # data column searching\n result = store.select('df', [Term('B>0')])\n expected = df[df.B > 0]\n tm.assert_frame_equal(result, expected)\n\n # data column searching (with an indexable and a data_columns)\n result = store.select(\n 'df', [Term('B>0'), Term('index>df.index[3]')])\n df_new = df.reindex(index=df.index[4:])\n expected = df_new[df_new.B > 0]\n tm.assert_frame_equal(result, expected)\n\n # data column selection with a string data_column\n df_new = df.copy()\n df_new['string'] = 'foo'\n df_new.loc[1:4, 'string'] = np.nan\n df_new.loc[5:6, 'string'] = 'bar'\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'])\n result = store.select('df', [Term('string=foo')])\n expected = df_new[df_new.string == 'foo']\n tm.assert_frame_equal(result, expected)\n\n # using min_itemsize and a data column\n def check_col(key, name, size):\n self.assertEqual(getattr(store.get_storer(\n key).table.description, name).itemsize, size)\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'],\n min_itemsize={'string': 30})\n check_col('df', 'string', 30)\n _maybe_remove(store, 'df')\n store.append(\n 'df', df_new, data_columns=['string'], min_itemsize=30)\n check_col('df', 'string', 30)\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'],\n min_itemsize={'values': 30})\n check_col('df', 'string', 30)\n\n with ensure_clean_store(self.path) as store:\n df_new['string2'] = 'foobarbah'\n df_new['string_block1'] = 'foobarbah1'\n df_new['string_block2'] = 'foobarbah2'\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string', 'string2'],\n min_itemsize={'string': 30, 'string2': 40,\n 'values': 50})\n check_col('df', 'string', 30)\n check_col('df', 'string2', 40)\n check_col('df', 'values_block_1', 50)\n\n with ensure_clean_store(self.path) as store:\n # multiple data columns\n df_new = df.copy()\n df_new.ix[0, 'A'] = 1.\n df_new.ix[0, 'B'] = -1.\n df_new['string'] = 'foo'\n 
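# mix NaNs into the string columns so the selects below can combine terms\n            # on numeric (A, B) and string (string, string2) data columns.\n            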
df_new.loc[1:4, 'string'] = np.nan\n df_new.loc[5:6, 'string'] = 'bar'\n df_new['string2'] = 'foo'\n df_new.loc[2:5, 'string2'] = np.nan\n df_new.loc[7:8, 'string2'] = 'bar'\n _maybe_remove(store, 'df')\n store.append(\n 'df', df_new, data_columns=['A', 'B', 'string', 'string2'])\n result = store.select('df', [Term('string=foo'), Term(\n 'string2=foo'), Term('A>0'), Term('B<0')])\n expected = df_new[(df_new.string == 'foo') & (\n df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n # yield an empty frame\n result = store.select('df', [Term('string=foo'), Term(\n 'string2=cool')])\n expected = df_new[(df_new.string == 'foo') & (\n df_new.string2 == 'cool')]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n with ensure_clean_store(self.path) as store:\n # doc example\n df_dc = df.copy()\n df_dc['string'] = 'foo'\n df_dc.ix[4:6, 'string'] = np.nan\n df_dc.ix[7:9, 'string'] = 'bar'\n df_dc['string2'] = 'cool'\n df_dc['datetime'] = Timestamp('20010102')\n df_dc = df_dc._convert(datetime=True)\n df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan\n\n _maybe_remove(store, 'df_dc')\n store.append('df_dc', df_dc,\n data_columns=['B', 'C', 'string',\n 'string2', 'datetime'])\n result = store.select('df_dc', [Term('B>0')])\n\n expected = df_dc[df_dc.B > 0]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n result = store.select(\n 'df_dc', ['B > 0', 'C > 0', 'string == foo'])\n expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (\n df_dc.string == 'foo')]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n with ensure_clean_store(self.path) as store:\n # doc example part 2\n np.random.seed(1234)\n index = date_range('1/1/2000', periods=8)\n df_dc = DataFrame(np.random.randn(8, 3), index=index,\n columns=['A', 'B', 'C'])\n df_dc['string'] = 'foo'\n df_dc.ix[4:6, 'string'] = np.nan\n df_dc.ix[7:9, 'string'] = 'bar'\n df_dc.ix[:, ['B', 'C']] = df_dc.ix[:, ['B', 'C']].abs()\n df_dc['string2'] = 'cool'\n\n # on-disk operations\n store.append('df_dc', df_dc, data_columns=[\n 'B', 'C', 'string', 'string2'])\n\n result = store.select('df_dc', [Term('B>0')])\n expected = df_dc[df_dc.B > 0]\n tm.assert_frame_equal(result, expected)\n\n result = store.select(\n 'df_dc', ['B > 0', 'C > 0', 'string == \"foo\"'])\n expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &\n (df_dc.string == 'foo')]\n tm.assert_frame_equal(result, expected)\n\n with ensure_clean_store(self.path) as store:\n # panel\n # GH5717 not handling data_columns\n np.random.seed(1234)\n p = tm.makePanel()\n\n store.append('p1', p)\n tm.assert_panel_equal(store.select('p1'), p)\n\n store.append('p2', p, data_columns=True)\n tm.assert_panel_equal(store.select('p2'), p)\n\n result = store.select('p2', where='ItemA>0')\n expected = p.to_frame()\n expected = expected[expected['ItemA'] > 0]\n tm.assert_frame_equal(result.to_frame(), expected)\n\n result = store.select('p2', where='ItemA>0 & minor_axis=[\"A\",\"B\"]')\n expected = p.to_frame()\n expected = expected[expected['ItemA'] > 0]\n expected = expected[expected.reset_index(\n level=['major']).index.isin(['A', 'B'])]\n tm.assert_frame_equal(result.to_frame(), expected)\n\n def test_create_table_index(self):\n\n with ensure_clean_store(self.path) as store:\n\n def col(t, column):\n return getattr(store.get_storer(t).table.cols, column)\n\n # index=False\n wp = tm.makePanel()\n store.append('p5', wp, index=False)\n store.create_table_index('p5', columns=['major_axis'])\n 
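# index=False skips index creation at append time; create_table_index then\n            # builds an index only for the requested column, so minor_axis should\n            # remain unindexed.\n            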
assert(col('p5', 'major_axis').is_indexed is True)\n assert(col('p5', 'minor_axis').is_indexed is False)\n\n # index=True\n store.append('p5i', wp, index=True)\n assert(col('p5i', 'major_axis').is_indexed is True)\n assert(col('p5i', 'minor_axis').is_indexed is True)\n\n # default optlevels\n store.get_storer('p5').create_index()\n assert(col('p5', 'major_axis').index.optlevel == 6)\n assert(col('p5', 'minor_axis').index.kind == 'medium')\n\n # let's change the indexing scheme\n store.create_table_index('p5')\n assert(col('p5', 'major_axis').index.optlevel == 6)\n assert(col('p5', 'minor_axis').index.kind == 'medium')\n store.create_table_index('p5', optlevel=9)\n assert(col('p5', 'major_axis').index.optlevel == 9)\n assert(col('p5', 'minor_axis').index.kind == 'medium')\n store.create_table_index('p5', kind='full')\n assert(col('p5', 'major_axis').index.optlevel == 9)\n assert(col('p5', 'minor_axis').index.kind == 'full')\n store.create_table_index('p5', optlevel=1, kind='light')\n assert(col('p5', 'major_axis').index.optlevel == 1)\n assert(col('p5', 'minor_axis').index.kind == 'light')\n\n # data columns\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df['string2'] = 'bar'\n store.append('f', df, data_columns=['string', 'string2'])\n assert(col('f', 'index').is_indexed is True)\n assert(col('f', 'string').is_indexed is True)\n assert(col('f', 'string2').is_indexed is True)\n\n # specify index=columns\n store.append(\n 'f2', df, index=['string'], data_columns=['string', 'string2'])\n assert(col('f2', 'index').is_indexed is False)\n assert(col('f2', 'string').is_indexed is True)\n assert(col('f2', 'string2').is_indexed is False)\n\n # try to index a non-table\n _maybe_remove(store, 'f2')\n store.put('f2', df)\n self.assertRaises(TypeError, store.create_table_index, 'f2')\n\n def test_append_diff_item_order(self):\n\n wp = tm.makePanel()\n wp1 = wp.ix[:, :10, :]\n wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]\n\n with ensure_clean_store(self.path) as store:\n store.put('panel', wp1, format='table')\n self.assertRaises(ValueError, store.put, 'panel', wp2,\n append=True)\n\n def test_append_hierarchical(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n with ensure_clean_store(self.path) as store:\n store.append('mi', df)\n result = store.select('mi')\n tm.assert_frame_equal(result, df)\n\n # GH 3748\n result = store.select('mi', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n with ensure_clean_path('test.hdf') as path:\n df.to_hdf(path, 'df', format='table')\n result = read_hdf(path, 'df', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n def test_column_multiindex(self):\n # GH 4710\n # recreate multi-indexes properly\n\n index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),\n ('B', 'a'), ('B', 'b')],\n names=['first', 'second'])\n df = DataFrame(np.arange(12).reshape(3, 4), columns=index)\n expected = df.copy()\n if isinstance(expected.index, RangeIndex):\n expected.index = Int64Index(expected.index)\n\n with ensure_clean_store(self.path) as store:\n\n store.put('df', df)\n tm.assert_frame_equal(store['df'], expected,\n check_index_type=True,\n check_column_type=True)\n\n store.put('df1', df, format='table')\n 
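# the table format must also reconstruct the column MultiIndex (names\n            # 'first'/'second'); passing data_columns for MultiIndex columns is\n            # rejected just below.\n            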
tm.assert_frame_equal(store['df1'], expected,\n check_index_type=True,\n check_column_type=True)\n\n self.assertRaises(ValueError, store.put, 'df2', df,\n format='table', data_columns=['A'])\n self.assertRaises(ValueError, store.put, 'df3', df,\n format='table', data_columns=True)\n\n # appending multi-column on existing table (see GH 6167)\n with ensure_clean_store(self.path) as store:\n store.append('df2', df)\n store.append('df2', df)\n\n tm.assert_frame_equal(store['df2'], concat((df, df)))\n\n # non_index_axes name\n df = DataFrame(np.arange(12).reshape(3, 4),\n columns=Index(list('ABCD'), name='foo'))\n expected = df.copy()\n if isinstance(expected.index, RangeIndex):\n expected.index = Int64Index(expected.index)\n\n with ensure_clean_store(self.path) as store:\n\n store.put('df1', df, format='table')\n tm.assert_frame_equal(store['df1'], expected,\n check_index_type=True,\n check_column_type=True)\n\n def test_store_multiindex(self):\n\n # validate multi-index names\n # GH 5527\n with ensure_clean_store(self.path) as store:\n\n def make_index(names=None):\n return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),\n s, t)\n for d in range(1, 3)\n for s in range(2)\n for t in range(3)],\n names=names)\n\n # no names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index())\n store.append('df', df)\n tm.assert_frame_equal(store.select('df'), df)\n\n # partial names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index(['date', None, None]))\n store.append('df', df)\n tm.assert_frame_equal(store.select('df'), df)\n\n # series\n _maybe_remove(store, 's')\n s = Series(np.zeros(12), index=make_index(['date', None, None]))\n store.append('s', s)\n xp = Series(np.zeros(12), index=make_index(\n ['date', 'level_1', 'level_2']))\n tm.assert_series_equal(store.select('s'), xp)\n\n # dup with column\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index(['date', 'a', 't']))\n self.assertRaises(ValueError, store.append, 'df', df)\n\n # dup within level\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'],\n index=make_index(['date', 'date', 'date']))\n self.assertRaises(ValueError, store.append, 'df', df)\n\n # fully names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index(['date', 's', 't']))\n store.append('df', df)\n tm.assert_frame_equal(store.select('df'), df)\n\n def test_select_columns_in_where(self):\n\n # GH 6169\n # recreate multi-indexes when columns is passed\n # in the `where` argument\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo_name', 'bar_name'])\n\n # With a DataFrame\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table')\n expected = df[['A']]\n\n tm.assert_frame_equal(store.select('df', columns=['A']), expected)\n\n tm.assert_frame_equal(store.select(\n 'df', where=\"columns=['A']\"), expected)\n\n # With a Series\n s = Series(np.random.randn(10), index=index,\n name='A')\n with ensure_clean_store(self.path) as store:\n store.put('s', s, format='table')\n tm.assert_series_equal(store.select('s', where=\"columns=['A']\"), s)\n\n def test_mi_data_columns(self):\n # GH 14435\n idx = 
pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),\n range(5)], names=['date', 'id'])\n df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=True)\n\n actual = store.select('df', where='id == 1')\n expected = df.iloc[[1], :]\n tm.assert_frame_equal(actual, expected)\n\n def test_pass_spec_to_storer(self):\n\n df = tm.makeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df)\n self.assertRaises(TypeError, store.select, 'df', columns=['A'])\n self.assertRaises(TypeError, store.select,\n 'df', where=[('columns=A')])\n\n def test_append_misc(self):\n\n with ensure_clean_store(self.path) as store:\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n\n # unsuported data types for non-tables\n p4d = tm.makePanel4D()\n self.assertRaises(TypeError, store.put, 'p4d', p4d)\n\n # unsuported data types\n self.assertRaises(TypeError, store.put, 'abc', None)\n self.assertRaises(TypeError, store.put, 'abc', '123')\n self.assertRaises(TypeError, store.put, 'abc', 123)\n self.assertRaises(TypeError, store.put, 'abc', np.arange(5))\n\n df = tm.makeDataFrame()\n store.append('df', df, chunksize=1)\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n store.append('df1', df, expectedrows=10)\n result = store.select('df1')\n tm.assert_frame_equal(result, df)\n\n # more chunksize in append tests\n def check(obj, comparator):\n for c in [10, 200, 1000]:\n with ensure_clean_store(self.path, mode='w') as store:\n store.append('obj', obj, chunksize=c)\n result = store.select('obj')\n comparator(result, obj)\n\n df = tm.makeDataFrame()\n df['string'] = 'foo'\n df['float322'] = 1.\n df['float322'] = df['float322'].astype('float32')\n df['bool'] = df['float322'] > 0\n df['time1'] = Timestamp('20130101')\n df['time2'] = Timestamp('20130102')\n check(df, tm.assert_frame_equal)\n\n p = tm.makePanel()\n check(p, assert_panel_equal)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n p4d = tm.makePanel4D()\n check(p4d, assert_panel4d_equal)\n\n # empty frame, GH4273\n with ensure_clean_store(self.path) as store:\n\n # 0 len\n df_empty = DataFrame(columns=list('ABC'))\n store.append('df', df_empty)\n self.assertRaises(KeyError, store.select, 'df')\n\n # repeated append of 0/non-zero frames\n df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))\n store.append('df', df)\n assert_frame_equal(store.select('df'), df)\n store.append('df', df_empty)\n assert_frame_equal(store.select('df'), df)\n\n # store\n df = DataFrame(columns=list('ABC'))\n store.put('df2', df)\n assert_frame_equal(store.select('df2'), df)\n\n # 0 len\n p_empty = Panel(items=list('ABC'))\n store.append('p', p_empty)\n self.assertRaises(KeyError, store.select, 'p')\n\n # repeated append of 0/non-zero frames\n p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))\n store.append('p', p)\n assert_panel_equal(store.select('p'), p)\n store.append('p', p_empty)\n assert_panel_equal(store.select('p'), p)\n\n # store\n store.put('p2', p_empty)\n assert_panel_equal(store.select('p2'), p_empty)\n\n def test_append_raise(self):\n\n with ensure_clean_store(self.path) as store:\n\n # test append with invalid input to get good error messages\n\n # list in column\n df = tm.makeDataFrame()\n df['invalid'] = [['a']] * len(df)\n self.assertEqual(df.dtypes['invalid'], np.object_)\n self.assertRaises(TypeError, store.append, 'df', df)\n\n # multiple invalid columns\n 
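# list-valued object columns have no corresponding table column type, so\n            # append raises TypeError whether one or several such columns are\n            # present.\n            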
df['invalid2'] = [['a']] * len(df)\n df['invalid3'] = [['a']] * len(df)\n self.assertRaises(TypeError, store.append, 'df', df)\n\n # datetime with embedded nans as object\n df = tm.makeDataFrame()\n s = Series(datetime.datetime(2001, 1, 2), index=df.index)\n s = s.astype(object)\n s[0:5] = np.nan\n df['invalid'] = s\n self.assertEqual(df.dtypes['invalid'], np.object_)\n self.assertRaises(TypeError, store.append, 'df', df)\n\n # directy ndarray\n self.assertRaises(TypeError, store.append, 'df', np.arange(10))\n\n # series directly\n self.assertRaises(TypeError, store.append,\n 'df', Series(np.arange(10)))\n\n # appending an incompatbile table\n df = tm.makeDataFrame()\n store.append('df', df)\n\n df['foo'] = 'foo'\n self.assertRaises(ValueError, store.append, 'df', df)\n\n def test_table_index_incompatible_dtypes(self):\n df1 = DataFrame({'a': [1, 2, 3]})\n df2 = DataFrame({'a': [4, 5, 6]},\n index=date_range('1/1/2000', periods=3))\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df1, format='table')\n self.assertRaises(TypeError, store.put, 'frame', df2,\n format='table', append=True)\n\n def test_table_values_dtypes_roundtrip(self):\n\n with ensure_clean_store(self.path) as store:\n df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')\n store.append('df_f8', df1)\n assert_series_equal(df1.dtypes, store['df_f8'].dtypes)\n\n df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')\n store.append('df_i8', df2)\n assert_series_equal(df2.dtypes, store['df_i8'].dtypes)\n\n # incompatible dtype\n self.assertRaises(ValueError, store.append, 'df_i8', df1)\n\n # check creation/storage/retrieval of float32 (a bit hacky to\n # actually create them thought)\n df1 = DataFrame(\n np.array([[1], [2], [3]], dtype='f4'), columns=['A'])\n store.append('df_f4', df1)\n assert_series_equal(df1.dtypes, store['df_f4'].dtypes)\n assert df1.dtypes[0] == 'float32'\n\n # check with mixed dtypes\n df1 = DataFrame(dict([(c, Series(np.random.randn(5), dtype=c))\n for c in ['float32', 'float64', 'int32',\n 'int64', 'int16', 'int8']]))\n df1['string'] = 'foo'\n df1['float322'] = 1.\n df1['float322'] = df1['float322'].astype('float32')\n df1['bool'] = df1['float32'] > 0\n df1['time1'] = Timestamp('20130101')\n df1['time2'] = Timestamp('20130102')\n\n store.append('df_mixed_dtypes1', df1)\n result = store.select('df_mixed_dtypes1').get_dtype_counts()\n expected = Series({'float32': 2, 'float64': 1, 'int32': 1,\n 'bool': 1, 'int16': 1, 'int8': 1,\n 'int64': 1, 'object': 1, 'datetime64[ns]': 2})\n result.sort()\n expected.sort()\n tm.assert_series_equal(result, expected)\n\n def test_table_mixed_dtypes(self):\n\n # frame\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.ix[3:6, ['obj1']] = np.nan\n df = df.consolidate()._convert(datetime=True)\n\n with ensure_clean_store(self.path) as store:\n store.append('df1_mixed', df)\n tm.assert_frame_equal(store.select('df1_mixed'), df)\n\n # panel\n wp = tm.makePanel()\n wp['obj1'] = 'foo'\n wp['obj2'] = 'bar'\n wp['bool1'] = wp['ItemA'] > 0\n wp['bool2'] = wp['ItemB'] > 0\n wp['int1'] = 1\n wp['int2'] = 2\n wp = wp.consolidate()\n\n with ensure_clean_store(self.path) as store:\n store.append('p1_mixed', wp)\n assert_panel_equal(store.select('p1_mixed'), 
wp)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # ndim\n wp = tm.makePanel4D()\n wp['obj1'] = 'foo'\n wp['obj2'] = 'bar'\n wp['bool1'] = wp['l1'] > 0\n wp['bool2'] = wp['l2'] > 0\n wp['int1'] = 1\n wp['int2'] = 2\n wp = wp.consolidate()\n\n with ensure_clean_store(self.path) as store:\n store.append('p4d_mixed', wp)\n assert_panel4d_equal(store.select('p4d_mixed'), wp)\n\n def test_unimplemented_dtypes_table_columns(self):\n\n with ensure_clean_store(self.path) as store:\n\n l = [('date', datetime.date(2001, 1, 2))]\n\n # py3 ok for unicode\n if not compat.PY3:\n l.append(('unicode', u('\\\\u03c3')))\n\n # currently not supported dtypes ####\n for n, f in l:\n df = tm.makeDataFrame()\n df[n] = f\n self.assertRaises(\n TypeError, store.append, 'df1_%s' % n, df)\n\n # frame\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['datetime1'] = datetime.date(2001, 1, 2)\n df = df.consolidate()._convert(datetime=True)\n\n with ensure_clean_store(self.path) as store:\n # this fails because we have a date in the object block......\n self.assertRaises(TypeError, store.append, 'df_unimplemented', df)\n\n def test_calendar_roundtrip_issue(self):\n\n # 8591\n # doc example from tseries holiday section\n weekmask_egypt = 'Sun Mon Tue Wed Thu'\n holidays = ['2012-05-01',\n datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]\n bday_egypt = pandas.offsets.CustomBusinessDay(\n holidays=holidays, weekmask=weekmask_egypt)\n dt = datetime.datetime(2013, 4, 30)\n dts = date_range(dt, periods=5, freq=bday_egypt)\n\n s = (Series(dts.weekday, dts).map(\n Series('Mon Tue Wed Thu Fri Sat Sun'.split())))\n\n with ensure_clean_store(self.path) as store:\n\n store.put('fixed', s)\n result = store.select('fixed')\n assert_series_equal(result, s)\n\n store.append('table', s)\n result = store.select('table')\n assert_series_equal(result, s)\n\n def test_append_with_timedelta(self):\n # GH 3577\n # append timedelta\n\n from datetime import timedelta\n df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(\n '20130101') + timedelta(days=i, seconds=10) for i in range(10)]))\n df['C'] = df['A'] - df['B']\n df.ix[3:5, 'C'] = np.nan\n\n with ensure_clean_store(self.path) as store:\n\n # table\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n result = store.select('df', Term(\"C<100000\"))\n assert_frame_equal(result, df)\n\n result = store.select('df', Term(\"C\", \"<\", -3 * 86400))\n assert_frame_equal(result, df.iloc[3:])\n\n result = store.select('df', \"C<'-3D'\")\n assert_frame_equal(result, df.iloc[3:])\n\n # a bit hacky here as we don't really deal with the NaT properly\n\n result = store.select('df', \"C<'-500000s'\")\n result = result.dropna(subset=['C'])\n assert_frame_equal(result, df.iloc[6:])\n\n result = store.select('df', \"C<'-3.5D'\")\n result = result.iloc[1:]\n assert_frame_equal(result, df.iloc[4:])\n\n # fixed\n _maybe_remove(store, 'df2')\n store.put('df2', df)\n result = store.select('df2')\n assert_frame_equal(result, df)\n\n def test_remove(self):\n\n with ensure_clean_store(self.path) as store:\n\n ts = tm.makeTimeSeries()\n df = tm.makeDataFrame()\n store['a'] = ts\n store['b'] = df\n _maybe_remove(store, 'a')\n self.assertEqual(len(store), 1)\n tm.assert_frame_equal(df, store['b'])\n\n _maybe_remove(store, 'b')\n self.assertEqual(len(store), 0)\n\n # nonexistence\n self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')\n\n # pathing\n 
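# removing the nested key 'b/foo' drops only that node and leaves 'a';\n            # the next block removes the parent 'b' (taking its subtree with it),\n            # and the final block exercises __delitem__.\n            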
store['a'] = ts\n store['b/foo'] = df\n _maybe_remove(store, 'foo')\n _maybe_remove(store, 'b/foo')\n self.assertEqual(len(store), 1)\n\n store['a'] = ts\n store['b/foo'] = df\n _maybe_remove(store, 'b')\n self.assertEqual(len(store), 1)\n\n # __delitem__\n store['a'] = ts\n store['b'] = df\n del store['a']\n del store['b']\n self.assertEqual(len(store), 0)\n\n def test_remove_where(self):\n\n with ensure_clean_store(self.path) as store:\n\n # non-existance\n crit1 = Term('index>foo')\n self.assertRaises(KeyError, store.remove, 'a', [crit1])\n\n # try to remove non-table (with crit)\n # non-table ok (where = None)\n wp = tm.makePanel(30)\n store.put('wp', wp, format='table')\n store.remove('wp', [\"minor_axis=['A', 'D']\"])\n rs = store.select('wp')\n expected = wp.reindex(minor_axis=['B', 'C'])\n assert_panel_equal(rs, expected)\n\n # empty where\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n\n # deleted number (entire table)\n n = store.remove('wp', [])\n self.assertTrue(n == 120)\n\n # non - empty where\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n self.assertRaises(ValueError, store.remove,\n 'wp', ['foo'])\n\n # selectin non-table with a where\n # store.put('wp2', wp, format='f')\n # self.assertRaises(ValueError, store.remove,\n # 'wp2', [('column', ['A', 'D'])])\n\n def test_remove_startstop(self):\n # GH #4835 and #6177\n\n with ensure_clean_store(self.path) as store:\n\n wp = tm.makePanel(30)\n\n # start\n _maybe_remove(store, 'wp1')\n store.put('wp1', wp, format='t')\n n = store.remove('wp1', start=32)\n self.assertTrue(n == 120 - 32)\n result = store.select('wp1')\n expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])\n assert_panel_equal(result, expected)\n\n _maybe_remove(store, 'wp2')\n store.put('wp2', wp, format='t')\n n = store.remove('wp2', start=-32)\n self.assertTrue(n == 32)\n result = store.select('wp2')\n expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])\n assert_panel_equal(result, expected)\n\n # stop\n _maybe_remove(store, 'wp3')\n store.put('wp3', wp, format='t')\n n = store.remove('wp3', stop=32)\n self.assertTrue(n == 32)\n result = store.select('wp3')\n expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])\n assert_panel_equal(result, expected)\n\n _maybe_remove(store, 'wp4')\n store.put('wp4', wp, format='t')\n n = store.remove('wp4', stop=-32)\n self.assertTrue(n == 120 - 32)\n result = store.select('wp4')\n expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])\n assert_panel_equal(result, expected)\n\n # start n stop\n _maybe_remove(store, 'wp5')\n store.put('wp5', wp, format='t')\n n = store.remove('wp5', start=16, stop=-16)\n self.assertTrue(n == 120 - 32)\n result = store.select('wp5')\n expected = wp.reindex(major_axis=wp.major_axis[\n :16 // 4].union(wp.major_axis[-16 // 4:]))\n assert_panel_equal(result, expected)\n\n _maybe_remove(store, 'wp6')\n store.put('wp6', wp, format='t')\n n = store.remove('wp6', start=16, stop=16)\n self.assertTrue(n == 0)\n result = store.select('wp6')\n expected = wp.reindex(major_axis=wp.major_axis)\n assert_panel_equal(result, expected)\n\n # with where\n _maybe_remove(store, 'wp7')\n\n # TODO: unused?\n date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa\n\n crit = Term('major_axis=date')\n store.put('wp7', wp, format='t')\n n = store.remove('wp7', where=[crit], stop=80)\n self.assertTrue(n == 28)\n result = store.select('wp7')\n expected = wp.reindex(major_axis=wp.major_axis.difference(\n wp.major_axis[np.arange(0, 20, 3)]))\n 
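            # stop=80 limits the deletion to the first 80 table rows, i.e. the
            # first 20 major_axis dates (4 minor_axis columns per date), so
            # only 7 of the 10 selected dates are removed -> n == 7 * 4 == 28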
assert_panel_equal(result, expected)\n\n def test_remove_crit(self):\n\n with ensure_clean_store(self.path) as store:\n\n wp = tm.makePanel(30)\n\n # group row removal\n _maybe_remove(store, 'wp3')\n date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])\n crit4 = Term('major_axis=date4')\n store.put('wp3', wp, format='t')\n n = store.remove('wp3', where=[crit4])\n self.assertTrue(n == 36)\n\n result = store.select('wp3')\n expected = wp.reindex(major_axis=wp.major_axis.difference(date4))\n assert_panel_equal(result, expected)\n\n # upper half\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n date = wp.major_axis[len(wp.major_axis) // 2]\n\n crit1 = Term('major_axis>date')\n crit2 = Term(\"minor_axis=['A', 'D']\")\n n = store.remove('wp', where=[crit1])\n self.assertTrue(n == 56)\n\n n = store.remove('wp', where=[crit2])\n self.assertTrue(n == 32)\n\n result = store['wp']\n expected = wp.truncate(after=date).reindex(minor=['B', 'C'])\n assert_panel_equal(result, expected)\n\n # individual row elements\n _maybe_remove(store, 'wp2')\n store.put('wp2', wp, format='table')\n\n date1 = wp.major_axis[1:3]\n crit1 = Term('major_axis=date1')\n store.remove('wp2', where=[crit1])\n result = store.select('wp2')\n expected = wp.reindex(major_axis=wp.major_axis.difference(date1))\n assert_panel_equal(result, expected)\n\n date2 = wp.major_axis[5]\n crit2 = Term('major_axis=date2')\n store.remove('wp2', where=[crit2])\n result = store['wp2']\n expected = wp.reindex(major_axis=wp.major_axis.difference(date1)\n .difference(Index([date2])))\n assert_panel_equal(result, expected)\n\n date3 = [wp.major_axis[7], wp.major_axis[9]]\n crit3 = Term('major_axis=date3')\n store.remove('wp2', where=[crit3])\n result = store['wp2']\n expected = wp.reindex(major_axis=wp.major_axis\n .difference(date1)\n .difference(Index([date2]))\n .difference(Index(date3)))\n assert_panel_equal(result, expected)\n\n # corners\n _maybe_remove(store, 'wp4')\n store.put('wp4', wp, format='table')\n n = store.remove(\n 'wp4', where=[Term('major_axis>wp.major_axis[-1]')])\n result = store.select('wp4')\n assert_panel_equal(result, wp)\n\n def test_invalid_terms(self):\n\n with ensure_clean_store(self.path) as store:\n\n with compat_assert_produces_warning(FutureWarning):\n\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.ix[0:4, 'string'] = 'bar'\n wp = tm.makePanel()\n\n p4d = tm.makePanel4D()\n store.put('df', df, format='table')\n store.put('wp', wp, format='table')\n store.put('p4d', p4d, format='table')\n\n # some invalid terms\n self.assertRaises(ValueError, store.select,\n 'wp', \"minor=['A', 'B']\")\n self.assertRaises(ValueError, store.select,\n 'wp', [\"index=['20121114']\"])\n self.assertRaises(ValueError, store.select, 'wp', [\n \"index=['20121114', '20121114']\"])\n self.assertRaises(TypeError, Term)\n\n # more invalid\n self.assertRaises(\n ValueError, store.select, 'df', 'df.index[3]')\n self.assertRaises(SyntaxError, store.select, 'df', 'index>')\n self.assertRaises(\n ValueError, store.select, 'wp',\n \"major_axis<'20000108' & minor_axis['A', 'B']\")\n\n # from the docs\n with ensure_clean_path(self.path) as path:\n dfq = DataFrame(np.random.randn(10, 4), columns=list(\n 'ABCD'), index=date_range('20130101', periods=10))\n dfq.to_hdf(path, 'dfq', format='table', data_columns=True)\n\n # check ok\n read_hdf(path, 'dfq',\n where=\"index>Timestamp('20130104') & columns=['A', 'B']\")\n read_hdf(path, 'dfq', where=\"A>0 or C>0\")\n\n # catch the invalid reference\n with 
ensure_clean_path(self.path) as path:\n dfq = DataFrame(np.random.randn(10, 4), columns=list(\n 'ABCD'), index=date_range('20130101', periods=10))\n dfq.to_hdf(path, 'dfq', format='table')\n\n self.assertRaises(ValueError, read_hdf, path,\n 'dfq', where=\"A>0 or C>0\")\n\n def test_terms(self):\n\n with ensure_clean_store(self.path) as store:\n\n wp = tm.makePanel()\n wpneg = Panel.fromDict({-1: tm.makeDataFrame(),\n 0: tm.makeDataFrame(),\n 1: tm.makeDataFrame()})\n\n with compat_assert_produces_warning(FutureWarning):\n\n p4d = tm.makePanel4D()\n store.put('p4d', p4d, format='table')\n\n store.put('wp', wp, format='table')\n store.put('wpneg', wpneg, format='table')\n\n # panel\n result = store.select('wp', [Term(\n 'major_axis<\"20000108\"'), Term(\"minor_axis=['A', 'B']\")])\n expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])\n assert_panel_equal(result, expected)\n\n # with deprecation\n result = store.select('wp', [Term(\n 'major_axis', '<', \"20000108\"), Term(\"minor_axis=['A', 'B']\")])\n expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])\n tm.assert_panel_equal(result, expected)\n\n # p4d\n with compat_assert_produces_warning(FutureWarning):\n\n result = store.select('p4d',\n [Term('major_axis<\"20000108\"'),\n Term(\"minor_axis=['A', 'B']\"),\n Term(\"items=['ItemA', 'ItemB']\")])\n expected = p4d.truncate(after='20000108').reindex(\n minor=['A', 'B'], items=['ItemA', 'ItemB'])\n assert_panel4d_equal(result, expected)\n\n # back compat invalid terms\n terms = [dict(field='major_axis', op='>', value='20121114'),\n [dict(field='major_axis', op='>', value='20121114')],\n [\"minor_axis=['A','B']\",\n dict(field='major_axis', op='>', value='20121114')]]\n for t in terms:\n with tm.assert_produces_warning(expected_warning=FutureWarning,\n check_stacklevel=False):\n Term(t)\n\n with compat_assert_produces_warning(FutureWarning):\n\n # valid terms\n terms = [('major_axis=20121114'),\n ('major_axis>20121114'),\n ((\"major_axis=['20121114', '20121114']\"),),\n ('major_axis=datetime.datetime(2012, 11, 14)'),\n 'major_axis> 20121114',\n 'major_axis >20121114',\n 'major_axis > 20121114',\n ((\"minor_axis=['A', 'B']\"),),\n ((\"minor_axis=['A', 'B']\"),),\n (((\"minor_axis==['A', 'B']\"),),),\n ((\"items=['ItemA', 'ItemB']\"),),\n ('items=ItemA'),\n ]\n\n for t in terms:\n store.select('wp', t)\n store.select('p4d', t)\n\n # valid for p4d only\n terms = [((\"labels=['l1', 'l2']\"),),\n Term(\"labels=['l1', 'l2']\"),\n ]\n\n for t in terms:\n store.select('p4d', t)\n\n with tm.assertRaisesRegexp(TypeError,\n 'Only named functions are supported'):\n store.select('wp', Term(\n 'major_axis == (lambda x: x)(\"20130101\")'))\n\n # check USub node parsing\n res = store.select('wpneg', Term('items == -1'))\n expected = Panel({-1: wpneg[-1]})\n tm.assert_panel_equal(res, expected)\n\n with tm.assertRaisesRegexp(NotImplementedError,\n 'Unary addition not supported'):\n store.select('wpneg', Term('items == +1'))\n\n def test_term_compat(self):\n with ensure_clean_store(self.path) as store:\n\n wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],\n major_axis=date_range('1/1/2000', periods=5),\n minor_axis=['A', 'B', 'C', 'D'])\n store.append('wp', wp)\n\n result = store.select('wp', [Term('major_axis>20000102'),\n Term('minor_axis', '=', ['A', 'B'])])\n expected = wp.loc[:, wp.major_axis >\n Timestamp('20000102'), ['A', 'B']]\n assert_panel_equal(result, expected)\n\n store.remove('wp', Term('major_axis>20000103'))\n result = store.select('wp')\n expected = 
wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]\n assert_panel_equal(result, expected)\n\n with ensure_clean_store(self.path) as store:\n\n wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],\n major_axis=date_range('1/1/2000', periods=5),\n minor_axis=['A', 'B', 'C', 'D'])\n store.append('wp', wp)\n\n # stringified datetimes\n result = store.select(\n 'wp', [Term('major_axis', '>', datetime.datetime(2000, 1, 2))])\n expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]\n assert_panel_equal(result, expected)\n\n result = store.select(\n 'wp', [Term('major_axis', '>',\n datetime.datetime(2000, 1, 2, 0, 0))])\n expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]\n assert_panel_equal(result, expected)\n\n result = store.select(\n 'wp', [Term('major_axis', '=',\n [datetime.datetime(2000, 1, 2, 0, 0),\n datetime.datetime(2000, 1, 3, 0, 0)])])\n expected = wp.loc[:, [Timestamp('20000102'),\n Timestamp('20000103')]]\n assert_panel_equal(result, expected)\n\n result = store.select('wp', [Term('minor_axis', '=', ['A', 'B'])])\n expected = wp.loc[:, :, ['A', 'B']]\n assert_panel_equal(result, expected)\n\n def test_backwards_compat_without_term_object(self):\n with ensure_clean_store(self.path) as store:\n\n wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],\n major_axis=date_range('1/1/2000', periods=5),\n minor_axis=['A', 'B', 'C', 'D'])\n store.append('wp', wp)\n with assert_produces_warning(expected_warning=FutureWarning,\n check_stacklevel=False):\n result = store.select('wp', [('major_axis>20000102'),\n ('minor_axis', '=', ['A', 'B'])])\n expected = wp.loc[:,\n wp.major_axis > Timestamp('20000102'),\n ['A', 'B']]\n assert_panel_equal(result, expected)\n\n store.remove('wp', ('major_axis>20000103'))\n result = store.select('wp')\n expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]\n assert_panel_equal(result, expected)\n\n with ensure_clean_store(self.path) as store:\n\n wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],\n major_axis=date_range('1/1/2000', periods=5),\n minor_axis=['A', 'B', 'C', 'D'])\n store.append('wp', wp)\n\n # stringified datetimes\n with assert_produces_warning(expected_warning=FutureWarning,\n check_stacklevel=False):\n result = store.select('wp',\n [('major_axis',\n '>',\n datetime.datetime(2000, 1, 2))])\n expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]\n assert_panel_equal(result, expected)\n with assert_produces_warning(expected_warning=FutureWarning,\n check_stacklevel=False):\n result = store.select('wp',\n [('major_axis',\n '>',\n datetime.datetime(2000, 1, 2, 0, 0))])\n expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]\n assert_panel_equal(result, expected)\n with assert_produces_warning(expected_warning=FutureWarning,\n check_stacklevel=False):\n result = store.select('wp',\n [('major_axis',\n '=',\n [datetime.datetime(2000, 1, 2, 0, 0),\n datetime.datetime(2000, 1, 3, 0, 0)])]\n )\n expected = wp.loc[:, [Timestamp('20000102'),\n Timestamp('20000103')]]\n assert_panel_equal(result, expected)\n\n def test_same_name_scoping(self):\n\n with ensure_clean_store(self.path) as store:\n\n import pandas as pd\n df = DataFrame(np.random.randn(20, 2),\n index=pd.date_range('20130101', periods=20))\n store.put('df', df, format='table')\n expected = df[df.index > pd.Timestamp('20130105')]\n\n import datetime # noqa\n result = store.select('df', 'index>datetime.datetime(2013,1,5)')\n assert_frame_equal(result, expected)\n\n from datetime import datetime # noqa\n\n # technically an error, 
but allow it\n result = store.select('df', 'index>datetime.datetime(2013,1,5)')\n assert_frame_equal(result, expected)\n\n result = store.select('df', 'index>datetime(2013,1,5)')\n assert_frame_equal(result, expected)\n\n def test_series(self):\n\n s = tm.makeStringSeries()\n self._check_roundtrip(s, tm.assert_series_equal)\n\n ts = tm.makeTimeSeries()\n self._check_roundtrip(ts, tm.assert_series_equal)\n\n ts2 = Series(ts.index, Index(ts.index, dtype=object))\n self._check_roundtrip(ts2, tm.assert_series_equal)\n\n ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),\n dtype=object))\n self._check_roundtrip(ts3, tm.assert_series_equal,\n check_index_type=False)\n\n def test_sparse_series(self):\n\n s = tm.makeStringSeries()\n s[3:5] = np.nan\n ss = s.to_sparse()\n self._check_roundtrip(ss, tm.assert_series_equal,\n check_series_type=True)\n\n ss2 = s.to_sparse(kind='integer')\n self._check_roundtrip(ss2, tm.assert_series_equal,\n check_series_type=True)\n\n ss3 = s.to_sparse(fill_value=0)\n self._check_roundtrip(ss3, tm.assert_series_equal,\n check_series_type=True)\n\n def test_sparse_frame(self):\n\n s = tm.makeDataFrame()\n s.ix[3:5, 1:3] = np.nan\n s.ix[8:10, -2] = np.nan\n ss = s.to_sparse()\n\n self._check_double_roundtrip(ss, tm.assert_frame_equal,\n check_frame_type=True)\n\n ss2 = s.to_sparse(kind='integer')\n self._check_double_roundtrip(ss2, tm.assert_frame_equal,\n check_frame_type=True)\n\n ss3 = s.to_sparse(fill_value=0)\n self._check_double_roundtrip(ss3, tm.assert_frame_equal,\n check_frame_type=True)\n\n def test_float_index(self):\n\n # GH #454\n index = np.random.randn(10)\n s = Series(np.random.randn(10), index=index)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_tuple_index(self):\n\n # GH #492\n col = np.arange(10)\n idx = [(0., 1.), (2., 3.), (4., 5.)]\n data = np.random.randn(30).reshape((3, 10))\n DF = DataFrame(data, index=idx, columns=col)\n\n expected_warning = Warning if PY35 else PerformanceWarning\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n self._check_roundtrip(DF, tm.assert_frame_equal)\n\n def test_index_types(self):\n\n values = np.random.randn(2)\n\n func = lambda l, r: tm.assert_series_equal(l, r,\n check_dtype=True,\n check_index_type=True,\n check_series_type=True)\n\n # nose has a deprecation warning in 3.5\n expected_warning = Warning if PY35 else PerformanceWarning\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n ser = Series(values, [0, 'y'])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n ser = Series(values, [datetime.datetime.today(), 0])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n ser = Series(values, ['y', 0])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n ser = Series(values, [datetime.date.today(), 'a'])\n self._check_roundtrip(ser, func)\n\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n ser = Series(values, [1.23, 'b'])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1, 1.53])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1, 5])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [datetime.datetime(\n 2012, 1, 1), datetime.datetime(2012, 1, 2)])\n 
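            # an all-datetime index is homogeneous, so this round-trip is not
            # wrapped in assert_produces_warning like the mixed-type indexes
            # above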
self._check_roundtrip(ser, func)\n\n def test_timeseries_preepoch(self):\n\n if sys.version_info[0] == 2 and sys.version_info[1] < 7:\n raise nose.SkipTest(\"won't work on Python < 2.7\")\n\n dr = bdate_range('1/1/1940', '1/1/1960')\n ts = Series(np.random.randn(len(dr)), index=dr)\n try:\n self._check_roundtrip(ts, tm.assert_series_equal)\n except OverflowError:\n raise nose.SkipTest('known failer on some windows platforms')\n\n def test_frame(self):\n\n df = tm.makeDataFrame()\n\n # put in some random NAs\n df.values[0, 0] = np.nan\n df.values[5, 3] = np.nan\n\n self._check_roundtrip_table(df, tm.assert_frame_equal)\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n if not skip_compression:\n self._check_roundtrip_table(df, tm.assert_frame_equal,\n compression=True)\n self._check_roundtrip(df, tm.assert_frame_equal,\n compression=True)\n\n tdf = tm.makeTimeDataFrame()\n self._check_roundtrip(tdf, tm.assert_frame_equal)\n\n if not skip_compression:\n self._check_roundtrip(tdf, tm.assert_frame_equal,\n compression=True)\n\n with ensure_clean_store(self.path) as store:\n # not consolidated\n df['foo'] = np.random.randn(len(df))\n store['df'] = df\n recons = store['df']\n self.assertTrue(recons._data.is_consolidated())\n\n # empty\n self._check_roundtrip(df[:0], tm.assert_frame_equal)\n\n def test_empty_series_frame(self):\n s0 = Series()\n s1 = Series(name='myseries')\n df0 = DataFrame()\n df1 = DataFrame(index=['a', 'b', 'c'])\n df2 = DataFrame(columns=['d', 'e', 'f'])\n\n self._check_roundtrip(s0, tm.assert_series_equal)\n self._check_roundtrip(s1, tm.assert_series_equal)\n self._check_roundtrip(df0, tm.assert_frame_equal)\n self._check_roundtrip(df1, tm.assert_frame_equal)\n self._check_roundtrip(df2, tm.assert_frame_equal)\n\n def test_empty_series(self):\n for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:\n s = Series(dtype=dtype)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_can_serialize_dates(self):\n\n rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n self._check_roundtrip(frame, tm.assert_frame_equal)\n\n def test_store_hierarchical(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n self._check_roundtrip(frame, tm.assert_frame_equal)\n self._check_roundtrip(frame.T, tm.assert_frame_equal)\n self._check_roundtrip(frame['A'], tm.assert_series_equal)\n\n # check that the names are stored\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n tm.assert_frame_equal(recons, frame)\n\n def test_store_index_name(self):\n df = tm.makeDataFrame()\n df.index.name = 'foo'\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = df\n recons = store['frame']\n tm.assert_frame_equal(recons, df)\n\n def test_store_index_name_with_tz(self):\n # GH 13884\n df = pd.DataFrame({'A': [1, 2]})\n df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])\n df.index = df.index.tz_localize('UTC')\n df.index.name = 'foo'\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n recons = store['frame']\n tm.assert_frame_equal(recons, df)\n\n def test_store_series_name(self):\n df = tm.makeDataFrame()\n series = df['A']\n\n with ensure_clean_store(self.path) as 
store:\n store['series'] = series\n recons = store['series']\n tm.assert_series_equal(recons, series)\n\n def test_store_mixed(self):\n\n def _make_one():\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['int1'] = 1\n df['int2'] = 2\n return df.consolidate()\n\n df1 = _make_one()\n df2 = _make_one()\n\n self._check_roundtrip(df1, tm.assert_frame_equal)\n self._check_roundtrip(df2, tm.assert_frame_equal)\n\n with ensure_clean_store(self.path) as store:\n store['obj'] = df1\n tm.assert_frame_equal(store['obj'], df1)\n store['obj'] = df2\n tm.assert_frame_equal(store['obj'], df2)\n\n # check that can store Series of all of these types\n self._check_roundtrip(df1['obj1'], tm.assert_series_equal)\n self._check_roundtrip(df1['bool1'], tm.assert_series_equal)\n self._check_roundtrip(df1['int1'], tm.assert_series_equal)\n\n if not skip_compression:\n self._check_roundtrip(df1['obj1'], tm.assert_series_equal,\n compression=True)\n self._check_roundtrip(df1['bool1'], tm.assert_series_equal,\n compression=True)\n self._check_roundtrip(df1['int1'], tm.assert_series_equal,\n compression=True)\n self._check_roundtrip(df1, tm.assert_frame_equal,\n compression=True)\n\n def test_wide(self):\n\n wp = tm.makePanel()\n self._check_roundtrip(wp, assert_panel_equal)\n\n def test_wide_table(self):\n\n wp = tm.makePanel()\n self._check_roundtrip_table(wp, assert_panel_equal)\n\n def test_select_with_dups(self):\n\n # single dtypes\n df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])\n df.index = date_range('20130101 9:30', periods=10, freq='T')\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df)\n\n result = store.select('df')\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n result = store.select('df', columns=df.columns)\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n result = store.select('df', columns=['A'])\n expected = df.loc[:, ['A']]\n assert_frame_equal(result, expected)\n\n # dups accross dtypes\n df = concat([DataFrame(np.random.randn(10, 4),\n columns=['A', 'A', 'B', 'B']),\n DataFrame(np.random.randint(0, 10, size=20)\n .reshape(10, 2),\n columns=['A', 'C'])],\n axis=1)\n df.index = date_range('20130101 9:30', periods=10, freq='T')\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df)\n\n result = store.select('df')\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n result = store.select('df', columns=df.columns)\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n expected = df.loc[:, ['A']]\n result = store.select('df', columns=['A'])\n assert_frame_equal(result, expected, by_blocks=True)\n\n expected = df.loc[:, ['B', 'A']]\n result = store.select('df', columns=['B', 'A'])\n assert_frame_equal(result, expected, by_blocks=True)\n\n # duplicates on both index and columns\n with ensure_clean_store(self.path) as store:\n store.append('df', df)\n store.append('df', df)\n\n expected = df.loc[:, ['B', 'A']]\n expected = concat([expected, expected])\n result = store.select('df', columns=['B', 'A'])\n assert_frame_equal(result, expected, by_blocks=True)\n\n def test_wide_table_dups(self):\n wp = tm.makePanel()\n with ensure_clean_store(self.path) as store:\n store.put('panel', wp, format='table')\n store.put('panel', wp, format='table', append=True)\n\n with tm.assert_produces_warning(expected_warning=DuplicateWarning):\n recons = store['panel']\n\n 
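            # appending the same panel twice stores duplicate rows; reading
            # them back raises DuplicateWarning, but the retrieved panel still
            # matches the original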
assert_panel_equal(recons, wp)\n\n def test_long(self):\n def _check(left, right):\n assert_panel_equal(left.to_panel(), right.to_panel())\n\n wp = tm.makePanel()\n self._check_roundtrip(wp.to_frame(), _check)\n\n # empty\n # self._check_roundtrip(wp.to_frame()[:0], _check)\n\n def test_longpanel(self):\n pass\n\n def test_overwrite_node(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeDataFrame()\n ts = tm.makeTimeSeries()\n store['a'] = ts\n\n tm.assert_series_equal(store['a'], ts)\n\n def test_sparse_with_compression(self):\n\n # GH 2931\n\n # make sparse dataframe\n arr = np.random.binomial(n=1, p=.01, size=(1000, 10))\n df = DataFrame(arr).to_sparse(fill_value=0)\n\n # case 1: store uncompressed\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression=False,\n check_frame_type=True)\n\n # case 2: store compressed (works)\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression='zlib',\n check_frame_type=True)\n\n # set one series to be completely sparse\n df[0] = np.zeros(1000)\n\n # case 3: store df with completely sparse series uncompressed\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression=False,\n check_frame_type=True)\n\n # case 4: try storing df with completely sparse series compressed\n # (fails)\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression='zlib',\n check_frame_type=True)\n\n def test_select(self):\n wp = tm.makePanel()\n\n with ensure_clean_store(self.path) as store:\n\n # put/select ok\n _maybe_remove(store, 'wp')\n store.put('wp', wp, format='table')\n store.select('wp')\n\n # non-table ok (where = None)\n _maybe_remove(store, 'wp')\n store.put('wp2', wp)\n store.select('wp2')\n\n # selection on the non-indexable with a large number of columns\n wp = Panel(np.random.randn(100, 100, 100),\n items=['Item%03d' % i for i in range(100)],\n major_axis=date_range('1/1/2000', periods=100),\n minor_axis=['E%03d' % i for i in range(100)])\n\n _maybe_remove(store, 'wp')\n store.append('wp', wp)\n items = ['Item%03d' % i for i in range(80)]\n result = store.select('wp', Term('items=items'))\n expected = wp.reindex(items=items)\n assert_panel_equal(expected, result)\n\n # selectin non-table with a where\n # self.assertRaises(ValueError, store.select,\n # 'wp2', ('column', ['A', 'D']))\n\n # select with columns=\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # equivalentsly\n result = store.select('df', [(\"columns=['A', 'B']\")])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # with a data column\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['A'])\n result = store.select('df', ['A > 0'], columns=['A', 'B'])\n expected = df[df.A > 0].reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # all a data columns\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n result = store.select('df', ['A > 0'], columns=['A', 'B'])\n expected = df[df.A > 0].reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # with a data column, but different columns\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['A'])\n result = store.select('df', ['A > 0'], columns=['C', 'D'])\n expected = df[df.A > 0].reindex(columns=['C', 'D'])\n tm.assert_frame_equal(expected, result)\n\n def 
test_select_dtypes(self):\n\n with ensure_clean_store(self.path) as store:\n # with a Timestamp data column (GH #2637)\n df = DataFrame(dict(\n ts=bdate_range('2012-01-01', periods=300),\n A=np.random.randn(300)))\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['ts', 'A'])\n\n result = store.select('df', [Term(\"ts>=Timestamp('2012-02-01')\")])\n expected = df[df.ts >= Timestamp('2012-02-01')]\n tm.assert_frame_equal(expected, result)\n\n # bool columns (GH #2849)\n df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])\n df['object'] = 'foo'\n df.ix[4:5, 'object'] = 'bar'\n df['boolv'] = df['A'] > 0\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n\n expected = (df[df.boolv == True] # noqa\n .reindex(columns=['A', 'boolv']))\n for v in [True, 'true', 1]:\n result = store.select('df', Term(\n 'boolv == %s' % str(v)), columns=['A', 'boolv'])\n tm.assert_frame_equal(expected, result)\n\n expected = (df[df.boolv == False] # noqa\n .reindex(columns=['A', 'boolv']))\n for v in [False, 'false', 0]:\n result = store.select('df', Term(\n 'boolv == %s' % str(v)), columns=['A', 'boolv'])\n tm.assert_frame_equal(expected, result)\n\n # integer index\n df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))\n _maybe_remove(store, 'df_int')\n store.append('df_int', df)\n result = store.select(\n 'df_int', [Term(\"index<10\"), Term(\"columns=['A']\")])\n expected = df.reindex(index=list(df.index)[0:10], columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n # float index\n df = DataFrame(dict(A=np.random.rand(\n 20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))\n _maybe_remove(store, 'df_float')\n store.append('df_float', df)\n result = store.select(\n 'df_float', [Term(\"index<10.0\"), Term(\"columns=['A']\")])\n expected = df.reindex(index=list(df.index)[0:10], columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n with ensure_clean_store(self.path) as store:\n\n # floats w/o NaN\n df = DataFrame(\n dict(cols=range(11), values=range(11)), dtype='float64')\n df['cols'] = (df['cols'] + 10).apply(str)\n\n store.append('df1', df, data_columns=True)\n result = store.select(\n 'df1', where='values>2.0')\n expected = df[df['values'] > 2.0]\n tm.assert_frame_equal(expected, result)\n\n # floats with NaN\n df.iloc[0] = np.nan\n expected = df[df['values'] > 2.0]\n\n store.append('df2', df, data_columns=True, index=False)\n result = store.select(\n 'df2', where='values>2.0')\n tm.assert_frame_equal(expected, result)\n\n # https://github.com/PyTables/PyTables/issues/282\n # bug in selection when 0th row has a np.nan and an index\n # store.append('df3',df,data_columns=True)\n # result = store.select(\n # 'df3', where='values>2.0')\n # tm.assert_frame_equal(expected, result)\n\n # not in first position float with NaN ok too\n df = DataFrame(\n dict(cols=range(11), values=range(11)), dtype='float64')\n df['cols'] = (df['cols'] + 10).apply(str)\n\n df.iloc[1] = np.nan\n expected = df[df['values'] > 2.0]\n\n store.append('df4', df, data_columns=True)\n result = store.select(\n 'df4', where='values>2.0')\n tm.assert_frame_equal(expected, result)\n\n # test selection with comparison against numpy scalar\n # GH 11283\n with ensure_clean_store(self.path) as store:\n df = tm.makeDataFrame()\n\n expected = df[df['A'] > 0]\n\n store.append('df', df, data_columns=True)\n np_zero = np.float64(0) # noqa\n result = store.select('df', where=[\"A>np_zero\"])\n tm.assert_frame_equal(expected, result)\n\n def test_select_with_many_inputs(self):\n\n with 
ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),\n A=np.random.randn(300),\n B=range(300),\n users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +\n ['a%03d' % i for i in range(100)]))\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])\n\n # regular select\n result = store.select('df', [Term(\"ts>=Timestamp('2012-02-01')\")])\n expected = df[df.ts >= Timestamp('2012-02-01')]\n tm.assert_frame_equal(expected, result)\n\n # small selector\n result = store.select(\n 'df', [Term(\"ts>=Timestamp('2012-02-01') & \"\n \"users=['a','b','c']\")])\n expected = df[(df.ts >= Timestamp('2012-02-01')) &\n df.users.isin(['a', 'b', 'c'])]\n tm.assert_frame_equal(expected, result)\n\n # big selector along the columns\n selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]\n result = store.select(\n 'df', [Term(\"ts>=Timestamp('2012-02-01')\"),\n Term('users=selector')])\n expected = df[(df.ts >= Timestamp('2012-02-01')) &\n df.users.isin(selector)]\n tm.assert_frame_equal(expected, result)\n\n selector = range(100, 200)\n result = store.select('df', [Term('B=selector')])\n expected = df[df.B.isin(selector)]\n tm.assert_frame_equal(expected, result)\n self.assertEqual(len(result), 100)\n\n # big selector along the index\n selector = Index(df.ts[0:100].values)\n result = store.select('df', [Term('ts=selector')])\n expected = df[df.ts.isin(selector.values)]\n tm.assert_frame_equal(expected, result)\n self.assertEqual(len(result), 100)\n\n def test_select_iterator(self):\n\n # single table\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame(500)\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n expected = store.select('df')\n\n results = [s for s in store.select('df', iterator=True)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n results = [s for s in store.select('df', chunksize=100)]\n self.assertEqual(len(results), 5)\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n results = [s for s in store.select('df', chunksize=150)]\n result = concat(results)\n tm.assert_frame_equal(result, expected)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeTimeDataFrame(500)\n df.to_hdf(path, 'df_non_table')\n self.assertRaises(TypeError, read_hdf, path,\n 'df_non_table', chunksize=100)\n self.assertRaises(TypeError, read_hdf, path,\n 'df_non_table', iterator=True)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeTimeDataFrame(500)\n df.to_hdf(path, 'df', format='table')\n\n results = [s for s in read_hdf(path, 'df', chunksize=100)]\n result = concat(results)\n\n self.assertEqual(len(results), 5)\n tm.assert_frame_equal(result, df)\n tm.assert_frame_equal(result, read_hdf(path, 'df'))\n\n # multiple\n\n with ensure_clean_store(self.path) as store:\n\n df1 = tm.makeTimeDataFrame(500)\n store.append('df1', df1, data_columns=True)\n df2 = tm.makeTimeDataFrame(500).rename(\n columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n store.append('df2', df2)\n\n df = concat([df1, df2], axis=1)\n\n # full selection\n expected = store.select_as_multiple(\n ['df1', 'df2'], selector='df1')\n results = [s for s in store.select_as_multiple(\n ['df1', 'df2'], selector='df1', chunksize=150)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # where selection\n # expected = store.select_as_multiple(\n # ['df1', 'df2'], where= Term('A>0'), selector='df1')\n # results = []\n # for s in store.select_as_multiple(\n # 
['df1', 'df2'], where= Term('A>0'), selector='df1',\n # chunksize=25):\n # results.append(s)\n # result = concat(results)\n # tm.assert_frame_equal(expected, result)\n\n def test_select_iterator_complete_8014(self):\n\n # GH 8014\n # using iterator and where clause\n chunksize = 1e4\n\n # no iterator\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[0]\n end_dt = expected.index[-1]\n\n # select w/o iteration and no where clause works\n result = store.select('df')\n tm.assert_frame_equal(expected, result)\n\n # select w/o iterator and where clause, single term, begin\n # of range, works\n where = \"index >= '%s'\" % beg_dt\n result = store.select('df', where=where)\n tm.assert_frame_equal(expected, result)\n\n # select w/o iterator and where clause, single term, end\n # of range, works\n where = \"index <= '%s'\" % end_dt\n result = store.select('df', where=where)\n tm.assert_frame_equal(expected, result)\n\n # select w/o iterator and where clause, inclusive range,\n # works\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n result = store.select('df', where=where)\n tm.assert_frame_equal(expected, result)\n\n # with iterator, full range\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[0]\n end_dt = expected.index[-1]\n\n # select w/iterator and no where clause works\n results = [s for s in store.select('df', chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index >= '%s'\" % beg_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # select w/iterator and where clause, single term, end of range\n where = \"index <= '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # select w/iterator and where clause, inclusive range\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n def test_select_iterator_non_complete_8014(self):\n\n # GH 8014\n # using iterator and where clause\n chunksize = 1e4\n\n # with iterator, non complete range\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[1]\n end_dt = expected.index[-2]\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index >= '%s'\" % beg_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[expected.index >= beg_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, single term, end of range\n where = \"index <= '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[expected.index <= end_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, inclusive range\n where = \"index 
>= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[(expected.index >= beg_dt) &\n (expected.index <= end_dt)]\n tm.assert_frame_equal(rexpected, result)\n\n # with iterator, empty where\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n end_dt = expected.index[-1]\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index > '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n self.assertEqual(0, len(results))\n\n def test_select_iterator_many_empty_frames(self):\n\n # GH 8014\n # using iterator and where clause can return many empty\n # frames.\n chunksize = int(1e4)\n\n # with iterator, range limited to the first chunk\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100000, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[0]\n end_dt = expected.index[chunksize - 1]\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index >= '%s'\" % beg_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[expected.index >= beg_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, single term, end of range\n where = \"index <= '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n\n tm.assert_equal(1, len(results))\n result = concat(results)\n rexpected = expected[expected.index <= end_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, inclusive range\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n\n # should be 1, is 10\n tm.assert_equal(1, len(results))\n result = concat(results)\n rexpected = expected[(expected.index >= beg_dt) &\n (expected.index <= end_dt)]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause which selects\n # *nothing*.\n #\n # To be consistent with Python idiom I suggest this should\n # return [] e.g. 
`for e in []: print True` never prints\n # True.\n\n where = \"index <= '%s' & index >= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n\n # should be []\n tm.assert_equal(0, len(results))\n\n def test_retain_index_attributes(self):\n\n # GH 3499, losing frequency info on index recreation\n df = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2000-1-1', periods=3, freq='H'))))\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'data')\n store.put('data', df, format='table')\n\n result = store.get('data')\n tm.assert_frame_equal(df, result)\n\n for attr in ['freq', 'tz', 'name']:\n for idx in ['index', 'columns']:\n self.assertEqual(getattr(getattr(df, idx), attr, None),\n getattr(getattr(result, idx), attr, None))\n\n # try to append a table with a different frequency\n with tm.assert_produces_warning(\n expected_warning=AttributeConflictWarning):\n df2 = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2002-1-1',\n periods=3, freq='D'))))\n store.append('data', df2)\n\n self.assertIsNone(store.get_storer('data').info['index']['freq'])\n\n # this is ok\n _maybe_remove(store, 'df2')\n df2 = DataFrame(dict(\n A=Series(lrange(3),\n index=[Timestamp('20010101'), Timestamp('20010102'),\n Timestamp('20020101')])))\n store.append('df2', df2)\n df3 = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2002-1-1', periods=3,\n freq='D'))))\n store.append('df2', df3)\n\n def test_retain_index_attributes2(self):\n with ensure_clean_path(self.path) as path:\n expected_warning = Warning if PY35 else AttributeConflictWarning\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n\n df = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2000-1-1',\n periods=3, freq='H'))))\n df.to_hdf(path, 'data', mode='w', append=True)\n df2 = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2002-1-1', periods=3,\n freq='D'))))\n df2.to_hdf(path, 'data', append=True)\n\n idx = date_range('2000-1-1', periods=3, freq='H')\n idx.name = 'foo'\n df = DataFrame(dict(A=Series(lrange(3), index=idx)))\n df.to_hdf(path, 'data', mode='w', append=True)\n\n self.assertEqual(read_hdf(path, 'data').index.name, 'foo')\n\n with tm.assert_produces_warning(expected_warning=expected_warning,\n check_stacklevel=False):\n\n idx2 = date_range('2001-1-1', periods=3, freq='H')\n idx2.name = 'bar'\n df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))\n df2.to_hdf(path, 'data', append=True)\n\n self.assertIsNone(read_hdf(path, 'data').index.name)\n\n def test_panel_select(self):\n\n wp = tm.makePanel()\n\n with ensure_clean_store(self.path) as store:\n store.put('wp', wp, format='table')\n date = wp.major_axis[len(wp.major_axis) // 2]\n\n crit1 = ('major_axis>=date')\n crit2 = (\"minor_axis=['A', 'D']\")\n\n result = store.select('wp', [crit1, crit2])\n expected = wp.truncate(before=date).reindex(minor=['A', 'D'])\n assert_panel_equal(result, expected)\n\n result = store.select(\n 'wp', ['major_axis>=\"20000124\"', (\"minor_axis=['A', 'B']\")])\n expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])\n assert_panel_equal(result, expected)\n\n def test_frame_select(self):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n date = df.index[len(df) // 2]\n\n crit1 = Term('index>=date')\n self.assertEqual(crit1.env.scope['date'], date)\n\n crit2 = (\"columns=['A', 'D']\")\n crit3 = ('columns=A')\n\n 
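            # the 'date' referenced in crit1 is resolved from the enclosing
            # Python scope (checked via crit1.env.scope above), so plain
            # string criteria like crit2/crit3 can be mixed with Term objects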
result = store.select('frame', [crit1, crit2])\n expected = df.ix[date:, ['A', 'D']]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('frame', [crit3])\n expected = df.ix[:, ['A']]\n tm.assert_frame_equal(result, expected)\n\n # invalid terms\n df = tm.makeTimeDataFrame()\n store.append('df_time', df)\n self.assertRaises(\n ValueError, store.select, 'df_time', [Term(\"index>0\")])\n\n # can't select if not written as table\n # store['frame'] = df\n # self.assertRaises(ValueError, store.select,\n # 'frame', [crit1, crit2])\n\n def test_frame_select_complex(self):\n # select via complex criteria\n\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.loc[df.index[0:4], 'string'] = 'bar'\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table', data_columns=['string'])\n\n # empty\n result = store.select('df', 'index>df.index[3] & string=\"bar\"')\n expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('df', 'index>df.index[3] & string=\"foo\"')\n expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]\n tm.assert_frame_equal(result, expected)\n\n # or\n result = store.select('df', 'index>df.index[3] | string=\"bar\"')\n expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('df', '(index>df.index[3] & '\n 'index<=df.index[6]) | string=\"bar\"')\n expected = df.loc[((df.index > df.index[3]) & (\n df.index <= df.index[6])) | (df.string == 'bar')]\n tm.assert_frame_equal(result, expected)\n\n # invert\n result = store.select('df', 'string!=\"bar\"')\n expected = df.loc[df.string != 'bar']\n tm.assert_frame_equal(result, expected)\n\n # invert not implemented in numexpr :(\n self.assertRaises(NotImplementedError,\n store.select, 'df', '~(string=\"bar\")')\n\n # invert ok for filters\n result = store.select('df', \"~(columns=['A','B'])\")\n expected = df.loc[:, df.columns.difference(['A', 'B'])]\n tm.assert_frame_equal(result, expected)\n\n # in\n result = store.select(\n 'df', \"index>df.index[3] & columns in ['A','B']\")\n expected = df.loc[df.index > df.index[3]].reindex(columns=[\n 'A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n def test_frame_select_complex2(self):\n\n with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:\n\n pp, hh = paths\n\n # use non-trivial selection criteria\n parms = DataFrame({'A': [1, 1, 2, 2, 3]})\n parms.to_hdf(pp, 'df', mode='w',\n format='table', data_columns=['A'])\n\n selection = read_hdf(pp, 'df', where='A=[2,3]')\n hist = DataFrame(np.random.randn(25, 1),\n columns=['data'],\n index=MultiIndex.from_tuples(\n [(i, j) for i in range(5)\n for j in range(5)],\n names=['l1', 'l2']))\n\n hist.to_hdf(hh, 'df', mode='w', format='table')\n\n expected = read_hdf(hh, 'df', where=Term('l1', '=', [2, 3, 4]))\n\n # list like\n result = read_hdf(hh, 'df', where=Term(\n 'l1', '=', selection.index.tolist()))\n assert_frame_equal(result, expected)\n l = selection.index.tolist() # noqa\n\n # sccope with list like\n store = HDFStore(hh)\n result = store.select('df', where='l1=l')\n assert_frame_equal(result, expected)\n store.close()\n\n result = read_hdf(hh, 'df', where='l1=l')\n assert_frame_equal(result, expected)\n\n # index\n index = selection.index # noqa\n result = read_hdf(hh, 'df', where='l1=index')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh, 'df', where='l1=selection.index')\n assert_frame_equal(result, 
expected)\n\n result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh, 'df', where='l1=list(selection.index)')\n assert_frame_equal(result, expected)\n\n # sccope with index\n store = HDFStore(hh)\n\n result = store.select('df', where='l1=index')\n assert_frame_equal(result, expected)\n\n result = store.select('df', where='l1=selection.index')\n assert_frame_equal(result, expected)\n\n result = store.select('df', where='l1=selection.index.tolist()')\n assert_frame_equal(result, expected)\n\n result = store.select('df', where='l1=list(selection.index)')\n assert_frame_equal(result, expected)\n\n store.close()\n\n def test_invalid_filtering(self):\n\n # can't use more than one filter (atm)\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table')\n\n # not implemented\n self.assertRaises(NotImplementedError, store.select,\n 'df', \"columns=['A'] | columns=['B']\")\n\n # in theory we could deal with this\n self.assertRaises(NotImplementedError, store.select,\n 'df', \"columns=['A','B'] & columns=['C']\")\n\n def test_string_select(self):\n # GH 2973\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame()\n\n # test string ==/!=\n df['x'] = 'none'\n df.ix[2:7, 'x'] = ''\n\n store.append('df', df, data_columns=['x'])\n\n result = store.select('df', Term('x=none'))\n expected = df[df.x == 'none']\n assert_frame_equal(result, expected)\n\n try:\n result = store.select('df', Term('x!=none'))\n expected = df[df.x != 'none']\n assert_frame_equal(result, expected)\n except Exception as detail:\n pprint_thing(\"[{0}]\".format(detail))\n pprint_thing(store)\n pprint_thing(expected)\n\n df2 = df.copy()\n df2.loc[df2.x == '', 'x'] = np.nan\n\n store.append('df2', df2, data_columns=['x'])\n result = store.select('df2', Term('x!=none'))\n expected = df2[isnull(df2.x)]\n assert_frame_equal(result, expected)\n\n # int ==/!=\n df['int'] = 1\n df.ix[2:7, 'int'] = 2\n\n store.append('df3', df, data_columns=['int'])\n\n result = store.select('df3', Term('int=2'))\n expected = df[df.int == 2]\n assert_frame_equal(result, expected)\n\n result = store.select('df3', Term('int!=2'))\n expected = df[df.int != 2]\n assert_frame_equal(result, expected)\n\n def test_read_column(self):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n # error\n self.assertRaises(KeyError, store.select_column, 'df', 'foo')\n\n def f():\n store.select_column('df', 'index', where=['index>5'])\n self.assertRaises(Exception, f)\n\n # valid\n result = store.select_column('df', 'index')\n tm.assert_almost_equal(result.values, Series(df.index).values)\n self.assertIsInstance(result, Series)\n\n # not a data indexable column\n self.assertRaises(\n ValueError, store.select_column, 'df', 'values_block_0')\n\n # a data column\n df2 = df.copy()\n df2['string'] = 'foo'\n store.append('df2', df2, data_columns=['string'])\n result = store.select_column('df2', 'string')\n tm.assert_almost_equal(result.values, df2['string'].values)\n\n # a data column with NaNs, result excludes the NaNs\n df3 = df.copy()\n df3['string'] = 'foo'\n df3.ix[4:6, 'string'] = np.nan\n store.append('df3', df3, data_columns=['string'])\n result = store.select_column('df3', 'string')\n tm.assert_almost_equal(result.values, df3['string'].values)\n\n # start/stop\n result = store.select_column('df3', 'string', start=2)\n 
tm.assert_almost_equal(result.values, df3['string'].values[2:])\n\n result = store.select_column('df3', 'string', start=-2)\n tm.assert_almost_equal(result.values, df3['string'].values[-2:])\n\n result = store.select_column('df3', 'string', stop=2)\n tm.assert_almost_equal(result.values, df3['string'].values[:2])\n\n result = store.select_column('df3', 'string', stop=-2)\n tm.assert_almost_equal(result.values, df3['string'].values[:-2])\n\n result = store.select_column('df3', 'string', start=2, stop=-2)\n tm.assert_almost_equal(result.values, df3['string'].values[2:-2])\n\n result = store.select_column('df3', 'string', start=-2, stop=2)\n tm.assert_almost_equal(result.values, df3['string'].values[-2:2])\n\n # GH 10392 - make sure column name is preserved\n df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})\n store.append('df4', df4, data_columns=True)\n expected = df4['B']\n result = store.select_column('df4', 'B')\n tm.assert_series_equal(result, expected)\n\n def test_coordinates(self):\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n # all\n c = store.select_as_coordinates('df')\n assert((c.values == np.arange(len(df.index))).all())\n\n # get coordinates back & test vs frame\n _maybe_remove(store, 'df')\n\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n store.append('df', df)\n c = store.select_as_coordinates('df', ['index<3'])\n assert((c.values == np.arange(3)).all())\n result = store.select('df', where=c)\n expected = df.ix[0:2, :]\n tm.assert_frame_equal(result, expected)\n\n c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])\n assert((c.values == np.arange(2) + 3).all())\n result = store.select('df', where=c)\n expected = df.ix[3:4, :]\n tm.assert_frame_equal(result, expected)\n self.assertIsInstance(c, Index)\n\n # multiple tables\n _maybe_remove(store, 'df1')\n _maybe_remove(store, 'df2')\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n store.append('df1', df1, data_columns=['A', 'B'])\n store.append('df2', df2)\n\n c = store.select_as_coordinates('df1', ['A>0', 'B>0'])\n df1_result = store.select('df1', c)\n df2_result = store.select('df2', c)\n result = concat([df1_result, df2_result], axis=1)\n\n expected = concat([df1, df2], axis=1)\n expected = expected[(expected.A > 0) & (expected.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n # pass array/mask as the coordinates\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(np.random.randn(1000, 2),\n index=date_range('20000101', periods=1000))\n store.append('df', df)\n c = store.select_column('df', 'index')\n where = c[DatetimeIndex(c).month == 5].index\n expected = df.iloc[where]\n\n # locations\n result = store.select('df', where=where)\n tm.assert_frame_equal(result, expected)\n\n # boolean\n result = store.select('df', where=where)\n tm.assert_frame_equal(result, expected)\n\n # invalid\n self.assertRaises(ValueError, store.select, 'df',\n where=np.arange(len(df), dtype='float64'))\n self.assertRaises(ValueError, store.select, 'df',\n where=np.arange(len(df) + 1))\n self.assertRaises(ValueError, store.select, 'df',\n where=np.arange(len(df)), start=5)\n self.assertRaises(ValueError, store.select, 'df',\n where=np.arange(len(df)), start=5, stop=10)\n\n # selection with filter\n selection = date_range('20000101', periods=500)\n result = store.select('df', where='index in selection')\n expected = df[df.index.isin(selection)]\n 
tm.assert_frame_equal(result, expected)\n\n # list\n df = DataFrame(np.random.randn(10, 2))\n store.append('df2', df)\n result = store.select('df2', where=[0, 3, 5])\n expected = df.iloc[[0, 3, 5]]\n tm.assert_frame_equal(result, expected)\n\n # boolean\n where = [True] * 10\n where[-2] = False\n result = store.select('df2', where=where)\n expected = df.loc[where]\n tm.assert_frame_equal(result, expected)\n\n # start/stop\n result = store.select('df2', start=5, stop=10)\n expected = df[5:10]\n tm.assert_frame_equal(result, expected)\n\n def test_append_to_multiple(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n\n # exceptions\n self.assertRaises(ValueError, store.append_to_multiple,\n {'df1': ['A', 'B'], 'df2': None}, df,\n selector='df3')\n self.assertRaises(ValueError, store.append_to_multiple,\n {'df1': None, 'df2': None}, df, selector='df3')\n self.assertRaises(\n ValueError, store.append_to_multiple, 'df1', df, 'df1')\n\n # regular operation\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1')\n result = store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')\n expected = df[(df.A > 0) & (df.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n def test_append_to_multiple_dropna(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df1.ix[1, ['A', 'B']] = np.nan\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n # dropna=True should guarantee rows are synchronized\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',\n dropna=True)\n result = store.select_as_multiple(['df1', 'df2'])\n expected = df.dropna()\n tm.assert_frame_equal(result, expected)\n tm.assert_index_equal(store.select('df1').index,\n store.select('df2').index)\n\n # dropna=False shouldn't synchronize row indexes\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',\n dropna=False)\n self.assertRaises(\n ValueError, store.select_as_multiple, ['df1', 'df2'])\n assert not store.select('df1').index.equals(\n store.select('df2').index)\n\n def test_select_as_multiple(self):\n\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n\n with ensure_clean_store(self.path) as store:\n\n # no tables stored\n self.assertRaises(Exception, store.select_as_multiple,\n None, where=['A>0', 'B>0'], selector='df1')\n\n store.append('df1', df1, data_columns=['A', 'B'])\n store.append('df2', df2)\n\n # exceptions\n self.assertRaises(Exception, store.select_as_multiple,\n None, where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(Exception, store.select_as_multiple,\n [None], where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(KeyError, store.select_as_multiple,\n ['df1', 'df3'], where=['A>0', 'B>0'],\n selector='df1')\n self.assertRaises(KeyError, store.select_as_multiple,\n ['df3'], where=['A>0', 'B>0'], selector='df1')\n self.assertRaises(KeyError, store.select_as_multiple,\n ['df1', 'df2'], where=['A>0', 'B>0'],\n selector='df4')\n\n # default select\n result = store.select('df1', ['A>0', 'B>0'])\n expected = store.select_as_multiple(\n ['df1'], where=['A>0', 'B>0'], selector='df1')\n tm.assert_frame_equal(result, expected)\n expected = store.select_as_multiple(\n 'df1', where=['A>0', 'B>0'], 
selector='df1')\n tm.assert_frame_equal(result, expected)\n\n # multiple\n result = store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')\n expected = concat([df1, df2], axis=1)\n expected = expected[(expected.A > 0) & (expected.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n # multiple (diff selector)\n result = store.select_as_multiple(['df1', 'df2'], where=[Term(\n 'index>df2.index[4]')], selector='df2')\n expected = concat([df1, df2], axis=1)\n expected = expected[5:]\n tm.assert_frame_equal(result, expected)\n\n # test excpection for diff rows\n store.append('df3', tm.makeTimeDataFrame(nper=50))\n self.assertRaises(ValueError, store.select_as_multiple,\n ['df1', 'df3'], where=['A>0', 'B>0'],\n selector='df1')\n\n def test_nan_selection_bug_4858(self):\n\n # GH 4858; nan selection bug, only works for pytables >= 3.1\n if LooseVersion(tables.__version__) < '3.1.0':\n raise nose.SkipTest('tables version does not support fix for nan '\n 'selection bug: GH 4858')\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(cols=range(6), values=range(6)),\n dtype='float64')\n df['cols'] = (df['cols'] + 10).apply(str)\n df.iloc[0] = np.nan\n\n expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[\n 3., 4., 5.]), index=[3, 4, 5])\n\n # write w/o the index on that particular column\n store.append('df', df, data_columns=True, index=['cols'])\n result = store.select('df', where='values>2.0')\n assert_frame_equal(result, expected)\n\n def test_start_stop_table(self):\n\n with ensure_clean_store(self.path) as store:\n\n # table\n df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))\n store.append('df', df)\n\n result = store.select(\n 'df', [Term(\"columns=['A']\")], start=0, stop=5)\n expected = df.ix[0:4, ['A']]\n tm.assert_frame_equal(result, expected)\n\n # out of range\n result = store.select(\n 'df', [Term(\"columns=['A']\")], start=30, stop=40)\n self.assertTrue(len(result) == 0)\n expected = df.ix[30:40, ['A']]\n tm.assert_frame_equal(result, expected)\n\n def test_start_stop_fixed(self):\n\n with ensure_clean_store(self.path) as store:\n\n # fixed, GH 8287\n df = DataFrame(dict(A=np.random.rand(20),\n B=np.random.rand(20)),\n index=pd.date_range('20130101', periods=20))\n store.put('df', df)\n\n result = store.select(\n 'df', start=0, stop=5)\n expected = df.iloc[0:5, :]\n tm.assert_frame_equal(result, expected)\n\n result = store.select(\n 'df', start=5, stop=10)\n expected = df.iloc[5:10, :]\n tm.assert_frame_equal(result, expected)\n\n # out of range\n result = store.select(\n 'df', start=30, stop=40)\n expected = df.iloc[30:40, :]\n tm.assert_frame_equal(result, expected)\n\n # series\n s = df.A\n store.put('s', s)\n result = store.select('s', start=0, stop=5)\n expected = s.iloc[0:5]\n tm.assert_series_equal(result, expected)\n\n result = store.select('s', start=5, stop=10)\n expected = s.iloc[5:10]\n tm.assert_series_equal(result, expected)\n\n # sparse; not implemented\n df = tm.makeDataFrame()\n df.ix[3:5, 1:3] = np.nan\n df.ix[8:10, -2] = np.nan\n dfs = df.to_sparse()\n store.put('dfs', dfs)\n with self.assertRaises(NotImplementedError):\n store.select('dfs', start=0, stop=5)\n\n def test_select_filter_corner(self):\n\n df = DataFrame(np.random.randn(50, 100))\n df.index = ['%.3d' % c for c in df.index]\n df.columns = ['%.3d' % c for c in df.columns]\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n\n crit = Term('columns=df.columns[:75]')\n result = 
store.select('frame', [crit])\n tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])\n\n crit = Term('columns=df.columns[:75:2]')\n result = store.select('frame', [crit])\n tm.assert_frame_equal(result, df.ix[:, df.columns[:75:2]])\n\n def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):\n\n options = {}\n if compression:\n options['complib'] = _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store['obj'] = obj\n retrieved = store['obj']\n comparator(retrieved, obj, **kwargs)\n\n def _check_double_roundtrip(self, obj, comparator, compression=False,\n **kwargs):\n options = {}\n if compression:\n options['complib'] = compression or _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store['obj'] = obj\n retrieved = store['obj']\n comparator(retrieved, obj, **kwargs)\n store['obj'] = retrieved\n again = store['obj']\n comparator(again, obj, **kwargs)\n\n def _check_roundtrip_table(self, obj, comparator, compression=False):\n options = {}\n if compression:\n options['complib'] = _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store.put('obj', obj, format='table')\n retrieved = store['obj']\n # sorted_obj = _test_sort(obj)\n comparator(retrieved, obj)\n\n def test_multiple_open_close(self):\n # GH 4409, open & close multiple times\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, 'df', mode='w', format='table')\n\n # single\n store = HDFStore(path)\n self.assertNotIn('CLOSED', str(store))\n self.assertTrue(store.is_open)\n store.close()\n self.assertIn('CLOSED', str(store))\n self.assertFalse(store.is_open)\n\n with ensure_clean_path(self.path) as path:\n\n if pytables._table_file_open_policy_is_strict:\n\n # multiples\n store1 = HDFStore(path)\n\n def f():\n HDFStore(path)\n self.assertRaises(ValueError, f)\n store1.close()\n\n else:\n\n # multiples\n store1 = HDFStore(path)\n store2 = HDFStore(path)\n\n self.assertNotIn('CLOSED', str(store1))\n self.assertNotIn('CLOSED', str(store2))\n self.assertTrue(store1.is_open)\n self.assertTrue(store2.is_open)\n\n store1.close()\n self.assertIn('CLOSED', str(store1))\n self.assertFalse(store1.is_open)\n self.assertNotIn('CLOSED', str(store2))\n self.assertTrue(store2.is_open)\n\n store2.close()\n self.assertIn('CLOSED', str(store1))\n self.assertIn('CLOSED', str(store2))\n self.assertFalse(store1.is_open)\n self.assertFalse(store2.is_open)\n\n # nested close\n store = HDFStore(path, mode='w')\n store.append('df', df)\n\n store2 = HDFStore(path)\n store2.append('df2', df)\n store2.close()\n self.assertIn('CLOSED', str(store2))\n self.assertFalse(store2.is_open)\n\n store.close()\n self.assertIn('CLOSED', str(store))\n self.assertFalse(store.is_open)\n\n # double closing\n store = HDFStore(path, mode='w')\n store.append('df', df)\n\n store2 = HDFStore(path)\n store.close()\n self.assertIn('CLOSED', str(store))\n self.assertFalse(store.is_open)\n\n store2.close()\n self.assertIn('CLOSED', str(store2))\n self.assertFalse(store2.is_open)\n\n # ops on a closed store\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, 'df', mode='w', format='table')\n\n store = HDFStore(path)\n store.close()\n\n self.assertRaises(ClosedFileError, store.keys)\n self.assertRaises(ClosedFileError, lambda: 'df' in store)\n self.assertRaises(ClosedFileError, lambda: len(store))\n self.assertRaises(ClosedFileError, lambda: store['df'])\n 
self.assertRaises(ClosedFileError, lambda: store.df)\n self.assertRaises(ClosedFileError, store.select, 'df')\n self.assertRaises(ClosedFileError, store.get, 'df')\n self.assertRaises(ClosedFileError, store.append, 'df2', df)\n self.assertRaises(ClosedFileError, store.put, 'df3', df)\n self.assertRaises(ClosedFileError, store.get_storer, 'df2')\n self.assertRaises(ClosedFileError, store.remove, 'df2')\n\n def f():\n store.select('df')\n tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)\n\n def test_pytables_native_read(self):\n\n with ensure_clean_store(\n tm.get_data_path('legacy_hdf/pytables_native.h5'),\n mode='r') as store:\n d2 = store['detector/readout']\n self.assertIsInstance(d2, DataFrame)\n\n def test_pytables_native2_read(self):\n # fails on win/3.5 oddly\n if PY35 and is_platform_windows():\n raise nose.SkipTest(\"native2 read fails oddly on windows / 3.5\")\n\n with ensure_clean_store(\n tm.get_data_path('legacy_hdf/pytables_native2.h5'),\n mode='r') as store:\n str(store)\n d1 = store['detector']\n self.assertIsInstance(d1, DataFrame)\n\n def test_legacy_read(self):\n with ensure_clean_store(\n tm.get_data_path('legacy_hdf/legacy.h5'),\n mode='r') as store:\n store['a']\n store['b']\n store['c']\n store['d']\n\n def test_legacy_table_read(self):\n # legacy table types\n with ensure_clean_store(\n tm.get_data_path('legacy_hdf/legacy_table.h5'),\n mode='r') as store:\n store.select('df1')\n store.select('df2')\n store.select('wp1')\n\n # force the frame\n store.select('df2', typ='legacy_frame')\n\n # old version warning\n with tm.assert_produces_warning(\n expected_warning=IncompatibilityWarning):\n self.assertRaises(\n Exception, store.select, 'wp1', Term('minor_axis=B'))\n\n df2 = store.select('df2')\n result = store.select('df2', Term('index>df2.index[2]'))\n expected = df2[df2.index > df2.index[2]]\n assert_frame_equal(expected, result)\n\n def test_legacy_0_10_read(self):\n # legacy from 0.10\n with compat_assert_produces_warning(FutureWarning):\n path = tm.get_data_path('legacy_hdf/legacy_0.10.h5')\n with ensure_clean_store(path, mode='r') as store:\n str(store)\n for k in store.keys():\n store.select(k)\n\n def test_legacy_0_11_read(self):\n # legacy from 0.11\n path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')\n with ensure_clean_store(tm.get_data_path(path), mode='r') as store:\n str(store)\n assert 'df' in store\n assert 'df1' in store\n assert 'mi' in store\n df = store.select('df')\n df1 = store.select('df1')\n mi = store.select('mi')\n assert isinstance(df, DataFrame)\n assert isinstance(df1, DataFrame)\n assert isinstance(mi, DataFrame)\n\n def test_copy(self):\n\n with compat_assert_produces_warning(FutureWarning):\n\n def do_copy(f=None, new_f=None, keys=None,\n propindexes=True, **kwargs):\n try:\n if f is None:\n f = tm.get_data_path(os.path.join('legacy_hdf',\n 'legacy_0.10.h5'))\n\n store = HDFStore(f, 'r')\n\n if new_f is None:\n import tempfile\n fd, new_f = tempfile.mkstemp()\n\n tstore = store.copy(\n new_f, keys=keys, propindexes=propindexes, **kwargs)\n\n # check keys\n if keys is None:\n keys = store.keys()\n self.assertEqual(set(keys), set(tstore.keys()))\n\n # check indicies & nrows\n for k in tstore.keys():\n if tstore.get_storer(k).is_table:\n new_t = tstore.get_storer(k)\n orig_t = store.get_storer(k)\n\n self.assertEqual(orig_t.nrows, new_t.nrows)\n\n # check propindixes\n if propindexes:\n for a in orig_t.axes:\n if a.is_indexed:\n self.assertTrue(\n new_t[a.name].is_indexed)\n\n finally:\n safe_close(store)\n 
safe_close(tstore)\n try:\n os.close(fd)\n except:\n pass\n safe_remove(new_f)\n\n do_copy()\n do_copy(keys=['/a', '/b', '/df1_mixed'])\n do_copy(propindexes=False)\n\n # new table\n df = tm.makeDataFrame()\n\n try:\n path = create_tempfile(self.path)\n st = HDFStore(path)\n st.append('df', df, data_columns=['A'])\n st.close()\n do_copy(f=path)\n do_copy(f=path, propindexes=False)\n finally:\n safe_remove(path)\n\n def test_legacy_table_write(self):\n raise nose.SkipTest(\"cannot write legacy tables\")\n\n store = HDFStore(tm.get_data_path(\n 'legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')\n\n df = tm.makeDataFrame()\n wp = tm.makePanel()\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n store.append('mi', df)\n\n df = DataFrame(dict(A='foo', B='bar'), index=lrange(10))\n store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})\n store.append('wp', wp)\n\n store.close()\n\n def test_store_datetime_fractional_secs(self):\n\n with ensure_clean_store(self.path) as store:\n dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)\n series = Series([0], [dt])\n store['a'] = series\n self.assertEqual(store['a'].index[0], dt)\n\n def test_tseries_indices_series(self):\n\n with ensure_clean_store(self.path) as store:\n idx = tm.makeDateIndex(10)\n ser = Series(np.random.randn(len(idx)), idx)\n store['a'] = ser\n result = store['a']\n\n assert_series_equal(result, ser)\n self.assertEqual(type(result.index), type(ser.index))\n self.assertEqual(result.index.freq, ser.index.freq)\n\n idx = tm.makePeriodIndex(10)\n ser = Series(np.random.randn(len(idx)), idx)\n store['a'] = ser\n result = store['a']\n\n assert_series_equal(result, ser)\n self.assertEqual(type(result.index), type(ser.index))\n self.assertEqual(result.index.freq, ser.index.freq)\n\n def test_tseries_indices_frame(self):\n\n with ensure_clean_store(self.path) as store:\n idx = tm.makeDateIndex(10)\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n store['a'] = df\n result = store['a']\n\n assert_frame_equal(result, df)\n self.assertEqual(type(result.index), type(df.index))\n self.assertEqual(result.index.freq, df.index.freq)\n\n idx = tm.makePeriodIndex(10)\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n store['a'] = df\n result = store['a']\n\n assert_frame_equal(result, df)\n self.assertEqual(type(result.index), type(df.index))\n self.assertEqual(result.index.freq, df.index.freq)\n\n def test_unicode_index(self):\n\n unicode_values = [u('\\u03c3'), u('\\u03c3\\u03c3')]\n\n with compat_assert_produces_warning(PerformanceWarning):\n s = Series(np.random.randn(len(unicode_values)), unicode_values)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_unicode_longer_encoded(self):\n # GH 11234\n char = '\\u0394'\n df = pd.DataFrame({'A': [char]})\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table', encoding='utf-8')\n result = store.get('df')\n tm.assert_frame_equal(result, df)\n\n df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table', encoding='utf-8')\n result = store.get('df')\n tm.assert_frame_equal(result, df)\n\n def test_store_datetime_mixed(self):\n\n df = DataFrame(\n {'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})\n ts = tm.makeTimeSeries()\n 
df['d'] = ts.index[:3]\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n # def test_cant_write_multiindex_table(self):\n # # for now, #1848\n # df = DataFrame(np.random.randn(10, 4),\n # index=[np.arange(5).repeat(2),\n # np.tile(np.arange(2), 5)])\n\n # self.assertRaises(Exception, store.put, 'foo', df, format='table')\n\n def test_append_with_diff_col_name_types_raises_value_error(self):\n df = DataFrame(np.random.randn(10, 1))\n df2 = DataFrame({'a': np.random.randn(10)})\n df3 = DataFrame({(1, 2): np.random.randn(10)})\n df4 = DataFrame({('1', 2): np.random.randn(10)})\n df5 = DataFrame({('1', 2, object): np.random.randn(10)})\n\n with ensure_clean_store(self.path) as store:\n name = 'df_%s' % tm.rands(10)\n store.append(name, df)\n\n for d in (df2, df3, df4, df5):\n with tm.assertRaises(ValueError):\n store.append(name, d)\n\n def test_query_with_nested_special_character(self):\n df = DataFrame({'a': ['a', 'a', 'c', 'b',\n 'test & test', 'c', 'b', 'e'],\n 'b': [1, 2, 3, 4, 5, 6, 7, 8]})\n expected = df[df.a == 'test & test']\n with ensure_clean_store(self.path) as store:\n store.append('test', df, format='table', data_columns=True)\n result = store.select('test', 'a = \"test & test\"')\n tm.assert_frame_equal(expected, result)\n\n def test_categorical(self):\n\n with ensure_clean_store(self.path) as store:\n\n # basic\n _maybe_remove(store, 's')\n s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[\n 'a', 'b', 'c', 'd'], ordered=False))\n store.append('s', s, format='table')\n result = store.select('s')\n tm.assert_series_equal(s, result)\n\n _maybe_remove(store, 's_ordered')\n s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[\n 'a', 'b', 'c', 'd'], ordered=True))\n store.append('s_ordered', s, format='table')\n result = store.select('s_ordered')\n tm.assert_series_equal(s, result)\n\n _maybe_remove(store, 'df')\n df = DataFrame({\"s\": s, \"vals\": [1, 2, 3, 4, 5, 6]})\n store.append('df', df, format='table')\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n # dtypes\n s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')\n store.append('si', s)\n result = store.select('si')\n tm.assert_series_equal(result, s)\n\n s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')\n store.append('si2', s)\n result = store.select('si2')\n tm.assert_series_equal(result, s)\n\n # multiple\n df2 = df.copy()\n df2['s2'] = Series(list('abcdefg')).astype('category')\n store.append('df2', df2)\n result = store.select('df2')\n tm.assert_frame_equal(result, df2)\n\n # make sure the metadata is ok\n self.assertTrue('/df2 ' in str(store))\n self.assertTrue('/df2/meta/values_block_0/meta' in str(store))\n self.assertTrue('/df2/meta/values_block_1/meta' in str(store))\n\n # unordered\n s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[\n 'a', 'b', 'c', 'd'], ordered=False))\n store.append('s2', s, format='table')\n result = store.select('s2')\n tm.assert_series_equal(result, s)\n\n # query\n store.append('df3', df, data_columns=['s'])\n expected = df[df.s.isin(['b', 'c'])]\n result = store.select('df3', where=['s in [\"b\",\"c\"]'])\n tm.assert_frame_equal(result, expected)\n\n expected = df[df.s.isin(['b', 'c'])]\n result = store.select('df3', where=['s = [\"b\",\"c\"]'])\n tm.assert_frame_equal(result, expected)\n\n expected = df[df.s.isin(['d'])]\n result = store.select('df3', where=['s in [\"d\"]'])\n tm.assert_frame_equal(result, expected)\n\n expected = df[df.s.isin(['f'])]\n result = store.select('df3', where=['s in 
[\"f\"]'])\n tm.assert_frame_equal(result, expected)\n\n # appending with same categories is ok\n store.append('df3', df)\n\n df = concat([df, df])\n expected = df[df.s.isin(['b', 'c'])]\n result = store.select('df3', where=['s in [\"b\",\"c\"]'])\n tm.assert_frame_equal(result, expected)\n\n # appending must have the same categories\n df3 = df.copy()\n df3['s'].cat.remove_unused_categories(inplace=True)\n\n self.assertRaises(ValueError, lambda: store.append('df3', df3))\n\n # remove\n # make sure meta data is removed (its a recursive removal so should\n # be)\n result = store.select('df3/meta/s/meta')\n self.assertIsNotNone(result)\n store.remove('df3')\n self.assertRaises(\n KeyError, lambda: store.select('df3/meta/s/meta'))\n\n def test_categorical_conversion(self):\n\n # GH13322\n # Check that read_hdf with categorical columns doesn't return rows if\n # where criteria isn't met.\n obsids = ['ESP_012345_6789', 'ESP_987654_3210']\n imgids = ['APF00006np', 'APF0001imm']\n data = [4.3, 9.8]\n\n # Test without categories\n df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))\n\n # We are expecting an empty DataFrame matching types of df\n expected = df.iloc[[], :]\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = read_hdf(path, 'df', where='obsids=B')\n tm.assert_frame_equal(result, expected)\n\n # Test with categories\n df.obsids = df.obsids.astype('category')\n df.imgids = df.imgids.astype('category')\n\n # We are expecting an empty DataFrame matching types of df\n expected = df.iloc[[], :]\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = read_hdf(path, 'df', where='obsids=B')\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_column_name(self):\n df = DataFrame(columns=[\"a\", \"a\"], data=[[0, 0]])\n\n with ensure_clean_path(self.path) as path:\n self.assertRaises(ValueError, df.to_hdf,\n path, 'df', format='fixed')\n\n df.to_hdf(path, 'df', format='table')\n other = read_hdf(path, 'df')\n\n tm.assert_frame_equal(df, other)\n self.assertTrue(df.equals(other))\n self.assertTrue(other.equals(df))\n\n def test_round_trip_equals(self):\n # GH 9330\n df = DataFrame({\"B\": [1, 2], \"A\": [\"x\", \"y\"]})\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table')\n other = read_hdf(path, 'df')\n tm.assert_frame_equal(df, other)\n self.assertTrue(df.equals(other))\n self.assertTrue(other.equals(df))\n\n def test_preserve_timedeltaindex_type(self):\n # GH9635\n # Storing TimedeltaIndexed DataFrames in fixed stores did not preserve\n # the type of the index.\n df = DataFrame(np.random.normal(size=(10, 5)))\n df.index = timedelta_range(\n start='0s', periods=10, freq='1s', name='example')\n\n with ensure_clean_store(self.path) as store:\n\n store['df'] = df\n assert_frame_equal(store['df'], df)\n\n def test_colums_multiindex_modified(self):\n # BUG: 7212\n # read_hdf store.select modified the passed columns parameters\n # when multi-indexed.\n\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n df.index.name = 'letters'\n df = df.set_index(keys='E', append=True)\n\n data_columns = df.index.names + df.columns.tolist()\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df',\n mode='a',\n append=True,\n data_columns=data_columns,\n index=False)\n cols2load = list('BCD')\n cols2load_original = list(cols2load)\n df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa\n 
self.assertTrue(cols2load_original == cols2load)\n\n def test_to_hdf_with_object_column_names(self):\n # GH9057\n # Writing HDF5 table format should only work for string-like\n # column types\n\n types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,\n tm.makeDateIndex, tm.makeTimedeltaIndex,\n tm.makePeriodIndex]\n types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]\n\n if compat.PY3:\n types_should_run.append(tm.makeUnicodeIndex)\n else:\n types_should_fail.append(tm.makeUnicodeIndex)\n\n for index in types_should_fail:\n df = DataFrame(np.random.randn(10, 2), columns=index(2))\n with ensure_clean_path(self.path) as path:\n with self.assertRaises(\n ValueError, msg=(\"cannot have non-object label \"\n \"DataIndexableCol\")):\n df.to_hdf(path, 'df', format='table', data_columns=True)\n\n for index in types_should_run:\n df = DataFrame(np.random.randn(10, 2), columns=index(2))\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = pd.read_hdf(\n path, 'df', where=\"index = [{0}]\".format(df.index[0]))\n assert(len(result))\n\n def test_read_hdf_open_store(self):\n # GH10330\n # No check for non-string path_or-buf, and no test of open store\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n df.index.name = 'letters'\n df = df.set_index(keys='E', append=True)\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='w')\n direct = read_hdf(path, 'df')\n store = HDFStore(path, mode='r')\n indirect = read_hdf(store, 'df')\n tm.assert_frame_equal(direct, indirect)\n self.assertTrue(store.is_open)\n store.close()\n\n def test_read_hdf_iterator(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n df.index.name = 'letters'\n df = df.set_index(keys='E', append=True)\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='w', format='t')\n direct = read_hdf(path, 'df')\n iterator = read_hdf(path, 'df', iterator=True)\n self.assertTrue(isinstance(iterator, TableIterator))\n indirect = next(iterator.__iter__())\n tm.assert_frame_equal(direct, indirect)\n iterator.store.close()\n\n def test_read_hdf_errors(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n self.assertRaises(IOError, read_hdf, path, 'key')\n df.to_hdf(path, 'df')\n store = HDFStore(path, mode='r')\n store.close()\n self.assertRaises(IOError, read_hdf, store, 'df')\n with open(path, mode='r') as store:\n self.assertRaises(NotImplementedError, read_hdf, store, 'df')\n\n def test_invalid_complib(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as path:\n self.assertRaises(ValueError, df.to_hdf, path,\n 'df', complib='blosc:zlib')\n # GH10443\n\n def test_read_nokey(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n # Categorical dtype not supported for \"fixed\" format. 
So no need\n # to test with that dtype in the dataframe here.\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='a')\n reread = read_hdf(path)\n assert_frame_equal(df, reread)\n df.to_hdf(path, 'df2', mode='a')\n self.assertRaises(ValueError, read_hdf, path)\n\n def test_read_nokey_table(self):\n # GH13231\n df = DataFrame({'i': range(5),\n 'c': Series(list('abacd'), dtype='category')})\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='a', format='table')\n reread = read_hdf(path)\n assert_frame_equal(df, reread)\n df.to_hdf(path, 'df2', mode='a', format='table')\n self.assertRaises(ValueError, read_hdf, path)\n\n def test_read_nokey_empty(self):\n with ensure_clean_path(self.path) as path:\n store = HDFStore(path)\n store.close()\n self.assertRaises(ValueError, read_hdf, path)\n\n def test_read_from_pathlib_path(self):\n\n # GH11773\n tm._skip_if_no_pathlib()\n\n from pathlib import Path\n\n expected = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as filename:\n path_obj = Path(filename)\n\n expected.to_hdf(path_obj, 'df', mode='a')\n actual = read_hdf(path_obj, 'df')\n\n tm.assert_frame_equal(expected, actual)\n\n def test_read_from_py_localpath(self):\n\n # GH11773\n tm._skip_if_no_localpath()\n\n from py.path import local as LocalPath\n\n expected = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as filename:\n path_obj = LocalPath(filename)\n\n expected.to_hdf(path_obj, 'df', mode='a')\n actual = read_hdf(path_obj, 'df')\n\n tm.assert_frame_equal(expected, actual)\n\n def test_query_long_float_literal(self):\n # GH 14241\n df = pd.DataFrame({'A': [1000000000.0009,\n 1000000000.0011,\n 1000000000.0015]})\n\n with ensure_clean_store(self.path) as store:\n store.append('test', df, format='table', data_columns=True)\n\n cutoff = 1000000000.0006\n result = store.select('test', \"A < %.4f\" % cutoff)\n self.assertTrue(result.empty)\n\n cutoff = 1000000000.0010\n result = store.select('test', \"A > %.4f\" % cutoff)\n expected = df.loc[[1, 2], :]\n tm.assert_frame_equal(expected, result)\n\n exact = 1000000000.0011\n result = store.select('test', 'A == %.4f' % exact)\n expected = df.loc[[1], :]\n tm.assert_frame_equal(expected, result)\n\n\nclass TestHDFComplexValues(Base):\n # GH10447\n\n def test_complex_fixed(self):\n df = DataFrame(np.random.rand(4, 5).astype(np.complex64),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n df = DataFrame(np.random.rand(4, 5).astype(np.complex128),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_table(self):\n df = DataFrame(np.random.rand(4, 5).astype(np.complex64),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n df = DataFrame(np.random.rand(4, 5).astype(np.complex128),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', mode='w')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_mixed_fixed(self):\n complex64 = 
np.array([1.0 + 1.0j, 1.0 + 1.0j,\n 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],\n dtype=np.complex128)\n df = DataFrame({'A': [1, 2, 3, 4],\n 'B': ['a', 'b', 'c', 'd'],\n 'C': complex64,\n 'D': complex128,\n 'E': [1.0, 2.0, 3.0, 4.0]},\n index=list('abcd'))\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_mixed_table(self):\n complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,\n 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],\n dtype=np.complex128)\n df = DataFrame({'A': [1, 2, 3, 4],\n 'B': ['a', 'b', 'c', 'd'],\n 'C': complex64,\n 'D': complex128,\n 'E': [1.0, 2.0, 3.0, 4.0]},\n index=list('abcd'))\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=['A', 'B'])\n result = store.select('df', where=Term('A>2'))\n assert_frame_equal(df.loc[df.A > 2], result)\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_across_dimensions_fixed(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])\n s = Series(complex128, index=list('abcd'))\n df = DataFrame({'A': s, 'B': s})\n p = Panel({'One': df, 'Two': df})\n\n objs = [s, df, p]\n comps = [tm.assert_series_equal, tm.assert_frame_equal,\n tm.assert_panel_equal]\n for obj, comp in zip(objs, comps):\n with ensure_clean_path(self.path) as path:\n obj.to_hdf(path, 'obj', format='fixed')\n reread = read_hdf(path, 'obj')\n comp(obj, reread)\n\n def test_complex_across_dimensions(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])\n s = Series(complex128, index=list('abcd'))\n df = DataFrame({'A': s, 'B': s})\n p = Panel({'One': df, 'Two': df})\n\n with compat_assert_produces_warning(FutureWarning):\n p4d = pd.Panel4D({'i': p, 'ii': p})\n\n objs = [df, p, p4d]\n comps = [tm.assert_frame_equal, tm.assert_panel_equal,\n tm.assert_panel4d_equal]\n for obj, comp in zip(objs, comps):\n with ensure_clean_path(self.path) as path:\n obj.to_hdf(path, 'obj', format='table')\n reread = read_hdf(path, 'obj')\n comp(obj, reread)\n\n def test_complex_indexing_error(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],\n dtype=np.complex128)\n df = DataFrame({'A': [1, 2, 3, 4],\n 'B': ['a', 'b', 'c', 'd'],\n 'C': complex128},\n index=list('abcd'))\n with ensure_clean_store(self.path) as store:\n self.assertRaises(TypeError, store.append,\n 'df', df, data_columns=['C'])\n\n def test_complex_series_error(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])\n s = Series(complex128, index=list('abcd'))\n\n with ensure_clean_path(self.path) as path:\n self.assertRaises(TypeError, s.to_hdf, path, 'obj', format='t')\n\n with ensure_clean_path(self.path) as path:\n s.to_hdf(path, 'obj', format='t', index=False)\n reread = read_hdf(path, 'obj')\n tm.assert_series_equal(s, reread)\n\n def test_complex_append(self):\n df = DataFrame({'a': np.random.randn(100).astype(np.complex128),\n 'b': np.random.randn(100)})\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=['b'])\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(pd.concat([df, df], 0), result)\n\n\nclass TestTimezones(Base, tm.TestCase):\n\n def 
_compare_with_tz(self, a, b):\n tm.assert_frame_equal(a, b)\n\n # compare the zones on each element\n for c in a.columns:\n for i in a.index:\n a_e = a.loc[i, c]\n b_e = b.loc[i, c]\n if not (a_e == b_e and a_e.tz == b_e.tz):\n raise AssertionError(\n \"invalid tz comparsion [%s] [%s]\" % (a_e, b_e))\n\n def test_append_with_timezones_dateutil(self):\n\n from datetime import timedelta\n tm._skip_if_no_dateutil()\n\n # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows\n # filename issues.\n from pandas.tslib import maybe_get_tz\n gettz = lambda x: maybe_get_tz('dateutil/' + x)\n\n # as columns\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(\n 'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))\n\n store.append('df_tz', df, data_columns=['A'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # select with tz aware\n expected = df[df.A >= df.A[3]]\n result = store.select('df_tz', where=Term('A>=df.A[3]'))\n self._compare_with_tz(result, expected)\n\n # ensure we include dates in DST and STD time here.\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A=Timestamp('20130102',\n tz=gettz('US/Eastern')),\n B=Timestamp('20130603',\n tz=gettz('US/Eastern'))),\n index=range(5))\n store.append('df_tz', df)\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n df = DataFrame(dict(A=Timestamp('20130102',\n tz=gettz('US/Eastern')),\n B=Timestamp('20130102', tz=gettz('EET'))),\n index=range(5))\n self.assertRaises(ValueError, store.append, 'df_tz', df)\n\n # this is ok\n _maybe_remove(store, 'df_tz')\n store.append('df_tz', df, data_columns=['A', 'B'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # can't append with diff timezone\n df = DataFrame(dict(A=Timestamp('20130102',\n tz=gettz('US/Eastern')),\n B=Timestamp('20130102', tz=gettz('CET'))),\n index=range(5))\n self.assertRaises(ValueError, store.append, 'df_tz', df)\n\n # as index\n with ensure_clean_store(self.path) as store:\n\n # GH 4098 example\n df = DataFrame(dict(A=Series(lrange(3), index=date_range(\n '2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))\n\n _maybe_remove(store, 'df')\n store.put('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n def test_append_with_timezones_pytz(self):\n\n from datetime import timedelta\n\n # as columns\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',\n tz='US/Eastern') +\n timedelta(hours=1) * i\n for i in range(5)]))\n store.append('df_tz', df, data_columns=['A'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # select with tz aware\n self._compare_with_tz(store.select(\n 'df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])\n\n _maybe_remove(store, 'df_tz')\n # ensure we include dates in DST and STD time here.\n df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130603', tz='US/Eastern')),\n index=range(5))\n store.append('df_tz', df)\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n 
B=Timestamp('20130102', tz='EET')),\n index=range(5))\n self.assertRaises(ValueError, store.append, 'df_tz', df)\n\n # this is ok\n _maybe_remove(store, 'df_tz')\n store.append('df_tz', df, data_columns=['A', 'B'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # can't append with diff timezone\n df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130102', tz='CET')),\n index=range(5))\n self.assertRaises(ValueError, store.append, 'df_tz', df)\n\n # as index\n with ensure_clean_store(self.path) as store:\n\n # GH 4098 example\n df = DataFrame(dict(A=Series(lrange(3), index=date_range(\n '2000-1-1', periods=3, freq='H', tz='US/Eastern'))))\n\n _maybe_remove(store, 'df')\n store.put('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n def test_tseries_select_index_column(self):\n # GH7777\n # selecting a UTC datetimeindex column did\n # not preserve UTC tzinfo set before storing\n\n # check that no tz still works\n rng = date_range('1/1/2000', '1/30/2000')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store.append('frame', frame)\n result = store.select_column('frame', 'index')\n self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)\n\n # check utc\n rng = date_range('1/1/2000', '1/30/2000', tz='UTC')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store.append('frame', frame)\n result = store.select_column('frame', 'index')\n self.assertEqual(rng.tz, result.dt.tz)\n\n # double check non-utc\n rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store.append('frame', frame)\n result = store.select_column('frame', 'index')\n self.assertEqual(rng.tz, result.dt.tz)\n\n def test_timezones_fixed(self):\n with ensure_clean_store(self.path) as store:\n\n # index\n rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')\n df = DataFrame(np.random.randn(len(rng), 4), index=rng)\n store['df'] = df\n result = store['df']\n assert_frame_equal(result, df)\n\n # as data\n # GH11411\n _maybe_remove(store, 'df')\n df = DataFrame({'A': rng,\n 'B': rng.tz_convert('UTC').tz_localize(None),\n 'C': rng.tz_convert('CET'),\n 'D': range(len(rng))}, index=rng)\n store['df'] = df\n result = store['df']\n assert_frame_equal(result, df)\n\n def test_fixed_offset_tz(self):\n rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n self.assert_index_equal(recons.index, rng)\n self.assertEqual(rng.tz, recons.index.tz)\n\n def test_store_timezone(self):\n # GH2852\n # issue storing datetime.date with a timezone as it resets when read\n # back in a new timezone\n\n # original method\n with ensure_clean_store(self.path) as store:\n\n today = datetime.date(2013, 9, 10)\n df = DataFrame([1, 2, 3], index=[today, today, today])\n store['obj1'] = df\n result = store['obj1']\n assert_frame_equal(result, df)\n\n # with tz setting\n with ensure_clean_store(self.path) as store:\n\n with set_timezone('EST5EDT'):\n today = datetime.date(2013, 9, 10)\n df = DataFrame([1, 2, 3], index=[today, 
today, today])\n store['obj1'] = df\n\n with set_timezone('CST6CDT'):\n result = store['obj1']\n\n assert_frame_equal(result, df)\n\n def test_legacy_datetimetz_object(self):\n # legacy from < 0.17.0\n # 8260\n expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130603', tz='CET')),\n index=range(5))\n with ensure_clean_store(\n tm.get_data_path('legacy_hdf/datetimetz_object.h5'),\n mode='r') as store:\n result = store['df']\n assert_frame_equal(result, expected)\n\n def test_dst_transitions(self):\n # make sure we are not failing on transaitions\n with ensure_clean_store(self.path) as store:\n times = pd.date_range(\"2013-10-26 23:00\", \"2013-10-27 01:00\",\n tz=\"Europe/London\",\n freq=\"H\",\n ambiguous='infer')\n\n for i in [times, times + pd.Timedelta('10min')]:\n _maybe_remove(store, 'df')\n df = DataFrame({'A': range(len(i)), 'B': i}, index=i)\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n\ndef _test_sort(obj):\n if isinstance(obj, DataFrame):\n return obj.reindex(sorted(obj.index))\n elif isinstance(obj, Panel):\n return obj.reindex(major=sorted(obj.major_axis))\n else:\n raise ValueError('type not supported here')\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
"from __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_allclose, assert_\nfrom scipy._lib._numpy_compat import suppress_warnings\nfrom pytest import raises as assert_raises\nimport pytest\n\nfrom scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,\n make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider,\n sproot, splint, insert)\nimport scipy.linalg as sl\n\nfrom scipy.interpolate._bsplines import _not_a_knot, _augknt\nimport scipy.interpolate._fitpack_impl as _impl\nfrom scipy.interpolate._fitpack import _splint\n\n\nclass TestBSpline(object):\n\n def test_ctor(self):\n # knots should be an ordered 1D array of finite real numbers\n assert_raises((TypeError, ValueError), BSpline,\n **dict(t=[1, 1.j], c=[1.], k=0))\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))\n assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))\n assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))\n assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))\n\n # for n+k+1 knots and degree k need at least n coefficients\n assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))\n assert_raises(ValueError, BSpline,\n **dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))\n\n # non-integer orders\n assert_raises(ValueError, BSpline,\n **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=\"cubic\"))\n assert_raises(ValueError, BSpline,\n **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))\n\n # basic interval cannot have measure zero (here: [1..1])\n assert_raises(ValueError, BSpline,\n **dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))\n\n # tck vs self.tck\n n, k = 11, 3\n t = np.arange(n+k+1)\n c = np.random.random(n)\n b = BSpline(t, c, k)\n\n assert_allclose(t, b.t)\n assert_allclose(c, b.c)\n assert_equal(k, b.k)\n\n def test_tck(self):\n b = _make_random_spline()\n tck = b.tck\n\n assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)\n assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)\n assert_equal(b.k, tck[2])\n\n # b.tck is read-only\n try:\n b.tck = 'foo'\n except AttributeError:\n pass\n except:\n raise AssertionError(\"AttributeError not raised.\")\n\n def test_degree_0(self):\n xx = np.linspace(0, 1, 10)\n\n b = BSpline(t=[0, 1], c=[3.], k=0)\n assert_allclose(b(xx), 3)\n\n b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)\n assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))\n\n def test_degree_1(self):\n t = [0, 1, 2, 3, 4]\n c = [1, 2, 3]\n k = 1\n b = BSpline(t, c, k)\n\n x = np.linspace(1, 3, 50)\n assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),\n b(x), atol=1e-14)\n assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)\n\n def test_bernstein(self):\n # a special knot vector: Bernstein polynomials\n k = 3\n t = np.asarray([0]*(k+1) + [1]*(k+1))\n c = np.asarray([1., 2., 3., 4.])\n bp = BPoly(c.reshape(-1, 1), [0, 1])\n bspl = BSpline(t, c, k)\n\n xx = np.linspace(-1., 2., 10)\n assert_allclose(bp(xx, extrapolate=True),\n bspl(xx, extrapolate=True), atol=1e-14)\n assert_allclose(splev(xx, (t, c, k)),\n bspl(xx), atol=1e-14)\n\n def test_rndm_naive_eval(self):\n # test random coefficient spline *on the base interval*,\n # t[k] <= x < t[-k-1]\n b = _make_random_spline()\n t, c, k = b.tck\n xx = np.linspace(t[k], t[-k-1], 50)\n y_b = b(xx)\n\n y_n = [_naive_eval(x, t, c, k) for x in xx]\n assert_allclose(y_b, y_n, atol=1e-14)\n\n y_n2 = [_naive_eval_2(x, t, c, k) for x in 
xx]\n assert_allclose(y_b, y_n2, atol=1e-14)\n\n def test_rndm_splev(self):\n b = _make_random_spline()\n t, c, k = b.tck\n xx = np.linspace(t[k], t[-k-1], 50)\n assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)\n\n def test_rndm_splrep(self):\n np.random.seed(1234)\n x = np.sort(np.random.random(20))\n y = np.random.random(20)\n\n tck = splrep(x, y)\n b = BSpline(*tck)\n\n t, k = b.t, b.k\n xx = np.linspace(t[k], t[-k-1], 80)\n assert_allclose(b(xx), splev(xx, tck), atol=1e-14)\n\n def test_rndm_unity(self):\n b = _make_random_spline()\n b.c = np.ones_like(b.c)\n xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)\n assert_allclose(b(xx), 1.)\n\n def test_vectorization(self):\n n, k = 22, 3\n t = np.sort(np.random.random(n))\n c = np.random.random(size=(n, 6, 7))\n b = BSpline(t, c, k)\n tm, tp = t[k], t[-k-1]\n xx = tm + (tp - tm) * np.random.random((3, 4, 5))\n assert_equal(b(xx).shape, (3, 4, 5, 6, 7))\n\n def test_len_c(self):\n # for n+k+1 knots, only first n coefs are used.\n # and BTW this is consistent with FITPACK\n n, k = 33, 3\n t = np.sort(np.random.random(n+k+1))\n c = np.random.random(n)\n\n # pad coefficients with random garbage\n c_pad = np.r_[c, np.random.random(k+1)]\n\n b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)\n\n dt = t[-1] - t[0]\n xx = np.linspace(t[0] - dt, t[-1] + dt, 50)\n assert_allclose(b(xx), b_pad(xx), atol=1e-14)\n assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)\n assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)\n\n def test_endpoints(self):\n # base interval is closed\n b = _make_random_spline()\n t, _, k = b.tck\n tm, tp = t[k], t[-k-1]\n for extrap in (True, False):\n assert_allclose(b([tm, tp], extrap),\n b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)\n\n def test_continuity(self):\n # assert continuity at internal knots\n b = _make_random_spline()\n t, _, k = b.tck\n assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),\n atol=1e-9)\n\n def test_extrap(self):\n b = _make_random_spline()\n t, c, k = b.tck\n dt = t[-1] - t[0]\n xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)\n mask = (t[k] < xx) & (xx < t[-k-1])\n\n # extrap has no effect within the base interval\n assert_allclose(b(xx[mask], extrapolate=True),\n b(xx[mask], extrapolate=False))\n\n # extrapolated values agree with FITPACK\n assert_allclose(b(xx, extrapolate=True),\n splev(xx, (t, c, k), ext=0))\n\n def test_default_extrap(self):\n # BSpline defaults to extrapolate=True\n b = _make_random_spline()\n t, _, k = b.tck\n xx = [t[0] - 1, t[-1] + 1]\n yy = b(xx)\n assert_(not np.all(np.isnan(yy)))\n\n def test_periodic_extrap(self):\n np.random.seed(1234)\n t = np.sort(np.random.random(8))\n c = np.random.random(4)\n k = 3\n b = BSpline(t, c, k, extrapolate='periodic')\n n = t.size - (k + 1)\n\n dt = t[-1] - t[0]\n xx = np.linspace(t[k] - dt, t[n] + dt, 50)\n xy = t[k] + (xx - t[k]) % (t[n] - t[k])\n assert_allclose(b(xx), splev(xy, (t, c, k)))\n\n # Direct check\n xx = [-1, 0, 0.5, 1]\n xy = t[k] + (xx - t[k]) % (t[n] - t[k])\n assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True))\n\n def test_ppoly(self):\n b = _make_random_spline()\n t, c, k = b.tck\n pp = PPoly.from_spline((t, c, k))\n\n xx = np.linspace(t[k], t[-k], 100)\n assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)\n\n def test_derivative_rndm(self):\n b = _make_random_spline()\n t, c, k = b.tck\n xx = np.linspace(t[0], t[-1], 50)\n xx = np.r_[xx, t]\n\n for der in range(1, k+1):\n yd = splev(xx, (t, c, k), der=der)\n assert_allclose(yd, b(xx, nu=der), atol=1e-14)\n\n # higher 
derivatives all vanish\n assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)\n\n def test_derivative_jumps(self):\n # example from de Boor, Chap IX, example (24)\n # NB: knots augmented & corresp coefs are zeroed out\n # in agreement with the convention (29)\n k = 2\n t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]\n np.random.seed(1234)\n c = np.r_[0, 0, np.random.random(5), 0, 0]\n b = BSpline(t, c, k)\n\n # b is continuous at x != 6 (triple knot)\n x = np.asarray([1, 3, 4, 6])\n assert_allclose(b(x[x != 6] - 1e-10),\n b(x[x != 6] + 1e-10))\n assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))\n\n # 1st derivative jumps at double knots, 1 & 6:\n x0 = np.asarray([3, 4])\n assert_allclose(b(x0 - 1e-10, nu=1),\n b(x0 + 1e-10, nu=1))\n x1 = np.asarray([1, 6])\n assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),\n b(x1 + 1e-10, nu=1))))\n\n # 2nd derivative is not guaranteed to be continuous either\n assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),\n b(x + 1e-10, nu=2))))\n\n def test_basis_element_quadratic(self):\n xx = np.linspace(-1, 4, 20)\n b = BSpline.basis_element(t=[0, 1, 2, 3])\n assert_allclose(b(xx),\n splev(xx, (b.t, b.c, b.k)), atol=1e-14)\n assert_allclose(b(xx),\n B_0123(xx), atol=1e-14)\n\n b = BSpline.basis_element(t=[0, 1, 1, 2])\n xx = np.linspace(0, 2, 10)\n assert_allclose(b(xx),\n np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)\n\n def test_basis_element_rndm(self):\n b = _make_random_spline()\n t, c, k = b.tck\n xx = np.linspace(t[k], t[-k-1], 20)\n assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)\n\n def test_cmplx(self):\n b = _make_random_spline()\n t, c, k = b.tck\n cc = c * (1. + 3.j)\n\n b = BSpline(t, cc, k)\n b_re = BSpline(t, b.c.real, k)\n b_im = BSpline(t, b.c.imag, k)\n\n xx = np.linspace(t[k], t[-k-1], 20)\n assert_allclose(b(xx).real, b_re(xx), atol=1e-14)\n assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)\n\n def test_nan(self):\n # nan in, nan out.\n b = BSpline.basis_element([0, 1, 1, 2])\n assert_(np.isnan(b(np.nan)))\n\n def test_derivative_method(self):\n b = _make_random_spline(k=5)\n t, c, k = b.tck\n b0 = BSpline(t, c, k)\n xx = np.linspace(t[k], t[-k-1], 20)\n for j in range(1, k):\n b = b.derivative()\n assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)\n\n def test_antiderivative_method(self):\n b = _make_random_spline()\n t, c, k = b.tck\n xx = np.linspace(t[k], t[-k-1], 20)\n assert_allclose(b.antiderivative().derivative()(xx),\n b(xx), atol=1e-14, rtol=1e-14)\n\n # repeat with n-D array for c\n c = np.c_[c, c, c]\n c = np.dstack((c, c))\n b = BSpline(t, c, k)\n assert_allclose(b.antiderivative().derivative()(xx),\n b(xx), atol=1e-14, rtol=1e-14)\n\n def test_integral(self):\n b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x\n assert_allclose(b.integrate(0, 1), 0.5)\n assert_allclose(b.integrate(1, 0), -1 * 0.5)\n assert_allclose(b.integrate(1, 0), -0.5)\n\n # extrapolate or zeros outside of [0, 2]; default is yes\n assert_allclose(b.integrate(-1, 1), 0)\n assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)\n assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)\n assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)\n\n # Test ``_fitpack._splint()``\n t, c, k = b.tck\n assert_allclose(b.integrate(1, -1, extrapolate=False),\n _splint(t, c, k, 1, -1)[0])\n\n # Test ``extrapolate='periodic'``.\n b.extrapolate = 'periodic'\n i = b.antiderivative()\n period_int = i(2) - i(0)\n\n assert_allclose(b.integrate(0, 2), period_int)\n assert_allclose(b.integrate(2, 0), -1 * period_int)\n 
assert_allclose(b.integrate(-9, -7), period_int)\n assert_allclose(b.integrate(-8, -4), 2 * period_int)\n\n assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))\n assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))\n assert_allclose(b.integrate(1.5 + 12, 3 + 12),\n i(1) - i(0) + i(2) - i(1.5))\n assert_allclose(b.integrate(1.5, 3 + 12),\n i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)\n\n assert_allclose(b.integrate(0, -1), i(0) - i(1))\n assert_allclose(b.integrate(-9, -10), i(0) - i(1))\n assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)\n\n def test_integrate_ppoly(self):\n # test .integrate method to be consistent with PPoly.integrate\n x = [0, 1, 2, 3, 4]\n b = make_interp_spline(x, x)\n b.extrapolate = 'periodic'\n p = PPoly.from_spline(b)\n\n for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:\n assert_allclose(b.integrate(x0, x1),\n p.integrate(x0, x1))\n\n def test_subclassing(self):\n # classmethods should not decay to the base class\n class B(BSpline):\n pass\n\n b = B.basis_element([0, 1, 2, 2])\n assert_equal(b.__class__, B)\n assert_equal(b.derivative().__class__, B)\n assert_equal(b.antiderivative().__class__, B)\n\n def test_axis(self):\n n, k = 22, 3\n t = np.linspace(0, 1, n + k + 1)\n sh0 = [6, 7, 8]\n for axis in range(4):\n sh = sh0[:]\n sh.insert(axis, n) # [22, 6, 7, 8] etc\n c = np.random.random(size=sh)\n b = BSpline(t, c, k, axis=axis)\n assert_equal(b.c.shape,\n [sh[axis],] + sh[:axis] + sh[axis+1:])\n\n xp = np.random.random((3, 4, 5))\n assert_equal(b(xp).shape,\n sh[:axis] + list(xp.shape) + sh[axis+1:])\n\n #0 <= axis < c.ndim\n for ax in [-1, len(sh)+1]:\n assert_raises(ValueError, BSpline, **dict(t=t, c=c, k=k, axis=ax))\n\n # derivative, antiderivative keeps the axis\n for b1 in [BSpline(t, c, k, axis=axis).derivative(),\n BSpline(t, c, k, axis=axis).derivative(2),\n BSpline(t, c, k, axis=axis).antiderivative(),\n BSpline(t, c, k, axis=axis).antiderivative(2)]:\n assert_equal(b1.axis, b.axis)\n\n\ndef test_knots_multiplicity():\n # Take a spline w/ random coefficients, throw in knots of varying\n # multiplicity.\n\n def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):\n # check evaluations against FITPACK, incl extrapolations\n t, c, k = b.tck\n x = np.unique(t)\n x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:1]), t[-1]+0.1]\n assert_allclose(splev(x, (t, c, k), der), b(x, der),\n atol=atol, rtol=rtol, err_msg='der = %s k = %s' % (der, b.k))\n\n # test loop itself\n # [the index `j` is for interpreting the traceback in case of a failure]\n for k in [1, 2, 3, 4, 5]:\n b = _make_random_spline(k=k)\n for j, b1 in enumerate(_make_multiples(b)):\n check_splev(b1, j)\n for der in range(1, k+1):\n check_splev(b1, j, der, 1e-12, 1e-12)\n\n\n### stolen from @pv, verbatim\ndef _naive_B(x, k, i, t):\n \"\"\"\n Naive way to compute B-spline basis functions. Useful only for testing!\n computes B(x; t[i],..., t[i+k+1])\n \"\"\"\n if k == 0:\n return 1.0 if t[i] <= x < t[i+1] else 0.0\n if t[i+k] == t[i]:\n c1 = 0.0\n else:\n c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)\n if t[i+k+1] == t[i+1]:\n c2 = 0.0\n else:\n c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)\n return (c1 + c2)\n\n\n### stolen from @pv, verbatim\ndef _naive_eval(x, t, c, k):\n \"\"\"\n Naive B-spline evaluation. 
Useful only for testing!\n \"\"\"\n if x == t[k]:\n i = k\n else:\n i = np.searchsorted(t, x) - 1\n assert t[i] <= x <= t[i+1]\n assert i >= k and i < len(t) - k\n return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))\n\n\ndef _naive_eval_2(x, t, c, k):\n \"\"\"Naive B-spline evaluation, another way.\"\"\"\n n = len(t) - (k+1)\n assert n >= k+1\n assert len(c) >= n\n assert t[k] <= x <= t[n]\n return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))\n\n\ndef _sum_basis_elements(x, t, c, k):\n n = len(t) - (k+1)\n assert n >= k+1\n assert len(c) >= n\n s = 0.\n for i in range(n):\n b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)\n s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements\n return s\n\n\ndef B_012(x):\n \"\"\" A linear B-spline function B(x | 0, 1, 2).\"\"\"\n x = np.atleast_1d(x)\n return np.piecewise(x, [(x < 0) | (x > 2),\n (x >= 0) & (x < 1),\n (x >= 1) & (x <= 2)],\n [lambda x: 0., lambda x: x, lambda x: 2.-x])\n\n\ndef B_0123(x, der=0):\n \"\"\"A quadratic B-spline function B(x | 0, 1, 2, 3).\"\"\"\n x = np.atleast_1d(x)\n conds = [x < 1, (x > 1) & (x < 2), x > 2]\n if der == 0:\n funcs = [lambda x: x*x/2.,\n lambda x: 3./4 - (x-3./2)**2,\n lambda x: (3.-x)**2 / 2]\n elif der == 2:\n funcs = [lambda x: 1.,\n lambda x: -2.,\n lambda x: 1.]\n else:\n raise ValueError('never be here: der=%s' % der)\n pieces = np.piecewise(x, conds, funcs)\n return pieces\n\n\ndef _make_random_spline(n=35, k=3):\n np.random.seed(123)\n t = np.sort(np.random.random(n+k+1))\n c = np.random.random(n)\n return BSpline.construct_fast(t, c, k)\n\n\ndef _make_multiples(b):\n \"\"\"Increase knot multiplicity.\"\"\"\n c, k = b.c, b.k\n\n t1 = b.t.copy()\n t1[17:19] = t1[17]\n t1[22] = t1[21]\n yield BSpline(t1, c, k)\n\n t1 = b.t.copy()\n t1[:k+1] = t1[0]\n yield BSpline(t1, c, k)\n\n t1 = b.t.copy()\n t1[-k-1:] = t1[-1]\n yield BSpline(t1, c, k)\n\n\nclass TestInterop(object):\n #\n # Test that FITPACK-based spl* functions can deal with BSpline objects\n #\n def setup_method(self):\n xx = np.linspace(0, 4.*np.pi, 41)\n yy = np.cos(xx)\n b = make_interp_spline(xx, yy)\n self.tck = (b.t, b.c, b.k)\n self.xx, self.yy, self.b = xx, yy, b\n\n self.xnew = np.linspace(0, 4.*np.pi, 21)\n\n c2 = np.c_[b.c, b.c, b.c]\n self.c2 = np.dstack((c2, c2))\n self.b2 = BSpline(b.t, self.c2, b.k)\n\n def test_splev(self):\n xnew, b, b2 = self.xnew, self.b, self.b2\n\n # check that splev works with 1D array of coefficients\n # for array and scalar `x`\n assert_allclose(splev(xnew, b),\n b(xnew), atol=1e-15, rtol=1e-15)\n assert_allclose(splev(xnew, b.tck),\n b(xnew), atol=1e-15, rtol=1e-15)\n assert_allclose([splev(x, b) for x in xnew],\n b(xnew), atol=1e-15, rtol=1e-15)\n\n # With n-D coefficients, there's a quirck:\n # splev(x, BSpline) is equivalent to BSpline(x)\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning,\n \"Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.\")\n assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15)\n\n # However, splev(x, BSpline.tck) needs some transposes. This is because\n # BSpline interpolates along the first axis, while the legacy FITPACK\n # wrapper does list(map(...)) which effectively interpolates along the\n # last axis. 
Like so:\n sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0)\n cc = b2.c.transpose(sh)\n tck = (b2.t, cc, b2.k)\n assert_allclose(splev(xnew, tck),\n b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)\n\n def test_splrep(self):\n x, y = self.xx, self.yy\n # test that \"new\" splrep is equivalent to _impl.splrep\n tck = splrep(x, y)\n t, c, k = _impl.splrep(x, y)\n assert_allclose(tck[0], t, atol=1e-15)\n assert_allclose(tck[1], c, atol=1e-15)\n assert_equal(tck[2], k)\n\n # also cover the `full_output=True` branch\n tck_f, _, _, _ = splrep(x, y, full_output=True)\n assert_allclose(tck_f[0], t, atol=1e-15)\n assert_allclose(tck_f[1], c, atol=1e-15)\n assert_equal(tck_f[2], k)\n\n # test that the result of splrep roundtrips with splev:\n # evaluate the spline on the original `x` points\n yy = splev(x, tck)\n assert_allclose(y, yy, atol=1e-15)\n\n # ... and also it roundtrips if wrapped in a BSpline\n b = BSpline(*tck)\n assert_allclose(y, b(x), atol=1e-15)\n\n def test_splrep_errors(self):\n # test that both \"old\" and \"new\" splrep raise for an n-D ``y`` array\n # with n > 1\n x, y = self.xx, self.yy\n y2 = np.c_[y, y]\n msg = \"failed in converting 3rd argument `y' of dfitpack.curfit to C/Fortran array\"\n with assert_raises(Exception, message=msg):\n splrep(x, y2)\n with assert_raises(Exception, message=msg):\n _impl.splrep(x, y2)\n\n # input below minimum size\n with assert_raises(TypeError, message=\"m > k must hold\"):\n splrep(x[:3], y[:3])\n with assert_raises(TypeError, message=\"m > k must hold\"):\n _impl.splrep(x[:3], y[:3])\n\n def test_splprep(self):\n x = np.arange(15).reshape((3, 5))\n b, u = splprep(x)\n tck, u1 = _impl.splprep(x)\n\n # test the roundtrip with splev for both \"old\" and \"new\" output\n assert_allclose(u, u1, atol=1e-15)\n assert_allclose(splev(u, b), x, atol=1e-15)\n assert_allclose(splev(u, tck), x, atol=1e-15)\n\n # cover the ``full_output=True`` branch\n (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)\n assert_allclose(u, u_f, atol=1e-15)\n assert_allclose(splev(u_f, b_f), x, atol=1e-15)\n\n def test_splprep_errors(self):\n # test that both \"old\" and \"new\" code paths raise for x.ndim > 2\n x = np.arange(3*4*5).reshape((3, 4, 5))\n with assert_raises(ValueError, message=\"too many values to unpack\"):\n splprep(x)\n with assert_raises(ValueError, message=\"too many values to unpack\"):\n _impl.splprep(x)\n\n # input below minimum size\n x = np.linspace(0, 40, num=3)\n with assert_raises(TypeError, message=\"m > k must hold\"):\n splprep([x])\n with assert_raises(TypeError, message=\"m > k must hold\"):\n _impl.splprep([x])\n\n # automatically calculated parameters are non-increasing\n # see gh-7589\n x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]\n with assert_raises(ValueError, message=\"Invalid inputs\"):\n splprep([x])\n with assert_raises(ValueError, message=\"Invalid inputs\"):\n _impl.splprep([x])\n\n # given non-increasing parameter values u\n x = [1, 3, 2, 4]\n u = [0, 0.3, 0.2, 1]\n with assert_raises(ValueError, message=\"Invalid inputs\"):\n splprep(*[[x], None, u])\n\n def test_sproot(self):\n b, b2 = self.b, self.b2\n roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi\n # sproot accepts a BSpline obj w/ 1D coef array\n assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)\n assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)\n\n # ... and deals with trailing dimensions if coef array is n-D\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning,\n \"Calling sproot.. 
with BSpline objects with c.ndim > 1 is not recommended.\")\n r = sproot(b2, mest=50)\n r = np.asarray(r)\n\n assert_equal(r.shape, (3, 2, 4))\n assert_allclose(r - roots, 0, atol=1e-12)\n\n # and legacy behavior is preserved for a tck tuple w/ n-D coef\n c2r = b2.c.transpose(1, 2, 0)\n rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))\n assert_equal(rr.shape, (3, 2, 4))\n assert_allclose(rr - roots, 0, atol=1e-12)\n\n def test_splint(self):\n # test that splint accepts BSpline objects\n b, b2 = self.b, self.b2\n assert_allclose(splint(0, 1, b), \n splint(0, 1, b.tck), atol=1e-14)\n assert_allclose(splint(0, 1, b),\n b.integrate(0, 1), atol=1e-14)\n\n # ... and deals with n-D arrays of coefficients\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning,\n \"Calling splint.. with BSpline objects with c.ndim > 1 is not recommended.\")\n assert_allclose(splint(0, 1, b2), b2.integrate(0, 1), atol=1e-14)\n\n # and the legacy behavior is preserved for a tck tuple w/ n-D coef\n c2r = b2.c.transpose(1, 2, 0)\n integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))\n assert_equal(integr.shape, (3, 2))\n assert_allclose(integr,\n splint(0, 1, b), atol=1e-14)\n\n def test_splder(self):\n for b in [self.b, self.b2]:\n # pad the c array (FITPACK convention)\n ct = len(b.t) - len(b.c)\n if ct > 0:\n b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]\n\n for n in [1, 2, 3]:\n bd = splder(b)\n tck_d = _impl.splder((b.t, b.c, b.k))\n assert_allclose(bd.t, tck_d[0], atol=1e-15)\n assert_allclose(bd.c, tck_d[1], atol=1e-15)\n assert_equal(bd.k, tck_d[2])\n assert_(isinstance(bd, BSpline))\n assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out\n\n def test_splantider(self):\n for b in [self.b, self.b2]:\n # pad the c array (FITPACK convention)\n ct = len(b.t) - len(b.c)\n if ct > 0:\n b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]\n\n for n in [1, 2, 3]:\n bd = splantider(b)\n tck_d = _impl.splantider((b.t, b.c, b.k))\n assert_allclose(bd.t, tck_d[0], atol=1e-15)\n assert_allclose(bd.c, tck_d[1], atol=1e-15)\n assert_equal(bd.k, tck_d[2])\n assert_(isinstance(bd, BSpline))\n assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out\n\n def test_insert(self):\n b, b2, xx = self.b, self.b2, self.xx\n\n j = b.t.size // 2\n tn = 0.5*(b.t[j] + b.t[j+1])\n\n bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))\n assert_allclose(splev(xx, bn),\n splev(xx, tck_n), atol=1e-15)\n assert_(isinstance(bn, BSpline))\n assert_(isinstance(tck_n, tuple)) # back-compat: tck in, tck out\n\n # for n-D array of coefficients, BSpline.c needs to be transposed\n # after that, the results are equivalent.\n sh = tuple(range(b2.c.ndim))\n c_ = b2.c.transpose(sh[1:] + (0,))\n tck_n2 = insert(tn, (b2.t, c_, b2.k))\n\n bn2 = insert(tn, b2)\n\n # need a transpose for comparing the results, cf test_splev\n assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),\n bn2(xx), atol=1e-15)\n assert_(isinstance(bn2, BSpline))\n assert_(isinstance(tck_n2, tuple)) # back-compat: tck in, tck out\n\n\nclass TestInterp(object):\n #\n # Test basic ways of constructing interpolating splines.\n #\n xx = np.linspace(0., 2.*np.pi)\n yy = np.sin(xx)\n\n def test_order_0(self):\n b = make_interp_spline(self.xx, self.yy, k=0)\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n\n def test_linear(self):\n b = make_interp_spline(self.xx, self.yy, k=1)\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n\n def test_not_a_knot(self):\n for k in [3, 5]:\n b = make_interp_spline(self.xx, self.yy, 
k)\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n\n def test_quadratic_deriv(self):\n der = [(1, 8.)] # order, value: f'(x) = 8.\n\n # derivative at right-hand edge\n b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)\n\n # derivative at left-hand edge\n b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)\n\n def test_cubic_deriv(self):\n k = 3\n\n # first derivatives at left & right edges:\n der_l, der_r = [(1, 3.)], [(1, 4.)]\n b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],\n [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)\n\n # 'natural' cubic spline, zero out 2nd derivatives at the boundaries\n der_l, der_r = [(2, 0)], [(2, 0)]\n b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n\n def test_quintic_derivs(self):\n k, n = 5, 7\n x = np.arange(n).astype(np.float_)\n y = np.sin(x)\n der_l = [(1, -12.), (2, 1)]\n der_r = [(1, 8.), (2, 3.)]\n b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))\n assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)\n assert_allclose([b(x[0], 1), b(x[0], 2)],\n [val for (nu, val) in der_l])\n assert_allclose([b(x[-1], 1), b(x[-1], 2)],\n [val for (nu, val) in der_r])\n\n @pytest.mark.xfail(reason='unstable')\n def test_cubic_deriv_unstable(self):\n # 1st and 2nd derivative at x[0], no derivative information at x[-1]\n # The problem is not that it fails [who would use this anyway],\n # the problem is that it fails *silently*, and I've no idea\n # how to detect this sort of instability.\n # In this particular case: it's OK for len(t) < 20, goes haywire\n # at larger `len(t)`.\n k = 3\n t = _augknt(self.xx, k)\n\n der_l = [(1, 3.), (2, 4.)]\n b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n\n def test_knots_not_data_sites(self):\n # Knots need not coincide with the data sites.\n # use a quadratic spline, knots are at data averages,\n # two additional constraints are zero 2nd derivs at edges\n k = 2\n t = np.r_[(self.xx[0],)*(k+1),\n (self.xx[1:] + self.xx[:-1]) / 2.,\n (self.xx[-1],)*(k+1)]\n b = make_interp_spline(self.xx, self.yy, k, t,\n bc_type=([(2, 0)], [(2, 0)]))\n\n assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)\n assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],\n atol=1e-14)\n\n def test_minimum_points_and_deriv(self):\n # interpolation of f(x) = x**3 between 0 and 1. 
f'(x) = 3 * xx**2 and \n # f'(0) = 0, f'(1) = 3.\n k = 3\n x = [0., 1.]\n y = [0., 1.]\n b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))\n \n xx = np.linspace(0., 1.)\n yy = xx**3\n assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)\n \n # If one of the derivatives is omitted, the spline definition is \n # incomplete:\n assert_raises(ValueError, make_interp_spline, x, y, k, \n **dict(bc_type=([(1, 0.)], None)))\n\n def test_complex(self):\n k = 3\n xx = self.xx\n yy = self.yy + 1.j*self.yy\n\n # first derivatives at left & right edges:\n der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]\n b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))\n assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)\n assert_allclose([b(xx[0], 1), b(xx[-1], 1)],\n [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)\n\n # also test zero and first order\n for k in (0, 1):\n b = make_interp_spline(xx, yy, k=k)\n assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)\n\n def test_int_xy(self):\n x = np.arange(10).astype(np.int_)\n y = np.arange(10).astype(np.int_)\n\n # cython chokes on \"buffer type mismatch\" (construction) or\n # \"no matching signature found\" (evaluation)\n for k in (0, 1, 2, 3):\n b = make_interp_spline(x, y, k=k)\n b(x)\n\n def test_sliced_input(self):\n # cython code chokes on non C contiguous arrays\n xx = np.linspace(-1, 1, 100)\n\n x = xx[::5]\n y = xx[::5]\n\n for k in (0, 1, 2, 3):\n make_interp_spline(x, y, k=k)\n\n def test_check_finite(self):\n # check_finite defaults to True; nans and such trigger a ValueError\n x = np.arange(10).astype(float)\n y = x**2\n\n for z in [np.nan, np.inf, -np.inf]:\n y[-1] = z\n assert_raises(ValueError, make_interp_spline, x, y)\n\n @pytest.mark.parametrize('k', [1, 2, 3, 5])\n def test_list_input(self, k):\n # regression test for gh-8714: TypeError for x, y being lists and k=2\n x = list(range(10))\n y = [a**2 for a in x]\n make_interp_spline(x, y, k=k)\n\n def test_multiple_rhs(self):\n yy = np.c_[np.sin(self.xx), np.cos(self.xx)]\n der_l = [(1, [1., 2.])]\n der_r = [(1, [3., 4.])]\n\n b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))\n assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)\n assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)\n assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)\n\n def test_shapes(self):\n np.random.seed(1234)\n k, n = 3, 22\n x = np.sort(np.random.random(size=n))\n y = np.random.random(size=(n, 5, 6, 7))\n\n b = make_interp_spline(x, y, k)\n assert_equal(b.c.shape, (n, 5, 6, 7))\n\n # now throw in some derivatives\n d_l = [(1, np.random.random((5, 6, 7)))]\n d_r = [(1, np.random.random((5, 6, 7)))]\n b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))\n assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))\n\n def test_string_aliases(self):\n yy = np.sin(self.xx)\n\n # a single string is duplicated\n b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')\n b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))\n assert_allclose(b1.c, b2.c, atol=1e-15)\n\n # two strings are handled\n b1 = make_interp_spline(self.xx, yy, k=3,\n bc_type=('natural', 'clamped'))\n b2 = make_interp_spline(self.xx, yy, k=3,\n bc_type=([(2, 0)], [(1, 0)]))\n assert_allclose(b1.c, b2.c, atol=1e-15)\n\n # one-sided BCs are OK\n b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))\n b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))\n assert_allclose(b1.c, b2.c, atol=1e-15)\n\n # 'not-a-knot' is equivalent to None\n b1 = 
make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')\n b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)\n assert_allclose(b1.c, b2.c, atol=1e-15)\n\n # unknown strings do not pass\n with assert_raises(ValueError):\n make_interp_spline(self.xx, yy, k=3, bc_type='typo')\n\n # string aliases are handled for 2D values\n yy = np.c_[np.sin(self.xx), np.cos(self.xx)]\n der_l = [(1, [0., 0.])]\n der_r = [(2, [0., 0.])]\n b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))\n b1 = make_interp_spline(self.xx, yy, k=3,\n bc_type=('clamped', 'natural'))\n assert_allclose(b1.c, b2.c, atol=1e-15)\n\n # ... and for n-D values:\n np.random.seed(1234)\n k, n = 3, 22\n x = np.sort(np.random.random(size=n))\n y = np.random.random(size=(n, 5, 6, 7))\n\n # now throw in some derivatives\n d_l = [(1, np.zeros((5, 6, 7)))]\n d_r = [(1, np.zeros((5, 6, 7)))]\n b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))\n b2 = make_interp_spline(x, y, k, bc_type='clamped')\n assert_allclose(b1.c, b2.c, atol=1e-15)\n\n def test_full_matrix(self):\n np.random.seed(1234)\n k, n = 3, 7\n x = np.sort(np.random.random(size=n))\n y = np.random.random(size=n)\n t = _not_a_knot(x, k)\n\n b = make_interp_spline(x, y, k, t)\n cf = make_interp_full_matr(x, y, t, k)\n assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)\n\n\ndef make_interp_full_matr(x, y, t, k):\n \"\"\"Assemble an spline order k with knots t to interpolate\n y(x) using full matrices.\n Not-a-knot BC only.\n\n This routine is here for testing only (even though it's functional).\n \"\"\"\n assert x.size == y.size\n assert t.size == x.size + k + 1\n n = x.size\n\n A = np.zeros((n, n), dtype=np.float_)\n\n for j in range(n):\n xval = x[j]\n if xval == t[k]:\n left = k\n else:\n left = np.searchsorted(t, xval) - 1\n\n # fill a row\n bb = _bspl.evaluate_all_bspl(t, k, xval, left)\n A[j, left-k:left+1] = bb\n\n c = sl.solve(A, y)\n return c\n\n\n### XXX: 'periodic' interp spline using full matrices\ndef make_interp_per_full_matr(x, y, t, k):\n x, y, t = map(np.asarray, (x, y, t))\n\n n = x.size\n nt = t.size - k - 1\n\n # have `n` conditions for `nt` coefficients; need nt-n derivatives\n assert nt - n == k - 1\n\n # LHS: the collocation matrix + derivatives at edges\n A = np.zeros((nt, nt), dtype=np.float_)\n\n # derivatives at x[0]:\n offset = 0\n\n if x[0] == t[k]:\n left = k\n else:\n left = np.searchsorted(t, x[0]) - 1\n\n if x[-1] == t[k]:\n left2 = k\n else:\n left2 = np.searchsorted(t, x[-1]) - 1\n\n for i in range(k-1):\n bb = _bspl.evaluate_all_bspl(t, k, x[0], left, nu=i+1)\n A[i, left-k:left+1] = bb\n bb = _bspl.evaluate_all_bspl(t, k, x[-1], left2, nu=i+1)\n A[i, left2-k:left2+1] = -bb\n offset += 1\n\n # RHS\n y = np.r_[[0]*(k-1), y]\n\n # collocation matrix\n for j in range(n):\n xval = x[j]\n # find interval\n if xval == t[k]:\n left = k\n else:\n left = np.searchsorted(t, xval) - 1\n\n # fill a row\n bb = _bspl.evaluate_all_bspl(t, k, xval, left)\n A[j + offset, left-k:left+1] = bb\n\n c = sl.solve(A, y)\n return c\n\n\ndef make_lsq_full_matrix(x, y, t, k=3):\n \"\"\"Make the least-square spline, full matrices.\"\"\"\n x, y, t = map(np.asarray, (x, y, t))\n m = x.size\n n = t.size - k - 1\n\n A = np.zeros((m, n), dtype=np.float_)\n\n for j in range(m):\n xval = x[j]\n # find interval\n if xval == t[k]:\n left = k\n else:\n left = np.searchsorted(t, xval) - 1\n\n # fill a row\n bb = _bspl.evaluate_all_bspl(t, k, xval, left)\n A[j, left-k:left+1] = bb\n\n # have observation matrix, can solve the LSQ problem\n B = np.dot(A.T, A)\n Y = 
np.dot(A.T, y)\n c = sl.solve(B, Y)\n\n return c, (A, Y)\n\n\nclass TestLSQ(object):\n #\n # Test make_lsq_spline\n #\n np.random.seed(1234)\n n, k = 13, 3\n x = np.sort(np.random.random(n))\n y = np.random.random(n)\n t = _augknt(np.linspace(x[0], x[-1], 7), k)\n\n def test_lstsq(self):\n # check LSQ construction vs a full matrix version\n x, y, t, k = self.x, self.y, self.t, self.k\n\n c0, AY = make_lsq_full_matrix(x, y, t, k)\n b = make_lsq_spline(x, y, t, k)\n\n assert_allclose(b.c, c0)\n assert_equal(b.c.shape, (t.size - k - 1,))\n\n # also check against numpy.lstsq\n aa, yy = AY\n c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)\n assert_allclose(b.c, c1)\n\n def test_weights(self):\n # weights = 1 is same as None\n x, y, t, k = self.x, self.y, self.t, self.k\n w = np.ones_like(x)\n\n b = make_lsq_spline(x, y, t, k)\n b_w = make_lsq_spline(x, y, t, k, w=w)\n\n assert_allclose(b.t, b_w.t, atol=1e-14)\n assert_allclose(b.c, b_w.c, atol=1e-14)\n assert_equal(b.k, b_w.k)\n\n def test_multiple_rhs(self):\n x, t, k, n = self.x, self.t, self.k, self.n\n y = np.random.random(size=(n, 5, 6, 7))\n\n b = make_lsq_spline(x, y, t, k)\n assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))\n\n def test_complex(self):\n # cmplx-valued `y`\n x, t, k = self.x, self.t, self.k\n yc = self.y * (1. + 2.j)\n\n b = make_lsq_spline(x, yc, t, k)\n b_re = make_lsq_spline(x, yc.real, t, k)\n b_im = make_lsq_spline(x, yc.imag, t, k)\n\n assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)\n\n def test_int_xy(self):\n x = np.arange(10).astype(np.int_)\n y = np.arange(10).astype(np.int_)\n t = _augknt(x, k=1)\n # cython chokes on \"buffer type mismatch\"\n make_lsq_spline(x, y, t, k=1)\n\n def test_sliced_input(self):\n # cython code chokes on non C contiguous arrays\n xx = np.linspace(-1, 1, 100)\n\n x = xx[::3]\n y = xx[::3]\n t = _augknt(x, 1)\n make_lsq_spline(x, y, t, k=1)\n\n def test_checkfinite(self):\n # check_finite defaults to True; nans and such trigger a ValueError\n x = np.arange(12).astype(float)\n y = x**2\n t = _augknt(x, 3)\n\n for z in [np.nan, np.inf, -np.inf]:\n y[-1] = z\n assert_raises(ValueError, make_lsq_spline, x, y, t)\n\n",
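The test file above exercises make_interp_spline and make_lsq_spline end to end. A minimal usage sketch follows (an illustration, not part of the test suite; the knot layout mirrors the _augknt helper used above, and it assumes a SciPy version where the string bc_type aliases of test_string_aliases are available):

# Sketch: interpolating and least-squares B-splines with SciPy.
import numpy as np
from scipy.interpolate import make_interp_spline, make_lsq_spline

x = np.linspace(0, 2 * np.pi, 25)
y = np.sin(x)

# Interpolating cubic spline with 'natural' boundary conditions
# (zero second derivative at both ends), cf. test_string_aliases.
b_interp = make_interp_spline(x, y, k=3, bc_type='natural')
assert np.allclose(b_interp(x), y, atol=1e-12)

# Least-squares cubic spline: the knot vector needs k+1 repeated
# boundary knots at each end, like the _augknt helper above builds.
k = 3
interior = np.linspace(x[0], x[-1], 7)[1:-1]
t = np.r_[(x[0],) * (k + 1), interior, (x[-1],) * (k + 1)]
b_lsq = make_lsq_spline(x, y, t, k)

# Both constructors return BSpline objects, so derivative() and
# integrate() are available, as the tests above verify.
print(b_lsq.derivative()(np.pi), b_lsq.integrate(0, 2 * np.pi))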
"\"\"\"Frechet derivative of the matrix exponential.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport scipy.linalg\n\n__all__ = ['expm_frechet', 'expm_cond']\n\n\ndef expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):\n \"\"\"\n Frechet derivative of the matrix exponential of A in the direction E.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix of which to take the matrix exponential.\n E : (N, N) array_like\n Matrix direction in which to take the Frechet derivative.\n method : str, optional\n Choice of algorithm. Should be one of\n\n - `SPS` (default)\n - `blockEnlarge`\n\n compute_expm : bool, optional\n Whether to compute also `expm_A` in addition to `expm_frechet_AE`.\n Default is True.\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n expm_A : ndarray\n Matrix exponential of A.\n expm_frechet_AE : ndarray\n Frechet derivative of the matrix exponential of A in the direction E.\n\n For ``compute_expm = False``, only `expm_frechet_AE` is returned.\n\n See also\n --------\n expm : Compute the exponential of a matrix.\n\n Notes\n -----\n This section describes the available implementations that can be selected\n by the `method` parameter. The default method is *SPS*.\n\n Method *blockEnlarge* is a naive algorithm.\n\n Method *SPS* is Scaling-Pade-Squaring [1]_.\n It is a sophisticated implementation which should take\n only about 3/8 as much time as the naive implementation.\n The asymptotics are the same.\n\n .. versionadded:: 0.13.0\n\n References\n ----------\n .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)\n Computing the Frechet Derivative of the Matrix Exponential,\n with an application to Condition Number Estimation.\n SIAM Journal On Matrix Analysis and Applications.,\n 30 (4). pp. 1639-1657. 
ISSN 1095-7162\n\n Examples\n --------\n >>> import scipy.linalg\n >>> A = np.random.randn(3, 3)\n >>> E = np.random.randn(3, 3)\n >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)\n >>> expm_A.shape, expm_frechet_AE.shape\n ((3, 3), (3, 3))\n\n >>> import scipy.linalg\n >>> A = np.random.randn(3, 3)\n >>> E = np.random.randn(3, 3)\n >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)\n >>> M = np.zeros((6, 6))\n >>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A\n >>> expm_M = scipy.linalg.expm(M)\n >>> np.allclose(expm_A, expm_M[:3, :3])\n True\n >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])\n True\n\n \"\"\"\n if check_finite:\n A = np.asarray_chkfinite(A)\n E = np.asarray_chkfinite(E)\n else:\n A = np.asarray(A)\n E = np.asarray(E)\n if A.ndim != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected A to be a square matrix')\n if E.ndim != 2 or E.shape[0] != E.shape[1]:\n raise ValueError('expected E to be a square matrix')\n if A.shape != E.shape:\n raise ValueError('expected A and E to be the same shape')\n if method is None:\n method = 'SPS'\n if method == 'SPS':\n expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E)\n elif method == 'blockEnlarge':\n expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E)\n else:\n raise ValueError('Unknown implementation %s' % method)\n if compute_expm:\n return expm_A, expm_frechet_AE\n else:\n return expm_frechet_AE\n\n\ndef expm_frechet_block_enlarge(A, E):\n \"\"\"\n This is a helper function, mostly for testing and profiling.\n Return expm(A), frechet(A, E)\n \"\"\"\n n = A.shape[0]\n M = np.vstack([\n np.hstack([A, E]),\n np.hstack([np.zeros_like(A), A])])\n expm_M = scipy.linalg.expm(M)\n return expm_M[:n, :n], expm_M[:n, n:]\n\n\n\"\"\"\nMaximal values ell_m of ||2**-s A|| such that the backward error bound\ndoes not exceed 2**-53.\n\"\"\"\nell_table_61 = (\n None,\n # 1\n 2.11e-8,\n 3.56e-4,\n 1.08e-2,\n 6.49e-2,\n 2.00e-1,\n 4.37e-1,\n 7.83e-1,\n 1.23e0,\n 1.78e0,\n 2.42e0,\n # 11\n 3.13e0,\n 3.90e0,\n 4.74e0,\n 5.63e0,\n 6.56e0,\n 7.52e0,\n 8.53e0,\n 9.56e0,\n 1.06e1,\n 1.17e1,\n )\n\n\n# The b vectors and U and V are copypasted\n# from scipy.sparse.linalg.matfuncs.py.\n# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)\n\ndef _diff_pade3(A, E, ident):\n b = (120., 60., 12., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n U = A.dot(b[3]*A2 + b[1]*ident)\n V = b[2]*A2 + b[0]*ident\n Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)\n Lv = b[2]*M2\n return U, V, Lu, Lv\n\n\ndef _diff_pade5(A, E, ident):\n b = (30240., 15120., 3360., 420., 30., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[4]*A4 + b[2]*A2 + b[0]*ident\n Lu = (A.dot(b[5]*M4 + b[3]*M2) +\n E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))\n Lv = b[4]*M4 + b[2]*M2\n return U, V, Lu, Lv\n\n\ndef _diff_pade7(A, E, ident):\n b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n A6 = np.dot(A2, A4)\n M6 = np.dot(A4, M2) + np.dot(M4, A2)\n U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +\n E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))\n Lv = b[6]*M6 + b[4]*M4 + b[2]*M2\n return U, V, Lu, Lv\n\n\ndef _diff_pade9(A, E, ident):\n b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,\n 2162160., 
110880., 3960., 90., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n A6 = np.dot(A2, A4)\n M6 = np.dot(A4, M2) + np.dot(M4, A2)\n A8 = np.dot(A4, A4)\n M8 = np.dot(A4, M4) + np.dot(M4, A4)\n U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +\n E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))\n Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2\n return U, V, Lu, Lv\n\n\ndef expm_frechet_algo_64(A, E):\n n = A.shape[0]\n s = None\n ident = np.identity(n)\n A_norm_1 = scipy.linalg.norm(A, 1)\n m_pade_pairs = (\n (3, _diff_pade3),\n (5, _diff_pade5),\n (7, _diff_pade7),\n (9, _diff_pade9))\n for m, pade in m_pade_pairs:\n if A_norm_1 <= ell_table_61[m]:\n U, V, Lu, Lv = pade(A, E, ident)\n s = 0\n break\n if s is None:\n # scaling\n s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))\n A = A * 2.0**-s\n E = E * 2.0**-s\n # pade order 13\n A2 = np.dot(A, A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n A6 = np.dot(A2, A4)\n M6 = np.dot(A4, M2) + np.dot(M4, A2)\n b = (64764752532480000., 32382376266240000., 7771770303897600.,\n 1187353796428800., 129060195264000., 10559470521600.,\n 670442572800., 33522128640., 1323241920., 40840800., 960960.,\n 16380., 182., 1.)\n W1 = b[13]*A6 + b[11]*A4 + b[9]*A2\n W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident\n Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2\n Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n W = np.dot(A6, W1) + W2\n U = np.dot(A, W)\n V = np.dot(A6, Z1) + Z2\n Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2\n Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2\n Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2\n Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2\n Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2\n Lu = np.dot(A, Lw) + np.dot(E, W)\n Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2\n # factor once and solve twice\n lu_piv = scipy.linalg.lu_factor(-U + V)\n R = scipy.linalg.lu_solve(lu_piv, U + V)\n L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))\n # squaring\n for k in range(s):\n L = np.dot(R, L) + np.dot(L, R)\n R = np.dot(R, R)\n return R, L\n\n\ndef vec(M):\n \"\"\"\n Stack columns of M to construct a single vector.\n\n This is somewhat standard notation in linear algebra.\n\n Parameters\n ----------\n M : 2d array_like\n Input matrix\n\n Returns\n -------\n v : 1d ndarray\n Output vector\n\n \"\"\"\n return M.T.ravel()\n\n\ndef expm_frechet_kronform(A, method=None, check_finite=True):\n \"\"\"\n Construct the Kronecker form of the Frechet derivative of expm.\n\n Parameters\n ----------\n A : array_like with shape (N, N)\n Matrix to be expm'd.\n method : str, optional\n Extra keyword to be passed to expm_frechet.\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n K : 2d ndarray with shape (N*N, N*N)\n Kronecker form of the Frechet derivative of the matrix exponential.\n\n Notes\n -----\n This function is used to help compute the condition number\n of the matrix exponential.\n\n See also\n --------\n expm : Compute a matrix exponential.\n expm_frechet : Compute the Frechet derivative of the matrix exponential.\n expm_cond : Compute the relative condition number of the matrix exponential\n in the Frobenius norm.\n\n 
\"\"\"\n if check_finite:\n A = np.asarray_chkfinite(A)\n else:\n A = np.asarray(A)\n if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected a square matrix')\n\n n = A.shape[0]\n ident = np.identity(n)\n cols = []\n for i in range(n):\n for j in range(n):\n E = np.outer(ident[i], ident[j])\n F = expm_frechet(A, E,\n method=method, compute_expm=False, check_finite=False)\n cols.append(vec(F))\n return np.vstack(cols).T\n\n\ndef expm_cond(A, check_finite=True):\n \"\"\"\n Relative condition number of the matrix exponential in the Frobenius norm.\n\n Parameters\n ----------\n A : 2d array_like\n Square input matrix with shape (N, N).\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n kappa : float\n The relative condition number of the matrix exponential\n in the Frobenius norm\n\n Notes\n -----\n A faster estimate for the condition number in the 1-norm\n has been published but is not yet implemented in scipy.\n\n .. versionadded:: 0.14.0\n\n See also\n --------\n expm : Compute the exponential of a matrix.\n expm_frechet : Compute the Frechet derivative of the matrix exponential.\n\n Examples\n --------\n >>> from scipy.linalg import expm_cond\n >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])\n >>> k = expm_cond(A)\n >>> k\n 1.7787805864469866\n\n \"\"\"\n if check_finite:\n A = np.asarray_chkfinite(A)\n else:\n A = np.asarray(A)\n if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected a square matrix')\n\n X = scipy.linalg.expm(A)\n K = expm_frechet_kronform(A, check_finite=False)\n\n # The following norm choices are deliberate.\n # The norms of A and X are Frobenius norms,\n # and the norm of K is the induced 2-norm.\n A_norm = scipy.linalg.norm(A, 'fro')\n X_norm = scipy.linalg.norm(X, 'fro')\n K_norm = scipy.linalg.norm(K, 2)\n\n kappa = (K_norm * A_norm) / X_norm\n return kappa\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import tkinter as Tk\n\nimport logging\nimport os.path\nimport sys\n\n# Paint image to Tk photo blitter extension\nimport matplotlib.backends.tkagg as tkagg\n\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nimport matplotlib.backends.windowing as windowing\n\nimport matplotlib\nfrom matplotlib import backend_tools, cbook, rcParams\nfrom matplotlib.backend_bases import (\n _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,\n StatusbarBase, TimerBase, ToolContainerBase, cursors)\nfrom matplotlib.backend_managers import ToolManager\nfrom matplotlib._pylab_helpers import Gcf\nfrom matplotlib.figure import Figure\nfrom matplotlib.widgets import SubplotTool\n\n\n_log = logging.getLogger(__name__)\n\nbackend_version = Tk.TkVersion\n\n# the true dots per inch on the screen; should be display dependent\n# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi\nPIXELS_PER_INCH = 75\n\ncursord = {\n cursors.MOVE: \"fleur\",\n cursors.HAND: \"hand2\",\n cursors.POINTER: \"arrow\",\n cursors.SELECT_REGION: \"tcross\",\n cursors.WAIT: \"watch\",\n }\n\n\ndef raise_msg_to_str(msg):\n \"\"\"msg is a return arg from a raise. Join with new lines\"\"\"\n if not isinstance(msg, six.string_types):\n msg = '\\n'.join(map(str, msg))\n return msg\n\ndef error_msg_tkpaint(msg, parent=None):\n from six.moves import tkinter_messagebox as tkMessageBox\n tkMessageBox.showerror(\"matplotlib\", msg)\n\n\nclass TimerTk(TimerBase):\n '''\n Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.\n\n Attributes\n ----------\n interval : int\n The time between timer events in milliseconds. Default is 1000 ms.\n single_shot : bool\n Boolean flag indicating whether this timer should operate as single\n shot (run once and then stop). Defaults to False.\n callbacks : list\n Stores list of (func, args) tuples that will be called upon timer\n events. This list can be manipulated directly, or the functions\n `add_callback` and `remove_callback` can be used.\n\n '''\n def __init__(self, parent, *args, **kwargs):\n TimerBase.__init__(self, *args, **kwargs)\n self.parent = parent\n self._timer = None\n\n def _timer_start(self):\n self._timer_stop()\n self._timer = self.parent.after(self._interval, self._on_timer)\n\n def _timer_stop(self):\n if self._timer is not None:\n self.parent.after_cancel(self._timer)\n self._timer = None\n\n def _on_timer(self):\n TimerBase._on_timer(self)\n\n # Tk after() is only a single shot, so we need to add code here to\n # reset the timer if we're not operating in single shot mode. 
However,\n # if _timer is None, this means that _timer_stop has been called; so\n # don't recreate the timer in that case.\n if not self._single and self._timer:\n self._timer = self.parent.after(self._interval, self._on_timer)\n else:\n self._timer = None\n\n\nclass FigureCanvasTk(FigureCanvasBase):\n keyvald = {65507 : 'control',\n 65505 : 'shift',\n 65513 : 'alt',\n 65515 : 'super',\n 65508 : 'control',\n 65506 : 'shift',\n 65514 : 'alt',\n 65361 : 'left',\n 65362 : 'up',\n 65363 : 'right',\n 65364 : 'down',\n 65307 : 'escape',\n 65470 : 'f1',\n 65471 : 'f2',\n 65472 : 'f3',\n 65473 : 'f4',\n 65474 : 'f5',\n 65475 : 'f6',\n 65476 : 'f7',\n 65477 : 'f8',\n 65478 : 'f9',\n 65479 : 'f10',\n 65480 : 'f11',\n 65481 : 'f12',\n 65300 : 'scroll_lock',\n 65299 : 'break',\n 65288 : 'backspace',\n 65293 : 'enter',\n 65379 : 'insert',\n 65535 : 'delete',\n 65360 : 'home',\n 65367 : 'end',\n 65365 : 'pageup',\n 65366 : 'pagedown',\n 65438 : '0',\n 65436 : '1',\n 65433 : '2',\n 65435 : '3',\n 65430 : '4',\n 65437 : '5',\n 65432 : '6',\n 65429 : '7',\n 65431 : '8',\n 65434 : '9',\n 65451 : '+',\n 65453 : '-',\n 65450 : '*',\n 65455 : '/',\n 65439 : 'dec',\n 65421 : 'enter',\n }\n\n _keycode_lookup = {\n 262145: 'control',\n 524320: 'alt',\n 524352: 'alt',\n 1048584: 'super',\n 1048592: 'super',\n 131074: 'shift',\n 131076: 'shift',\n }\n \"\"\"_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)\n keys on apple keyboards.\"\"\"\n\n def __init__(self, figure, master=None, resize_callback=None):\n super(FigureCanvasTk, self).__init__(figure)\n self._idle = True\n self._idle_callback = None\n t1,t2,w,h = self.figure.bbox.bounds\n w, h = int(w), int(h)\n self._tkcanvas = Tk.Canvas(\n master=master, background=\"white\",\n width=w, height=h, borderwidth=0, highlightthickness=0)\n self._tkphoto = Tk.PhotoImage(\n master=self._tkcanvas, width=w, height=h)\n self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)\n self._resize_callback = resize_callback\n self._tkcanvas.bind(\"<Configure>\", self.resize)\n self._tkcanvas.bind(\"<Key>\", self.key_press)\n self._tkcanvas.bind(\"<Motion>\", self.motion_notify_event)\n self._tkcanvas.bind(\"<KeyRelease>\", self.key_release)\n for name in \"<Button-1>\", \"<Button-2>\", \"<Button-3>\":\n self._tkcanvas.bind(name, self.button_press_event)\n for name in \"<Double-Button-1>\", \"<Double-Button-2>\", \"<Double-Button-3>\":\n self._tkcanvas.bind(name, self.button_dblclick_event)\n for name in \"<ButtonRelease-1>\", \"<ButtonRelease-2>\", \"<ButtonRelease-3>\":\n self._tkcanvas.bind(name, self.button_release_event)\n\n # Mouse wheel on Linux generates button 4/5 events\n for name in \"<Button-4>\", \"<Button-5>\":\n self._tkcanvas.bind(name, self.scroll_event)\n # Mouse wheel for windows goes to the window with the focus.\n # Since the canvas won't usually have the focus, bind the\n # event to the window containing the canvas instead.\n # See http://wiki.tcl.tk/3893 (mousewheel) for details\n root = self._tkcanvas.winfo_toplevel()\n root.bind(\"<MouseWheel>\", self.scroll_event_windows, \"+\")\n\n # Can't get destroy events by binding to _tkcanvas. 
Therefore, bind\n # to the window and filter.\n def filter_destroy(evt):\n if evt.widget is self._tkcanvas:\n self._master.update_idletasks()\n self.close_event()\n root.bind(\"<Destroy>\", filter_destroy, \"+\")\n\n self._master = master\n self._tkcanvas.focus_set()\n\n def resize(self, event):\n width, height = event.width, event.height\n if self._resize_callback is not None:\n self._resize_callback(event)\n\n # compute desired figure size in inches\n dpival = self.figure.dpi\n winch = width/dpival\n hinch = height/dpival\n self.figure.set_size_inches(winch, hinch, forward=False)\n\n\n self._tkcanvas.delete(self._tkphoto)\n self._tkphoto = Tk.PhotoImage(\n master=self._tkcanvas, width=int(width), height=int(height))\n self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)\n self.resize_event()\n self.draw()\n\n # a resizing will in general move the pointer position\n # relative to the canvas, so process it as a motion notify\n # event. An intended side effect of this call is to allow\n # window raises (which trigger a resize) to get the cursor\n # position to the mpl event framework so key presses which are\n # over the axes will work w/o clicks or explicit motion\n self._update_pointer_position(event)\n\n def _update_pointer_position(self, guiEvent=None):\n \"\"\"\n Figure out if we are inside the canvas or not and update the\n canvas enter/leave events\n \"\"\"\n # if the pointer if over the canvas, set the lastx and lasty\n # attrs of the canvas so it can process event w/o mouse click\n # or move\n\n # the window's upper, left coords in screen coords\n xw = self._tkcanvas.winfo_rootx()\n yw = self._tkcanvas.winfo_rooty()\n # the pointer's location in screen coords\n xp, yp = self._tkcanvas.winfo_pointerxy()\n\n # not figure out the canvas coordinates of the pointer\n xc = xp - xw\n yc = yp - yw\n\n # flip top/bottom\n yc = self.figure.bbox.height - yc\n\n # JDH: this method was written originally to get the pointer\n # location to the backend lastx and lasty attrs so that events\n # like KeyEvent can be handled without mouse events. e.g., if\n # the cursor is already above the axes, then key presses like\n # 'g' should toggle the grid. In order for this to work in\n # backend_bases, the canvas needs to know _lastx and _lasty.\n # There are three ways to get this info the canvas:\n #\n # 1) set it explicitly\n #\n # 2) call enter/leave events explicitly. The downside of this\n # in the impl below is that enter could be repeatedly\n # triggered if the mouse is over the axes and one is\n # resizing with the keyboard. This is not entirely bad,\n # because the mouse position relative to the canvas is\n # changing, but it may be surprising to get repeated entries\n # without leaves\n #\n # 3) process it as a motion notify event. This also has pros\n # and cons. 
The mouse is moving relative to the window, but\n # this may surpise an event handler writer who is getting\n # motion_notify_events even if the mouse has not moved\n\n # here are the three scenarios\n if 1:\n # just manually set it\n self._lastx, self._lasty = xc, yc\n elif 0:\n # alternate implementation: process it as a motion\n FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)\n elif 0:\n # alternate implementation -- process enter/leave events\n # instead of motion/notify\n if self.figure.bbox.contains(xc, yc):\n self.enter_notify_event(guiEvent, xy=(xc,yc))\n else:\n self.leave_notify_event(guiEvent)\n\n show = cbook.deprecated(\"2.2\", name=\"FigureCanvasTk.show\",\n alternative=\"FigureCanvasTk.draw\")(\n lambda self: self.draw())\n\n def draw_idle(self):\n 'update drawing area only if idle'\n if self._idle is False:\n return\n\n self._idle = False\n\n def idle_draw(*args):\n try:\n self.draw()\n finally:\n self._idle = True\n\n self._idle_callback = self._tkcanvas.after_idle(idle_draw)\n\n def get_tk_widget(self):\n \"\"\"returns the Tk widget used to implement FigureCanvasTkAgg.\n Although the initial implementation uses a Tk canvas, this routine\n is intended to hide that fact.\n \"\"\"\n return self._tkcanvas\n\n def motion_notify_event(self, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y\n FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)\n\n\n def button_press_event(self, event, dblclick=False):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y\n num = getattr(event, 'num', None)\n\n if sys.platform=='darwin':\n # 2 and 3 were reversed on the OSX platform I\n # tested under tkagg\n if num==2: num=3\n elif num==3: num=2\n\n FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)\n\n def button_dblclick_event(self,event):\n self.button_press_event(event,dblclick=True)\n\n def button_release_event(self, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y\n\n num = getattr(event, 'num', None)\n\n if sys.platform=='darwin':\n # 2 and 3 were reversed on the OSX platform I\n # tested under tkagg\n if num==2: num=3\n elif num==3: num=2\n\n FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)\n\n def scroll_event(self, event):\n x = event.x\n y = self.figure.bbox.height - event.y\n num = getattr(event, 'num', None)\n if num==4: step = +1\n elif num==5: step = -1\n else: step = 0\n\n FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)\n\n def scroll_event_windows(self, event):\n \"\"\"MouseWheel event processor\"\"\"\n # need to find the window that contains the mouse\n w = event.widget.winfo_containing(event.x_root, event.y_root)\n if w == self._tkcanvas:\n x = event.x_root - w.winfo_rootx()\n y = event.y_root - w.winfo_rooty()\n y = self.figure.bbox.height - y\n step = event.delta/120.\n FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)\n\n def _get_key(self, event):\n val = event.keysym_num\n if val in self.keyvald:\n key = self.keyvald[val]\n elif val == 0 and sys.platform == 'darwin' and \\\n event.keycode in self._keycode_lookup:\n key = self._keycode_lookup[event.keycode]\n elif val < 256:\n key = chr(val)\n else:\n key = None\n\n # add modifier keys to the key string. 
Bit details originate from\n # http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm\n # BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;\n # BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;\n # BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;\n # In general, the modifier key is excluded from the modifier flag,\n # however this is not the case on \"darwin\", so double check that\n # we aren't adding repeat modifier flags to a modifier key.\n if sys.platform == 'win32':\n modifiers = [(17, 'alt', 'alt'),\n (2, 'ctrl', 'control'),\n ]\n elif sys.platform == 'darwin':\n modifiers = [(3, 'super', 'super'),\n (4, 'alt', 'alt'),\n (2, 'ctrl', 'control'),\n ]\n else:\n modifiers = [(6, 'super', 'super'),\n (3, 'alt', 'alt'),\n (2, 'ctrl', 'control'),\n ]\n\n if key is not None:\n # note, shift is not added to the keys as this is already accounted for\n for bitmask, prefix, key_name in modifiers:\n if event.state & (1 << bitmask) and key_name not in key:\n key = '{0}+{1}'.format(prefix, key)\n\n return key\n\n def key_press(self, event):\n key = self._get_key(event)\n FigureCanvasBase.key_press_event(self, key, guiEvent=event)\n\n def key_release(self, event):\n key = self._get_key(event)\n FigureCanvasBase.key_release_event(self, key, guiEvent=event)\n\n def new_timer(self, *args, **kwargs):\n \"\"\"\n Creates a new backend-specific subclass of :class:`backend_bases.Timer`.\n This is useful for getting periodic events through the backend's native\n event loop. Implemented only for backends with GUIs.\n\n Other Parameters\n ----------------\n interval : scalar\n Timer interval in milliseconds\n callbacks : list\n Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``\n will be executed by the timer every *interval*.\n\n \"\"\"\n return TimerTk(self._tkcanvas, *args, **kwargs)\n\n def flush_events(self):\n self._master.update()\n\n\nclass FigureManagerTk(FigureManagerBase):\n \"\"\"\n Attributes\n ----------\n canvas : `FigureCanvas`\n The FigureCanvas instance\n num : int or str\n The Figure number\n toolbar : tk.Toolbar\n The tk.Toolbar\n window : tk.Window\n The tk.Window\n\n \"\"\"\n def __init__(self, canvas, num, window):\n FigureManagerBase.__init__(self, canvas, num)\n self.window = window\n self.window.withdraw()\n self.set_window_title(\"Figure %d\" % num)\n self.canvas = canvas\n self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n self._num = num\n\n self.toolmanager = self._get_toolmanager()\n self.toolbar = self._get_toolbar()\n self.statusbar = None\n\n if self.toolmanager:\n backend_tools.add_tools_to_manager(self.toolmanager)\n if self.toolbar:\n backend_tools.add_tools_to_container(self.toolbar)\n self.statusbar = StatusbarTk(self.window, self.toolmanager)\n\n self._shown = False\n\n def notify_axes_change(fig):\n 'this will be called whenever the current axes is changed'\n if self.toolmanager is not None:\n pass\n elif self.toolbar is not None:\n self.toolbar.update()\n self.canvas.figure.add_axobserver(notify_axes_change)\n\n def _get_toolbar(self):\n if matplotlib.rcParams['toolbar'] == 'toolbar2':\n toolbar = NavigationToolbar2Tk(self.canvas, self.window)\n elif matplotlib.rcParams['toolbar'] == 'toolmanager':\n toolbar = ToolbarTk(self.toolmanager, self.window)\n else:\n toolbar = None\n return toolbar\n\n def _get_toolmanager(self):\n if rcParams['toolbar'] == 'toolmanager':\n toolmanager = ToolManager(self.canvas.figure)\n else:\n toolmanager = None\n return toolmanager\n\n def resize(self, width, 
height=None):\n # before 09-12-22, the resize method takes a single *event*\n # parameter. On the other hand, the resize method of other\n # FigureManager class takes *width* and *height* parameter,\n # which is used to change the size of the window. For the\n # Figure.set_size_inches with forward=True work with Tk\n # backend, I changed the function signature but tried to keep\n # it backward compatible. -JJL\n\n # when a single parameter is given, consider it as a event\n if height is None:\n cbook.warn_deprecated(\"2.2\", \"FigureManagerTkAgg.resize now takes \"\n \"width and height as separate arguments\")\n width = width.width\n else:\n self.canvas._tkcanvas.master.geometry(\"%dx%d\" % (width, height))\n\n if self.toolbar is not None:\n self.toolbar.configure(width=width)\n\n def show(self):\n \"\"\"\n this function doesn't segfault but causes the\n PyEval_RestoreThread: NULL state bug on win32\n \"\"\"\n _focus = windowing.FocusManager()\n if not self._shown:\n def destroy(*args):\n self.window = None\n Gcf.destroy(self._num)\n self.canvas._tkcanvas.bind(\"<Destroy>\", destroy)\n self.window.deiconify()\n else:\n self.canvas.draw_idle()\n # Raise the new window.\n self.canvas.manager.window.attributes('-topmost', 1)\n self.canvas.manager.window.attributes('-topmost', 0)\n self._shown = True\n\n def destroy(self, *args):\n if self.window is not None:\n #self.toolbar.destroy()\n if self.canvas._idle_callback:\n self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)\n self.window.destroy()\n if Gcf.get_num_fig_managers()==0:\n if self.window is not None:\n self.window.quit()\n self.window = None\n\n def get_window_title(self):\n return self.window.wm_title()\n\n def set_window_title(self, title):\n self.window.wm_title(title)\n\n def full_screen_toggle(self):\n is_fullscreen = bool(self.window.attributes('-fullscreen'))\n self.window.attributes('-fullscreen', not is_fullscreen)\n\n\[email protected](\"2.2\")\nclass AxisMenu(object):\n def __init__(self, master, naxes):\n self._master = master\n self._naxes = naxes\n self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)\n self._mbar.pack(side=Tk.LEFT)\n self._mbutton = Tk.Menubutton(\n master=self._mbar, text=\"Axes\", underline=0)\n self._mbutton.pack(side=Tk.LEFT, padx=\"2m\")\n self._mbutton.menu = Tk.Menu(self._mbutton)\n self._mbutton.menu.add_command(\n label=\"Select All\", command=self.select_all)\n self._mbutton.menu.add_command(\n label=\"Invert All\", command=self.invert_all)\n self._axis_var = []\n self._checkbutton = []\n for i in range(naxes):\n self._axis_var.append(Tk.IntVar())\n self._axis_var[i].set(1)\n self._checkbutton.append(self._mbutton.menu.add_checkbutton(\n label = \"Axis %d\" % (i+1),\n variable=self._axis_var[i],\n command=self.set_active))\n self._mbutton.menu.invoke(self._mbutton.menu.index(\"Select All\"))\n self._mbutton['menu'] = self._mbutton.menu\n self._mbar.tk_menuBar(self._mbutton)\n self.set_active()\n\n def adjust(self, naxes):\n if self._naxes < naxes:\n for i in range(self._naxes, naxes):\n self._axis_var.append(Tk.IntVar())\n self._axis_var[i].set(1)\n self._checkbutton.append( self._mbutton.menu.add_checkbutton(\n label = \"Axis %d\" % (i+1),\n variable=self._axis_var[i],\n command=self.set_active))\n elif self._naxes > naxes:\n for i in range(self._naxes-1, naxes-1, -1):\n del self._axis_var[i]\n self._mbutton.menu.forget(self._checkbutton[i])\n del self._checkbutton[i]\n self._naxes = naxes\n self.set_active()\n\n def get_indices(self):\n a = [i for i in 
range(len(self._axis_var)) if self._axis_var[i].get()]\n return a\n\n def set_active(self):\n self._master.set_active(self.get_indices())\n\n def invert_all(self):\n for a in self._axis_var:\n a.set(not a.get())\n self.set_active()\n\n def select_all(self):\n for a in self._axis_var:\n a.set(1)\n self.set_active()\n\n\nclass NavigationToolbar2Tk(NavigationToolbar2, Tk.Frame):\n \"\"\"\n Attributes\n ----------\n canvas : `FigureCanvas`\n the figure canvas on which to operate\n win : tk.Window\n the tk.Window which owns this toolbar\n\n \"\"\"\n def __init__(self, canvas, window):\n self.canvas = canvas\n self.window = window\n NavigationToolbar2.__init__(self, canvas)\n\n def destroy(self, *args):\n del self.message\n Tk.Frame.destroy(self, *args)\n\n def set_message(self, s):\n self.message.set(s)\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y0 = height - y0\n y1 = height - y1\n if hasattr(self, \"lastrect\"):\n self.canvas._tkcanvas.delete(self.lastrect)\n self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)\n\n #self.canvas.draw()\n\n def release(self, event):\n try: self.lastrect\n except AttributeError: pass\n else:\n self.canvas._tkcanvas.delete(self.lastrect)\n del self.lastrect\n\n def set_cursor(self, cursor):\n self.window.configure(cursor=cursord[cursor])\n self.window.update_idletasks()\n\n def _Button(self, text, file, command, extension='.gif'):\n img_file = os.path.join(\n rcParams['datapath'], 'images', file + extension)\n im = Tk.PhotoImage(master=self, file=img_file)\n b = Tk.Button(\n master=self, text=text, padx=2, pady=2, image=im, command=command)\n b._ntimage = im\n b.pack(side=Tk.LEFT)\n return b\n\n def _Spacer(self):\n # Buttons are 30px high, so make this 26px tall with padding to center it\n s = Tk.Frame(\n master=self, height=26, relief=Tk.RIDGE, pady=2, bg=\"DarkGray\")\n s.pack(side=Tk.LEFT, padx=5)\n return s\n\n def _init_toolbar(self):\n xmin, xmax = self.canvas.figure.bbox.intervalx\n height, width = 50, xmax-xmin\n Tk.Frame.__init__(self, master=self.window,\n width=int(width), height=int(height),\n borderwidth=2)\n\n self.update() # Make axes menu\n\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n # Add a spacer; return value is unused.\n self._Spacer()\n else:\n button = self._Button(text=text, file=image_file,\n command=getattr(self, callback))\n if tooltip_text is not None:\n ToolTip.createToolTip(button, tooltip_text)\n\n self.message = Tk.StringVar(master=self)\n self._message_label = Tk.Label(master=self, textvariable=self.message)\n self._message_label.pack(side=Tk.RIGHT)\n self.pack(side=Tk.BOTTOM, fill=Tk.X)\n\n def configure_subplots(self):\n toolfig = Figure(figsize=(6,3))\n window = Tk.Toplevel()\n canvas = type(self.canvas)(toolfig, master=window)\n toolfig.subplots_adjust(top=0.9)\n canvas.tool = SubplotTool(self.canvas.figure, toolfig)\n canvas.draw()\n canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n window.grab_set()\n\n def save_figure(self, *args):\n from six.moves import tkinter_tkfiledialog, tkinter_messagebox\n filetypes = self.canvas.get_supported_filetypes().copy()\n default_filetype = self.canvas.get_default_filetype()\n\n # Tk doesn't provide a way to choose a default filetype,\n # so we just have to put it first\n default_filetype_name = filetypes.pop(default_filetype)\n sorted_filetypes = ([(default_filetype, default_filetype_name)]\n + sorted(six.iteritems(filetypes)))\n tk_filetypes = [(name, '*.%s' 
% ext) for ext, name in sorted_filetypes]\n\n # adding a default extension seems to break the\n # asksaveasfilename dialog when you choose various save types\n # from the dropdown. Passing in the empty string seems to\n # work - JDH!\n #defaultextension = self.canvas.get_default_filetype()\n defaultextension = ''\n initialdir = os.path.expanduser(rcParams['savefig.directory'])\n initialfile = self.canvas.get_default_filename()\n fname = tkinter_tkfiledialog.asksaveasfilename(\n master=self.window,\n title='Save the figure',\n filetypes=tk_filetypes,\n defaultextension=defaultextension,\n initialdir=initialdir,\n initialfile=initialfile,\n )\n\n if fname in [\"\", ()]:\n return\n # Save dir for next time, unless empty str (i.e., use cwd).\n if initialdir != \"\":\n rcParams['savefig.directory'] = (\n os.path.dirname(six.text_type(fname)))\n try:\n # This method will handle the delegation to the correct type\n self.canvas.figure.savefig(fname)\n except Exception as e:\n tkinter_messagebox.showerror(\"Error saving file\", str(e))\n\n def set_active(self, ind):\n self._ind = ind\n self._active = [self._axes[i] for i in self._ind]\n\n def update(self):\n _focus = windowing.FocusManager()\n self._axes = self.canvas.figure.axes\n NavigationToolbar2.update(self)\n\n\nclass ToolTip(object):\n \"\"\"\n Tooltip recipe from\n http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387\n \"\"\"\n @staticmethod\n def createToolTip(widget, text):\n toolTip = ToolTip(widget)\n def enter(event):\n toolTip.showtip(text)\n def leave(event):\n toolTip.hidetip()\n widget.bind('<Enter>', enter)\n widget.bind('<Leave>', leave)\n\n def __init__(self, widget):\n self.widget = widget\n self.tipwindow = None\n self.id = None\n self.x = self.y = 0\n\n def showtip(self, text):\n \"Display text in tooltip window\"\n self.text = text\n if self.tipwindow or not self.text:\n return\n x, y, _, _ = self.widget.bbox(\"insert\")\n x = x + self.widget.winfo_rootx() + 27\n y = y + self.widget.winfo_rooty()\n self.tipwindow = tw = Tk.Toplevel(self.widget)\n tw.wm_overrideredirect(1)\n tw.wm_geometry(\"+%d+%d\" % (x, y))\n try:\n # For Mac OS\n tw.tk.call(\"::tk::unsupported::MacWindowStyle\",\n \"style\", tw._w,\n \"help\", \"noActivates\")\n except Tk.TclError:\n pass\n label = Tk.Label(tw, text=self.text, justify=Tk.LEFT,\n background=\"#ffffe0\", relief=Tk.SOLID, borderwidth=1)\n label.pack(ipadx=1)\n\n def hidetip(self):\n tw = self.tipwindow\n self.tipwindow = None\n if tw:\n tw.destroy()\n\n\nclass RubberbandTk(backend_tools.RubberbandBase):\n def __init__(self, *args, **kwargs):\n backend_tools.RubberbandBase.__init__(self, *args, **kwargs)\n\n def draw_rubberband(self, x0, y0, x1, y1):\n height = self.figure.canvas.figure.bbox.height\n y0 = height - y0\n y1 = height - y1\n if hasattr(self, \"lastrect\"):\n self.figure.canvas._tkcanvas.delete(self.lastrect)\n self.lastrect = self.figure.canvas._tkcanvas.create_rectangle(\n x0, y0, x1, y1)\n\n def remove_rubberband(self):\n if hasattr(self, \"lastrect\"):\n self.figure.canvas._tkcanvas.delete(self.lastrect)\n del self.lastrect\n\n\nclass SetCursorTk(backend_tools.SetCursorBase):\n def set_cursor(self, cursor):\n self.figure.canvas.manager.window.configure(cursor=cursord[cursor])\n\n\nclass ToolbarTk(ToolContainerBase, Tk.Frame):\n _icon_extension = '.gif'\n def __init__(self, toolmanager, window):\n ToolContainerBase.__init__(self, toolmanager)\n xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx\n height, width = 50, xmax - xmin\n 
Tk.Frame.__init__(self, master=window,\n width=int(width), height=int(height),\n borderwidth=2)\n self._toolitems = {}\n self.pack(side=Tk.TOP, fill=Tk.X)\n self._groups = {}\n\n def add_toolitem(\n self, name, group, position, image_file, description, toggle):\n frame = self._get_groupframe(group)\n button = self._Button(name, image_file, toggle, frame)\n if description is not None:\n ToolTip.createToolTip(button, description)\n self._toolitems.setdefault(name, [])\n self._toolitems[name].append(button)\n\n def _get_groupframe(self, group):\n if group not in self._groups:\n if self._groups:\n self._add_separator()\n frame = Tk.Frame(master=self, borderwidth=0)\n frame.pack(side=Tk.LEFT, fill=Tk.Y)\n self._groups[group] = frame\n return self._groups[group]\n\n def _add_separator(self):\n separator = Tk.Frame(master=self, bd=5, width=1, bg='black')\n separator.pack(side=Tk.LEFT, fill=Tk.Y, padx=2)\n\n def _Button(self, text, image_file, toggle, frame):\n if image_file is not None:\n im = Tk.PhotoImage(master=self, file=image_file)\n else:\n im = None\n\n if not toggle:\n b = Tk.Button(master=frame, text=text, padx=2, pady=2, image=im,\n command=lambda: self._button_click(text))\n else:\n # There is a bug in tkinter included in some python 3.6 versions\n # that without this variable, produces a \"visual\" toggling of\n # other near checkbuttons\n # https://bugs.python.org/issue29402\n # https://bugs.python.org/issue25684\n var = Tk.IntVar()\n b = Tk.Checkbutton(master=frame, text=text, padx=2, pady=2,\n image=im, indicatoron=False,\n command=lambda: self._button_click(text),\n variable=var)\n b._ntimage = im\n b.pack(side=Tk.LEFT)\n return b\n\n def _button_click(self, name):\n self.trigger_tool(name)\n\n def toggle_toolitem(self, name, toggled):\n if name not in self._toolitems:\n return\n for toolitem in self._toolitems[name]:\n if toggled:\n toolitem.select()\n else:\n toolitem.deselect()\n\n def remove_toolitem(self, name):\n for toolitem in self._toolitems[name]:\n toolitem.pack_forget()\n del self._toolitems[name]\n\n\nclass StatusbarTk(StatusbarBase, Tk.Frame):\n def __init__(self, window, *args, **kwargs):\n StatusbarBase.__init__(self, *args, **kwargs)\n xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx\n height, width = 50, xmax - xmin\n Tk.Frame.__init__(self, master=window,\n width=int(width), height=int(height),\n borderwidth=2)\n self._message = Tk.StringVar(master=self)\n self._message_label = Tk.Label(master=self, textvariable=self._message)\n self._message_label.pack(side=Tk.RIGHT)\n self.pack(side=Tk.TOP, fill=Tk.X)\n\n def set_message(self, s):\n self._message.set(s)\n\n\nclass SaveFigureTk(backend_tools.SaveFigureBase):\n def trigger(self, *args):\n from six.moves import tkinter_tkfiledialog, tkinter_messagebox\n filetypes = self.figure.canvas.get_supported_filetypes().copy()\n default_filetype = self.figure.canvas.get_default_filetype()\n\n # Tk doesn't provide a way to choose a default filetype,\n # so we just have to put it first\n default_filetype_name = filetypes.pop(default_filetype)\n sorted_filetypes = ([(default_filetype, default_filetype_name)]\n + sorted(six.iteritems(filetypes)))\n tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]\n\n # adding a default extension seems to break the\n # asksaveasfilename dialog when you choose various save types\n # from the dropdown. 
Passing in the empty string seems to\n # work - JDH!\n # defaultextension = self.figure.canvas.get_default_filetype()\n defaultextension = ''\n initialdir = os.path.expanduser(rcParams['savefig.directory'])\n initialfile = self.figure.canvas.get_default_filename()\n fname = tkinter_tkfiledialog.asksaveasfilename(\n master=self.figure.canvas.manager.window,\n title='Save the figure',\n filetypes=tk_filetypes,\n defaultextension=defaultextension,\n initialdir=initialdir,\n initialfile=initialfile,\n )\n\n if fname == \"\" or fname == ():\n return\n else:\n if initialdir == '':\n # explicitly missing key or empty str signals to use cwd\n rcParams['savefig.directory'] = initialdir\n else:\n # save dir for next time\n rcParams['savefig.directory'] = os.path.dirname(\n six.text_type(fname))\n try:\n # This method will handle the delegation to the correct type\n self.figure.savefig(fname)\n except Exception as e:\n tkinter_messagebox.showerror(\"Error saving file\", str(e))\n\n\nclass ConfigureSubplotsTk(backend_tools.ConfigureSubplotsBase):\n def __init__(self, *args, **kwargs):\n backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)\n self.window = None\n\n def trigger(self, *args):\n self.init_window()\n self.window.lift()\n\n def init_window(self):\n if self.window:\n return\n\n toolfig = Figure(figsize=(6, 3))\n self.window = Tk.Tk()\n\n canvas = type(self.canvas)(toolfig, master=self.window)\n toolfig.subplots_adjust(top=0.9)\n _tool = SubplotTool(self.figure, toolfig)\n canvas.draw()\n canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n self.window.protocol(\"WM_DELETE_WINDOW\", self.destroy)\n\n def destroy(self, *args, **kwargs):\n self.window.destroy()\n self.window = None\n\n\nbackend_tools.ToolSaveFigure = SaveFigureTk\nbackend_tools.ToolConfigureSubplots = ConfigureSubplotsTk\nbackend_tools.ToolSetCursor = SetCursorTk\nbackend_tools.ToolRubberband = RubberbandTk\nToolbar = ToolbarTk\n\n\n@_Backend.export\nclass _BackendTk(_Backend):\n FigureManager = FigureManagerTk\n\n @classmethod\n def new_figure_manager_given_figure(cls, num, figure):\n \"\"\"\n Create a new figure manager instance for the given figure.\n \"\"\"\n _focus = windowing.FocusManager()\n window = Tk.Tk(className=\"matplotlib\")\n window.withdraw()\n\n # Put a mpl icon on the window rather than the default tk icon.\n # Tkinter doesn't allow colour icons on linux systems, but tk>=8.5 has\n # a iconphoto command which we call directly. Source:\n # http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html\n icon_fname = os.path.join(\n rcParams['datapath'], 'images', 'matplotlib.ppm')\n icon_img = Tk.PhotoImage(file=icon_fname)\n try:\n window.tk.call('wm', 'iconphoto', window._w, icon_img)\n except Exception as exc:\n # log the failure (due e.g. to Tk version), but carry on\n _log.info('Could not load matplotlib icon: %s', exc)\n\n canvas = cls.FigureCanvas(figure, master=window)\n manager = cls.FigureManager(canvas, num, window)\n if matplotlib.is_interactive():\n manager.show()\n canvas.draw_idle()\n return manager\n\n @staticmethod\n def trigger_manager_draw(manager):\n manager.show()\n\n @staticmethod\n def mainloop():\n Tk.mainloop()\n",
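The ToolTip recipe in the Tk backend above works by binding <Enter>/<Leave> events on a widget to show and hide a borderless Toplevel label. Below is a minimal standalone sketch of that same pattern against plain tkinter; the names attach_tooltip, show and hide are illustrative only and are not part of the matplotlib API.

# Hedged sketch of the enter/leave tooltip pattern used by the ToolTip
# recipe above, written against plain tkinter.
import tkinter as tk

def attach_tooltip(widget, text):
    tip = {"win": None}

    def show(_event):
        if tip["win"] is not None:
            return
        x = widget.winfo_rootx() + 27
        y = widget.winfo_rooty() + widget.winfo_height()
        win = tk.Toplevel(widget)
        win.wm_overrideredirect(True)          # no window decorations
        win.wm_geometry("+%d+%d" % (x, y))
        tk.Label(win, text=text, background="#ffffe0",
                 relief=tk.SOLID, borderwidth=1).pack(ipadx=1)
        tip["win"] = win

    def hide(_event):
        win, tip["win"] = tip["win"], None
        if win is not None:
            win.destroy()

    widget.bind("<Enter>", show)
    widget.bind("<Leave>", hide)

if __name__ == "__main__":
    root = tk.Tk()
    btn = tk.Button(root, text="Hover me")
    btn.pack(padx=20, pady=20)
    attach_tooltip(btn, "Saves the current figure")
    root.mainloop()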
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport pytest\n\nfrom scipy.special._testutils import MissingModule, check_version\nfrom scipy.special._mptestutils import (\n Arg, IntArg, mp_assert_allclose, assert_mpmath_equal)\nfrom scipy.special._precompute.gammainc_asy import (\n compute_g, compute_alpha, compute_d)\nfrom scipy.special._precompute.gammainc_data import gammainc, gammaincc\n\ntry:\n import sympy\nexcept ImportError:\n sympy = MissingModule('sympy')\n\ntry:\n import mpmath as mp\nexcept ImportError:\n mp = MissingModule('mpmath')\n\n\n_is_32bit_platform = np.intp(0).itemsize < 8\n\n\n@check_version(mp, '0.19')\ndef test_g():\n # Test data for the g_k. See DLMF 5.11.4.\n with mp.workdps(30):\n g = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288,\n -mp.mpf(139)/51840, -mp.mpf(571)/2488320,\n mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800]\n mp_assert_allclose(compute_g(7), g)\n\n\[email protected]\n@check_version(mp, '0.19')\n@check_version(sympy, '0.7')\[email protected](condition=_is_32bit_platform, reason=\"rtol only 2e-11, see gh-6938\")\ndef test_alpha():\n # Test data for the alpha_k. See DLMF 8.12.14.\n with mp.workdps(30):\n alpha = [mp.mpf(0), mp.mpf(1), mp.mpf(1)/3, mp.mpf(1)/36,\n -mp.mpf(1)/270, mp.mpf(1)/4320, mp.mpf(1)/17010,\n -mp.mpf(139)/5443200, mp.mpf(1)/204120]\n mp_assert_allclose(compute_alpha(9), alpha)\n\n\[email protected]\n@check_version(mp, '0.19')\n@check_version(sympy, '0.7')\ndef test_d():\n # Compare the d_{k, n} to the results in appendix F of [1].\n #\n # Sources\n # -------\n # [1] DiDonato and Morris, Computation of the Incomplete Gamma\n # Function Ratios and their Inverse, ACM Transactions on\n # Mathematical Software, 1986.\n\n with mp.workdps(50):\n dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')),\n (0, 12, mp.mpf('0.102618097842403080425739573227e-7')),\n (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')),\n (1, 12, mp.mpf('0.119516285997781473243076536700e-7')),\n (2, 0, mp.mpf('0.413359788359788359788359788360e-2')),\n (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')),\n (3, 0, mp.mpf('0.649434156378600823045267489712e-3')),\n (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')),\n (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')),\n (4, 12, mp.mpf('0.288658297427087836297341274604e-7')),\n (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')),\n (5, 12, mp.mpf('0.482409670378941807563762631739e-7')),\n (6, 0, mp.mpf('0.531307936463992223165748542978e-3')),\n (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')),\n (7, 0, mp.mpf('0.344367606892377671254279625109e-3')),\n (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')),\n (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')),\n (8, 12, mp.mpf('0.377358774161109793380344937299e-6')),\n (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')),\n (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))]\n d = compute_d(10, 13)\n res = []\n for k, n, std in dataset:\n res.append(d[k][n])\n std = map(lambda x: x[2], dataset)\n mp_assert_allclose(res, std)\n\n\n@check_version(mp, '0.19')\ndef test_gammainc():\n # Quick check that the gammainc in\n # special._precompute.gammainc_data agrees with mpmath's\n # gammainc.\n assert_mpmath_equal(gammainc,\n lambda a, x: mp.gammainc(a, b=x, regularized=True),\n [Arg(0, 100, inclusive_a=False), Arg(0, 100)],\n nan_ok=False, rtol=1e-17, n=50, dps=50)\n\n\[email protected]\n@check_version(mp, '0.19')\ndef test_gammaincc():\n # Check that the gammaincc in 
special._precompute.gammainc_data\n # agrees with mpmath's gammainc.\n assert_mpmath_equal(lambda a, x: gammaincc(a, x, dps=1000),\n lambda a, x: mp.gammainc(a, a=x, regularized=True),\n [Arg(20, 100), Arg(20, 100)],\n nan_ok=False, rtol=1e-17, n=50, dps=1000)\n\n # Test the fast integer path\n assert_mpmath_equal(gammaincc,\n lambda a, x: mp.gammainc(a, a=x, regularized=True),\n [IntArg(1, 100), Arg(0, 100)],\n nan_ok=False, rtol=1e-17, n=50, dps=50)\n",
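The tests above cross-check the precomputed incomplete gamma data against mpmath's regularized incomplete gamma. A hedged sketch of the same comparison using only the public scipy.special.gammainc follows; the particular a, x values and tolerance are illustrative, and mpmath is assumed to be installed.

# Hedged sketch: compare scipy.special.gammainc (regularized lower
# incomplete gamma P(a, x)) against mpmath at high precision, using the
# same mp.gammainc convention as the tests above.
import numpy as np
import mpmath as mp
from scipy.special import gammainc

a, x = 3.5, 2.0
scipy_val = gammainc(a, x)
with mp.workdps(50):
    # integral from 0 to x, regularized -> lower incomplete gamma ratio
    mp_val = float(mp.gammainc(a, b=x, regularized=True))

assert np.isclose(scipy_val, mp_val, rtol=1e-13)
print(scipy_val, mp_val)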
"\"\"\"\nTests specific to the patches module.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_array_equal\nimport pytest\n\nfrom matplotlib.patches import Polygon\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.collections as mcollections\nfrom matplotlib import path as mpath\nfrom matplotlib import transforms as mtransforms\nimport matplotlib.style as mstyle\n\nimport sys\non_win = (sys.platform == 'win32')\n\n\ndef test_Polygon_close():\n #: Github issue #1018 identified a bug in the Polygon handling\n #: of the closed attribute; the path was not getting closed\n #: when set_xy was used to set the vertices.\n\n # open set of vertices:\n xy = [[0, 0], [0, 1], [1, 1]]\n # closed set:\n xyclosed = xy + [[0, 0]]\n\n # start with open path and close it:\n p = Polygon(xy, closed=True)\n assert_array_equal(p.get_xy(), xyclosed)\n p.set_xy(xy)\n assert_array_equal(p.get_xy(), xyclosed)\n\n # start with closed path and open it:\n p = Polygon(xyclosed, closed=False)\n assert_array_equal(p.get_xy(), xy)\n p.set_xy(xyclosed)\n assert_array_equal(p.get_xy(), xy)\n\n # start with open path and leave it open:\n p = Polygon(xy, closed=False)\n assert_array_equal(p.get_xy(), xy)\n p.set_xy(xy)\n assert_array_equal(p.get_xy(), xy)\n\n # start with closed path and leave it closed:\n p = Polygon(xyclosed, closed=True)\n assert_array_equal(p.get_xy(), xyclosed)\n p.set_xy(xyclosed)\n assert_array_equal(p.get_xy(), xyclosed)\n\n\ndef test_rotate_rect():\n loc = np.asarray([1.0, 2.0])\n width = 2\n height = 3\n angle = 30.0\n\n # A rotated rectangle\n rect1 = Rectangle(loc, width, height, angle=angle)\n\n # A non-rotated rectangle\n rect2 = Rectangle(loc, width, height)\n\n # Set up an explicit rotation matrix (in radians)\n angle_rad = np.pi * angle / 180.0\n rotation_matrix = np.array([[np.cos(angle_rad), -np.sin(angle_rad)],\n [np.sin(angle_rad), np.cos(angle_rad)]])\n\n # Translate to origin, rotate each vertex, and then translate back\n new_verts = np.inner(rotation_matrix, rect2.get_verts() - loc).T + loc\n\n # They should be the same\n assert_almost_equal(rect1.get_verts(), new_verts)\n\n\ndef test_negative_rect():\n # These two rectangles have the same vertices, but starting from a\n # different point. 
(We also drop the last vertex, which is a duplicate.)\n pos_vertices = Rectangle((-3, -2), 3, 2).get_verts()[:-1]\n neg_vertices = Rectangle((0, 0), -3, -2).get_verts()[:-1]\n assert_array_equal(np.roll(neg_vertices, 2, 0), pos_vertices)\n\n\n@image_comparison(baseline_images=['clip_to_bbox'])\ndef test_clip_to_bbox():\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n ax.set_xlim([-18, 20])\n ax.set_ylim([-150, 100])\n\n path = mpath.Path.unit_regular_star(8).deepcopy()\n path.vertices *= [10, 100]\n path.vertices -= [5, 25]\n\n path2 = mpath.Path.unit_circle().deepcopy()\n path2.vertices *= [10, 100]\n path2.vertices += [10, -25]\n\n combined = mpath.Path.make_compound_path(path, path2)\n\n patch = mpatches.PathPatch(\n combined, alpha=0.5, facecolor='coral', edgecolor='none')\n ax.add_patch(patch)\n\n bbox = mtransforms.Bbox([[-12, -77.5], [50, -110]])\n result_path = combined.clip_to_bbox(bbox)\n result_patch = mpatches.PathPatch(\n result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')\n\n ax.add_patch(result_patch)\n\n\n@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)\ndef test_patch_alpha_coloring():\n \"\"\"\n Test checks that the patch and collection are rendered with the specified\n alpha values in their facecolor and edgecolor.\n \"\"\"\n star = mpath.Path.unit_regular_star(6)\n circle = mpath.Path.unit_circle()\n # concatenate the star with an internal cutout of the circle\n verts = np.concatenate([circle.vertices, star.vertices[::-1]])\n codes = np.concatenate([circle.codes, star.codes])\n cut_star1 = mpath.Path(verts, codes)\n cut_star2 = mpath.Path(verts + 1, codes)\n\n ax = plt.axes()\n patch = mpatches.PathPatch(cut_star1,\n linewidth=5, linestyle='dashdot',\n facecolor=(1, 0, 0, 0.5),\n edgecolor=(0, 0, 1, 0.75))\n ax.add_patch(patch)\n\n col = mcollections.PathCollection([cut_star2],\n linewidth=5, linestyles='dashdot',\n facecolor=(1, 0, 0, 0.5),\n edgecolor=(0, 0, 1, 0.75))\n ax.add_collection(col)\n\n ax.set_xlim([-1, 2])\n ax.set_ylim([-1, 2])\n\n\n@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)\ndef test_patch_alpha_override():\n #: Test checks that specifying an alpha attribute for a patch or\n #: collection will override any alpha component of the facecolor\n #: or edgecolor.\n star = mpath.Path.unit_regular_star(6)\n circle = mpath.Path.unit_circle()\n # concatenate the star with an internal cutout of the circle\n verts = np.concatenate([circle.vertices, star.vertices[::-1]])\n codes = np.concatenate([circle.codes, star.codes])\n cut_star1 = mpath.Path(verts, codes)\n cut_star2 = mpath.Path(verts + 1, codes)\n\n ax = plt.axes()\n patch = mpatches.PathPatch(cut_star1,\n linewidth=5, linestyle='dashdot',\n alpha=0.25,\n facecolor=(1, 0, 0, 0.5),\n edgecolor=(0, 0, 1, 0.75))\n ax.add_patch(patch)\n\n col = mcollections.PathCollection([cut_star2],\n linewidth=5, linestyles='dashdot',\n alpha=0.25,\n facecolor=(1, 0, 0, 0.5),\n edgecolor=(0, 0, 1, 0.75))\n ax.add_collection(col)\n\n ax.set_xlim([-1, 2])\n ax.set_ylim([-1, 2])\n\n\[email protected]('default')\ndef test_patch_color_none():\n # Make sure the alpha kwarg does not override 'none' facecolor.\n # Addresses issue #7478.\n c = plt.Circle((0, 0), 1, facecolor='none', alpha=1)\n assert c.get_facecolor()[0] == 0\n\n\n@image_comparison(baseline_images=['patch_custom_linestyle'],\n remove_text=True)\ndef test_patch_custom_linestyle():\n #: A test to check that patches and collections accept custom dash\n #: patterns as linestyle and that they 
display correctly.\n star = mpath.Path.unit_regular_star(6)\n circle = mpath.Path.unit_circle()\n # concatenate the star with an internal cutout of the circle\n verts = np.concatenate([circle.vertices, star.vertices[::-1]])\n codes = np.concatenate([circle.codes, star.codes])\n cut_star1 = mpath.Path(verts, codes)\n cut_star2 = mpath.Path(verts + 1, codes)\n\n ax = plt.axes()\n patch = mpatches.PathPatch(cut_star1,\n linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),\n facecolor=(1, 0, 0),\n edgecolor=(0, 0, 1))\n ax.add_patch(patch)\n\n col = mcollections.PathCollection([cut_star2],\n linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],\n facecolor=(1, 0, 0),\n edgecolor=(0, 0, 1))\n ax.add_collection(col)\n\n ax.set_xlim([-1, 2])\n ax.set_ylim([-1, 2])\n\n\ndef test_patch_linestyle_accents():\n #: Test if linestyle can also be specified with short menoics\n #: like \"--\"\n #: c.f. Gihub issue #2136\n star = mpath.Path.unit_regular_star(6)\n circle = mpath.Path.unit_circle()\n # concatenate the star with an internal cutout of the circle\n verts = np.concatenate([circle.vertices, star.vertices[::-1]])\n codes = np.concatenate([circle.codes, star.codes])\n\n linestyles = [\"-\", \"--\", \"-.\", \":\",\n \"solid\", \"dashed\", \"dashdot\", \"dotted\"]\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n for i, ls in enumerate(linestyles):\n star = mpath.Path(verts + i, codes)\n patch = mpatches.PathPatch(star,\n linewidth=3, linestyle=ls,\n facecolor=(1, 0, 0),\n edgecolor=(0, 0, 1))\n ax.add_patch(patch)\n\n ax.set_xlim([-1, i + 1])\n ax.set_ylim([-1, i + 1])\n fig.canvas.draw()\n assert True\n\n\ndef test_wedge_movement():\n param_dict = {'center': ((0, 0), (1, 1), 'set_center'),\n 'r': (5, 8, 'set_radius'),\n 'width': (2, 3, 'set_width'),\n 'theta1': (0, 30, 'set_theta1'),\n 'theta2': (45, 50, 'set_theta2')}\n\n init_args = dict((k, v[0]) for (k, v) in six.iteritems(param_dict))\n\n w = mpatches.Wedge(**init_args)\n for attr, (old_v, new_v, func) in six.iteritems(param_dict):\n assert getattr(w, attr) == old_v\n getattr(w, func)(new_v)\n assert getattr(w, attr) == new_v\n\n\n# png needs tol>=0.06, pdf tol>=1.617\n@image_comparison(baseline_images=['wedge_range'],\n remove_text=True, tol=1.65 if on_win else 0)\ndef test_wedge_range():\n ax = plt.axes()\n\n t1 = 2.313869244286224\n\n args = [[52.31386924, 232.31386924],\n [52.313869244286224, 232.31386924428622],\n [t1, t1 + 180.0],\n [0, 360],\n [90, 90 + 360],\n [-180, 180],\n [0, 380],\n [45, 46],\n [46, 45]]\n\n for i, (theta1, theta2) in enumerate(args):\n x = i % 3\n y = i // 3\n\n wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,\n facecolor='none', edgecolor='k', lw=3)\n\n ax.add_artist(wedge)\n\n ax.set_xlim([-2, 8])\n ax.set_ylim([-2, 9])\n\n\ndef test_patch_str():\n \"\"\"\n Check that patches have nice and working `str` representation.\n\n Note that the logic is that `__str__` is defined such that:\n str(eval(str(p))) == str(p)\n \"\"\"\n p = mpatches.Circle(xy=(1, 2), radius=3)\n assert str(p) == 'Circle(xy=(1, 2), radius=3)'\n\n p = mpatches.Ellipse(xy=(1, 2), width=3, height=4, angle=5)\n assert str(p) == 'Ellipse(xy=(1, 2), width=3, height=4, angle=5)'\n\n p = mpatches.Rectangle(xy=(1, 2), width=3, height=4, angle=5)\n assert str(p) == 'Rectangle(xy=(1, 2), width=3, height=4, angle=5)'\n\n p = mpatches.Wedge(center=(1, 2), r=3, theta1=4, theta2=5, width=6)\n assert str(p) == 'Wedge(center=(1, 2), r=3, theta1=4, theta2=5, width=6)'\n\n p = mpatches.Arc(xy=(1, 2), width=3, height=4, angle=5, theta1=6, 
theta2=7)\n expected = 'Arc(xy=(1, 2), width=3, height=4, angle=5, theta1=6, theta2=7)'\n assert str(p) == expected\n\n\n@image_comparison(baseline_images=['multi_color_hatch'],\n remove_text=True, style='default')\ndef test_multi_color_hatch():\n fig, ax = plt.subplots()\n\n rects = ax.bar(range(5), range(1, 6))\n for i, rect in enumerate(rects):\n rect.set_facecolor('none')\n rect.set_edgecolor('C{}'.format(i))\n rect.set_hatch('/')\n\n for i in range(5):\n with mstyle.context({'hatch.color': 'C{}'.format(i)}):\n r = Rectangle((i - .8 / 2, 5), .8, 1, hatch='//', fc='none')\n ax.add_patch(r)\n\n\n@image_comparison(baseline_images=['units_rectangle'], extensions=['png'])\ndef test_units_rectangle():\n import matplotlib.testing.jpl_units as U\n U.register()\n\n p = mpatches.Rectangle((5*U.km, 6*U.km), 1*U.km, 2*U.km)\n\n fig, ax = plt.subplots()\n ax.add_patch(p)\n ax.set_xlim([4*U.km, 7*U.km])\n ax.set_ylim([5*U.km, 9*U.km])\n\n\n@image_comparison(baseline_images=['connection_patch'], extensions=['png'],\n style='mpl20', remove_text=True)\ndef test_connection_patch():\n fig, (ax1, ax2) = plt.subplots(1, 2)\n\n con = mpatches.ConnectionPatch(xyA=(0.1, 0.1), xyB=(0.9, 0.9),\n coordsA='data', coordsB='data',\n axesA=ax2, axesB=ax1,\n arrowstyle=\"->\")\n ax2.add_artist(con)\n\n\ndef test_datetime_rectangle():\n # Check that creating a rectangle with timedeltas doesn't fail\n from datetime import datetime, timedelta\n\n start = datetime(2017, 1, 1, 0, 0, 0)\n delta = timedelta(seconds=16)\n patch = mpatches.Rectangle((start, 0), delta, 1)\n\n fig, ax = plt.subplots()\n ax.add_patch(patch)\n\n\ndef test_datetime_datetime_fails():\n from datetime import datetime\n\n start = datetime(2017, 1, 1, 0, 0, 0)\n dt_delta = datetime(1970, 1, 5) # Will be 5 days if units are done wrong\n\n with pytest.raises(TypeError):\n mpatches.Rectangle((start, 0), dt_delta, 1)\n\n with pytest.raises(TypeError):\n mpatches.Rectangle((0, start), 1, dt_delta)\n\n\ndef test_contains_point():\n ell = mpatches.Ellipse((0.5, 0.5), 0.5, 1.0, 0)\n points = [(0.0, 0.5), (0.2, 0.5), (0.25, 0.5), (0.5, 0.5)]\n path = ell.get_path()\n transform = ell.get_transform()\n radius = ell._process_radius(None)\n expected = np.array([path.contains_point(point,\n transform,\n radius) for point in points])\n result = np.array([ell.contains_point(point) for point in points])\n assert np.all(result == expected)\n\n\ndef test_contains_points():\n ell = mpatches.Ellipse((0.5, 0.5), 0.5, 1.0, 0)\n points = [(0.0, 0.5), (0.2, 0.5), (0.25, 0.5), (0.5, 0.5)]\n path = ell.get_path()\n transform = ell.get_transform()\n radius = ell._process_radius(None)\n expected = path.contains_points(points, transform, radius)\n result = ell.contains_points(points)\n assert np.all(result == expected)\n",
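test_rotate_rect above relies on the fact that Rectangle(..., angle=a) rotates the patch about its anchor point, so its vertices equal the unrotated vertices transformed by an explicit 2x2 rotation matrix. A hedged standalone sketch of that identity, using only the calls that appear in the test:

# Hedged sketch of the rotation identity checked by test_rotate_rect.
import numpy as np
from matplotlib.patches import Rectangle

loc = np.array([1.0, 2.0])
angle = 30.0
theta = np.deg2rad(angle)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])

rotated = Rectangle(loc, 2, 3, angle=angle).get_verts()
# translate to the anchor, rotate each vertex, translate back
manual = (Rectangle(loc, 2, 3).get_verts() - loc) @ R.T + loc

assert np.allclose(rotated, manual)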
"\"\"\"\nThis module contains functions to handle markers. Used by both the\nmarker functionality of `~matplotlib.axes.Axes.plot` and\n`~matplotlib.axes.Axes.scatter`.\n\nAll possible markers are defined here:\n\n============================== ===============================================\nmarker description\n============================== ===============================================\n`\".\"` point\n`\",\"` pixel\n`\"o\"` circle\n`\"v\"` triangle_down\n`\"^\"` triangle_up\n`\"<\"` triangle_left\n`\">\"` triangle_right\n`\"1\"` tri_down\n`\"2\"` tri_up\n`\"3\"` tri_left\n`\"4\"` tri_right\n`\"8\"` octagon\n`\"s\"` square\n`\"p\"` pentagon\n`\"P\"` plus (filled)\n`\"*\"` star\n`\"h\"` hexagon1\n`\"H\"` hexagon2\n`\"+\"` plus\n`\"x\"` x\n`\"X\"` x (filled)\n`\"D\"` diamond\n`\"d\"` thin_diamond\n`\"|\"` vline\n`\"_\"` hline\nTICKLEFT tickleft\nTICKRIGHT tickright\nTICKUP tickup\nTICKDOWN tickdown\nCARETLEFT caretleft (centered at tip)\nCARETRIGHT caretright (centered at tip)\nCARETUP caretup (centered at tip)\nCARETDOWN caretdown (centered at tip)\nCARETLEFTBASE caretleft (centered at base)\nCARETRIGHTBASE caretright (centered at base)\nCARETUPBASE caretup (centered at base)\n`\"None\"`, `\" \"` or `\"\"` nothing\n``'$...$'`` render the string using mathtext.\n`verts` a list of (x, y) pairs used for Path vertices.\n The center of the marker is located at (0,0) and\n the size is normalized.\npath a `~matplotlib.path.Path` instance.\n(`numsides`, `style`, `angle`) The marker can also be a tuple (`numsides`,\n `style`, `angle`), which will create a custom,\n regular symbol.\n\n `numsides`:\n the number of sides\n\n `style`:\n the style of the regular symbol:\n\n 0\n a regular polygon\n 1\n a star-like symbol\n 2\n an asterisk\n 3\n a circle (`numsides` and `angle` is\n ignored)\n\n `angle`:\n the angle of rotation of the symbol\n============================== ===============================================\n\nFor backward compatibility, the form (`verts`, 0) is also accepted,\nbut it is equivalent to just `verts` for giving a raw set of vertices\nthat define the shape.\n\n`None` is the default which means 'nothing', however this table is\nreferred to from other docs for the valid inputs from marker inputs and in\nthose cases `None` still means 'default'.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import xrange\n\nfrom collections import Sized\n\nimport numpy as np\n\nfrom . 
import rcParams\nfrom .cbook import is_math_text, is_numlike\nfrom .path import Path\nfrom .transforms import IdentityTransform, Affine2D\n\n# special-purpose marker identifiers:\n(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,\n CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,\n CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE) = xrange(12)\n\n_empty_path = Path(np.empty((0, 2)))\n\n\nclass MarkerStyle(object):\n\n markers = {\n '.': 'point',\n ',': 'pixel',\n 'o': 'circle',\n 'v': 'triangle_down',\n '^': 'triangle_up',\n '<': 'triangle_left',\n '>': 'triangle_right',\n '1': 'tri_down',\n '2': 'tri_up',\n '3': 'tri_left',\n '4': 'tri_right',\n '8': 'octagon',\n 's': 'square',\n 'p': 'pentagon',\n '*': 'star',\n 'h': 'hexagon1',\n 'H': 'hexagon2',\n '+': 'plus',\n 'x': 'x',\n 'D': 'diamond',\n 'd': 'thin_diamond',\n '|': 'vline',\n '_': 'hline',\n 'P': 'plus_filled',\n 'X': 'x_filled',\n TICKLEFT: 'tickleft',\n TICKRIGHT: 'tickright',\n TICKUP: 'tickup',\n TICKDOWN: 'tickdown',\n CARETLEFT: 'caretleft',\n CARETRIGHT: 'caretright',\n CARETUP: 'caretup',\n CARETDOWN: 'caretdown',\n CARETLEFTBASE: 'caretleftbase',\n CARETRIGHTBASE: 'caretrightbase',\n CARETUPBASE: 'caretupbase',\n CARETDOWNBASE: 'caretdownbase',\n \"None\": 'nothing',\n None: 'nothing',\n ' ': 'nothing',\n '': 'nothing'\n }\n\n # Just used for informational purposes. is_filled()\n # is calculated in the _set_* functions.\n filled_markers = (\n 'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',\n 'P', 'X')\n\n fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')\n _half_fillstyles = ('left', 'right', 'bottom', 'top')\n\n # TODO: Is this ever used as a non-constant?\n _point_size_reduction = 0.5\n\n def __init__(self, marker=None, fillstyle=None):\n \"\"\"\n MarkerStyle\n\n Attributes\n ----------\n markers : list of known marks\n\n fillstyles : list of known fillstyles\n\n filled_markers : list of known filled markers.\n\n Parameters\n ----------\n marker : string or array_like, optional, default: None\n See the descriptions of possible markers in the module docstring.\n\n fillstyle : string, optional, default: 'full'\n 'full', 'left\", 'right', 'bottom', 'top', 'none'\n \"\"\"\n self._marker_function = None\n self.set_fillstyle(fillstyle)\n self.set_marker(marker)\n\n def __getstate__(self):\n d = self.__dict__.copy()\n d.pop('_marker_function')\n return d\n\n def __setstate__(self, statedict):\n self.__dict__ = statedict\n self.set_marker(self._marker)\n\n def _recache(self):\n if self._marker_function is None:\n return\n self._path = _empty_path\n self._transform = IdentityTransform()\n self._alt_path = None\n self._alt_transform = None\n self._snap_threshold = None\n self._joinstyle = 'round'\n self._capstyle = 'butt'\n self._filled = True\n self._marker_function()\n\n if six.PY3:\n def __bool__(self):\n return bool(len(self._path.vertices))\n else:\n def __nonzero__(self):\n return bool(len(self._path.vertices))\n\n def is_filled(self):\n return self._filled\n\n def get_fillstyle(self):\n return self._fillstyle\n\n def set_fillstyle(self, fillstyle):\n \"\"\"\n Sets fillstyle\n\n Parameters\n ----------\n fillstyle : string amongst known fillstyles\n \"\"\"\n if fillstyle is None:\n fillstyle = rcParams['markers.fillstyle']\n if fillstyle not in self.fillstyles:\n raise ValueError(\"Unrecognized fillstyle %s\"\n % ' '.join(self.fillstyles))\n self._fillstyle = fillstyle\n self._recache()\n\n def get_joinstyle(self):\n return self._joinstyle\n\n def get_capstyle(self):\n return self._capstyle\n\n def 
get_marker(self):\n return self._marker\n\n def set_marker(self, marker):\n if (isinstance(marker, np.ndarray) and marker.ndim == 2 and\n marker.shape[1] == 2):\n self._marker_function = self._set_vertices\n elif (isinstance(marker, Sized) and len(marker) in (2, 3) and\n marker[1] in (0, 1, 2, 3)):\n self._marker_function = self._set_tuple_marker\n elif (not isinstance(marker, (np.ndarray, list)) and\n marker in self.markers):\n self._marker_function = getattr(\n self, '_set_' + self.markers[marker])\n elif isinstance(marker, six.string_types) and is_math_text(marker):\n self._marker_function = self._set_mathtext_path\n elif isinstance(marker, Path):\n self._marker_function = self._set_path_marker\n else:\n try:\n Path(marker)\n self._marker_function = self._set_vertices\n except ValueError:\n raise ValueError('Unrecognized marker style'\n ' {0}'.format(marker))\n\n self._marker = marker\n self._recache()\n\n def get_path(self):\n return self._path\n\n def get_transform(self):\n return self._transform.frozen()\n\n def get_alt_path(self):\n return self._alt_path\n\n def get_alt_transform(self):\n return self._alt_transform.frozen()\n\n def get_snap_threshold(self):\n return self._snap_threshold\n\n def _set_nothing(self):\n self._filled = False\n\n def _set_custom_marker(self, path):\n verts = path.vertices\n rescale = max(np.max(np.abs(verts[:, 0])),\n np.max(np.abs(verts[:, 1])))\n self._transform = Affine2D().scale(0.5 / rescale)\n self._path = path\n\n def _set_path_marker(self):\n self._set_custom_marker(self._marker)\n\n def _set_vertices(self):\n verts = self._marker\n marker = Path(verts)\n self._set_custom_marker(marker)\n\n def _set_tuple_marker(self):\n marker = self._marker\n if is_numlike(marker[0]):\n if len(marker) == 2:\n numsides, rotation = marker[0], 0.0\n elif len(marker) == 3:\n numsides, rotation = marker[0], marker[2]\n symstyle = marker[1]\n if symstyle == 0:\n self._path = Path.unit_regular_polygon(numsides)\n self._joinstyle = 'miter'\n elif symstyle == 1:\n self._path = Path.unit_regular_star(numsides)\n self._joinstyle = 'bevel'\n elif symstyle == 2:\n self._path = Path.unit_regular_asterisk(numsides)\n self._filled = False\n self._joinstyle = 'bevel'\n elif symstyle == 3:\n self._path = Path.unit_circle()\n self._transform = Affine2D().scale(0.5).rotate_deg(rotation)\n else:\n verts = np.asarray(marker[0])\n path = Path(verts)\n self._set_custom_marker(path)\n\n def _set_mathtext_path(self):\n \"\"\"\n Draws mathtext markers '$...$' using TextPath object.\n\n Submitted by tcb\n \"\"\"\n from matplotlib.text import TextPath\n from matplotlib.font_manager import FontProperties\n\n # again, the properties could be initialised just once outside\n # this function\n # Font size is irrelevant here, it will be rescaled based on\n # the drawn size later\n props = FontProperties(size=1.0)\n text = TextPath(xy=(0, 0), s=self.get_marker(), fontproperties=props,\n usetex=rcParams['text.usetex'])\n if len(text.vertices) == 0:\n return\n\n xmin, ymin = text.vertices.min(axis=0)\n xmax, ymax = text.vertices.max(axis=0)\n width = xmax - xmin\n height = ymax - ymin\n max_dim = max(width, height)\n self._transform = Affine2D() \\\n .translate(-xmin + 0.5 * -width, -ymin + 0.5 * -height) \\\n .scale(1.0 / max_dim)\n self._path = text\n self._snap = False\n\n def _half_fill(self):\n fs = self.get_fillstyle()\n result = fs in self._half_fillstyles\n return result\n\n def _set_circle(self, reduction=1.0):\n self._transform = Affine2D().scale(0.5 * reduction)\n self._snap_threshold = 
np.inf\n fs = self.get_fillstyle()\n if not self._half_fill():\n self._path = Path.unit_circle()\n else:\n # build a right-half circle\n if fs == 'bottom':\n rotate = 270.\n elif fs == 'top':\n rotate = 90.\n elif fs == 'left':\n rotate = 180.\n else:\n rotate = 0.\n\n self._path = self._alt_path = Path.unit_circle_righthalf()\n self._transform.rotate_deg(rotate)\n self._alt_transform = self._transform.frozen().rotate_deg(180.)\n\n def _set_pixel(self):\n self._path = Path.unit_rectangle()\n # Ideally, you'd want -0.5, -0.5 here, but then the snapping\n # algorithm in the Agg backend will round this to a 2x2\n # rectangle from (-1, -1) to (1, 1). By offsetting it\n # slightly, we can force it to be (0, 0) to (1, 1), which both\n # makes it only be a single pixel and places it correctly\n # aligned to 1-width stroking (i.e. the ticks). This hack is\n # the best of a number of bad alternatives, mainly because the\n # backends are not aware of what marker is actually being used\n # beyond just its path data.\n self._transform = Affine2D().translate(-0.49999, -0.49999)\n self._snap_threshold = None\n\n def _set_point(self):\n self._set_circle(reduction=self._point_size_reduction)\n\n _triangle_path = Path(\n [[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]],\n [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])\n # Going down halfway looks to small. Golden ratio is too far.\n _triangle_path_u = Path(\n [[0.0, 1.0], [-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [0.0, 1.0]],\n [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])\n _triangle_path_d = Path(\n [[-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [1.0, -1.0], [-1.0, -1.0],\n [-3 / 5., -1 / 5.]],\n [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])\n _triangle_path_l = Path(\n [[0.0, 1.0], [0.0, -1.0], [-1.0, -1.0], [0.0, 1.0]],\n [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])\n _triangle_path_r = Path(\n [[0.0, 1.0], [0.0, -1.0], [1.0, -1.0], [0.0, 1.0]],\n [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])\n\n def _set_triangle(self, rot, skip):\n self._transform = Affine2D().scale(0.5, 0.5).rotate_deg(rot)\n self._snap_threshold = 5.0\n fs = self.get_fillstyle()\n\n if not self._half_fill():\n self._path = self._triangle_path\n else:\n mpaths = [self._triangle_path_u,\n self._triangle_path_l,\n self._triangle_path_d,\n self._triangle_path_r]\n\n if fs == 'top':\n self._path = mpaths[(0 + skip) % 4]\n self._alt_path = mpaths[(2 + skip) % 4]\n elif fs == 'bottom':\n self._path = mpaths[(2 + skip) % 4]\n self._alt_path = mpaths[(0 + skip) % 4]\n elif fs == 'left':\n self._path = mpaths[(1 + skip) % 4]\n self._alt_path = mpaths[(3 + skip) % 4]\n else:\n self._path = mpaths[(3 + skip) % 4]\n self._alt_path = mpaths[(1 + skip) % 4]\n\n self._alt_transform = self._transform\n\n self._joinstyle = 'miter'\n\n def _set_triangle_up(self):\n return self._set_triangle(0.0, 0)\n\n def _set_triangle_down(self):\n return self._set_triangle(180.0, 2)\n\n def _set_triangle_left(self):\n return self._set_triangle(90.0, 3)\n\n def _set_triangle_right(self):\n return self._set_triangle(270.0, 1)\n\n def _set_square(self):\n self._transform = Affine2D().translate(-0.5, -0.5)\n self._snap_threshold = 2.0\n fs = self.get_fillstyle()\n if not self._half_fill():\n self._path = Path.unit_rectangle()\n else:\n # build a bottom filled square out of two rectangles, one\n # filled. 
Use the rotation to support left, right, bottom\n # or top\n if fs == 'bottom':\n rotate = 0.\n elif fs == 'top':\n rotate = 180.\n elif fs == 'left':\n rotate = 270.\n else:\n rotate = 90.\n\n self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],\n [0.0, 0.5], [0.0, 0.0]])\n self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],\n [0.0, 1.0], [0.0, 0.5]])\n self._transform.rotate_deg(rotate)\n self._alt_transform = self._transform\n\n self._joinstyle = 'miter'\n\n def _set_diamond(self):\n self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)\n self._snap_threshold = 5.0\n fs = self.get_fillstyle()\n if not self._half_fill():\n self._path = Path.unit_rectangle()\n else:\n self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])\n self._alt_path = Path([[0.0, 0.0], [0.0, 1.0],\n [1.0, 1.0], [0.0, 0.0]])\n\n if fs == 'bottom':\n rotate = 270.\n elif fs == 'top':\n rotate = 90.\n elif fs == 'left':\n rotate = 180.\n else:\n rotate = 0.\n\n self._transform.rotate_deg(rotate)\n self._alt_transform = self._transform\n\n self._joinstyle = 'miter'\n\n def _set_thin_diamond(self):\n self._set_diamond()\n self._transform.scale(0.6, 1.0)\n\n def _set_pentagon(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 5.0\n\n polypath = Path.unit_regular_polygon(5)\n fs = self.get_fillstyle()\n\n if not self._half_fill():\n self._path = polypath\n else:\n verts = polypath.vertices\n\n y = (1 + np.sqrt(5)) / 4.\n top = Path([verts[0], verts[1], verts[4], verts[0]])\n bottom = Path([verts[1], verts[2], verts[3], verts[4], verts[1]])\n left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])\n right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])\n\n if fs == 'top':\n mpath, mpath_alt = top, bottom\n elif fs == 'bottom':\n mpath, mpath_alt = bottom, top\n elif fs == 'left':\n mpath, mpath_alt = left, right\n else:\n mpath, mpath_alt = right, left\n self._path = mpath\n self._alt_path = mpath_alt\n self._alt_transform = self._transform\n\n self._joinstyle = 'miter'\n\n def _set_star(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 5.0\n\n fs = self.get_fillstyle()\n polypath = Path.unit_regular_star(5, innerCircle=0.381966)\n\n if not self._half_fill():\n self._path = polypath\n else:\n verts = polypath.vertices\n\n top = Path(np.vstack((verts[0:4, :], verts[7:10, :], verts[0])))\n bottom = Path(np.vstack((verts[3:8, :], verts[3])))\n left = Path(np.vstack((verts[0:6, :], verts[0])))\n right = Path(np.vstack((verts[0], verts[5:10, :], verts[0])))\n\n if fs == 'top':\n mpath, mpath_alt = top, bottom\n elif fs == 'bottom':\n mpath, mpath_alt = bottom, top\n elif fs == 'left':\n mpath, mpath_alt = left, right\n else:\n mpath, mpath_alt = right, left\n self._path = mpath\n self._alt_path = mpath_alt\n self._alt_transform = self._transform\n\n self._joinstyle = 'bevel'\n\n def _set_hexagon1(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = None\n\n fs = self.get_fillstyle()\n polypath = Path.unit_regular_polygon(6)\n\n if not self._half_fill():\n self._path = polypath\n else:\n verts = polypath.vertices\n\n # not drawing inside lines\n x = np.abs(np.cos(5 * np.pi / 6.))\n top = Path(np.vstack(([-x, 0], verts[(1, 0, 5), :], [x, 0])))\n bottom = Path(np.vstack(([-x, 0], verts[2:5, :], [x, 0])))\n left = Path(verts[(0, 1, 2, 3), :])\n right = Path(verts[(0, 5, 4, 3), :])\n\n if fs == 'top':\n mpath, mpath_alt = top, bottom\n elif fs == 'bottom':\n mpath, mpath_alt = bottom, top\n elif fs == 'left':\n 
mpath, mpath_alt = left, right\n else:\n mpath, mpath_alt = right, left\n\n self._path = mpath\n self._alt_path = mpath_alt\n self._alt_transform = self._transform\n\n self._joinstyle = 'miter'\n\n def _set_hexagon2(self):\n self._transform = Affine2D().scale(0.5).rotate_deg(30)\n self._snap_threshold = None\n\n fs = self.get_fillstyle()\n polypath = Path.unit_regular_polygon(6)\n\n if not self._half_fill():\n self._path = polypath\n else:\n verts = polypath.vertices\n\n # not drawing inside lines\n x, y = np.sqrt(3) / 4, 3 / 4.\n top = Path(verts[(1, 0, 5, 4, 1), :])\n bottom = Path(verts[(1, 2, 3, 4), :])\n left = Path(np.vstack(([x, y], verts[(0, 1, 2), :],\n [-x, -y], [x, y])))\n right = Path(np.vstack(([x, y], verts[(5, 4, 3), :], [-x, -y])))\n\n if fs == 'top':\n mpath, mpath_alt = top, bottom\n elif fs == 'bottom':\n mpath, mpath_alt = bottom, top\n elif fs == 'left':\n mpath, mpath_alt = left, right\n else:\n mpath, mpath_alt = right, left\n\n self._path = mpath\n self._alt_path = mpath_alt\n self._alt_transform = self._transform\n\n self._joinstyle = 'miter'\n\n def _set_octagon(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 5.0\n\n fs = self.get_fillstyle()\n polypath = Path.unit_regular_polygon(8)\n\n if not self._half_fill():\n self._transform.rotate_deg(22.5)\n self._path = polypath\n else:\n x = np.sqrt(2.) / 4.\n half = Path([[0, -1], [0, 1], [-x, 1], [-1, x],\n [-1, -x], [-x, -1], [0, -1]])\n\n if fs == 'bottom':\n rotate = 90.\n elif fs == 'top':\n rotate = 270.\n elif fs == 'right':\n rotate = 180.\n else:\n rotate = 0.\n\n self._transform.rotate_deg(rotate)\n self._path = self._alt_path = half\n self._alt_transform = self._transform.frozen().rotate_deg(180.0)\n\n self._joinstyle = 'miter'\n\n _line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])\n\n def _set_vline(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 1.0\n self._filled = False\n self._path = self._line_marker_path\n\n def _set_hline(self):\n self._set_vline()\n self._transform = self._transform.rotate_deg(90)\n\n _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])\n\n def _set_tickleft(self):\n self._transform = Affine2D().scale(-1.0, 1.0)\n self._snap_threshold = 1.0\n self._filled = False\n self._path = self._tickhoriz_path\n\n def _set_tickright(self):\n self._transform = Affine2D().scale(1.0, 1.0)\n self._snap_threshold = 1.0\n self._filled = False\n self._path = self._tickhoriz_path\n\n _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])\n\n def _set_tickup(self):\n self._transform = Affine2D().scale(1.0, 1.0)\n self._snap_threshold = 1.0\n self._filled = False\n self._path = self._tickvert_path\n\n def _set_tickdown(self):\n self._transform = Affine2D().scale(1.0, -1.0)\n self._snap_threshold = 1.0\n self._filled = False\n self._path = self._tickvert_path\n\n _tri_path = Path([[0.0, 0.0], [0.0, -1.0],\n [0.0, 0.0], [0.8, 0.5],\n [0.0, 0.0], [-0.8, 0.5]],\n [Path.MOVETO, Path.LINETO,\n Path.MOVETO, Path.LINETO,\n Path.MOVETO, Path.LINETO])\n\n def _set_tri_down(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 5.0\n self._filled = False\n self._path = self._tri_path\n\n def _set_tri_up(self):\n self._set_tri_down()\n self._transform = self._transform.rotate_deg(180)\n\n def _set_tri_left(self):\n self._set_tri_down()\n self._transform = self._transform.rotate_deg(270)\n\n def _set_tri_right(self):\n self._set_tri_down()\n self._transform = self._transform.rotate_deg(90)\n\n _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])\n\n 
def _set_caretdown(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 3.0\n self._filled = False\n self._path = self._caret_path\n self._joinstyle = 'miter'\n\n def _set_caretup(self):\n self._set_caretdown()\n self._transform = self._transform.rotate_deg(180)\n\n def _set_caretleft(self):\n self._set_caretdown()\n self._transform = self._transform.rotate_deg(270)\n\n def _set_caretright(self):\n self._set_caretdown()\n self._transform = self._transform.rotate_deg(90)\n\n _caret_path_base = Path([[-1.0, 0.0], [0.0, -1.5], [1.0, 0]])\n\n def _set_caretdownbase(self):\n self._set_caretdown()\n self._path = self._caret_path_base\n\n def _set_caretupbase(self):\n self._set_caretdownbase()\n self._transform = self._transform.rotate_deg(180)\n\n def _set_caretleftbase(self):\n self._set_caretdownbase()\n self._transform = self._transform.rotate_deg(270)\n\n def _set_caretrightbase(self):\n self._set_caretdownbase()\n self._transform = self._transform.rotate_deg(90)\n\n _plus_path = Path([[-1.0, 0.0], [1.0, 0.0],\n [0.0, -1.0], [0.0, 1.0]],\n [Path.MOVETO, Path.LINETO,\n Path.MOVETO, Path.LINETO])\n\n def _set_plus(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 1.0\n self._filled = False\n self._path = self._plus_path\n\n _x_path = Path([[-1.0, -1.0], [1.0, 1.0],\n [-1.0, 1.0], [1.0, -1.0]],\n [Path.MOVETO, Path.LINETO,\n Path.MOVETO, Path.LINETO])\n\n def _set_x(self):\n self._transform = Affine2D().scale(0.5)\n self._snap_threshold = 3.0\n self._filled = False\n self._path = self._x_path\n\n _plus_filled_path = Path([(1/3, 0), (2/3, 0), (2/3, 1/3),\n (1, 1/3), (1, 2/3), (2/3, 2/3),\n (2/3, 1), (1/3, 1), (1/3, 2/3),\n (0, 2/3), (0, 1/3), (1/3, 1/3),\n (1/3, 0)],\n [Path.MOVETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.CLOSEPOLY])\n\n _plus_filled_path_t = Path([(1, 1/2), (1, 2/3), (2/3, 2/3),\n (2/3, 1), (1/3, 1), (1/3, 2/3),\n (0, 2/3), (0, 1/2), (1, 1/2)],\n [Path.MOVETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO,\n Path.CLOSEPOLY])\n\n def _set_plus_filled(self):\n self._transform = Affine2D().translate(-0.5, -0.5)\n self._snap_threshold = 5.0\n self._joinstyle = 'miter'\n fs = self.get_fillstyle()\n if not self._half_fill():\n self._path = self._plus_filled_path\n else:\n # Rotate top half path to support all partitions\n if fs == 'top':\n rotate, rotate_alt = 0, 180\n elif fs == 'bottom':\n rotate, rotate_alt = 180, 0\n elif fs == 'left':\n rotate, rotate_alt = 90, 270\n else:\n rotate, rotate_alt = 270, 90\n\n self._path = self._plus_filled_path_t\n self._alt_path = self._plus_filled_path_t\n self._alt_transform = Affine2D().translate(-0.5, -0.5)\n self._transform.rotate_deg(rotate)\n self._alt_transform.rotate_deg(rotate_alt)\n\n _x_filled_path = Path([(0.25, 0), (0.5, 0.25), (0.75, 0), (1, 0.25),\n (0.75, 0.5), (1, 0.75), (0.75, 1), (0.5, 0.75),\n (0.25, 1), (0, 0.75), (0.25, 0.5), (0, 0.25),\n (0.25, 0)],\n [Path.MOVETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.CLOSEPOLY])\n\n _x_filled_path_t = Path([(0.75, 0.5), (1, 0.75), (0.75, 1),\n (0.5, 0.75), (0.25, 1), (0, 0.75),\n (0.25, 0.5), (0.75, 0.5)],\n [Path.MOVETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.CLOSEPOLY])\n\n def _set_x_filled(self):\n self._transform = 
Affine2D().translate(-0.5, -0.5)\n self._snap_threshold = 5.0\n self._joinstyle = 'miter'\n fs = self.get_fillstyle()\n if not self._half_fill():\n self._path = self._x_filled_path\n else:\n # Rotate top half path to support all partitions\n if fs == 'top':\n rotate, rotate_alt = 0, 180\n elif fs == 'bottom':\n rotate, rotate_alt = 180, 0\n elif fs == 'left':\n rotate, rotate_alt = 90, 270\n else:\n rotate, rotate_alt = 270, 90\n\n self._path = self._x_filled_path_t\n self._alt_path = self._x_filled_path_t\n self._alt_transform = Affine2D().translate(-0.5, -0.5)\n self._transform.rotate_deg(rotate)\n self._alt_transform.rotate_deg(rotate_alt)\n",
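The module docstring above lists the accepted marker specifications: named markers, the (numsides, style, angle) tuple form, and raw vertex lists. A hedged usage sketch of those forms through the public pyplot interface follows; the data values are illustrative only.

# Hedged usage sketch of the marker forms documented above.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()

# Named marker with a half-filled style.
ax.plot([0, 1, 2], [0, 1, 0], linestyle='none',
        marker='o', fillstyle='left', markersize=15)

# (numsides, style, angle): a 5-sided regular polygon rotated 30 degrees.
ax.plot([0, 1, 2], [1, 2, 1], linestyle='none',
        marker=(5, 0, 30), markersize=15)

# A raw list of (x, y) vertices, centred on (0, 0) and normalized.
ax.plot([0, 1, 2], [2, 3, 2], linestyle='none',
        marker=[(0, -1), (1, 0), (0, 1), (-1, 0)], markersize=15)

plt.show()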
"from __future__ import division, print_function, absolute_import\n\nimport warnings\n\nfrom numpy.testing import assert_, assert_equal\nfrom scipy._lib._numpy_compat import suppress_warnings\nimport pytest\nfrom pytest import raises as assert_raises\n\nimport scipy.special as sc\nfrom scipy.special._ufuncs import _sf_error_test_function\n\n_sf_error_code_map = {\n # skip 'ok'\n 'singular': 1,\n 'underflow': 2,\n 'overflow': 3,\n 'slow': 4,\n 'loss': 5,\n 'no_result': 6,\n 'domain': 7,\n 'arg': 8,\n 'other': 9\n}\n\n_sf_error_actions = [\n 'ignore',\n 'warn',\n 'raise'\n]\n\n\ndef _check_action(fun, args, action):\n if action == 'warn':\n with pytest.warns(sc.SpecialFunctionWarning):\n fun(*args)\n elif action == 'raise':\n with assert_raises(sc.SpecialFunctionError):\n fun(*args)\n else:\n # action == 'ignore', make sure there are no warnings/exceptions\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n fun(*args)\n\n\ndef test_geterr():\n err = sc.geterr()\n for key, value in err.items():\n assert_(key in _sf_error_code_map.keys())\n assert_(value in _sf_error_actions)\n\n\ndef test_seterr():\n entry_err = sc.geterr()\n try:\n for category in _sf_error_code_map.keys():\n for action in _sf_error_actions:\n geterr_olderr = sc.geterr()\n seterr_olderr = sc.seterr(**{category: action})\n assert_(geterr_olderr == seterr_olderr)\n newerr = sc.geterr()\n assert_(newerr[category] == action)\n geterr_olderr.pop(category)\n newerr.pop(category)\n assert_(geterr_olderr == newerr)\n _check_action(_sf_error_test_function,\n (_sf_error_code_map[category],),\n action)\n finally:\n sc.seterr(**entry_err)\n\n\ndef test_errstate_pyx_basic():\n olderr = sc.geterr()\n with sc.errstate(singular='raise'):\n with assert_raises(sc.SpecialFunctionError):\n sc.loggamma(0)\n assert_equal(olderr, sc.geterr())\n\n\ndef test_errstate_c_basic():\n olderr = sc.geterr()\n with sc.errstate(domain='raise'):\n with assert_raises(sc.SpecialFunctionError):\n sc.spence(-1)\n assert_equal(olderr, sc.geterr())\n\n\ndef test_errstate_cpp_basic():\n olderr = sc.geterr()\n with sc.errstate(underflow='raise'):\n with assert_raises(sc.SpecialFunctionError):\n sc.wrightomega(-1000)\n assert_equal(olderr, sc.geterr())\n\n\ndef test_errstate():\n for category in _sf_error_code_map.keys():\n for action in _sf_error_actions:\n olderr = sc.geterr()\n with sc.errstate(**{category: action}):\n _check_action(_sf_error_test_function,\n (_sf_error_code_map[category],),\n action)\n assert_equal(olderr, sc.geterr())\n\n\ndef test_errstate_all_but_one():\n olderr = sc.geterr()\n with sc.errstate(all='raise', singular='ignore'):\n sc.gammaln(0)\n with assert_raises(sc.SpecialFunctionError):\n sc.spence(-1.0)\n assert_equal(olderr, sc.geterr())\n\n\ndef test_errprint():\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"`errprint` is deprecated!\")\n flag = sc.errprint(True)\n\n try:\n assert_(isinstance(flag, bool))\n with pytest.warns(sc.SpecialFunctionWarning):\n sc.loggamma(0)\n finally:\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"`errprint` is deprecated!\")\n sc.errprint(flag)\n",
"#!/usr/bin/env python\n\n# flake8: noqa\n\nimport warnings\nimport operator\nfrom itertools import product\nfrom distutils.version import LooseVersion\n\nimport nose\nfrom nose.tools import assert_raises\n\nfrom numpy.random import randn, rand, randint\nimport numpy as np\n\nfrom pandas.types.common import is_list_like, is_scalar\nimport pandas as pd\nfrom pandas.core import common as com\nfrom pandas import DataFrame, Series, Panel, date_range\nfrom pandas.util.testing import makeCustomDataframe as mkdf\n\nfrom pandas.computation import pytables\nfrom pandas.computation.engines import _engines, NumExprClobberingError\nfrom pandas.computation.expr import PythonExprVisitor, PandasExprVisitor\nfrom pandas.computation.ops import (_binary_ops_dict,\n _special_case_arith_ops_syms,\n _arith_ops_syms, _bool_ops_syms,\n _unary_math_ops, _binary_math_ops)\n\nimport pandas.computation.expr as expr\nimport pandas.util.testing as tm\nimport pandas.lib as lib\nfrom pandas.util.testing import (assert_frame_equal, randbool,\n assertRaisesRegexp, assert_numpy_array_equal,\n assert_produces_warning, assert_series_equal,\n slow)\nfrom pandas.compat import PY3, u, reduce\n\n_series_frame_incompatible = _bool_ops_syms\n_scalar_skip = 'in', 'not in'\n\n\ndef engine_has_neg_frac(engine):\n return _engines[engine].has_neg_frac\n\n\ndef _eval_single_bin(lhs, cmp1, rhs, engine):\n c = _binary_ops_dict[cmp1]\n if engine_has_neg_frac(engine):\n try:\n return c(lhs, rhs)\n except ValueError as e:\n if str(e).startswith('negative number cannot be raised to a fractional power'):\n return np.nan\n raise\n return c(lhs, rhs)\n\n\ndef _series_and_2d_ndarray(lhs, rhs):\n return ((isinstance(lhs, Series) and\n isinstance(rhs, np.ndarray) and rhs.ndim > 1)\n or (isinstance(rhs, Series) and\n isinstance(lhs, np.ndarray) and lhs.ndim > 1))\n\n\ndef _series_and_frame(lhs, rhs):\n return ((isinstance(lhs, Series) and isinstance(rhs, DataFrame))\n or (isinstance(rhs, Series) and isinstance(lhs, DataFrame)))\n\n\ndef _bool_and_frame(lhs, rhs):\n return isinstance(lhs, bool) and isinstance(rhs, pd.core.generic.NDFrame)\n\n\ndef _is_py3_complex_incompat(result, expected):\n return (PY3 and isinstance(expected, (complex, np.complexfloating)) and\n np.isnan(result))\n\n\n_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms)\n\n\nclass TestEvalNumexprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestEvalNumexprPandas, cls).setUpClass()\n tm.skip_if_no_ne()\n import numexpr as ne\n cls.ne = ne\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n\n @classmethod\n def tearDownClass(cls):\n super(TestEvalNumexprPandas, cls).tearDownClass()\n del cls.engine, cls.parser\n if hasattr(cls, 'ne'):\n del cls.ne\n\n def setup_data(self):\n nan_df1 = DataFrame(rand(10, 5))\n nan_df1[nan_df1 > 0.5] = np.nan\n nan_df2 = DataFrame(rand(10, 5))\n nan_df2[nan_df2 > 0.5] = np.nan\n\n self.pandas_lhses = (DataFrame(randn(10, 5)), Series(randn(5)),\n Series([1, 2, np.nan, np.nan, 5]), nan_df1)\n self.pandas_rhses = (DataFrame(randn(10, 5)), Series(randn(5)),\n Series([1, 2, np.nan, np.nan, 5]), nan_df2)\n self.scalar_lhses = randn(),\n self.scalar_rhses = randn(),\n\n self.lhses = self.pandas_lhses + self.scalar_lhses\n self.rhses = self.pandas_rhses + self.scalar_rhses\n\n def setup_ops(self):\n self.cmp_ops = expr._cmp_ops_syms\n self.cmp2_ops = self.cmp_ops[::-1]\n self.bin_ops = expr._bool_ops_syms\n self.special_case_ops = _special_case_arith_ops_syms\n self.arith_ops = _good_arith_ops\n self.unary_ops 
= '-', '~', 'not '\n\n def setUp(self):\n self.setup_ops()\n self.setup_data()\n self.current_engines = filter(lambda x: x != self.engine, _engines)\n\n def tearDown(self):\n del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses\n del self.pandas_rhses, self.pandas_lhses, self.current_engines\n\n @slow\n def test_complex_cmp_ops(self):\n cmp_ops = ('!=', '==', '<=', '>=', '<', '>')\n cmp2_ops = ('>', '<')\n for lhs, cmp1, rhs, binop, cmp2 in product(self.lhses, cmp_ops,\n self.rhses, self.bin_ops,\n cmp2_ops):\n self.check_complex_cmp_op(lhs, cmp1, rhs, binop, cmp2)\n\n def test_simple_cmp_ops(self):\n bool_lhses = (DataFrame(randbool(size=(10, 5))),\n Series(randbool((5,))), randbool())\n bool_rhses = (DataFrame(randbool(size=(10, 5))),\n Series(randbool((5,))), randbool())\n for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops):\n self.check_simple_cmp_op(lhs, cmp_op, rhs)\n\n @slow\n def test_binary_arith_ops(self):\n for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses):\n self.check_binary_arith_op(lhs, op, rhs)\n\n def test_modulus(self):\n for lhs, rhs in product(self.lhses, self.rhses):\n self.check_modulus(lhs, '%', rhs)\n\n def test_floor_division(self):\n for lhs, rhs in product(self.lhses, self.rhses):\n self.check_floor_division(lhs, '//', rhs)\n\n def test_pow(self):\n tm._skip_if_windows()\n\n # odd failure on win32 platform, so skip\n for lhs, rhs in product(self.lhses, self.rhses):\n self.check_pow(lhs, '**', rhs)\n\n @slow\n def test_single_invert_op(self):\n for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):\n self.check_single_invert_op(lhs, op, rhs)\n\n @slow\n def test_compound_invert_op(self):\n for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):\n self.check_compound_invert_op(lhs, op, rhs)\n\n @slow\n def test_chained_cmp_op(self):\n mids = self.lhses\n cmp_ops = '<', '>'\n for lhs, cmp1, mid, cmp2, rhs in product(self.lhses, cmp_ops,\n mids, cmp_ops, self.rhses):\n self.check_chained_cmp_op(lhs, cmp1, mid, cmp2, rhs)\n\n def check_equal(self, result, expected):\n if isinstance(result, DataFrame):\n tm.assert_frame_equal(result, expected)\n elif isinstance(result, Series):\n tm.assert_series_equal(result, expected)\n elif isinstance(result, np.ndarray):\n tm.assert_numpy_array_equal(result, expected)\n else:\n self.assertEqual(result, expected)\n\n def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):\n skip_these = _scalar_skip\n ex = '(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)'.format(cmp1=cmp1,\n binop=binop,\n cmp2=cmp2)\n scalar_with_in_notin = (is_scalar(rhs) and (cmp1 in skip_these or\n cmp2 in skip_these))\n if scalar_with_in_notin:\n with tm.assertRaises(TypeError):\n pd.eval(ex, engine=self.engine, parser=self.parser)\n self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,\n parser=self.parser, local_dict={'lhs': lhs,\n 'rhs': rhs})\n else:\n lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine)\n rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)\n if (isinstance(lhs_new, Series) and isinstance(rhs_new, DataFrame)\n and binop in _series_frame_incompatible):\n pass\n # TODO: the code below should be added back when left and right\n # hand side bool ops are fixed.\n\n # try:\n # self.assertRaises(Exception, pd.eval, ex,\n #local_dict={'lhs': lhs, 'rhs': rhs},\n # engine=self.engine, parser=self.parser)\n # except AssertionError:\n #import ipdb; ipdb.set_trace()\n # raise\n else:\n expected = _eval_single_bin(\n lhs_new, binop, rhs_new, self.engine)\n result = 
pd.eval(ex, engine=self.engine, parser=self.parser)\n self.check_equal(result, expected)\n\n def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):\n skip_these = _scalar_skip\n\n def check_operands(left, right, cmp_op):\n return _eval_single_bin(left, cmp_op, right, self.engine)\n\n lhs_new = check_operands(lhs, mid, cmp1)\n rhs_new = check_operands(mid, rhs, cmp2)\n\n if lhs_new is not None and rhs_new is not None:\n ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2)\n ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp1, cmp2)\n ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp1, cmp2)\n expected = _eval_single_bin(lhs_new, '&', rhs_new, self.engine)\n\n for ex in (ex1, ex2, ex3):\n result = pd.eval(ex, engine=self.engine,\n parser=self.parser)\n\n tm.assert_almost_equal(result, expected)\n\n def check_simple_cmp_op(self, lhs, cmp1, rhs):\n ex = 'lhs {0} rhs'.format(cmp1)\n if cmp1 in ('in', 'not in') and not is_list_like(rhs):\n self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,\n parser=self.parser, local_dict={'lhs': lhs,\n 'rhs': rhs})\n else:\n expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n self.check_equal(result, expected)\n\n def check_binary_arith_op(self, lhs, arith1, rhs):\n ex = 'lhs {0} rhs'.format(arith1)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = _eval_single_bin(lhs, arith1, rhs, self.engine)\n\n tm.assert_almost_equal(result, expected)\n ex = 'lhs {0} rhs {0} rhs'.format(arith1)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n nlhs = _eval_single_bin(lhs, arith1, rhs,\n self.engine)\n self.check_alignment(result, nlhs, rhs, arith1)\n\n def check_alignment(self, result, nlhs, ghs, op):\n try:\n nlhs, ghs = nlhs.align(ghs)\n except (ValueError, TypeError, AttributeError):\n # ValueError: series frame or frame series align\n # TypeError, AttributeError: series or frame with scalar align\n pass\n else:\n\n # direct numpy comparison\n expected = self.ne.evaluate('nlhs {0} ghs'.format(op))\n tm.assert_numpy_array_equal(result.values, expected)\n\n # modulus, pow, and floor division require special casing\n\n def check_modulus(self, lhs, arith1, rhs):\n ex = 'lhs {0} rhs'.format(arith1)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = lhs % rhs\n\n tm.assert_almost_equal(result, expected)\n expected = self.ne.evaluate('expected {0} rhs'.format(arith1))\n if isinstance(result, (DataFrame, Series)):\n tm.assert_almost_equal(result.values, expected)\n else:\n tm.assert_almost_equal(result, expected.item())\n\n def check_floor_division(self, lhs, arith1, rhs):\n ex = 'lhs {0} rhs'.format(arith1)\n\n if self.engine == 'python':\n res = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = lhs // rhs\n self.check_equal(res, expected)\n else:\n self.assertRaises(TypeError, pd.eval, ex, local_dict={'lhs': lhs,\n 'rhs': rhs},\n engine=self.engine, parser=self.parser)\n\n def get_expected_pow_result(self, lhs, rhs):\n try:\n expected = _eval_single_bin(lhs, '**', rhs, self.engine)\n except ValueError as e:\n if str(e).startswith('negative number cannot be raised to a fractional power'):\n if self.engine == 'python':\n raise nose.SkipTest(str(e))\n else:\n expected = np.nan\n else:\n raise\n return expected\n\n def check_pow(self, lhs, arith1, rhs):\n ex = 'lhs {0} rhs'.format(arith1)\n expected = self.get_expected_pow_result(lhs, rhs)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n\n if (is_scalar(lhs) and 
is_scalar(rhs) and\n _is_py3_complex_incompat(result, expected)):\n self.assertRaises(AssertionError, tm.assert_numpy_array_equal,\n result, expected)\n else:\n tm.assert_almost_equal(result, expected)\n\n ex = '(lhs {0} rhs) {0} rhs'.format(arith1)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = self.get_expected_pow_result(\n self.get_expected_pow_result(lhs, rhs), rhs)\n tm.assert_almost_equal(result, expected)\n\n def check_single_invert_op(self, lhs, cmp1, rhs):\n # simple\n for el in (lhs, rhs):\n try:\n elb = el.astype(bool)\n except AttributeError:\n elb = np.array([bool(el)])\n expected = ~elb\n result = pd.eval('~elb', engine=self.engine, parser=self.parser)\n tm.assert_almost_equal(expected, result)\n\n for engine in self.current_engines:\n tm.skip_if_no_ne(engine)\n tm.assert_almost_equal(result, pd.eval('~elb', engine=engine,\n parser=self.parser))\n\n def check_compound_invert_op(self, lhs, cmp1, rhs):\n skip_these = 'in', 'not in'\n ex = '~(lhs {0} rhs)'.format(cmp1)\n\n if is_scalar(rhs) and cmp1 in skip_these:\n self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,\n parser=self.parser, local_dict={'lhs': lhs,\n 'rhs': rhs})\n else:\n # compound\n if is_scalar(lhs) and is_scalar(rhs):\n lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs))\n expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)\n if is_scalar(expected):\n expected = not expected\n else:\n expected = ~expected\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n tm.assert_almost_equal(expected, result)\n\n # make sure the other engines work the same as this one\n for engine in self.current_engines:\n tm.skip_if_no_ne(engine)\n ev = pd.eval(ex, engine=self.engine, parser=self.parser)\n tm.assert_almost_equal(ev, result)\n\n def ex(self, op, var_name='lhs'):\n return '{0}{1}'.format(op, var_name)\n\n def test_frame_invert(self):\n expr = self.ex('~')\n\n # ~ ##\n # frame\n # float always raises\n lhs = DataFrame(randn(5, 2))\n if self.engine == 'numexpr':\n with tm.assertRaises(NotImplementedError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n # int raises on numexpr\n lhs = DataFrame(randint(5, size=(5, 2)))\n if self.engine == 'numexpr':\n with tm.assertRaises(NotImplementedError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n # bool always works\n lhs = DataFrame(rand(5, 2) > 0.5)\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n # object raises\n lhs = DataFrame({'b': ['a', 1, 2.0], 'c': rand(3) > 0.5})\n if self.engine == 'numexpr':\n with tm.assertRaises(ValueError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n def test_series_invert(self):\n # ~ ####\n expr = self.ex('~')\n\n # series\n # float raises\n lhs = Series(randn(5))\n if self.engine == 'numexpr':\n with tm.assertRaises(NotImplementedError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n # int raises on numexpr\n lhs = Series(randint(5, size=5))\n if self.engine == 'numexpr':\n with 
tm.assertRaises(NotImplementedError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n # bool\n lhs = Series(rand(5) > 0.5)\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n # float\n # int\n # bool\n\n # object\n lhs = Series(['a', 1, 2.0])\n if self.engine == 'numexpr':\n with tm.assertRaises(ValueError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n def test_frame_negate(self):\n expr = self.ex('-')\n\n # float\n lhs = DataFrame(randn(5, 2))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n # int\n lhs = DataFrame(randint(5, size=(5, 2)))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n # bool doesn't work with numexpr but works elsewhere\n lhs = DataFrame(rand(5, 2) > 0.5)\n if self.engine == 'numexpr':\n with tm.assertRaises(NotImplementedError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n def test_series_negate(self):\n expr = self.ex('-')\n\n # float\n lhs = Series(randn(5))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n # int\n lhs = Series(randint(5, size=5))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n # bool doesn't work with numexpr but works elsewhere\n lhs = Series(rand(5) > 0.5)\n if self.engine == 'numexpr':\n with tm.assertRaises(NotImplementedError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n def test_frame_pos(self):\n expr = self.ex('+')\n\n # float\n lhs = DataFrame(randn(5, 2))\n if self.engine == 'python':\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n # int\n lhs = DataFrame(randint(5, size=(5, 2)))\n if self.engine == 'python':\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n # bool doesn't work with numexpr but works elsewhere\n lhs = DataFrame(rand(5, 2) > 0.5)\n if self.engine == 'python':\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_frame_equal(expect, result)\n\n def test_series_pos(self):\n expr = self.ex('+')\n\n # float\n lhs = Series(randn(5))\n if self.engine == 'python':\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n # int\n lhs = 
Series(randint(5, size=5))\n if self.engine == 'python':\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n # bool doesn't work with numexpr but works elsewhere\n lhs = Series(rand(5) > 0.5)\n if self.engine == 'python':\n with tm.assertRaises(TypeError):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert_series_equal(expect, result)\n\n def test_scalar_unary(self):\n with tm.assertRaises(TypeError):\n pd.eval('~1.0', engine=self.engine, parser=self.parser)\n\n self.assertEqual(\n pd.eval('-1.0', parser=self.parser, engine=self.engine), -1.0)\n self.assertEqual(\n pd.eval('+1.0', parser=self.parser, engine=self.engine), +1.0)\n\n self.assertEqual(\n pd.eval('~1', parser=self.parser, engine=self.engine), ~1)\n self.assertEqual(\n pd.eval('-1', parser=self.parser, engine=self.engine), -1)\n self.assertEqual(\n pd.eval('+1', parser=self.parser, engine=self.engine), +1)\n\n self.assertEqual(\n pd.eval('~True', parser=self.parser, engine=self.engine), ~True)\n self.assertEqual(\n pd.eval('~False', parser=self.parser, engine=self.engine), ~False)\n self.assertEqual(\n pd.eval('-True', parser=self.parser, engine=self.engine), -True)\n self.assertEqual(\n pd.eval('-False', parser=self.parser, engine=self.engine), -False)\n self.assertEqual(\n pd.eval('+True', parser=self.parser, engine=self.engine), +True)\n self.assertEqual(\n pd.eval('+False', parser=self.parser, engine=self.engine), +False)\n\n def test_unary_in_array(self):\n # GH 11235\n assert_numpy_array_equal(\n pd.eval('[-True, True, ~True, +True,'\n '-False, False, ~False, +False,'\n '-37, 37, ~37, +37]'),\n np.array([-True, True, ~True, +True,\n -False, False, ~False, +False,\n -37, 37, ~37, +37], dtype=np.object_))\n\n def test_disallow_scalar_bool_ops(self):\n exprs = '1 or 2', '1 and 2'\n exprs += 'a and b', 'a or b'\n exprs += '1 or 2 and (3 + 2) > 3',\n exprs += '2 * x > 2 or 1 and 2',\n exprs += '2 * df > 3 and 1 or a',\n\n x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2))\n for ex in exprs:\n with tm.assertRaises(NotImplementedError):\n pd.eval(ex, engine=self.engine, parser=self.parser)\n\n def test_identical(self):\n # GH 10546\n x = 1\n result = pd.eval('x', engine=self.engine, parser=self.parser)\n self.assertEqual(result, 1)\n self.assertTrue(is_scalar(result))\n\n x = 1.5\n result = pd.eval('x', engine=self.engine, parser=self.parser)\n self.assertEqual(result, 1.5)\n self.assertTrue(is_scalar(result))\n\n x = False\n result = pd.eval('x', engine=self.engine, parser=self.parser)\n self.assertEqual(result, False)\n self.assertTrue(is_scalar(result))\n\n x = np.array([1])\n result = pd.eval('x', engine=self.engine, parser=self.parser)\n tm.assert_numpy_array_equal(result, np.array([1]))\n self.assertEqual(result.shape, (1, ))\n\n x = np.array([1.5])\n result = pd.eval('x', engine=self.engine, parser=self.parser)\n tm.assert_numpy_array_equal(result, np.array([1.5]))\n self.assertEqual(result.shape, (1, ))\n\n x = np.array([False])\n result = pd.eval('x', engine=self.engine, parser=self.parser)\n tm.assert_numpy_array_equal(result, np.array([False]))\n self.assertEqual(result.shape, (1, ))\n\n def test_line_continuation(self):\n # GH 11149\n exp = \"\"\"1 + 2 * \\\n 5 - 1 + 2 \"\"\"\n result = pd.eval(exp, engine=self.engine, 
parser=self.parser)\n self.assertEqual(result, 12)\n\n def test_float_truncation(self):\n # GH 14241\n exp = '1000000000.006'\n result = pd.eval(exp, engine=self.engine, parser=self.parser)\n expected = np.float64(exp)\n self.assertEqual(result, expected)\n\n df = pd.DataFrame({'A': [1000000000.0009,\n 1000000000.0011,\n 1000000000.0015]})\n cutoff = 1000000000.0006\n result = df.query(\"A < %.4f\" % cutoff)\n self.assertTrue(result.empty)\n\n cutoff = 1000000000.0010\n result = df.query(\"A > %.4f\" % cutoff)\n expected = df.loc[[1, 2], :]\n tm.assert_frame_equal(expected, result)\n\n exact = 1000000000.0011\n result = df.query('A == %.4f' % exact)\n expected = df.loc[[1], :]\n tm.assert_frame_equal(expected, result)\n\n\n\nclass TestEvalNumexprPython(TestEvalNumexprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestEvalNumexprPython, cls).setUpClass()\n tm.skip_if_no_ne()\n import numexpr as ne\n cls.ne = ne\n cls.engine = 'numexpr'\n cls.parser = 'python'\n\n def setup_ops(self):\n self.cmp_ops = list(filter(lambda x: x not in ('in', 'not in'),\n expr._cmp_ops_syms))\n self.cmp2_ops = self.cmp_ops[::-1]\n self.bin_ops = [s for s in expr._bool_ops_syms\n if s not in ('and', 'or')]\n self.special_case_ops = _special_case_arith_ops_syms\n self.arith_ops = _good_arith_ops\n self.unary_ops = '+', '-', '~'\n\n def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):\n ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2)\n with tm.assertRaises(NotImplementedError):\n pd.eval(ex1, engine=self.engine, parser=self.parser)\n\n\nclass TestEvalPythonPython(TestEvalNumexprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestEvalPythonPython, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'python'\n\n def check_modulus(self, lhs, arith1, rhs):\n ex = 'lhs {0} rhs'.format(arith1)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n\n expected = lhs % rhs\n tm.assert_almost_equal(result, expected)\n\n expected = _eval_single_bin(expected, arith1, rhs, self.engine)\n tm.assert_almost_equal(result, expected)\n\n def check_alignment(self, result, nlhs, ghs, op):\n try:\n nlhs, ghs = nlhs.align(ghs)\n except (ValueError, TypeError, AttributeError):\n # ValueError: series frame or frame series align\n # TypeError, AttributeError: series or frame with scalar align\n pass\n else:\n expected = eval('nlhs {0} ghs'.format(op))\n tm.assert_almost_equal(result, expected)\n\n\nclass TestEvalPythonPandas(TestEvalPythonPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestEvalPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n\n def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):\n TestEvalNumexprPandas.check_chained_cmp_op(self, lhs, cmp1, mid, cmp2,\n rhs)\n\n\nf = lambda *args, **kwargs: np.random.randn()\n\n\nENGINES_PARSERS = list(product(_engines, expr._parsers))\n\n#-------------------------------------\n# typecasting rules consistency with python\n# issue #12388\n\nclass TestTypeCasting(object):\n\n def check_binop_typecasting(self, engine, parser, op, dt):\n tm.skip_if_no_ne(engine)\n df = mkdf(5, 3, data_gen_f=f, dtype=dt)\n s = 'df {} 3'.format(op)\n res = pd.eval(s, engine=engine, parser=parser)\n assert df.values.dtype == dt\n assert res.values.dtype == dt\n assert_frame_equal(res, eval(s))\n\n s = '3 {} df'.format(op)\n res = pd.eval(s, engine=engine, parser=parser)\n assert df.values.dtype == dt\n assert res.values.dtype == dt\n assert_frame_equal(res, eval(s))\n\n def test_binop_typecasting(self):\n for engine, parser in 
ENGINES_PARSERS:\n for op in ['+', '-', '*', '**', '/']:\n # maybe someday... numexpr has too many upcasting rules now\n #for dt in chain(*(np.sctypes[x] for x in ['uint', 'int', 'float'])):\n for dt in [np.float32, np.float64]:\n yield self.check_binop_typecasting, engine, parser, op, dt\n\n\n#-------------------------------------\n# basic and complex alignment\n\ndef _is_datetime(x):\n return issubclass(x.dtype.type, np.datetime64)\n\n\ndef should_warn(*args):\n not_mono = not any(map(operator.attrgetter('is_monotonic'), args))\n only_one_dt = reduce(operator.xor, map(_is_datetime, args))\n return not_mono and only_one_dt\n\n\nclass TestAlignment(object):\n\n index_types = 'i', 'u', 'dt'\n lhs_index_types = index_types + ('s',) # 'p'\n\n def check_align_nested_unary_op(self, engine, parser):\n tm.skip_if_no_ne(engine)\n s = 'df * ~2'\n df = mkdf(5, 3, data_gen_f=f)\n res = pd.eval(s, engine=engine, parser=parser)\n assert_frame_equal(res, df * ~2)\n\n def test_align_nested_unary_op(self):\n for engine, parser in ENGINES_PARSERS:\n yield self.check_align_nested_unary_op, engine, parser\n\n def check_basic_frame_alignment(self, engine, parser):\n tm.skip_if_no_ne(engine)\n args = product(self.lhs_index_types, self.index_types,\n self.index_types)\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n for lr_idx_type, rr_idx_type, c_idx_type in args:\n df = mkdf(10, 10, data_gen_f=f, r_idx_type=lr_idx_type,\n c_idx_type=c_idx_type)\n df2 = mkdf(20, 10, data_gen_f=f, r_idx_type=rr_idx_type,\n c_idx_type=c_idx_type)\n # only warns if not monotonic and not sortable\n if should_warn(df.index, df2.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval('df + df2', engine=engine, parser=parser)\n else:\n res = pd.eval('df + df2', engine=engine, parser=parser)\n assert_frame_equal(res, df + df2)\n\n def test_basic_frame_alignment(self):\n for engine, parser in ENGINES_PARSERS:\n yield self.check_basic_frame_alignment, engine, parser\n\n def check_frame_comparison(self, engine, parser):\n tm.skip_if_no_ne(engine)\n args = product(self.lhs_index_types, repeat=2)\n for r_idx_type, c_idx_type in args:\n df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,\n c_idx_type=c_idx_type)\n res = pd.eval('df < 2', engine=engine, parser=parser)\n assert_frame_equal(res, df < 2)\n\n df3 = DataFrame(randn(*df.shape), index=df.index,\n columns=df.columns)\n res = pd.eval('df < df3', engine=engine, parser=parser)\n assert_frame_equal(res, df < df3)\n\n def test_frame_comparison(self):\n for engine, parser in ENGINES_PARSERS:\n yield self.check_frame_comparison, engine, parser\n\n def check_medium_complex_frame_alignment(self, engine, parser):\n tm.skip_if_no_ne(engine)\n args = product(self.lhs_index_types, self.index_types,\n self.index_types, self.index_types)\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n\n for r1, c1, r2, c2 in args:\n df = mkdf(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)\n df2 = mkdf(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)\n df3 = mkdf(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)\n if should_warn(df.index, df2.index, df3.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval('df + df2 + df3', engine=engine,\n parser=parser)\n else:\n res = pd.eval('df + df2 + df3',\n engine=engine, parser=parser)\n assert_frame_equal(res, df + df2 + df3)\n\n @slow\n def test_medium_complex_frame_alignment(self):\n for engine, parser in ENGINES_PARSERS:\n 
yield self.check_medium_complex_frame_alignment, engine, parser\n\n def check_basic_frame_series_alignment(self, engine, parser):\n tm.skip_if_no_ne(engine)\n\n def testit(r_idx_type, c_idx_type, index_name):\n df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,\n c_idx_type=c_idx_type)\n index = getattr(df, index_name)\n s = Series(np.random.randn(5), index[:5])\n\n if should_warn(df.index, s.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval('df + s', engine=engine, parser=parser)\n else:\n res = pd.eval('df + s', engine=engine, parser=parser)\n\n if r_idx_type == 'dt' or c_idx_type == 'dt':\n expected = df.add(s) if engine == 'numexpr' else df + s\n else:\n expected = df + s\n assert_frame_equal(res, expected)\n\n args = product(self.lhs_index_types, self.index_types,\n ('index', 'columns'))\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n for r_idx_type, c_idx_type, index_name in args:\n testit(r_idx_type, c_idx_type, index_name)\n\n def test_basic_frame_series_alignment(self):\n for engine, parser in ENGINES_PARSERS:\n yield self.check_basic_frame_series_alignment, engine, parser\n\n def check_basic_series_frame_alignment(self, engine, parser):\n tm.skip_if_no_ne(engine)\n\n def testit(r_idx_type, c_idx_type, index_name):\n df = mkdf(10, 7, data_gen_f=f, r_idx_type=r_idx_type,\n c_idx_type=c_idx_type)\n index = getattr(df, index_name)\n s = Series(np.random.randn(5), index[:5])\n if should_warn(s.index, df.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval('s + df', engine=engine, parser=parser)\n else:\n res = pd.eval('s + df', engine=engine, parser=parser)\n\n if r_idx_type == 'dt' or c_idx_type == 'dt':\n expected = df.add(s) if engine == 'numexpr' else s + df\n else:\n expected = s + df\n assert_frame_equal(res, expected)\n\n # only test dt with dt, otherwise weird joins result\n args = product(['i', 'u', 's'], ['i', 'u', 's'], ('index', 'columns'))\n with warnings.catch_warnings(record=True):\n for r_idx_type, c_idx_type, index_name in args:\n testit(r_idx_type, c_idx_type, index_name)\n\n # dt with dt\n args = product(['dt'], ['dt'], ('index', 'columns'))\n with warnings.catch_warnings(record=True):\n for r_idx_type, c_idx_type, index_name in args:\n testit(r_idx_type, c_idx_type, index_name)\n\n def test_basic_series_frame_alignment(self):\n for engine, parser in ENGINES_PARSERS:\n yield self.check_basic_series_frame_alignment, engine, parser\n\n def check_series_frame_commutativity(self, engine, parser):\n tm.skip_if_no_ne(engine)\n args = product(self.lhs_index_types, self.index_types, ('+', '*'),\n ('index', 'columns'))\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n for r_idx_type, c_idx_type, op, index_name in args:\n df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,\n c_idx_type=c_idx_type)\n index = getattr(df, index_name)\n s = Series(np.random.randn(5), index[:5])\n\n lhs = 's {0} df'.format(op)\n rhs = 'df {0} s'.format(op)\n if should_warn(df.index, s.index):\n with tm.assert_produces_warning(RuntimeWarning):\n a = pd.eval(lhs, engine=engine, parser=parser)\n with tm.assert_produces_warning(RuntimeWarning):\n b = pd.eval(rhs, engine=engine, parser=parser)\n else:\n a = pd.eval(lhs, engine=engine, parser=parser)\n b = pd.eval(rhs, engine=engine, parser=parser)\n\n if r_idx_type != 'dt' and c_idx_type != 'dt':\n if engine == 'numexpr':\n assert_frame_equal(a, b)\n\n def test_series_frame_commutativity(self):\n for 
engine, parser in ENGINES_PARSERS:\n yield self.check_series_frame_commutativity, engine, parser\n\n def check_complex_series_frame_alignment(self, engine, parser):\n tm.skip_if_no_ne(engine)\n\n import random\n args = product(self.lhs_index_types, self.index_types,\n self.index_types, self.index_types)\n n = 3\n m1 = 5\n m2 = 2 * m1\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n for r1, r2, c1, c2 in args:\n index_name = random.choice(['index', 'columns'])\n obj_name = random.choice(['df', 'df2'])\n\n df = mkdf(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)\n df2 = mkdf(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)\n index = getattr(locals().get(obj_name), index_name)\n s = Series(np.random.randn(n), index[:n])\n\n if r2 == 'dt' or c2 == 'dt':\n if engine == 'numexpr':\n expected2 = df2.add(s)\n else:\n expected2 = df2 + s\n else:\n expected2 = df2 + s\n\n if r1 == 'dt' or c1 == 'dt':\n if engine == 'numexpr':\n expected = expected2.add(df)\n else:\n expected = expected2 + df\n else:\n expected = expected2 + df\n\n if should_warn(df2.index, s.index, df.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval('df2 + s + df', engine=engine,\n parser=parser)\n else:\n res = pd.eval('df2 + s + df', engine=engine, parser=parser)\n tm.assert_equal(res.shape, expected.shape)\n assert_frame_equal(res, expected)\n\n @slow\n def test_complex_series_frame_alignment(self):\n for engine, parser in ENGINES_PARSERS:\n yield self.check_complex_series_frame_alignment, engine, parser\n\n def check_performance_warning_for_poor_alignment(self, engine, parser):\n tm.skip_if_no_ne(engine)\n df = DataFrame(randn(1000, 10))\n s = Series(randn(10000))\n if engine == 'numexpr':\n seen = pd.core.common.PerformanceWarning\n else:\n seen = False\n\n with assert_produces_warning(seen):\n pd.eval('df + s', engine=engine, parser=parser)\n\n s = Series(randn(1000))\n with assert_produces_warning(False):\n pd.eval('df + s', engine=engine, parser=parser)\n\n df = DataFrame(randn(10, 10000))\n s = Series(randn(10000))\n with assert_produces_warning(False):\n pd.eval('df + s', engine=engine, parser=parser)\n\n df = DataFrame(randn(10, 10))\n s = Series(randn(10000))\n\n is_python_engine = engine == 'python'\n\n if not is_python_engine:\n wrn = pd.core.common.PerformanceWarning\n else:\n wrn = False\n\n with assert_produces_warning(wrn) as w:\n pd.eval('df + s', engine=engine, parser=parser)\n\n if not is_python_engine:\n tm.assert_equal(len(w), 1)\n msg = str(w[0].message)\n expected = (\"Alignment difference on axis {0} is larger\"\n \" than an order of magnitude on term {1!r}, \"\n \"by more than {2:.4g}; performance may suffer\"\n \"\".format(1, 'df', np.log10(s.size - df.shape[1])))\n tm.assert_equal(msg, expected)\n\n def test_performance_warning_for_poor_alignment(self):\n for engine, parser in ENGINES_PARSERS:\n yield (self.check_performance_warning_for_poor_alignment, engine,\n parser)\n\n\n#------------------------------------\n# slightly more complex ops\n\nclass TestOperationsNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestOperationsNumExprPandas, cls).setUpClass()\n tm.skip_if_no_ne()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n\n @classmethod\n def tearDownClass(cls):\n super(TestOperationsNumExprPandas, cls).tearDownClass()\n del cls.engine, cls.parser\n\n def eval(self, *args, **kwargs):\n kwargs['engine'] = self.engine\n 
kwargs['parser'] = self.parser\n kwargs['level'] = kwargs.pop('level', 0) + 1\n return pd.eval(*args, **kwargs)\n\n def test_simple_arith_ops(self):\n ops = self.arith_ops\n\n for op in filter(lambda x: x != '//', ops):\n ex = '1 {0} 1'.format(op)\n ex2 = 'x {0} 1'.format(op)\n ex3 = '1 {0} (x + 1)'.format(op)\n\n if op in ('in', 'not in'):\n self.assertRaises(TypeError, pd.eval, ex,\n engine=self.engine, parser=self.parser)\n else:\n expec = _eval_single_bin(1, op, 1, self.engine)\n x = self.eval(ex, engine=self.engine, parser=self.parser)\n tm.assert_equal(x, expec)\n\n expec = _eval_single_bin(x, op, 1, self.engine)\n y = self.eval(ex2, local_dict={'x': x}, engine=self.engine,\n parser=self.parser)\n tm.assert_equal(y, expec)\n\n expec = _eval_single_bin(1, op, x + 1, self.engine)\n y = self.eval(ex3, local_dict={'x': x},\n engine=self.engine, parser=self.parser)\n tm.assert_equal(y, expec)\n\n def test_simple_bool_ops(self):\n for op, lhs, rhs in product(expr._bool_ops_syms, (True, False),\n (True, False)):\n ex = '{0} {1} {2}'.format(lhs, op, rhs)\n res = self.eval(ex)\n exp = eval(ex)\n self.assertEqual(res, exp)\n\n def test_bool_ops_with_constants(self):\n for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'),\n ('True', 'False')):\n ex = '{0} {1} {2}'.format(lhs, op, rhs)\n res = self.eval(ex)\n exp = eval(ex)\n self.assertEqual(res, exp)\n\n def test_panel_fails(self):\n x = Panel(randn(3, 4, 5))\n y = Series(randn(10))\n assert_raises(NotImplementedError, self.eval, 'x + y',\n local_dict={'x': x, 'y': y})\n\n def test_4d_ndarray_fails(self):\n x = randn(3, 4, 5, 6)\n y = Series(randn(10))\n assert_raises(NotImplementedError, self.eval, 'x + y',\n local_dict={'x': x, 'y': y})\n\n def test_constant(self):\n x = self.eval('1')\n tm.assert_equal(x, 1)\n\n def test_single_variable(self):\n df = DataFrame(randn(10, 2))\n df2 = self.eval('df', local_dict={'df': df})\n assert_frame_equal(df, df2)\n\n def test_truediv(self):\n s = np.array([1])\n ex = 's / 1'\n d = {'s': s}\n\n if PY3:\n res = self.eval(ex, truediv=False)\n tm.assert_numpy_array_equal(res, np.array([1.0]))\n\n res = self.eval(ex, truediv=True)\n tm.assert_numpy_array_equal(res, np.array([1.0]))\n\n res = self.eval('1 / 2', truediv=True)\n expec = 0.5\n self.assertEqual(res, expec)\n\n res = self.eval('1 / 2', truediv=False)\n expec = 0.5\n self.assertEqual(res, expec)\n\n res = self.eval('s / 2', truediv=False)\n expec = 0.5\n self.assertEqual(res, expec)\n\n res = self.eval('s / 2', truediv=True)\n expec = 0.5\n self.assertEqual(res, expec)\n else:\n res = self.eval(ex, truediv=False)\n tm.assert_numpy_array_equal(res, np.array([1]))\n\n res = self.eval(ex, truediv=True)\n tm.assert_numpy_array_equal(res, np.array([1.0]))\n\n res = self.eval('1 / 2', truediv=True)\n expec = 0.5\n self.assertEqual(res, expec)\n\n res = self.eval('1 / 2', truediv=False)\n expec = 0\n self.assertEqual(res, expec)\n\n res = self.eval('s / 2', truediv=False)\n expec = 0\n self.assertEqual(res, expec)\n\n res = self.eval('s / 2', truediv=True)\n expec = 0.5\n self.assertEqual(res, expec)\n\n def test_failing_subscript_with_name_error(self):\n df = DataFrame(np.random.randn(5, 3))\n with tm.assertRaises(NameError):\n self.eval('df[x > 2] > 2')\n\n def test_lhs_expression_subscript(self):\n df = DataFrame(np.random.randn(5, 3))\n result = self.eval('(df + 1)[df > 2]', local_dict={'df': df})\n expected = (df + 1)[df > 2]\n assert_frame_equal(result, expected)\n\n def test_attr_expression(self):\n df = DataFrame(np.random.randn(5, 
3), columns=list('abc'))\n expr1 = 'df.a < df.b'\n expec1 = df.a < df.b\n expr2 = 'df.a + df.b + df.c'\n expec2 = df.a + df.b + df.c\n expr3 = 'df.a + df.b + df.c[df.b < 0]'\n expec3 = df.a + df.b + df.c[df.b < 0]\n exprs = expr1, expr2, expr3\n expecs = expec1, expec2, expec3\n for e, expec in zip(exprs, expecs):\n assert_series_equal(expec, self.eval(e, local_dict={'df': df}))\n\n def test_assignment_fails(self):\n df = DataFrame(np.random.randn(5, 3), columns=list('abc'))\n df2 = DataFrame(np.random.randn(5, 3))\n expr1 = 'df = df2'\n self.assertRaises(ValueError, self.eval, expr1,\n local_dict={'df': df, 'df2': df2})\n\n def test_assignment_column(self):\n tm.skip_if_no_ne('numexpr')\n df = DataFrame(np.random.randn(5, 2), columns=list('ab'))\n orig_df = df.copy()\n\n # multiple assignees\n self.assertRaises(SyntaxError, df.eval, 'd c = a + b')\n\n # invalid assignees\n self.assertRaises(SyntaxError, df.eval, 'd,c = a + b')\n self.assertRaises(\n SyntaxError, df.eval, 'Timestamp(\"20131001\") = a + b')\n\n # single assignment - existing variable\n expected = orig_df.copy()\n expected['a'] = expected['a'] + expected['b']\n df = orig_df.copy()\n df.eval('a = a + b', inplace=True)\n assert_frame_equal(df, expected)\n\n # single assignment - new variable\n expected = orig_df.copy()\n expected['c'] = expected['a'] + expected['b']\n df = orig_df.copy()\n df.eval('c = a + b', inplace=True)\n assert_frame_equal(df, expected)\n\n # with a local name overlap\n def f():\n df = orig_df.copy()\n a = 1 # noqa\n df.eval('a = 1 + b', inplace=True)\n return df\n\n df = f()\n expected = orig_df.copy()\n expected['a'] = 1 + expected['b']\n assert_frame_equal(df, expected)\n\n df = orig_df.copy()\n\n def f():\n a = 1 # noqa\n old_a = df.a.copy()\n df.eval('a = a + b', inplace=True)\n result = old_a + df.b\n assert_series_equal(result, df.a, check_names=False)\n self.assertTrue(result.name is None)\n\n f()\n\n # multiple assignment\n df = orig_df.copy()\n df.eval('c = a + b', inplace=True)\n self.assertRaises(SyntaxError, df.eval, 'c = a = b')\n\n # explicit targets\n df = orig_df.copy()\n self.eval('c = df.a + df.b', local_dict={'df': df},\n target=df, inplace=True)\n expected = orig_df.copy()\n expected['c'] = expected['a'] + expected['b']\n assert_frame_equal(df, expected)\n\n def test_column_in(self):\n # GH 11235\n df = DataFrame({'a': [11], 'b': [-32]})\n result = df.eval('a in [11, -32]')\n expected = Series([True])\n assert_series_equal(result, expected)\n\n def assignment_not_inplace(self):\n # GH 9297\n tm.skip_if_no_ne('numexpr')\n df = DataFrame(np.random.randn(5, 2), columns=list('ab'))\n\n actual = df.eval('c = a + b', inplace=False)\n self.assertIsNotNone(actual)\n expected = df.copy()\n expected['c'] = expected['a'] + expected['b']\n assert_frame_equal(df, expected)\n\n # default for inplace will change\n with tm.assert_produces_warnings(FutureWarning):\n df.eval('c = a + b')\n\n # but don't warn without assignment\n with tm.assert_produces_warnings(None):\n df.eval('a + b')\n\n def test_multi_line_expression(self):\n # GH 11149\n tm.skip_if_no_ne('numexpr')\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n expected = df.copy()\n\n expected['c'] = expected['a'] + expected['b']\n expected['d'] = expected['c'] + expected['b']\n ans = df.eval(\"\"\"\n c = a + b\n d = c + b\"\"\", inplace=True)\n assert_frame_equal(expected, df)\n self.assertIsNone(ans)\n\n expected['a'] = expected['a'] - 1\n expected['e'] = expected['a'] + 2\n ans = df.eval(\"\"\"\n a = a - 1\n e = a + 2\"\"\", 
inplace=True)\n assert_frame_equal(expected, df)\n self.assertIsNone(ans)\n\n # multi-line not valid if not all assignments\n with tm.assertRaises(ValueError):\n df.eval(\"\"\"\n a = b + 2\n b - 2\"\"\", inplace=False)\n\n def test_multi_line_expression_not_inplace(self):\n # GH 11149\n tm.skip_if_no_ne('numexpr')\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n expected = df.copy()\n\n expected['c'] = expected['a'] + expected['b']\n expected['d'] = expected['c'] + expected['b']\n df = df.eval(\"\"\"\n c = a + b\n d = c + b\"\"\", inplace=False)\n assert_frame_equal(expected, df)\n\n expected['a'] = expected['a'] - 1\n expected['e'] = expected['a'] + 2\n df = df.eval(\"\"\"\n a = a - 1\n e = a + 2\"\"\", inplace=False)\n assert_frame_equal(expected, df)\n\n def test_assignment_in_query(self):\n # GH 8664\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n df_orig = df.copy()\n with tm.assertRaises(ValueError):\n df.query('a = 1')\n assert_frame_equal(df, df_orig)\n\n def query_inplace(self):\n # GH 11149\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n expected = df.copy()\n expected = expected[expected['a'] == 2]\n df.query('a == 2', inplace=True)\n assert_frame_equal(expected, df)\n\n def test_basic_period_index_boolean_expression(self):\n df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')\n\n e = df < 2\n r = self.eval('df < 2', local_dict={'df': df})\n x = df < 2\n\n assert_frame_equal(r, e)\n assert_frame_equal(x, e)\n\n def test_basic_period_index_subscript_expression(self):\n df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')\n r = self.eval('df[df < 2 + 3]', local_dict={'df': df})\n e = df[df < 2 + 3]\n assert_frame_equal(r, e)\n\n def test_nested_period_index_subscript_expression(self):\n df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')\n r = self.eval('df[df[df < 2] < 2] + df * 2', local_dict={'df': df})\n e = df[df[df < 2] < 2] + df * 2\n assert_frame_equal(r, e)\n\n def test_date_boolean(self):\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n res = self.eval('df.dates1 < 20130101', local_dict={'df': df},\n engine=self.engine, parser=self.parser)\n expec = df.dates1 < '20130101'\n assert_series_equal(res, expec, check_names=False)\n\n def test_simple_in_ops(self):\n if self.parser != 'python':\n res = pd.eval('1 in [1, 2]', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('2 in (1, 2)', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('3 in (1, 2)', engine=self.engine,\n parser=self.parser)\n self.assertFalse(res)\n\n res = pd.eval('3 not in (1, 2)', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('[3] not in (1, 2)', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('[3] in ([3], 2)', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('[[3]] in [[[3]], 2]', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('(3,) in [(3,), 2]', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n\n res = pd.eval('(3,) not in [(3,), 2]', engine=self.engine,\n parser=self.parser)\n self.assertFalse(res)\n\n res = pd.eval('[(3,)] in [[(3,)], 2]', engine=self.engine,\n parser=self.parser)\n self.assertTrue(res)\n else:\n with tm.assertRaises(NotImplementedError):\n pd.eval('1 in [1, 2]', engine=self.engine, parser=self.parser)\n with tm.assertRaises(NotImplementedError):\n pd.eval('2 in (1, 2)', 
engine=self.engine, parser=self.parser)\n with tm.assertRaises(NotImplementedError):\n pd.eval('3 in (1, 2)', engine=self.engine, parser=self.parser)\n with tm.assertRaises(NotImplementedError):\n pd.eval('3 not in (1, 2)', engine=self.engine,\n parser=self.parser)\n with tm.assertRaises(NotImplementedError):\n pd.eval('[(3,)] in (1, 2, [(3,)])', engine=self.engine,\n parser=self.parser)\n with tm.assertRaises(NotImplementedError):\n pd.eval('[3] not in (1, 2, [[3]])', engine=self.engine,\n parser=self.parser)\n\n\nclass TestOperationsNumExprPython(TestOperationsNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestOperationsNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n cls.arith_ops = filter(lambda x: x not in ('in', 'not in'),\n cls.arith_ops)\n\n def test_check_many_exprs(self):\n a = 1\n expr = ' * '.join('a' * 33)\n expected = 1\n res = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_equal(res, expected)\n\n def test_fails_and(self):\n df = DataFrame(np.random.randn(5, 3))\n self.assertRaises(NotImplementedError, pd.eval, 'df > 2 and df > 3',\n local_dict={'df': df}, parser=self.parser,\n engine=self.engine)\n\n def test_fails_or(self):\n df = DataFrame(np.random.randn(5, 3))\n self.assertRaises(NotImplementedError, pd.eval, 'df > 2 or df > 3',\n local_dict={'df': df}, parser=self.parser,\n engine=self.engine)\n\n def test_fails_not(self):\n df = DataFrame(np.random.randn(5, 3))\n self.assertRaises(NotImplementedError, pd.eval, 'not df > 2',\n local_dict={'df': df}, parser=self.parser,\n engine=self.engine)\n\n def test_fails_ampersand(self):\n df = DataFrame(np.random.randn(5, 3))\n ex = '(df + 2)[df > 1] > 0 & (df > 0)'\n with tm.assertRaises(NotImplementedError):\n pd.eval(ex, parser=self.parser, engine=self.engine)\n\n def test_fails_pipe(self):\n df = DataFrame(np.random.randn(5, 3))\n ex = '(df + 2)[df > 1] > 0 | (df > 0)'\n with tm.assertRaises(NotImplementedError):\n pd.eval(ex, parser=self.parser, engine=self.engine)\n\n def test_bool_ops_with_constants(self):\n for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'),\n ('True', 'False')):\n ex = '{0} {1} {2}'.format(lhs, op, rhs)\n if op in ('and', 'or'):\n with tm.assertRaises(NotImplementedError):\n self.eval(ex)\n else:\n res = self.eval(ex)\n exp = eval(ex)\n self.assertEqual(res, exp)\n\n def test_simple_bool_ops(self):\n for op, lhs, rhs in product(expr._bool_ops_syms, (True, False),\n (True, False)):\n ex = 'lhs {0} rhs'.format(op)\n if op in ('and', 'or'):\n with tm.assertRaises(NotImplementedError):\n pd.eval(ex, engine=self.engine, parser=self.parser)\n else:\n res = pd.eval(ex, engine=self.engine, parser=self.parser)\n exp = eval(ex)\n self.assertEqual(res, exp)\n\n\nclass TestOperationsPythonPython(TestOperationsNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestOperationsPythonPython, cls).setUpClass()\n cls.engine = cls.parser = 'python'\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n cls.arith_ops = filter(lambda x: x not in ('in', 'not in'),\n cls.arith_ops)\n\n\nclass TestOperationsPythonPandas(TestOperationsNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestOperationsPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n\n\nclass TestMathPythonPython(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n 
super(TestMathPythonPython, cls).setUpClass()\n tm.skip_if_no_ne()\n cls.engine = 'python'\n cls.parser = 'pandas'\n cls.unary_fns = _unary_math_ops\n cls.binary_fns = _binary_math_ops\n\n @classmethod\n def tearDownClass(cls):\n del cls.engine, cls.parser\n\n def eval(self, *args, **kwargs):\n kwargs['engine'] = self.engine\n kwargs['parser'] = self.parser\n kwargs['level'] = kwargs.pop('level', 0) + 1\n return pd.eval(*args, **kwargs)\n\n def test_unary_functions(self):\n df = DataFrame({'a': np.random.randn(10)})\n a = df.a\n for fn in self.unary_fns:\n expr = \"{0}(a)\".format(fn)\n got = self.eval(expr)\n with np.errstate(all='ignore'):\n expect = getattr(np, fn)(a)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def test_binary_functions(self):\n df = DataFrame({'a': np.random.randn(10),\n 'b': np.random.randn(10)})\n a = df.a\n b = df.b\n for fn in self.binary_fns:\n expr = \"{0}(a, b)\".format(fn)\n got = self.eval(expr)\n with np.errstate(all='ignore'):\n expect = getattr(np, fn)(a, b)\n tm.assert_almost_equal(got, expect, check_names=False)\n\n def test_df_use_case(self):\n df = DataFrame({'a': np.random.randn(10),\n 'b': np.random.randn(10)})\n df.eval(\"e = arctan2(sin(a), b)\",\n engine=self.engine,\n parser=self.parser, inplace=True)\n got = df.e\n expect = np.arctan2(np.sin(df.a), df.b)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def test_df_arithmetic_subexpression(self):\n df = DataFrame({'a': np.random.randn(10),\n 'b': np.random.randn(10)})\n df.eval(\"e = sin(a + b)\",\n engine=self.engine,\n parser=self.parser, inplace=True)\n got = df.e\n expect = np.sin(df.a + df.b)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def check_result_type(self, dtype, expect_dtype):\n df = DataFrame({'a': np.random.randn(10).astype(dtype)})\n self.assertEqual(df.a.dtype, dtype)\n df.eval(\"b = sin(a)\",\n engine=self.engine,\n parser=self.parser, inplace=True)\n got = df.b\n expect = np.sin(df.a)\n self.assertEqual(expect.dtype, got.dtype)\n self.assertEqual(expect_dtype, got.dtype)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def test_result_types(self):\n self.check_result_type(np.int32, np.float64)\n self.check_result_type(np.int64, np.float64)\n self.check_result_type(np.float32, np.float32)\n self.check_result_type(np.float64, np.float64)\n\n def test_result_types2(self):\n # xref https://github.com/pandas-dev/pandas/issues/12293\n raise nose.SkipTest(\"unreliable tests on complex128\")\n\n # Did not test complex64 because DataFrame is converting it to\n # complex128. 
Due to https://github.com/pandas-dev/pandas/issues/10952\n self.check_result_type(np.complex128, np.complex128)\n\n def test_undefined_func(self):\n df = DataFrame({'a': np.random.randn(10)})\n with tm.assertRaisesRegexp(ValueError,\n \"\\\"mysin\\\" is not a supported function\"):\n df.eval(\"mysin(a)\",\n engine=self.engine,\n parser=self.parser)\n\n def test_keyword_arg(self):\n df = DataFrame({'a': np.random.randn(10)})\n with tm.assertRaisesRegexp(TypeError,\n \"Function \\\"sin\\\" does not support \"\n \"keyword arguments\"):\n df.eval(\"sin(x=a)\",\n engine=self.engine,\n parser=self.parser)\n\n\nclass TestMathPythonPandas(TestMathPythonPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestMathPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n\n\nclass TestMathNumExprPandas(TestMathPythonPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestMathNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n\n\nclass TestMathNumExprPython(TestMathPythonPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestMathNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n\n\n_var_s = randn(10)\n\n\nclass TestScope(object):\n\n def check_global_scope(self, e, engine, parser):\n tm.skip_if_no_ne(engine)\n tm.assert_numpy_array_equal(_var_s * 2, pd.eval(e, engine=engine,\n parser=parser))\n\n def test_global_scope(self):\n e = '_var_s * 2'\n for engine, parser in product(_engines, expr._parsers):\n yield self.check_global_scope, e, engine, parser\n\n def check_no_new_locals(self, engine, parser):\n tm.skip_if_no_ne(engine)\n x = 1\n lcls = locals().copy()\n pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser)\n lcls2 = locals().copy()\n lcls2.pop('lcls')\n tm.assert_equal(lcls, lcls2)\n\n def test_no_new_locals(self):\n for engine, parser in product(_engines, expr._parsers):\n yield self.check_no_new_locals, engine, parser\n\n def check_no_new_globals(self, engine, parser):\n tm.skip_if_no_ne(engine)\n x = 1\n gbls = globals().copy()\n pd.eval('x + 1', engine=engine, parser=parser)\n gbls2 = globals().copy()\n tm.assert_equal(gbls, gbls2)\n\n def test_no_new_globals(self):\n for engine, parser in product(_engines, expr._parsers):\n yield self.check_no_new_globals, engine, parser\n\n\ndef test_invalid_engine():\n tm.skip_if_no_ne()\n assertRaisesRegexp(KeyError, 'Invalid engine \\'asdf\\' passed',\n pd.eval, 'x + y', local_dict={'x': 1, 'y': 2},\n engine='asdf')\n\n\ndef test_invalid_parser():\n tm.skip_if_no_ne()\n assertRaisesRegexp(KeyError, 'Invalid parser \\'asdf\\' passed',\n pd.eval, 'x + y', local_dict={'x': 1, 'y': 2},\n parser='asdf')\n\n\n_parsers = {'python': PythonExprVisitor, 'pytables': pytables.ExprVisitor,\n 'pandas': PandasExprVisitor}\n\n\ndef check_disallowed_nodes(engine, parser):\n tm.skip_if_no_ne(engine)\n VisitorClass = _parsers[parser]\n uns_ops = VisitorClass.unsupported_nodes\n inst = VisitorClass('x + 1', engine, parser)\n\n for ops in uns_ops:\n assert_raises(NotImplementedError, getattr(inst, ops))\n\n\ndef test_disallowed_nodes():\n for engine, visitor in product(_parsers, repeat=2):\n yield check_disallowed_nodes, engine, visitor\n\n\ndef check_syntax_error_exprs(engine, parser):\n tm.skip_if_no_ne(engine)\n e = 's +'\n assert_raises(SyntaxError, pd.eval, e, engine=engine, parser=parser)\n\n\ndef test_syntax_error_exprs():\n for engine, parser in ENGINES_PARSERS:\n yield check_syntax_error_exprs, engine, parser\n\n\ndef check_name_error_exprs(engine, 
parser):\n tm.skip_if_no_ne(engine)\n e = 's + t'\n with tm.assertRaises(NameError):\n pd.eval(e, engine=engine, parser=parser)\n\n\ndef test_name_error_exprs():\n for engine, parser in ENGINES_PARSERS:\n yield check_name_error_exprs, engine, parser\n\n\ndef check_invalid_local_variable_reference(engine, parser):\n tm.skip_if_no_ne(engine)\n\n a, b = 1, 2\n exprs = 'a + @b', '@a + b', '@a + @b'\n for expr in exprs:\n if parser != 'pandas':\n with tm.assertRaisesRegexp(SyntaxError, \"The '@' prefix is only\"):\n pd.eval(exprs, engine=engine, parser=parser)\n else:\n with tm.assertRaisesRegexp(SyntaxError, \"The '@' prefix is not\"):\n pd.eval(exprs, engine=engine, parser=parser)\n\n\ndef test_invalid_local_variable_reference():\n for engine, parser in ENGINES_PARSERS:\n yield check_invalid_local_variable_reference, engine, parser\n\n\ndef check_numexpr_builtin_raises(engine, parser):\n tm.skip_if_no_ne(engine)\n sin, dotted_line = 1, 2\n if engine == 'numexpr':\n with tm.assertRaisesRegexp(NumExprClobberingError,\n 'Variables in expression .+'):\n pd.eval('sin + dotted_line', engine=engine, parser=parser)\n else:\n res = pd.eval('sin + dotted_line', engine=engine, parser=parser)\n tm.assert_equal(res, sin + dotted_line)\n\n\ndef test_numexpr_builtin_raises():\n for engine, parser in ENGINES_PARSERS:\n yield check_numexpr_builtin_raises, engine, parser\n\n\ndef check_bad_resolver_raises(engine, parser):\n tm.skip_if_no_ne(engine)\n cannot_resolve = 42, 3.0\n with tm.assertRaisesRegexp(TypeError, 'Resolver of type .+'):\n pd.eval('1 + 2', resolvers=cannot_resolve, engine=engine,\n parser=parser)\n\n\ndef test_bad_resolver_raises():\n for engine, parser in ENGINES_PARSERS:\n yield check_bad_resolver_raises, engine, parser\n\n\ndef check_empty_string_raises(engine, parser):\n # GH 13139\n tm.skip_if_no_ne(engine)\n with tm.assertRaisesRegexp(ValueError, 'expr cannot be an empty string'):\n pd.eval('', engine=engine, parser=parser)\n\n\ndef test_empty_string_raises():\n for engine, parser in ENGINES_PARSERS:\n yield check_empty_string_raises, engine, parser\n\n\ndef check_more_than_one_expression_raises(engine, parser):\n tm.skip_if_no_ne(engine)\n with tm.assertRaisesRegexp(SyntaxError,\n 'only a single expression is allowed'):\n pd.eval('1 + 1; 2 + 2', engine=engine, parser=parser)\n\n\ndef test_more_than_one_expression_raises():\n for engine, parser in ENGINES_PARSERS:\n yield check_more_than_one_expression_raises, engine, parser\n\n\ndef check_bool_ops_fails_on_scalars(gen, lhs, cmp, rhs, engine, parser):\n tm.skip_if_no_ne(engine)\n mid = gen[type(lhs)]()\n ex1 = 'lhs {0} mid {1} rhs'.format(cmp, cmp)\n ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp, cmp)\n ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp, cmp)\n for ex in (ex1, ex2, ex3):\n with tm.assertRaises(NotImplementedError):\n pd.eval(ex, engine=engine, parser=parser)\n\n\ndef test_bool_ops_fails_on_scalars():\n _bool_ops_syms = 'and', 'or'\n dtypes = int, float\n gen = {int: lambda: np.random.randint(10), float: np.random.randn}\n for engine, parser, dtype1, cmp, dtype2 in product(_engines, expr._parsers,\n dtypes, _bool_ops_syms,\n dtypes):\n yield (check_bool_ops_fails_on_scalars, gen, gen[dtype1](), cmp,\n gen[dtype2](), engine, parser)\n\n\ndef check_inf(engine, parser):\n tm.skip_if_no_ne(engine)\n s = 'inf + 1'\n expected = np.inf\n result = pd.eval(s, engine=engine, parser=parser)\n tm.assert_equal(result, expected)\n\n\ndef test_inf():\n for engine, parser in ENGINES_PARSERS:\n yield check_inf, engine, parser\n\n\ndef 
check_negate_lt_eq_le(engine, parser):\n tm.skip_if_no_ne(engine)\n df = pd.DataFrame([[0, 10], [1, 20]], columns=['cat', 'count'])\n expected = df[~(df.cat > 0)]\n\n result = df.query('~(cat > 0)', engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n if parser == 'python':\n with tm.assertRaises(NotImplementedError):\n df.query('not (cat > 0)', engine=engine, parser=parser)\n else:\n result = df.query('not (cat > 0)', engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_negate_lt_eq_le():\n for engine, parser in product(_engines, expr._parsers):\n yield check_negate_lt_eq_le, engine, parser\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport sys\n\nfrom decimal import Decimal\nfrom itertools import product\nimport warnings\n\nimport pytest\nfrom pytest import raises as assert_raises\nfrom numpy.testing import (\n assert_equal,\n assert_almost_equal, assert_array_equal, assert_array_almost_equal,\n assert_allclose, assert_, assert_warns, assert_array_less)\nfrom scipy._lib._numpy_compat import suppress_warnings\nfrom numpy import array, arange\nimport numpy as np\n\nfrom scipy.ndimage.filters import correlate1d\nfrom scipy.optimize import fmin\nfrom scipy import signal\nfrom scipy.signal import (\n correlate, convolve, convolve2d, fftconvolve, choose_conv_method,\n hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos,\n invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt,\n sosfilt_zi, tf2zpk, BadCoefficients)\nfrom scipy.signal.windows import hann\nfrom scipy.signal.signaltools import _filtfilt_gust\n\n\nif sys.version_info.major >= 3 and sys.version_info.minor >= 5:\n from math import gcd\nelse:\n from fractions import gcd\n\n\nclass _TestConvolve(object):\n\n def test_basic(self):\n a = [3, 4, 5, 6, 5, 4]\n b = [1, 2, 3]\n c = convolve(a, b)\n assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))\n\n def test_same(self):\n a = [3, 4, 5]\n b = [1, 2, 3, 4]\n c = convolve(a, b, mode=\"same\")\n assert_array_equal(c, array([10, 22, 34]))\n\n def test_same_eq(self):\n a = [3, 4, 5]\n b = [1, 2, 3]\n c = convolve(a, b, mode=\"same\")\n assert_array_equal(c, array([10, 22, 22]))\n\n def test_complex(self):\n x = array([1 + 1j, 2 + 1j, 3 + 1j])\n y = array([1 + 1j, 2 + 1j])\n z = convolve(x, y)\n assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))\n\n def test_zero_rank(self):\n a = 1289\n b = 4567\n c = convolve(a, b)\n assert_equal(c, a * b)\n\n def test_single_element(self):\n a = array([4967])\n b = array([3920])\n c = convolve(a, b)\n assert_equal(c, a * b)\n\n def test_2d_arrays(self):\n a = [[1, 2, 3], [3, 4, 5]]\n b = [[2, 3, 4], [4, 5, 6]]\n c = convolve(a, b)\n d = array([[2, 7, 16, 17, 12],\n [10, 30, 62, 58, 38],\n [12, 31, 58, 49, 30]])\n assert_array_equal(c, d)\n\n def test_input_swapping(self):\n small = arange(8).reshape(2, 2, 2)\n big = 1j * arange(27).reshape(3, 3, 3)\n big += arange(27)[::-1].reshape(3, 3, 3)\n\n out_array = array(\n [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],\n [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],\n [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],\n [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],\n\n [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],\n [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],\n [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],\n [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],\n\n [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],\n [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],\n [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],\n [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],\n\n [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],\n [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],\n [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],\n [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])\n\n assert_array_equal(convolve(small, big, 'full'), out_array)\n assert_array_equal(convolve(big, small, 'full'), out_array)\n assert_array_equal(convolve(small, big, 'same'),\n out_array[1:3, 1:3, 1:3])\n assert_array_equal(convolve(big, small, 'same'),\n out_array[0:3, 0:3, 0:3])\n assert_array_equal(convolve(small, big, 'valid'),\n out_array[1:3, 1:3, 1:3])\n 
assert_array_equal(convolve(big, small, 'valid'),\n out_array[1:3, 1:3, 1:3])\n\n def test_invalid_params(self):\n a = [3, 4, 5]\n b = [1, 2, 3]\n assert_raises(ValueError, convolve, a, b, mode='spam')\n assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft')\n assert_raises(ValueError, convolve, a, b, mode='ham', method='direct')\n assert_raises(ValueError, convolve, a, b, mode='full', method='bacon')\n assert_raises(ValueError, convolve, a, b, mode='same', method='bacon')\n\n\nclass TestConvolve(_TestConvolve):\n\n def test_valid_mode2(self):\n # See gh-5897\n a = [1, 2, 3, 6, 5, 3]\n b = [2, 3, 4, 5, 3, 4, 2, 2, 1]\n expected = [70, 78, 73, 65]\n\n out = convolve(a, b, 'valid')\n assert_array_equal(out, expected)\n\n out = convolve(b, a, 'valid')\n assert_array_equal(out, expected)\n\n a = [1 + 5j, 2 - 1j, 3 + 0j]\n b = [2 - 3j, 1 + 0j]\n expected = [2 - 3j, 8 - 10j]\n\n out = convolve(a, b, 'valid')\n assert_array_equal(out, expected)\n\n out = convolve(b, a, 'valid')\n assert_array_equal(out, expected)\n\n def test_same_mode(self):\n a = [1, 2, 3, 3, 1, 2]\n b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]\n c = convolve(a, b, 'same')\n d = array([57, 61, 63, 57, 45, 36])\n assert_array_equal(c, d)\n\n def test_invalid_shapes(self):\n # By \"invalid,\" we mean that no one\n # array has dimensions that are all at\n # least as large as the corresponding\n # dimensions of the other array. This\n # setup should throw a ValueError.\n a = np.arange(1, 7).reshape((2, 3))\n b = np.arange(-6, 0).reshape((3, 2))\n\n assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'})\n assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'})\n\n def test_convolve_method(self, n=100):\n types = sum([t for _, t in np.sctypes.items()], [])\n types = {np.dtype(t).name for t in types}\n\n # These types include 'bool' and all precisions (int8, float32, etc)\n # The removed types throw errors in correlate or fftconvolve\n for dtype in ['complex256', 'complex192', 'float128', 'float96',\n 'str', 'void', 'bytes', 'object', 'unicode', 'string']:\n if dtype in types:\n types.remove(dtype)\n\n args = [(t1, t2, mode) for t1 in types for t2 in types\n for mode in ['valid', 'full', 'same']]\n\n # These are random arrays, which means test is much stronger than\n # convolving testing by convolving two np.ones arrays\n np.random.seed(42)\n array_types = {'i': np.random.choice([0, 1], size=n),\n 'f': np.random.randn(n)}\n array_types['b'] = array_types['u'] = array_types['i']\n array_types['c'] = array_types['f'] + 0.5j*array_types['f']\n\n for t1, t2, mode in args:\n x1 = array_types[np.dtype(t1).kind].astype(t1)\n x2 = array_types[np.dtype(t2).kind].astype(t2)\n\n results = {key: convolve(x1, x2, method=key, mode=mode)\n for key in ['fft', 'direct']}\n\n assert_equal(results['fft'].dtype, results['direct'].dtype)\n\n if 'bool' in t1 and 'bool' in t2:\n assert_equal(choose_conv_method(x1, x2), 'direct')\n continue\n\n # Found by experiment. 
Found approx smallest value for (rtol, atol)\n # threshold to have tests pass.\n if any([t in {'complex64', 'float32'} for t in [t1, t2]]):\n kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}\n elif 'float16' in [t1, t2]:\n # atol is default for np.allclose\n kwargs = {'rtol': 1e-3, 'atol': 1e-8}\n else:\n # defaults for np.allclose (different from assert_allclose)\n kwargs = {'rtol': 1e-5, 'atol': 1e-8}\n\n assert_allclose(results['fft'], results['direct'], **kwargs)\n\n def test_convolve_method_large_input(self):\n # This is really a test that convolving two large integers goes to the\n # direct method even if they're in the fft method.\n for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:\n z = np.array([2**n], dtype=np.int64)\n fft = convolve(z, z, method='fft')\n direct = convolve(z, z, method='direct')\n\n # this is the case when integer precision gets to us\n # issue #6076 has more detail, hopefully more tests after resolved\n if n < 50:\n assert_equal(fft, direct)\n assert_equal(fft, 2**(2*n))\n assert_equal(direct, 2**(2*n))\n\n def test_mismatched_dims(self):\n # Input arrays should have the same number of dimensions\n assert_raises(ValueError, convolve, [1], 2, method='direct')\n assert_raises(ValueError, convolve, 1, [2], method='direct')\n assert_raises(ValueError, convolve, [1], 2, method='fft')\n assert_raises(ValueError, convolve, 1, [2], method='fft')\n assert_raises(ValueError, convolve, [1], [[2]])\n assert_raises(ValueError, convolve, [3], 2)\n\n\nclass _TestConvolve2d(object):\n\n def test_2d_arrays(self):\n a = [[1, 2, 3], [3, 4, 5]]\n b = [[2, 3, 4], [4, 5, 6]]\n d = array([[2, 7, 16, 17, 12],\n [10, 30, 62, 58, 38],\n [12, 31, 58, 49, 30]])\n e = convolve2d(a, b)\n assert_array_equal(e, d)\n\n def test_valid_mode(self):\n e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]\n f = [[1, 2, 3], [3, 4, 5]]\n h = array([[62, 80, 98, 116, 134]])\n\n g = convolve2d(e, f, 'valid')\n assert_array_equal(g, h)\n\n # See gh-5897\n g = convolve2d(f, e, 'valid')\n assert_array_equal(g, h)\n\n def test_valid_mode_complx(self):\n e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]\n f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j\n h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])\n\n g = convolve2d(e, f, 'valid')\n assert_array_almost_equal(g, h)\n\n # See gh-5897\n g = convolve2d(f, e, 'valid')\n assert_array_equal(g, h)\n\n def test_fillvalue(self):\n a = [[1, 2, 3], [3, 4, 5]]\n b = [[2, 3, 4], [4, 5, 6]]\n fillval = 1\n c = convolve2d(a, b, 'full', 'fill', fillval)\n d = array([[24, 26, 31, 34, 32],\n [28, 40, 62, 64, 52],\n [32, 46, 67, 62, 48]])\n assert_array_equal(c, d)\n\n def test_fillvalue_deprecations(self):\n # Deprecated 2017-07, scipy version 1.0.0\n with suppress_warnings() as sup:\n sup.filter(np.ComplexWarning, \"Casting complex values to real\")\n r = sup.record(DeprecationWarning, \"could not cast `fillvalue`\")\n convolve2d([[1]], [[1, 2]], fillvalue=1j)\n assert_(len(r) == 1)\n warnings.filterwarnings(\n \"error\", message=\"could not cast `fillvalue`\",\n category=DeprecationWarning)\n assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],\n fillvalue=1j)\n\n with suppress_warnings():\n warnings.filterwarnings(\n \"always\", message=\"`fillvalue` must be scalar or an array \",\n category=DeprecationWarning)\n assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]],\n fillvalue=[1, 2])\n warnings.filterwarnings(\n \"error\", message=\"`fillvalue` must be scalar or an array \",\n category=DeprecationWarning)\n 
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],\n fillvalue=[1, 2])\n\n def test_fillvalue_empty(self):\n # Check that fillvalue being empty raises an error:\n assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],\n fillvalue=[])\n\n def test_wrap_boundary(self):\n a = [[1, 2, 3], [3, 4, 5]]\n b = [[2, 3, 4], [4, 5, 6]]\n c = convolve2d(a, b, 'full', 'wrap')\n d = array([[80, 80, 74, 80, 80],\n [68, 68, 62, 68, 68],\n [80, 80, 74, 80, 80]])\n assert_array_equal(c, d)\n\n def test_sym_boundary(self):\n a = [[1, 2, 3], [3, 4, 5]]\n b = [[2, 3, 4], [4, 5, 6]]\n c = convolve2d(a, b, 'full', 'symm')\n d = array([[34, 30, 44, 62, 66],\n [52, 48, 62, 80, 84],\n [82, 78, 92, 110, 114]])\n assert_array_equal(c, d)\n\n def test_invalid_shapes(self):\n # By \"invalid,\" we mean that no one\n # array has dimensions that are all at\n # least as large as the corresponding\n # dimensions of the other array. This\n # setup should throw a ValueError.\n a = np.arange(1, 7).reshape((2, 3))\n b = np.arange(-6, 0).reshape((3, 2))\n\n assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'})\n assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'})\n\n\nclass TestConvolve2d(_TestConvolve2d):\n\n def test_same_mode(self):\n e = [[1, 2, 3], [3, 4, 5]]\n f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]\n g = convolve2d(e, f, 'same')\n h = array([[22, 28, 34],\n [80, 98, 116]])\n assert_array_equal(g, h)\n\n def test_valid_mode2(self):\n # See gh-5897\n e = [[1, 2, 3], [3, 4, 5]]\n f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]\n expected = [[62, 80, 98, 116, 134]]\n\n out = convolve2d(e, f, 'valid')\n assert_array_equal(out, expected)\n\n out = convolve2d(f, e, 'valid')\n assert_array_equal(out, expected)\n\n e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]]\n f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]]\n expected = [[27 - 1j, 46. 
+ 2j]]\n\n out = convolve2d(e, f, 'valid')\n assert_array_equal(out, expected)\n\n # See gh-5897\n out = convolve2d(f, e, 'valid')\n assert_array_equal(out, expected)\n\n def test_consistency_convolve_funcs(self):\n # Compare np.convolve, signal.convolve, signal.convolve2d\n a = np.arange(5)\n b = np.array([3.2, 1.4, 3])\n for mode in ['full', 'valid', 'same']:\n assert_almost_equal(np.convolve(a, b, mode=mode),\n signal.convolve(a, b, mode=mode))\n assert_almost_equal(np.squeeze(\n signal.convolve2d([a], [b], mode=mode)),\n signal.convolve(a, b, mode=mode))\n\n def test_invalid_dims(self):\n assert_raises(ValueError, convolve2d, 3, 4)\n assert_raises(ValueError, convolve2d, [3], [4])\n assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]])\n\n\nclass TestFFTConvolve(object):\n\n def test_real(self):\n x = array([1, 2, 3])\n assert_array_almost_equal(signal.fftconvolve(x, x), [1, 4, 10, 12, 9.])\n\n def test_complex(self):\n x = array([1 + 1j, 2 + 2j, 3 + 3j])\n assert_array_almost_equal(signal.fftconvolve(x, x),\n [0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])\n\n def test_2d_real_same(self):\n a = array([[1, 2, 3], [4, 5, 6]])\n assert_array_almost_equal(signal.fftconvolve(a, a),\n array([[1, 4, 10, 12, 9],\n [8, 26, 56, 54, 36],\n [16, 40, 73, 60, 36]]))\n\n def test_2d_complex_same(self):\n a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]])\n c = fftconvolve(a, a)\n d = array([[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],\n [10j, 44j, 118j, 156j, 122j],\n [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]])\n assert_array_almost_equal(c, d)\n\n def test_real_same_mode(self):\n a = array([1, 2, 3])\n b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])\n c = fftconvolve(a, b, 'same')\n d = array([35., 41., 47.])\n assert_array_almost_equal(c, d)\n\n def test_real_same_mode2(self):\n a = array([3, 3, 5, 6, 8, 7, 9, 0, 1])\n b = array([1, 2, 3])\n c = fftconvolve(a, b, 'same')\n d = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])\n assert_array_almost_equal(c, d)\n\n def test_valid_mode(self):\n # See gh-5897\n a = array([3, 2, 1])\n b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])\n expected = array([24., 31., 41., 43., 49., 25., 12.])\n\n out = fftconvolve(a, b, 'valid')\n assert_array_almost_equal(out, expected)\n\n out = fftconvolve(b, a, 'valid')\n assert_array_almost_equal(out, expected)\n\n a = array([3 - 1j, 2 + 7j, 1 + 0j])\n b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])\n expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j])\n\n out = fftconvolve(a, b, 'valid')\n assert_array_almost_equal(out, expected)\n\n out = fftconvolve(b, a, 'valid')\n assert_array_almost_equal(out, expected)\n\n def test_real_valid_mode(self):\n a = array([3, 3, 5, 6, 8, 7, 9, 0, 1])\n b = array([3, 2, 1])\n d = array([24., 31., 41., 43., 49., 25., 12.])\n\n c = fftconvolve(a, b, 'valid')\n assert_array_almost_equal(c, d)\n\n # See gh-5897\n c = fftconvolve(b, a, 'valid')\n assert_array_almost_equal(c, d)\n\n def test_empty(self):\n # Regression test for #1745: crashes with 0-length input.\n assert_(fftconvolve([], []).size == 0)\n assert_(fftconvolve([5, 6], []).size == 0)\n assert_(fftconvolve([], [7]).size == 0)\n\n def test_zero_rank(self):\n a = array(4967)\n b = array(3920)\n c = fftconvolve(a, b)\n assert_equal(c, a * b)\n\n def test_single_element(self):\n a = array([4967])\n b = array([3920])\n c = fftconvolve(a, b)\n assert_equal(c, a * b)\n\n def test_random_data(self):\n np.random.seed(1234)\n a = np.random.rand(1233) + 1j * np.random.rand(1233)\n b = np.random.rand(1321) + 1j * np.random.rand(1321)\n c = fftconvolve(a, b, 'full')\n d = np.convolve(a, b, 'full')\n assert_(np.allclose(c, d, rtol=1e-10))\n\n @pytest.mark.slow\n def test_many_sizes(self):\n np.random.seed(1234)\n\n def ns():\n for j in range(1, 100):\n yield j\n for j in range(1000, 1500):\n yield j\n for k in range(50):\n yield np.random.randint(1001, 10000)\n\n for n in ns():\n msg = 'n=%d' % (n,)\n a = np.random.rand(n) + 1j * np.random.rand(n)\n b = np.random.rand(n) + 1j * np.random.rand(n)\n c = fftconvolve(a, b, 'full')\n d = np.convolve(a, b, 'full')\n assert_allclose(c, d, atol=1e-10, err_msg=msg)\n\n def test_invalid_shapes(self):\n # By \"invalid,\" we mean that no one\n # array has dimensions that are all at\n # least as large as the corresponding\n # dimensions of the other array. 
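# Editor's aside: a hedged sketch (not from the original suite) spelling out the
# 'valid'-mode rule stated in the comment above: one input must be at least as
# large as the other along every axis, otherwise a ValueError is raised. The
# shapes below are arbitrary examples.
import numpy as np
from scipy.signal import fftconvolve

ok = fftconvolve(np.ones((3, 3)), np.ones((2, 3)), mode='valid')
print(ok.shape)                # (2, 1): (3-2+1, 3-3+1)

try:
    fftconvolve(np.ones((2, 3)), np.ones((3, 2)), mode='valid')
except ValueError as err:
    print('rejected:', err)    # neither array dominates in every dimension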
This\n # setup should throw a ValueError.\n a = np.arange(1, 7).reshape((2, 3))\n b = np.arange(-6, 0).reshape((3, 2))\n\n assert_raises(ValueError, fftconvolve, *(a, b), **{'mode': 'valid'})\n assert_raises(ValueError, fftconvolve, *(b, a), **{'mode': 'valid'})\n\n def test_mismatched_dims(self):\n assert_raises(ValueError, fftconvolve, [1], 2)\n assert_raises(ValueError, fftconvolve, 1, [2])\n assert_raises(ValueError, fftconvolve, [1], [[2]])\n assert_raises(ValueError, fftconvolve, [3], 2)\n\n def test_invalid_flags(self):\n assert_raises(ValueError, fftconvolve, [1], [2], mode='chips')\n\n\nclass TestMedFilt(object):\n\n def test_basic(self):\n f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],\n [50, 50, 50, 50, 50, 0, 72, 77, 68, 66],\n [50, 50, 50, 50, 50, 46, 47, 19, 64, 77],\n [50, 50, 50, 50, 50, 42, 15, 29, 95, 35],\n [50, 50, 50, 50, 50, 46, 34, 9, 21, 66],\n [70, 97, 28, 68, 78, 77, 61, 58, 71, 42],\n [64, 53, 44, 29, 68, 32, 19, 68, 24, 84],\n [3, 33, 53, 67, 1, 78, 74, 55, 12, 83],\n [7, 11, 46, 70, 60, 47, 24, 43, 61, 26],\n [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]\n\n d = signal.medfilt(f, [7, 3])\n e = signal.medfilt2d(np.array(f, float), [7, 3])\n assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],\n [0, 50, 50, 50, 50, 42, 19, 21, 29, 0],\n [50, 50, 50, 50, 50, 47, 34, 34, 46, 35],\n [50, 50, 50, 50, 50, 50, 42, 47, 64, 42],\n [50, 50, 50, 50, 50, 50, 46, 55, 64, 35],\n [33, 50, 50, 50, 50, 47, 46, 43, 55, 26],\n [32, 50, 50, 50, 50, 47, 46, 45, 55, 26],\n [7, 46, 50, 50, 47, 46, 46, 43, 45, 21],\n [0, 32, 33, 39, 32, 32, 43, 43, 43, 0],\n [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])\n assert_array_equal(d, e)\n\n def test_none(self):\n # Ticket #1124. Ensure this does not segfault.\n try:\n signal.medfilt(None)\n except:\n pass\n # Expand on this test to avoid a regression with possible contiguous\n # numpy arrays that have odd strides. 
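# Editor's aside: illustrative only, not part of the original file. It shows the
# basic behaviour the median-filter tests above rely on: an odd-sized window
# slides over the zero-padded input, so isolated single-sample spikes are
# removed. The input values are arbitrary.
import numpy as np
from scipy.signal import medfilt

x = np.array([0., 10., 0., 0., 5., 0., 0.])
print(medfilt(x, kernel_size=3))   # width-1 spikes are suppressed to zero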
The stride value below gets\n # us into wrong memory if used (but it does not need to be used)\n dummy = np.arange(10, dtype=np.float64)\n a = dummy[5:6]\n a.strides = 16\n assert_(signal.medfilt(a, 1) == 5.)\n\n def test_refcounting(self):\n # Check a refcounting-related crash\n a = Decimal(123)\n x = np.array([a, a], dtype=object)\n if hasattr(sys, 'getrefcount'):\n n = 2 * sys.getrefcount(a)\n else:\n n = 10\n # Shouldn't segfault:\n for j in range(n):\n signal.medfilt(x)\n if hasattr(sys, 'getrefcount'):\n assert_(sys.getrefcount(a) < n)\n assert_equal(x, [a, a])\n\n\nclass TestWiener(object):\n\n def test_basic(self):\n g = array([[5, 6, 4, 3],\n [3, 5, 6, 2],\n [2, 3, 5, 6],\n [1, 6, 9, 7]], 'd')\n h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],\n [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],\n [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],\n [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])\n assert_array_almost_equal(signal.wiener(g), h, decimal=6)\n assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)\n\n\nclass TestResample(object):\n\n def test_basic(self):\n # Some basic tests\n\n # Regression test for issue #3603.\n # window.shape must equal to sig.shape[0]\n sig = np.arange(128)\n num = 256\n win = signal.get_window(('kaiser', 8.0), 160)\n assert_raises(ValueError, signal.resample, sig, num, window=win)\n\n # Other degenerate conditions\n assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)\n assert_raises(ValueError, signal.resample_poly, sig, 1, 0)\n\n # test for issue #6505 - should not modify window.shape when axis ≠ 0\n sig2 = np.tile(np.arange(160), (2,1))\n signal.resample(sig2, num, axis=-1, window=win)\n assert_(win.shape == (160,))\n\n def test_fft(self):\n # Test FFT-based resampling\n self._test_data(method='fft')\n\n def test_polyphase(self):\n # Test polyphase resampling\n self._test_data(method='polyphase')\n\n def test_polyphase_extfilter(self):\n # Test external specification of downsampling filter\n self._test_data(method='polyphase', ext=True)\n\n def test_mutable_window(self):\n # Test that a mutable window is not modified\n impulse = np.zeros(3)\n window = np.random.RandomState(0).randn(2)\n window_orig = window.copy()\n signal.resample_poly(impulse, 5, 1, window=window)\n assert_array_equal(window, window_orig)\n\n def _test_data(self, method, ext=False):\n # Test resampling of sinusoids and random noise (1-sec)\n rate = 100\n rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]\n\n # Sinusoids, windowed to avoid edge artifacts\n t = np.arange(rate) / float(rate)\n freqs = np.array((1., 10., 40.))[:, np.newaxis]\n x = np.sin(2 * np.pi * freqs * t) * hann(rate)\n\n for rate_to in rates_to:\n t_to = np.arange(rate_to) / float(rate_to)\n y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to)\n if method == 'fft':\n y_resamps = signal.resample(x, rate_to, axis=-1)\n else:\n if ext and rate_to != rate:\n # Match default window design\n g = gcd(rate_to, rate)\n up = rate_to // g\n down = rate // g\n max_rate = max(up, down)\n f_c = 1. / max_rate\n half_len = 10 * max_rate\n window = signal.firwin(2 * half_len + 1, f_c,\n window=('kaiser', 5.0))\n polyargs = {'window': window}\n else:\n polyargs = {}\n\n y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,\n **polyargs)\n\n for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):\n if freq >= 0.5 * rate_to:\n y_to.fill(0.) 
# mostly low-passed away\n assert_allclose(y_resamp, y_to, atol=1e-3)\n else:\n assert_array_equal(y_to.shape, y_resamp.shape)\n corr = np.corrcoef(y_to, y_resamp)[0, 1]\n assert_(corr > 0.99, msg=(corr, rate, rate_to))\n\n # Random data\n rng = np.random.RandomState(0)\n x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind\n for rate_to in rates_to:\n # random data\n t_to = np.arange(rate_to) / float(rate_to)\n y_to = np.interp(t_to, t, x)\n if method == 'fft':\n y_resamp = signal.resample(x, rate_to)\n else:\n y_resamp = signal.resample_poly(x, rate_to, rate)\n assert_array_equal(y_to.shape, y_resamp.shape)\n corr = np.corrcoef(y_to, y_resamp)[0, 1]\n assert_(corr > 0.99, msg=corr)\n\n # More tests of fft method (Master 0.18.1 fails these)\n if method == 'fft':\n x1 = np.array([1.+0.j,0.+0.j])\n y1_test = signal.resample(x1,4)\n y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j]) # upsampling a complex array\n assert_allclose(y1_test, y1_true, atol=1e-12)\n x2 = np.array([1.,0.5,0.,0.5])\n y2_test = signal.resample(x2,2) # downsampling a real array\n y2_true = np.array([1.,0.])\n assert_allclose(y2_test, y2_true, atol=1e-12)\n\n def test_poly_vs_filtfilt(self):\n # Check that up=1.0 gives same answer as filtfilt + slicing\n random_state = np.random.RandomState(17)\n try_types = (int, np.float32, np.complex64, float, complex)\n size = 10000\n down_factors = [2, 11, 79]\n\n for dtype in try_types:\n x = random_state.randn(size).astype(dtype)\n if dtype in (np.complex64, np.complex128):\n x += 1j * random_state.randn(size)\n\n # resample_poly assumes zeros outside of signl, whereas filtfilt\n # can only constant-pad. Make them equivalent:\n x[0] = 0\n x[-1] = 0\n\n for down in down_factors:\n h = signal.firwin(31, 1. / down, window='hamming')\n yf = filtfilt(h, 1.0, x, padtype='constant')[::down]\n\n # Need to pass convolved version of filter to resample_poly,\n # since filtfilt does forward and backward, but resample_poly\n # only goes forward\n hc = convolve(h, h[::-1])\n y = signal.resample_poly(x, 1, down, window=hc)\n assert_allclose(yf, y, atol=1e-7, rtol=1e-7)\n\n def test_correlate1d(self):\n for down in [2, 4]:\n for nx in range(1, 40, down):\n for nweights in (32, 33):\n x = np.random.random((nx,))\n weights = np.random.random((nweights,))\n y_g = correlate1d(x, weights[::-1], mode='constant')\n y_s = signal.resample_poly(x, up=1, down=down, window=weights)\n assert_allclose(y_g[::down], y_s)\n\n\nclass TestCSpline1DEval(object):\n\n def test_basic(self):\n y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])\n x = arange(len(y))\n dx = x[1] - x[0]\n cj = signal.cspline1d(y)\n\n x2 = arange(len(y) * 10.0) / 10.0\n y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])\n\n # make sure interpolated values are on knot points\n assert_array_almost_equal(y2[::10], y, decimal=5)\n\n def test_complex(self):\n # create some smoothly varying complex signal to interpolate\n x = np.arange(2)\n y = np.zeros(x.shape, dtype=np.complex64)\n T = 10.0\n f = 1.0 / T\n y = np.exp(2.0J * np.pi * f * x)\n\n # get the cspline transform\n cy = signal.cspline1d(y)\n\n # determine new test x value and interpolate\n xnew = np.array([0.5])\n ynew = signal.cspline1d_eval(cy, xnew)\n\n assert_equal(ynew.dtype, y.dtype)\n\nclass TestOrderFilt(object):\n\n def test_basic(self):\n assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),\n [2, 3, 2])\n\n\nclass _TestLinearFilter(object):\n def generate(self, shape):\n x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)\n return 
self.convert_dtype(x)\n\n def convert_dtype(self, arr):\n if self.dtype == np.dtype('O'):\n arr = np.asarray(arr)\n out = np.empty(arr.shape, self.dtype)\n iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'],\n [['readonly'],['writeonly']])\n for x, y in iter:\n y[...] = self.type(x[()])\n return out\n else:\n return np.array(arr, self.dtype, copy=False)\n\n def test_rank_1_IIR(self):\n x = self.generate((6,))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, -0.5])\n y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])\n assert_array_almost_equal(lfilter(b, a, x), y_r)\n\n def test_rank_1_FIR(self):\n x = self.generate((6,))\n b = self.convert_dtype([1, 1])\n a = self.convert_dtype([1])\n y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])\n assert_array_almost_equal(lfilter(b, a, x), y_r)\n\n def test_rank_1_IIR_init_cond(self):\n x = self.generate((6,))\n b = self.convert_dtype([1, 0, -1])\n a = self.convert_dtype([0.5, -0.5])\n zi = self.convert_dtype([1, 2])\n y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])\n zf_r = self.convert_dtype([13, -10])\n y, zf = lfilter(b, a, x, zi=zi)\n assert_array_almost_equal(y, y_r)\n assert_array_almost_equal(zf, zf_r)\n\n def test_rank_1_FIR_init_cond(self):\n x = self.generate((6,))\n b = self.convert_dtype([1, 1, 1])\n a = self.convert_dtype([1])\n zi = self.convert_dtype([1, 1])\n y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])\n zf_r = self.convert_dtype([9, 5])\n y, zf = lfilter(b, a, x, zi=zi)\n assert_array_almost_equal(y, y_r)\n assert_array_almost_equal(zf, zf_r)\n\n def test_rank_2_IIR_axis_0(self):\n x = self.generate((4, 3))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, 0.5])\n y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],\n [6, 4, 2]])\n y = lfilter(b, a, x, axis=0)\n assert_array_almost_equal(y_r2_a0, y)\n\n def test_rank_2_IIR_axis_1(self):\n x = self.generate((4, 3))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, 0.5])\n y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],\n [18, -16, 18]])\n y = lfilter(b, a, x, axis=1)\n assert_array_almost_equal(y_r2_a1, y)\n\n def test_rank_2_IIR_axis_0_init_cond(self):\n x = self.generate((4, 3))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, 0.5])\n zi = self.convert_dtype(np.ones((4,1)))\n\n y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],\n [19, -17, 19]])\n zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]\n y, zf = lfilter(b, a, x, axis=1, zi=zi)\n assert_array_almost_equal(y_r2_a0_1, y)\n assert_array_almost_equal(zf, zf_r)\n\n def test_rank_2_IIR_axis_1_init_cond(self):\n x = self.generate((4,3))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, 0.5])\n zi = self.convert_dtype(np.ones((1,3)))\n\n y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],\n [1, 3, 5], [5, 3, 1]])\n zf_r = self.convert_dtype([[-23, -23, -23]])\n y, zf = lfilter(b, a, x, axis=0, zi=zi)\n assert_array_almost_equal(y_r2_a0_0, y)\n assert_array_almost_equal(zf, zf_r)\n\n def test_rank_3_IIR(self):\n x = self.generate((4, 3, 2))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, 0.5])\n\n for axis in range(x.ndim):\n y = lfilter(b, a, x, axis)\n y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)\n assert_array_almost_equal(y, y_r)\n\n def test_rank_3_IIR_init_cond(self):\n x = self.generate((4, 3, 2))\n b = self.convert_dtype([1, -1])\n a = self.convert_dtype([0.5, 0.5])\n\n for axis in range(x.ndim):\n zi_shape = list(x.shape)\n zi_shape[axis] = 1\n zi = 
self.convert_dtype(np.ones(zi_shape))\n zi1 = self.convert_dtype([1])\n y, zf = lfilter(b, a, x, axis, zi)\n lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]\n lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]\n y_r = np.apply_along_axis(lf0, axis, x)\n zf_r = np.apply_along_axis(lf1, axis, x)\n assert_array_almost_equal(y, y_r)\n assert_array_almost_equal(zf, zf_r)\n\n def test_rank_3_FIR(self):\n x = self.generate((4, 3, 2))\n b = self.convert_dtype([1, 0, -1])\n a = self.convert_dtype([1])\n\n for axis in range(x.ndim):\n y = lfilter(b, a, x, axis)\n y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)\n assert_array_almost_equal(y, y_r)\n\n def test_rank_3_FIR_init_cond(self):\n x = self.generate((4, 3, 2))\n b = self.convert_dtype([1, 0, -1])\n a = self.convert_dtype([1])\n\n for axis in range(x.ndim):\n zi_shape = list(x.shape)\n zi_shape[axis] = 2\n zi = self.convert_dtype(np.ones(zi_shape))\n zi1 = self.convert_dtype([1, 1])\n y, zf = lfilter(b, a, x, axis, zi)\n lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]\n lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]\n y_r = np.apply_along_axis(lf0, axis, x)\n zf_r = np.apply_along_axis(lf1, axis, x)\n assert_array_almost_equal(y, y_r)\n assert_array_almost_equal(zf, zf_r)\n\n def test_zi_pseudobroadcast(self):\n x = self.generate((4, 5, 20))\n b,a = signal.butter(8, 0.2, output='ba')\n b = self.convert_dtype(b)\n a = self.convert_dtype(a)\n zi_size = b.shape[0] - 1\n\n # lfilter requires x.ndim == zi.ndim exactly. However, zi can have\n # length 1 dimensions.\n zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))\n zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))\n\n y_full, zf_full = lfilter(b, a, x, zi=zi_full)\n y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)\n\n assert_array_almost_equal(y_sing, y_full)\n assert_array_almost_equal(zf_full, zf_sing)\n\n # lfilter does not prepend ones\n assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))\n\n def test_scalar_a(self):\n # a can be a scalar.\n x = self.generate(6)\n b = self.convert_dtype([1, 0, -1])\n a = self.convert_dtype([1])\n y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])\n\n y = lfilter(b, a[0], x)\n assert_array_almost_equal(y, y_r)\n\n def test_zi_some_singleton_dims(self):\n # lfilter doesn't really broadcast (no prepending of 1's). But does\n # do singleton expansion if x and zi have the same ndim. 
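# Editor's aside: a hedged sketch of the singleton-expansion rule described in
# the comment above (zi must have the same ndim as x, but length-1 axes are
# expanded across x). Not part of the original tests; the 2-tap FIR filter and
# the shapes are arbitrary.
import numpy as np
from scipy.signal import lfilter

b, a = [1.0, 1.0], [1.0]
x = np.ones((3, 5))
zi = np.ones((1, 1))                      # same ndim as x, singleton first axis
y, zf = lfilter(b, a, x, axis=-1, zi=zi)  # one set of initial conditions, reused per row
print(y.shape, zf.shape)                  # the returned state has one row per signal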
This was\n # broken only if a subset of the axes were singletons (gh-4681).\n x = self.convert_dtype(np.zeros((3,2,5), 'l'))\n b = self.convert_dtype(np.ones(5, 'l'))\n a = self.convert_dtype(np.array([1,0,0]))\n zi = np.ones((3,1,4), 'l')\n zi[1,:,:] *= 2\n zi[2,:,:] *= 3\n zi = self.convert_dtype(zi)\n\n zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))\n y_expected = np.zeros((3,2,5), 'l')\n y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]\n y_expected = self.convert_dtype(y_expected)\n\n # IIR\n y_iir, zf_iir = lfilter(b, a, x, -1, zi)\n assert_array_almost_equal(y_iir, y_expected)\n assert_array_almost_equal(zf_iir, zf_expected)\n\n # FIR\n y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)\n assert_array_almost_equal(y_fir, y_expected)\n assert_array_almost_equal(zf_fir, zf_expected)\n\n def base_bad_size_zi(self, b, a, x, axis, zi):\n b = self.convert_dtype(b)\n a = self.convert_dtype(a)\n x = self.convert_dtype(x)\n zi = self.convert_dtype(zi)\n assert_raises(ValueError, lfilter, b, a, x, axis, zi)\n\n def test_bad_size_zi(self):\n # rank 1\n x1 = np.arange(6)\n self.base_bad_size_zi([1], [1], x1, -1, [1])\n self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])\n self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]])\n self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])\n self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])\n self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])\n self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])\n self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])\n self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])\n self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])\n self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])\n self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])\n self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])\n\n # rank 2\n x2 = np.arange(12).reshape((4,3))\n # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3)\n self.base_bad_size_zi([1], [1], x2, 0, [0])\n\n # for each of these there are 5 cases tested (in this order):\n # 1. not deep enough, right # elements\n # 2. too deep, right # elements\n # 3. right depth, right # elements, transposed\n # 4. right depth, too few elements\n # 5. 
right depth, too many elements\n\n self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])\n self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])\n self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])\n self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])\n self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])\n\n self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])\n\n self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])\n self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])\n self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])\n self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])\n self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])\n\n self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])\n\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])\n\n # for axis=1 zi.shape should == (4, max(len(a),len(b))-1)\n self.base_bad_size_zi([1], [1], x2, 1, [0])\n\n self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])\n self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])\n self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])\n self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])\n self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])\n\n self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])\n self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])\n\n self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])\n self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])\n self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])\n self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])\n self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])\n\n self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])\n self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])\n\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])\n self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])\n\n def test_empty_zi(self):\n # Regression test for #880: empty array for zi crashes.\n x = self.generate((5,))\n a = 
self.convert_dtype([1])\n b = self.convert_dtype([1])\n zi = self.convert_dtype([])\n y, zf = lfilter(b, a, x, zi=zi)\n assert_array_almost_equal(y, x)\n assert_equal(zf.dtype, self.dtype)\n assert_equal(zf.size, 0)\n\n def test_lfiltic_bad_zi(self):\n # Regression test for #3699: bad initial conditions\n a = self.convert_dtype([1])\n b = self.convert_dtype([1])\n # \"y\" sets the datatype of zi, so it truncates if int\n zi = lfiltic(b, a, [1., 0])\n zi_1 = lfiltic(b, a, [1, 0])\n zi_2 = lfiltic(b, a, [True, False])\n assert_array_equal(zi, zi_1)\n assert_array_equal(zi, zi_2)\n\n def test_short_x_FIR(self):\n # regression test for #5116\n # x shorter than b, with non None zi fails\n a = self.convert_dtype([1])\n b = self.convert_dtype([1, 0, -1])\n zi = self.convert_dtype([2, 7])\n x = self.convert_dtype([72])\n ye = self.convert_dtype([74])\n zfe = self.convert_dtype([7, -72])\n y, zf = lfilter(b, a, x, zi=zi)\n assert_array_almost_equal(y, ye)\n assert_array_almost_equal(zf, zfe)\n\n def test_short_x_IIR(self):\n # regression test for #5116\n # x shorter than b, with non None zi fails\n a = self.convert_dtype([1, 1])\n b = self.convert_dtype([1, 0, -1])\n zi = self.convert_dtype([2, 7])\n x = self.convert_dtype([72])\n ye = self.convert_dtype([74])\n zfe = self.convert_dtype([-67, -72])\n y, zf = lfilter(b, a, x, zi=zi)\n assert_array_almost_equal(y, ye)\n assert_array_almost_equal(zf, zfe)\n\n def test_do_not_modify_a_b_IIR(self):\n x = self.generate((6,))\n b = self.convert_dtype([1, -1])\n b0 = b.copy()\n a = self.convert_dtype([0.5, -0.5])\n a0 = a.copy()\n y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])\n y_f = lfilter(b, a, x)\n assert_array_almost_equal(y_f, y_r)\n assert_equal(b, b0)\n assert_equal(a, a0)\n\n def test_do_not_modify_a_b_FIR(self):\n x = self.generate((6,))\n b = self.convert_dtype([1, 0, 1])\n b0 = b.copy()\n a = self.convert_dtype([2])\n a0 = a.copy()\n y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])\n y_f = lfilter(b, a, x)\n assert_array_almost_equal(y_f, y_r)\n assert_equal(b, b0)\n assert_equal(a, a0)\n\n\nclass TestLinearFilterFloat32(_TestLinearFilter):\n dtype = np.dtype('f')\n\n\nclass TestLinearFilterFloat64(_TestLinearFilter):\n dtype = np.dtype('d')\n\n\nclass TestLinearFilterFloatExtended(_TestLinearFilter):\n dtype = np.dtype('g')\n\n\nclass TestLinearFilterComplex64(_TestLinearFilter):\n dtype = np.dtype('F')\n\n\nclass TestLinearFilterComplex128(_TestLinearFilter):\n dtype = np.dtype('D')\n\n\nclass TestLinearFilterComplexExtended(_TestLinearFilter):\n dtype = np.dtype('G')\n\nclass TestLinearFilterDecimal(_TestLinearFilter):\n dtype = np.dtype('O')\n\n def type(self, x):\n return Decimal(str(x))\n\n\nclass TestLinearFilterObject(_TestLinearFilter):\n dtype = np.dtype('O')\n type = float\n\n\ndef test_lfilter_bad_object():\n # lfilter: object arrays with non-numeric objects raise TypeError.\n # Regression test for ticket #1452.\n assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])\n assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])\n assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])\n\n\ndef test_lfilter_notimplemented_input():\n # Should not crash, gh-7991\n assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])\n\n\[email protected]('dt', [np.ubyte, np.byte, np.ushort, np.short,\n np.uint, int, np.ulonglong, np.ulonglong,\n np.float32, np.float64, np.longdouble,\n Decimal])\nclass TestCorrelateReal(object):\n def _setup_rank1(self, dt):\n a = np.linspace(0, 3, 4).astype(dt)\n b = 
np.linspace(1, 2, 2).astype(dt)\n\n y_r = np.array([0, 2, 5, 8, 3]).astype(dt)\n return a, b, y_r\n\n def equal_tolerance(self, res_dt):\n # default value of keyword\n decimal = 6\n try:\n dt_info = np.finfo(res_dt)\n if hasattr(dt_info, 'resolution'):\n decimal = int(-0.5*np.log10(dt_info.resolution))\n except Exception:\n pass\n return decimal\n\n def test_method(self, dt):\n if dt == Decimal:\n method = choose_conv_method([Decimal(4)], [Decimal(3)])\n assert_equal(method, 'direct')\n else:\n a, b, y_r = self._setup_rank3(dt)\n y_fft = correlate(a, b, method='fft')\n y_direct = correlate(a, b, method='direct')\n\n assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance(y_fft.dtype))\n assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_fft.dtype))\n assert_equal(y_fft.dtype, dt)\n assert_equal(y_direct.dtype, dt)\n\n def test_rank1_valid(self, dt):\n a, b, y_r = self._setup_rank1(dt)\n y = correlate(a, b, 'valid')\n assert_array_almost_equal(y, y_r[1:4])\n assert_equal(y.dtype, dt)\n\n # See gh-5897\n y = correlate(b, a, 'valid')\n assert_array_almost_equal(y, y_r[1:4][::-1])\n assert_equal(y.dtype, dt)\n\n def test_rank1_same(self, dt):\n a, b, y_r = self._setup_rank1(dt)\n y = correlate(a, b, 'same')\n assert_array_almost_equal(y, y_r[:-1])\n assert_equal(y.dtype, dt)\n\n def test_rank1_full(self, dt):\n a, b, y_r = self._setup_rank1(dt)\n y = correlate(a, b, 'full')\n assert_array_almost_equal(y, y_r)\n assert_equal(y.dtype, dt)\n\n def _setup_rank3(self, dt):\n a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(\n dt)\n b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(\n dt)\n\n y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],\n [46., 432., 1062., 1840., 2672., 1698., 864., 266.],\n [134., 736., 1662., 2768., 3920., 2418., 1168., 314.],\n [260., 952., 1932., 3056., 4208., 2580., 1240., 332.],\n [202., 664., 1290., 1984., 2688., 1590., 712., 150.],\n [114., 344., 642., 960., 1280., 726., 296., 38.]],\n\n [[23., 400., 1035., 1832., 2696., 1737., 904., 293.],\n [134., 920., 2166., 3680., 5280., 3306., 1640., 474.],\n [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],\n [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],\n [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],\n [241., 700., 1281., 1888., 2496., 1383., 532., 39.]],\n\n [[22., 214., 528., 916., 1332., 846., 430., 132.],\n [86., 484., 1098., 1832., 2600., 1602., 772., 206.],\n [188., 802., 1698., 2732., 3788., 2256., 1018., 218.],\n [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],\n [230., 692., 1290., 1928., 2568., 1458., 596., 78.],\n [126., 354., 636., 924., 1212., 654., 234., 0.]]],\n dtype=dt)\n\n return a, b, y_r\n\n def test_rank3_valid(self, dt):\n a, b, y_r = self._setup_rank3(dt)\n y = correlate(a, b, \"valid\")\n assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])\n assert_equal(y.dtype, dt)\n\n # See gh-5897\n y = correlate(b, a, \"valid\")\n assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])\n assert_equal(y.dtype, dt)\n\n def test_rank3_same(self, dt):\n a, b, y_r = self._setup_rank3(dt)\n y = correlate(a, b, \"same\")\n assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])\n assert_equal(y.dtype, dt)\n\n def test_rank3_all(self, dt):\n a, b, y_r = self._setup_rank3(dt)\n y = correlate(a, b)\n assert_array_almost_equal(y, y_r)\n assert_equal(y.dtype, dt)\n\n\nclass TestCorrelate(object):\n # Tests that don't depend on dtype\n\n def test_invalid_shapes(self):\n # By \"invalid,\" we mean that no one\n # array 
has dimensions that are all at\n # least as large as the corresponding\n # dimensions of the other array. This\n # setup should throw a ValueError.\n a = np.arange(1, 7).reshape((2, 3))\n b = np.arange(-6, 0).reshape((3, 2))\n\n assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})\n assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})\n\n def test_invalid_params(self):\n a = [3, 4, 5]\n b = [1, 2, 3]\n assert_raises(ValueError, correlate, a, b, mode='spam')\n assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')\n assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')\n assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')\n assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')\n\n def test_mismatched_dims(self):\n # Input arrays should have the same number of dimensions\n assert_raises(ValueError, correlate, [1], 2, method='direct')\n assert_raises(ValueError, correlate, 1, [2], method='direct')\n assert_raises(ValueError, correlate, [1], 2, method='fft')\n assert_raises(ValueError, correlate, 1, [2], method='fft')\n assert_raises(ValueError, correlate, [1], [[2]])\n assert_raises(ValueError, correlate, [3], 2)\n\n def test_numpy_fastpath(self):\n a = [1, 2, 3]\n b = [4, 5]\n assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])\n\n a = [1, 2, 3]\n b = [4, 5, 6]\n assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])\n assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])\n assert_allclose(correlate(a, b, mode='valid'), [32])\n\n\[email protected]('dt', [np.csingle, np.cdouble, np.clongdouble])\nclass TestCorrelateComplex(object):\n # The decimal precision to be used for comparing results.\n # This value will be passed as the 'decimal' keyword argument of\n # assert_array_almost_equal().\n\n def decimal(self, dt):\n return int(2 * np.finfo(dt).precision / 3)\n\n def _setup_rank1(self, dt, mode):\n np.random.seed(9)\n a = np.random.randn(10).astype(dt)\n a += 1j * np.random.randn(10).astype(dt)\n b = np.random.randn(8).astype(dt)\n b += 1j * np.random.randn(8).astype(dt)\n\n y_r = (correlate(a.real, b.real, mode=mode) +\n correlate(a.imag, b.imag, mode=mode)).astype(dt)\n y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +\n correlate(a.imag, b.real, mode=mode))\n return a, b, y_r\n\n def test_rank1_valid(self, dt):\n a, b, y_r = self._setup_rank1(dt, 'valid')\n y = correlate(a, b, 'valid')\n assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))\n assert_equal(y.dtype, dt)\n\n # See gh-5897\n y = correlate(b, a, 'valid')\n assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt))\n assert_equal(y.dtype, dt)\n\n def test_rank1_same(self, dt):\n a, b, y_r = self._setup_rank1(dt, 'same')\n y = correlate(a, b, 'same')\n assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))\n assert_equal(y.dtype, dt)\n\n def test_rank1_full(self, dt):\n a, b, y_r = self._setup_rank1(dt, 'full')\n y = correlate(a, b, 'full')\n assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))\n assert_equal(y.dtype, dt)\n\n def test_swap_full(self, dt):\n d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt)\n k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt)\n y = correlate(d, k)\n assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j])\n\n def test_swap_same(self, dt):\n d = [0.+0.j, 1.+1.j, 2.+2.j]\n k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]\n y = correlate(d, k, mode=\"same\")\n assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j])\n\n def test_rank3(self, dt):\n a 
= np.random.randn(10, 8, 6).astype(dt)\n a += 1j * np.random.randn(10, 8, 6).astype(dt)\n b = np.random.randn(8, 6, 4).astype(dt)\n b += 1j * np.random.randn(8, 6, 4).astype(dt)\n\n y_r = (correlate(a.real, b.real)\n + correlate(a.imag, b.imag)).astype(dt)\n y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))\n\n y = correlate(a, b, 'full')\n assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)\n assert_equal(y.dtype, dt)\n\n def test_rank0(self, dt):\n a = np.array(np.random.randn()).astype(dt)\n a += 1j * np.array(np.random.randn()).astype(dt)\n b = np.array(np.random.randn()).astype(dt)\n b += 1j * np.array(np.random.randn()).astype(dt)\n\n y_r = (correlate(a.real, b.real)\n + correlate(a.imag, b.imag)).astype(dt)\n y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))\n\n y = correlate(a, b, 'full')\n assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)\n assert_equal(y.dtype, dt)\n\n assert_equal(correlate([1], [2j]), correlate(1, 2j))\n assert_equal(correlate([2j], [3j]), correlate(2j, 3j))\n assert_equal(correlate([3j], [4]), correlate(3j, 4))\n\n\nclass TestCorrelate2d(object):\n\n def test_consistency_correlate_funcs(self):\n # Compare np.correlate, signal.correlate, signal.correlate2d\n a = np.arange(5)\n b = np.array([3.2, 1.4, 3])\n for mode in ['full', 'valid', 'same']:\n assert_almost_equal(np.correlate(a, b, mode=mode),\n signal.correlate(a, b, mode=mode))\n assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],\n mode=mode)),\n signal.correlate(a, b, mode=mode))\n\n # See gh-5897\n if mode == 'valid':\n assert_almost_equal(np.correlate(b, a, mode=mode),\n signal.correlate(b, a, mode=mode))\n assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],\n mode=mode)),\n signal.correlate(b, a, mode=mode))\n\n def test_invalid_shapes(self):\n # By \"invalid,\" we mean that no one\n # array has dimensions that are all at\n # least as large as the corresponding\n # dimensions of the other array. This\n # setup should throw a ValueError.\n a = np.arange(1, 7).reshape((2, 3))\n b = np.arange(-6, 0).reshape((3, 2))\n\n assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'})\n assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'})\n\n def test_complex_input(self):\n assert_equal(signal.correlate2d([[1]], [[2j]]), -2j)\n assert_equal(signal.correlate2d([[2j]], [[3j]]), 6)\n assert_equal(signal.correlate2d([[3j]], [[4]]), 12j)\n\n\nclass TestLFilterZI(object):\n\n def test_basic(self):\n a = np.array([1.0, -1.0, 0.5])\n b = np.array([1.0, 0.0, 2.0])\n zi_expected = np.array([5.0, -1.0])\n zi = lfilter_zi(b, a)\n assert_array_almost_equal(zi, zi_expected)\n\n def test_scale_invariance(self):\n # Regression test. 
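# Editor's aside: a hedged usage sketch (not from the original file) of what
# lfilter_zi is for: it returns the internal filter state whose step response is
# already in steady state, so scaling it by x[0] removes the start-up transient.
# The Butterworth design below is an arbitrary choice.
import numpy as np
from scipy.signal import butter, lfilter, lfilter_zi

b, a = butter(4, 0.25)
x = np.ones(20)
zi = lfilter_zi(b, a)
y, _ = lfilter(b, a, x, zi=zi * x[0])
print(np.allclose(y, 1.0))   # True: a constant input stays constant from sample 0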
There was a bug in which b was not correctly\n # rescaled when a[0] was nonzero.\n b = np.array([2, 8, 5])\n a = np.array([1, 1, 8])\n zi1 = lfilter_zi(b, a)\n zi2 = lfilter_zi(2*b, 2*a)\n assert_allclose(zi2, zi1, rtol=1e-12)\n\n\nclass TestFiltFilt(object):\n filtfilt_kind = 'tf'\n\n def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,\n method='pad', irlen=None):\n if self.filtfilt_kind == 'tf':\n b, a = zpk2tf(*zpk)\n return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)\n elif self.filtfilt_kind == 'sos':\n sos = zpk2sos(*zpk)\n return sosfiltfilt(sos, x, axis, padtype, padlen)\n\n def test_basic(self):\n zpk = tf2zpk([1, 2, 3], [1, 2, 3])\n out = self.filtfilt(zpk, np.arange(12))\n assert_allclose(out, arange(12), atol=1e-11)\n\n def test_sine(self):\n rate = 2000\n t = np.linspace(0, 1.0, rate + 1)\n # A signal with low frequency and a high frequency.\n xlow = np.sin(5 * 2 * np.pi * t)\n xhigh = np.sin(250 * 2 * np.pi * t)\n x = xlow + xhigh\n\n zpk = butter(8, 0.125, output='zpk')\n # r is the magnitude of the largest pole.\n r = np.abs(zpk[1]).max()\n eps = 1e-5\n # n estimates the number of steps for the\n # transient to decay by a factor of eps.\n n = int(np.ceil(np.log(eps) / np.log(r)))\n\n # High order lowpass filter...\n y = self.filtfilt(zpk, x, padlen=n)\n # Result should be just xlow.\n err = np.abs(y - xlow).max()\n assert_(err < 1e-4)\n\n # A 2D case.\n x2d = np.vstack([xlow, xlow + xhigh])\n y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)\n assert_equal(y2d.shape, x2d.shape)\n err = np.abs(y2d - xlow).max()\n assert_(err < 1e-4)\n\n # Use the previous result to check the use of the axis keyword.\n # (Regression test for ticket #1620)\n y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)\n assert_equal(y2d, y2dt.T)\n\n def test_axis(self):\n # Test the 'axis' keyword on a 3D array.\n x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)\n zpk = butter(3, 0.125, output='zpk')\n y0 = self.filtfilt(zpk, x, padlen=0, axis=0)\n y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)\n assert_array_equal(y0, np.swapaxes(y1, 0, 1))\n y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)\n assert_array_equal(y0, np.swapaxes(y2, 0, 2))\n\n def test_acoeff(self):\n if self.filtfilt_kind != 'tf':\n return # only necessary for TF\n # test for 'a' coefficient as single number\n out = signal.filtfilt([.5, .5], 1, np.arange(10))\n assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)\n\n def test_gust_simple(self):\n if self.filtfilt_kind != 'tf':\n pytest.skip('gust only implemented for TF systems')\n # The input array has length 2. The exact solution for this case\n # was computed \"by hand\".\n x = np.array([1.0, 2.0])\n b = np.array([0.5])\n a = np.array([1.0, -0.5])\n y, z1, z2 = _filtfilt_gust(b, a, x)\n assert_allclose([z1[0], z2[0]],\n [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])\n assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],\n 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])\n\n def test_gust_scalars(self):\n if self.filtfilt_kind != 'tf':\n pytest.skip('gust only implemented for TF systems')\n # The filter coefficients are both scalars, so the filter simply\n # multiplies its input by b/a. 
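# Editor's aside: a hedged sketch, not part of the original tests, contrasting
# the default padded filtfilt with the Gustafsson method exercised here. The two
# differ only in how the boundary transients are handled, so they agree closely
# away from the edges. Filter order, cutoff and signal are arbitrary.
import numpy as np
from scipy.signal import butter, filtfilt

b, a = butter(4, 0.2)
x = np.sin(np.linspace(0, 10, 300))
y_pad = filtfilt(b, a, x)                    # default: odd-extension padding
y_gust = filtfilt(b, a, x, method="gust")    # Gustafsson initial conditions
print(np.max(np.abs(y_pad[50:-50] - y_gust[50:-50])))   # tiny in the interior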
When it is used in filtfilt, the\n # factor is (b/a)**2.\n x = np.arange(12)\n b = 3.0\n a = 2.0\n y = filtfilt(b, a, x, method=\"gust\")\n expected = (b/a)**2 * x\n assert_allclose(y, expected)\n\n\nclass TestSOSFiltFilt(TestFiltFilt):\n filtfilt_kind = 'sos'\n\n def test_equivalence(self):\n \"\"\"Test equivalence between sosfiltfilt and filtfilt\"\"\"\n x = np.random.RandomState(0).randn(1000)\n for order in range(1, 6):\n zpk = signal.butter(order, 0.35, output='zpk')\n b, a = zpk2tf(*zpk)\n sos = zpk2sos(*zpk)\n y = filtfilt(b, a, x)\n y_sos = sosfiltfilt(sos, x)\n assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)\n\n\ndef filtfilt_gust_opt(b, a, x):\n \"\"\"\n An alternative implementation of filtfilt with Gustafsson edges.\n\n This function computes the same result as\n `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays\n are accepted. The problem is solved using `fmin` from `scipy.optimize`.\n `_filtfilt_gust` is significanly faster than this implementation.\n \"\"\"\n def filtfilt_gust_opt_func(ics, b, a, x):\n \"\"\"Objective function used in filtfilt_gust_opt.\"\"\"\n m = max(len(a), len(b)) - 1\n z0f = ics[:m]\n z0b = ics[m:]\n y_f = lfilter(b, a, x, zi=z0f)[0]\n y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]\n\n y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]\n y_bf = lfilter(b, a, y_b, zi=z0f)[0]\n value = np.sum((y_fb - y_bf)**2)\n return value\n\n m = max(len(a), len(b)) - 1\n zi = lfilter_zi(b, a)\n ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))\n result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),\n xtol=1e-10, ftol=1e-12,\n maxfun=10000, maxiter=10000,\n full_output=True, disp=False)\n opt, fopt, niter, funcalls, warnflag = result\n if warnflag > 0:\n raise RuntimeError(\"minimization failed in filtfilt_gust_opt: \"\n \"warnflag=%d\" % warnflag)\n z0f = opt[:m]\n z0b = opt[m:]\n\n # Apply the forward-backward filter using the computed initial\n # conditions.\n y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]\n y = lfilter(b, a, y_b, zi=z0f)[0]\n\n return y, z0f, z0b\n\n\ndef check_filtfilt_gust(b, a, shape, axis, irlen=None):\n # Generate x, the data to be filtered.\n np.random.seed(123)\n x = np.random.randn(*shape)\n\n # Apply filtfilt to x. 
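# Editor's aside: a hedged sketch (not from the original file) of how the irlen
# argument used below is typically chosen: cap Gustafsson's state estimation at
# the point where the slowest pole has decayed below some eps. The elliptic
# design and eps mirror the test further down and are otherwise arbitrary.
import numpy as np
from scipy.signal import ellip, zpk2tf, filtfilt

z, p, k = ellip(3, 0.01, 120, 0.0875, output='zpk')
r = np.max(np.abs(p))                              # magnitude of the slowest pole
irlen = int(np.ceil(np.log(1e-10) / np.log(r)))    # samples until the transient < eps
b, a = zpk2tf(z, p, k)
x = np.random.RandomState(123).randn(5 * irlen)
y = filtfilt(b, a, x, method="gust", irlen=irlen)
print(irlen, y.shape == x.shape)                   # the output keeps the input's shape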
This is the main calculation to be checked.\n y = filtfilt(b, a, x, axis=axis, method=\"gust\", irlen=irlen)\n\n # Also call the private function so we can test the ICs.\n yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)\n\n # filtfilt_gust_opt is an independent implementation that gives the\n # expected result, but it only handles 1-d arrays, so use some looping\n # and reshaping shenanigans to create the expected output arrays.\n xx = np.swapaxes(x, axis, -1)\n out_shape = xx.shape[:-1]\n yo = np.empty_like(xx)\n m = max(len(a), len(b)) - 1\n zo1 = np.empty(out_shape + (m,))\n zo2 = np.empty(out_shape + (m,))\n for indx in product(*[range(d) for d in out_shape]):\n yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])\n yo = np.swapaxes(yo, -1, axis)\n zo1 = np.swapaxes(zo1, -1, axis)\n zo2 = np.swapaxes(zo2, -1, axis)\n\n assert_allclose(y, yo, rtol=1e-9, atol=1e-10)\n assert_allclose(yg, yo, rtol=1e-9, atol=1e-10)\n assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10)\n assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10)\n\n\ndef test_choose_conv_method():\n for mode in ['valid', 'same', 'full']:\n for ndims in [1, 2]:\n n, k, true_method = 8, 6, 'direct'\n x = np.random.randn(*((n,) * ndims))\n h = np.random.randn(*((k,) * ndims))\n\n method = choose_conv_method(x, h, mode=mode)\n assert_equal(method, true_method)\n\n method_try, times = choose_conv_method(x, h, mode=mode, measure=True)\n assert_(method_try in {'fft', 'direct'})\n assert_(type(times) is dict)\n assert_('fft' in times.keys() and 'direct' in times.keys())\n\n n = 10\n for not_fft_conv_supp in [\"complex256\", \"complex192\"]:\n if hasattr(np, not_fft_conv_supp):\n x = np.ones(n, dtype=not_fft_conv_supp)\n h = x.copy()\n assert_equal(choose_conv_method(x, h, mode=mode), 'direct')\n\n x = np.array([2**51], dtype=np.int64)\n h = x.copy()\n assert_equal(choose_conv_method(x, h, mode=mode), 'direct')\n\n x = [Decimal(3), Decimal(2)]\n h = [Decimal(1), Decimal(4)]\n assert_equal(choose_conv_method(x, h, mode=mode), 'direct')\n\n\ndef test_filtfilt_gust():\n # Design a filter.\n z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')\n\n # Find the approximate impulse response length of the filter.\n eps = 1e-10\n r = np.max(np.abs(p))\n approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))\n\n np.random.seed(123)\n\n b, a = zpk2tf(z, p, k)\n for irlen in [None, approx_impulse_len]:\n signal_len = 5 * approx_impulse_len\n\n # 1-d test case\n check_filtfilt_gust(b, a, (signal_len,), 0, irlen)\n\n # 3-d test case; test each axis.\n for axis in range(3):\n shape = [2, 2, 2]\n shape[axis] = signal_len\n check_filtfilt_gust(b, a, shape, axis, irlen)\n\n # Test case with length less than 2*approx_impulse_len.\n # In this case, `filtfilt_gust` should behave the same as if\n # `irlen=None` was given.\n length = 2*approx_impulse_len - 50\n check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len)\n\n\nclass TestDecimate(object):\n def test_bad_args(self):\n x = np.arange(12)\n assert_raises(TypeError, signal.decimate, x, q=0.5, n=1)\n assert_raises(TypeError, signal.decimate, x, q=2, n=0.5)\n\n def test_basic_IIR(self):\n x = np.arange(12)\n y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round()\n assert_array_equal(y, x[::2])\n\n def test_basic_FIR(self):\n x = np.arange(12)\n y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round()\n assert_array_equal(y, x[::2])\n\n def test_shape(self):\n # Regression test for ticket #1480.\n z = np.zeros((30, 30))\n d0 = signal.decimate(z, 
2, axis=0, zero_phase=False)\n assert_equal(d0.shape, (15, 30))\n d1 = signal.decimate(z, 2, axis=1, zero_phase=False)\n assert_equal(d1.shape, (30, 15))\n\n def test_phaseshift_FIR(self):\n with suppress_warnings() as sup:\n sup.filter(BadCoefficients, \"Badly conditioned filter\")\n self._test_phaseshift(method='fir', zero_phase=False)\n\n def test_zero_phase_FIR(self):\n with suppress_warnings() as sup:\n sup.filter(BadCoefficients, \"Badly conditioned filter\")\n self._test_phaseshift(method='fir', zero_phase=True)\n\n def test_phaseshift_IIR(self):\n self._test_phaseshift(method='iir', zero_phase=False)\n\n def test_zero_phase_IIR(self):\n self._test_phaseshift(method='iir', zero_phase=True)\n\n def _test_phaseshift(self, method, zero_phase):\n rate = 120\n rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3\n\n t_tot = int(100) # Need to let antialiasing filters settle\n t = np.arange(rate*t_tot+1) / float(rate)\n\n # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts\n freqs = np.array(rates_to) * 0.8 / 2\n d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t)\n * signal.windows.tukey(t.size, 0.1))\n\n for rate_to in rates_to:\n q = rate // rate_to\n t_to = np.arange(rate_to*t_tot+1) / float(rate_to)\n d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to)\n * signal.windows.tukey(t_to.size, 0.1))\n\n # Set up downsampling filters, match v0.17 defaults\n if method == 'fir':\n n = 30\n system = signal.dlti(signal.firwin(n + 1, 1. / q,\n window='hamming'), 1.)\n elif method == 'iir':\n n = 8\n wc = 0.8*np.pi/q\n system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi))\n\n # Calculate expected phase response, as unit complex vector\n if zero_phase is False:\n _, h_resps = signal.freqz(system.num, system.den,\n freqs/rate*2*np.pi)\n h_resps /= np.abs(h_resps)\n else:\n h_resps = np.ones_like(freqs)\n\n y_resamps = signal.decimate(d.real, q, n, ftype=system,\n zero_phase=zero_phase)\n\n # Get phase from complex inner product, like CSD\n h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1)\n h_resamps /= np.abs(h_resamps)\n subnyq = freqs < 0.5*rate_to\n\n # Complex vectors should be aligned, only compare below nyquist\n assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0,\n atol=1e-3, rtol=1e-3)\n\n def test_auto_n(self):\n # Test that our value of n is a reasonable choice (depends on\n # the downsampling factor)\n sfreq = 100.\n n = 1000\n t = np.arange(n) / sfreq\n # will alias for decimations (>= 15)\n x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t)\n assert_allclose(np.linalg.norm(x), 1., rtol=1e-3)\n x_out = signal.decimate(x, 30, ftype='fir')\n assert_array_less(np.linalg.norm(x_out), 0.01)\n\n\nclass TestHilbert(object):\n\n def test_bad_args(self):\n x = np.array([1.0 + 0.0j])\n assert_raises(ValueError, hilbert, x)\n x = np.arange(8.0)\n assert_raises(ValueError, hilbert, x, N=0)\n\n def test_hilbert_theoretical(self):\n # test cases by Ariel Rokem\n decimal = 14\n\n pi = np.pi\n t = np.arange(0, 2 * pi, pi / 256)\n a0 = np.sin(t)\n a1 = np.cos(t)\n a2 = np.sin(2 * t)\n a3 = np.cos(2 * t)\n a = np.vstack([a0, a1, a2, a3])\n\n h = hilbert(a)\n h_abs = np.abs(h)\n h_angle = np.angle(h)\n h_real = np.real(h)\n\n # The real part should be equal to the original signals:\n assert_almost_equal(h_real, a, decimal)\n # The absolute value should be one everywhere, for this input:\n assert_almost_equal(h_abs, np.ones(a.shape), decimal)\n # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in\n # the first 256 bins:\n assert_almost_equal(h_angle[0, :256],\n np.arange(-pi / 2, pi / 2, pi / 256),\n decimal)\n # For the 'slow' cosine - the phase should go from 0 to pi in the\n # same interval:\n assert_almost_equal(\n h_angle[1, :256], np.arange(0, pi, pi / 256), decimal)\n # The 'fast' sine should make this phase transition in half the time:\n assert_almost_equal(h_angle[2, :128],\n np.arange(-pi / 2, pi / 2, pi / 128),\n decimal)\n # Ditto for the 'fast' cosine:\n assert_almost_equal(\n h_angle[3, :128], np.arange(0, pi, pi / 128), decimal)\n\n # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia\n assert_almost_equal(h[1].imag, a0, decimal)\n\n def test_hilbert_axisN(self):\n # tests for axis and N arguments\n a = np.arange(18).reshape(3, 6)\n # test axis\n aa = hilbert(a, axis=-1)\n assert_equal(hilbert(a.T, axis=0), aa.T)\n # test 1d\n assert_almost_equal(hilbert(a[0]), aa[0], 14)\n\n # test N\n aan = hilbert(a, N=20, axis=-1)\n assert_equal(aan.shape, [3, 20])\n assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3])\n # the next test is just a regression test,\n # no idea whether numbers make sense\n a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j,\n 1.000000000000000e+00 - 2.047794505137069j,\n 1.999999999999999e+00 - 2.244055555687583j,\n 3.000000000000000e+00 - 1.262750302935009j,\n 4.000000000000000e+00 - 1.066489252384493j,\n 5.000000000000000e+00 + 2.918022706971047j,\n 8.881784197001253e-17 + 3.845658908989067j,\n -9.444121133484362e-17 + 0.985044202202061j,\n -1.776356839400251e-16 + 1.332257797702019j,\n -3.996802888650564e-16 + 0.501905089898885j,\n 1.332267629550188e-16 + 0.668696078880782j,\n -1.192678053963799e-16 + 0.235487067862679j,\n -1.776356839400251e-16 + 0.286439612812121j,\n 3.108624468950438e-16 + 0.031676888064907j,\n 1.332267629550188e-16 - 0.019275656884536j,\n -2.360035624836702e-16 - 0.1652588660287j,\n 0.000000000000000e+00 - 0.332049855010597j,\n 3.552713678800501e-16 - 0.403810179797771j,\n 8.881784197001253e-17 - 0.751023775297729j,\n 9.444121133484362e-17 - 0.79252210110103j])\n assert_almost_equal(aan[0], a0hilb, 14, 'N regression')\n\n\nclass TestHilbert2(object):\n\n def test_bad_args(self):\n # x must be real.\n x = np.array([[1.0 + 0.0j]])\n assert_raises(ValueError, hilbert2, x)\n\n # x must be rank 2.\n x = np.arange(24).reshape(2, 3, 4)\n assert_raises(ValueError, hilbert2, x)\n\n # Bad value for N.\n x = np.arange(16).reshape(4, 4)\n assert_raises(ValueError, hilbert2, x, N=0)\n assert_raises(ValueError, hilbert2, x, N=(2, 0))\n assert_raises(ValueError, 
hilbert2, x, N=(2,))\n\n\nclass TestPartialFractionExpansion(object):\n def test_invresz_one_coefficient_bug(self):\n # Regression test for issue in gh-4646.\n r = [1]\n p = [2]\n k = [0]\n a_expected = [1.0, 0.0]\n b_expected = [1.0, -2.0]\n a_observed, b_observed = invresz(r, p, k)\n\n assert_allclose(a_observed, a_expected)\n assert_allclose(b_observed, b_expected)\n\n def test_invres_distinct_roots(self):\n # This test was inspired by github issue 2496.\n r = [3 / 10, -1 / 6, -2 / 15]\n p = [0, -2, -5]\n k = []\n a_expected = [1, 3]\n b_expected = [1, 7, 10, 0]\n a_observed, b_observed = invres(r, p, k)\n assert_allclose(a_observed, a_expected)\n assert_allclose(b_observed, b_expected)\n rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum')\n\n # With the default tolerance, the rtype does not matter\n # for this example.\n for rtype in rtypes:\n a_observed, b_observed = invres(r, p, k, rtype=rtype)\n assert_allclose(a_observed, a_expected)\n assert_allclose(b_observed, b_expected)\n\n # With unrealistically large tolerances, repeated roots may be inferred\n # and the rtype comes into play.\n ridiculous_tolerance = 1e10\n for rtype in rtypes:\n a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype)\n\n def test_invres_repeated_roots(self):\n r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]\n p = [0, -2, -2, -5]\n k = []\n a_expected = [1, 3]\n b_expected = [1, 9, 24, 20, 0]\n rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum')\n for rtype in rtypes:\n a_observed, b_observed = invres(r, p, k, rtype=rtype)\n assert_allclose(a_observed, a_expected)\n assert_allclose(b_observed, b_expected)\n\n def test_invres_bad_rtype(self):\n r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]\n p = [0, -2, -2, -5]\n k = []\n assert_raises(ValueError, invres, r, p, k, rtype='median')\n\n\nclass TestVectorstrength(object):\n\n def test_single_1dperiod(self):\n events = np.array([.5])\n period = 5.\n targ_strength = 1.\n targ_phase = .1\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 0)\n assert_equal(phase.ndim, 0)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_single_2dperiod(self):\n events = np.array([.5])\n period = [1, 2, 5.]\n targ_strength = [1.] * 3\n targ_phase = np.array([.5, .25, .1])\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 1)\n assert_equal(phase.ndim, 1)\n assert_array_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_equal_1dperiod(self):\n events = np.array([.25, .25, .25, .25, .25, .25])\n period = 2\n targ_strength = 1.\n targ_phase = .125\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 0)\n assert_equal(phase.ndim, 0)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_equal_2dperiod(self):\n events = np.array([.25, .25, .25, .25, .25, .25])\n period = [1, 2, ]\n targ_strength = [1.] 
* 2\n targ_phase = np.array([.25, .125])\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 1)\n assert_equal(phase.ndim, 1)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_spaced_1dperiod(self):\n events = np.array([.1, 1.1, 2.1, 4.1, 10.1])\n period = 1\n targ_strength = 1.\n targ_phase = .1\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 0)\n assert_equal(phase.ndim, 0)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_spaced_2dperiod(self):\n events = np.array([.1, 1.1, 2.1, 4.1, 10.1])\n period = [1, .5]\n targ_strength = [1.] * 2\n targ_phase = np.array([.1, .2])\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 1)\n assert_equal(phase.ndim, 1)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_partial_1dperiod(self):\n events = np.array([.25, .5, .75])\n period = 1\n targ_strength = 1. / 3.\n targ_phase = .5\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 0)\n assert_equal(phase.ndim, 0)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_partial_2dperiod(self):\n events = np.array([.25, .5, .75])\n period = [1., 1., 1., 1.]\n targ_strength = [1. / 3.] * 4\n targ_phase = np.array([.5, .5, .5, .5])\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 1)\n assert_equal(phase.ndim, 1)\n assert_almost_equal(strength, targ_strength)\n assert_almost_equal(phase, 2 * np.pi * targ_phase)\n\n def test_opposite_1dperiod(self):\n events = np.array([0, .25, .5, .75])\n period = 1.\n targ_strength = 0\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 0)\n assert_equal(phase.ndim, 0)\n assert_almost_equal(strength, targ_strength)\n\n def test_opposite_2dperiod(self):\n events = np.array([0, .25, .5, .75])\n period = [1.] * 10\n targ_strength = [0.] * 10\n\n strength, phase = vectorstrength(events, period)\n\n assert_equal(strength.ndim, 1)\n assert_equal(phase.ndim, 1)\n assert_almost_equal(strength, targ_strength)\n\n def test_2d_events_ValueError(self):\n events = np.array([[1, 2]])\n period = 1.\n assert_raises(ValueError, vectorstrength, events, period)\n\n def test_2d_period_ValueError(self):\n events = 1.\n period = np.array([[1]])\n assert_raises(ValueError, vectorstrength, events, period)\n\n def test_zero_period_ValueError(self):\n events = 1.\n period = 0\n assert_raises(ValueError, vectorstrength, events, period)\n\n def test_negative_period_ValueError(self):\n events = 1.\n period = -1\n assert_raises(ValueError, vectorstrength, events, period)\n\n\nclass TestSOSFilt(object):\n\n # For sosfilt we only test a single datatype. Since sosfilt wraps\n # to lfilter under the hood, it's hopefully good enough to ensure\n # lfilter is extensively tested.\n dt = np.float64\n\n # The test_rank* tests are pulled from _TestLinearFilter\n def test_rank1(self):\n x = np.linspace(0, 5, 6).astype(self.dt)\n b = np.array([1, -1]).astype(self.dt)\n a = np.array([0.5, -0.5]).astype(self.dt)\n\n # Test simple IIR\n y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)\n assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r)\n\n # Test simple FIR\n b = np.array([1, 1]).astype(self.dt)\n # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero:\n a = np.array([1, 0]).astype(self.dt)\n y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)\n assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r)\n\n b = [1, 1, 0]\n a = [1, 0, 0]\n x = np.ones(8)\n sos = np.concatenate((b, a))\n sos.shape = (1, 6)\n y = sosfilt(sos, x)\n assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2])\n\n def test_rank2(self):\n shape = (4, 3)\n x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)\n x = x.astype(self.dt)\n\n b = np.array([1, -1]).astype(self.dt)\n a = np.array([0.5, 0.5]).astype(self.dt)\n\n y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],\n dtype=self.dt)\n\n y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],\n [18, -16, 18]], dtype=self.dt)\n\n y = sosfilt(tf2sos(b, a), x, axis=0)\n assert_array_almost_equal(y_r2_a0, y)\n\n y = sosfilt(tf2sos(b, a), x, axis=1)\n assert_array_almost_equal(y_r2_a1, y)\n\n def test_rank3(self):\n shape = (4, 3, 2)\n x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)\n\n b = np.array([1, -1]).astype(self.dt)\n a = np.array([0.5, 0.5]).astype(self.dt)\n\n # Test last axis\n y = sosfilt(tf2sos(b, a), x)\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))\n\n def test_initial_conditions(self):\n b1, a1 = signal.butter(2, 0.25, 'low')\n b2, a2 = signal.butter(2, 0.75, 'low')\n b3, a3 = signal.butter(2, 0.75, 'low')\n b = np.convolve(np.convolve(b1, b2), b3)\n a = np.convolve(np.convolve(a1, a2), a3)\n sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3]))\n\n x = np.random.rand(50)\n\n # Stopping filtering and continuing\n y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6))\n y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]]\n assert_allclose(y_true, lfilter(b, a, x))\n\n y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2)))\n y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]\n assert_allclose(y_true, y_sos)\n\n # Use a step function\n zi = sosfilt_zi(sos)\n x = np.ones(8)\n y, zf = sosfilt(sos, x, zi=zi)\n\n assert_allclose(y, np.ones(8))\n assert_allclose(zf, zi)\n\n # Initial condition shape matching\n x.shape = (1, 1) + x.shape # 3D\n assert_raises(ValueError, sosfilt, sos, x, zi=zi)\n zi_nd = zi.copy()\n zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])\n assert_raises(ValueError, sosfilt, sos, x,\n zi=zi_nd[:, :, :, [0, 1, 1]])\n y, zf = sosfilt(sos, x, zi=zi_nd)\n assert_allclose(y[0, 0], np.ones(8))\n assert_allclose(zf[:, 0, 0, :], zi)\n\n def test_initial_conditions_3d_axis1(self):\n # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.\n\n # Input array is x.\n x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))\n\n # Design a filter in ZPK format and convert to SOS\n zpk = signal.butter(6, 0.35, output='zpk')\n sos = zpk2sos(*zpk)\n nsections = sos.shape[0]\n\n # Filter along this axis.\n axis = 1\n\n # Initial conditions, all zeros.\n shp = list(x.shape)\n shp[axis] = 2\n shp = [nsections] + shp\n z0 = np.zeros(shp)\n\n # Apply the filter to x.\n yf, zf = sosfilt(sos, x, axis=axis, zi=z0)\n\n # Apply the filter to x in two stages.\n y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)\n y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)\n\n # y should equal yf, and z2 should equal zf.\n y = np.concatenate((y1, y2), axis=axis)\n assert_allclose(y, yf, rtol=1e-10, atol=1e-13)\n assert_allclose(z2, zf, rtol=1e-10, atol=1e-13)\n\n # let's try the \"step\" initial condition\n zi = sosfilt_zi(sos)\n zi.shape = [nsections, 1, 2, 
1]\n zi = zi * x[:, 0:1, :]\n y = sosfilt(sos, x, axis=axis, zi=zi)[0]\n # check it against the TF form\n b, a = zpk2tf(*zpk)\n zi = lfilter_zi(b, a)\n zi.shape = [1, zi.size, 1]\n zi = zi * x[:, 0:1, :]\n y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]\n assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13)\n\n def test_bad_zi_shape(self):\n # The shape of zi is checked before using any values in the\n # arguments, so np.empty is fine for creating the arguments.\n x = np.empty((3, 15, 3))\n sos = np.empty((4, 6))\n zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3)\n assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1)\n\n def test_sosfilt_zi(self):\n sos = signal.butter(6, 0.2, output='sos')\n zi = sosfilt_zi(sos)\n\n y, zf = sosfilt(sos, np.ones(40), zi=zi)\n assert_allclose(zf, zi, rtol=1e-13)\n\n # Expected steady state value of the step response of this filter:\n ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))\n assert_allclose(y, ss, rtol=1e-13)\n\n\nclass TestDeconvolve(object):\n\n def test_basic(self):\n # From docstring example\n original = [0, 1, 0, 0, 1, 1, 0, 0]\n impulse_response = [2, 1]\n recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]\n recovered, remainder = signal.deconvolve(recorded, impulse_response)\n assert_allclose(recovered, original)\n",
"#!/usr/bin/env python\n\n\"\"\"Top level ``eval`` module.\n\"\"\"\n\nimport warnings\nimport tokenize\nfrom pandas.formats.printing import pprint_thing\nfrom pandas.computation import _NUMEXPR_INSTALLED\nfrom pandas.computation.expr import Expr, _parsers, tokenize_string\nfrom pandas.computation.scope import _ensure_scope\nfrom pandas.compat import string_types\nfrom pandas.computation.engines import _engines\n\n\ndef _check_engine(engine):\n \"\"\"Make sure a valid engine is passed.\n\n Parameters\n ----------\n engine : str\n\n Raises\n ------\n KeyError\n * If an invalid engine is passed\n ImportError\n * If numexpr was requested but doesn't exist\n\n Returns\n -------\n string engine\n\n \"\"\"\n\n if engine is None:\n if _NUMEXPR_INSTALLED:\n engine = 'numexpr'\n else:\n engine = 'python'\n\n if engine not in _engines:\n raise KeyError('Invalid engine {0!r} passed, valid engines are'\n ' {1}'.format(engine, list(_engines.keys())))\n\n # TODO: validate this in a more general way (thinking of future engines\n # that won't necessarily be import-able)\n # Could potentially be done on engine instantiation\n if engine == 'numexpr':\n if not _NUMEXPR_INSTALLED:\n raise ImportError(\"'numexpr' is not installed or an \"\n \"unsupported version. Cannot use \"\n \"engine='numexpr' for query/eval \"\n \"if 'numexpr' is not installed\")\n\n return engine\n\n\ndef _check_parser(parser):\n \"\"\"Make sure a valid parser is passed.\n\n Parameters\n ----------\n parser : str\n\n Raises\n ------\n KeyError\n * If an invalid parser is passed\n \"\"\"\n if parser not in _parsers:\n raise KeyError('Invalid parser {0!r} passed, valid parsers are'\n ' {1}'.format(parser, _parsers.keys()))\n\n\ndef _check_resolvers(resolvers):\n if resolvers is not None:\n for resolver in resolvers:\n if not hasattr(resolver, '__getitem__'):\n name = type(resolver).__name__\n raise TypeError('Resolver of type %r does not implement '\n 'the __getitem__ method' % name)\n\n\ndef _check_expression(expr):\n \"\"\"Make sure an expression is not an empty string\n\n Parameters\n ----------\n expr : object\n An object that can be converted to a string\n\n Raises\n ------\n ValueError\n * If expr is an empty string\n \"\"\"\n if not expr:\n raise ValueError(\"expr cannot be an empty string\")\n\n\ndef _convert_expression(expr):\n \"\"\"Convert an object to an expression.\n\n Thus function converts an object to an expression (a unicode string) and\n checks to make sure it isn't empty after conversion. 
This is used to\n convert operators to their string representation for recursive calls to\n :func:`~pandas.eval`.\n\n Parameters\n ----------\n expr : object\n The object to be converted to a string.\n\n Returns\n -------\n s : unicode\n The string representation of an object.\n\n Raises\n ------\n ValueError\n * If the expression is empty.\n \"\"\"\n s = pprint_thing(expr)\n _check_expression(s)\n return s\n\n\ndef _check_for_locals(expr, stack_level, parser):\n at_top_of_stack = stack_level == 0\n not_pandas_parser = parser != 'pandas'\n\n if not_pandas_parser:\n msg = \"The '@' prefix is only supported by the pandas parser\"\n elif at_top_of_stack:\n msg = (\"The '@' prefix is not allowed in \"\n \"top-level eval calls, \\nplease refer to \"\n \"your variables by name without the '@' \"\n \"prefix\")\n\n if at_top_of_stack or not_pandas_parser:\n for toknum, tokval in tokenize_string(expr):\n if toknum == tokenize.OP and tokval == '@':\n raise SyntaxError(msg)\n\n\ndef eval(expr, parser='pandas', engine=None, truediv=True,\n local_dict=None, global_dict=None, resolvers=(), level=0,\n target=None, inplace=None):\n \"\"\"Evaluate a Python expression as a string using various backends.\n\n The following arithmetic operations are supported: ``+``, ``-``, ``*``,\n ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following\n boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).\n Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,\n :keyword:`or`, and :keyword:`not` with the same semantics as the\n corresponding bitwise operators. :class:`~pandas.Series` and\n :class:`~pandas.DataFrame` objects are supported and behave as they would\n with plain ol' Python evaluation.\n\n Parameters\n ----------\n expr : str or unicode\n The expression to evaluate. This string cannot contain any Python\n `statements\n <http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__,\n only Python `expressions\n <http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__.\n parser : string, default 'pandas', {'pandas', 'python'}\n The parser to use to construct the syntax tree from the expression. The\n default of ``'pandas'`` parses code slightly different than standard\n Python. Alternatively, you can parse an expression using the\n ``'python'`` parser to retain strict Python semantics. See the\n :ref:`enhancing performance <enhancingperf.eval>` documentation for\n more details.\n engine : string or None, default 'numexpr', {'python', 'numexpr'}\n\n The engine used to evaluate the expression. Supported engines are\n\n - None : tries to use ``numexpr``, falls back to ``python``\n - ``'numexpr'``: This default engine evaluates pandas objects using\n numexpr for large speed ups in complex expressions\n with large frames.\n - ``'python'``: Performs operations as if you had ``eval``'d in top\n level python. This engine is generally not that useful.\n\n More backends may be available in the future.\n\n truediv : bool, optional\n Whether to use true division, like in Python >= 3\n local_dict : dict or None, optional\n A dictionary of local variables, taken from locals() by default.\n global_dict : dict or None, optional\n A dictionary of global variables, taken from globals() by default.\n resolvers : list of dict-like or None, optional\n A list of objects implementing the ``__getitem__`` special method that\n you can use to inject an additional collection of namespaces to use for\n variable lookup. 
For example, this is used in the\n :meth:`~pandas.DataFrame.query` method to inject the\n :attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`\n variables that refer to their respective :class:`~pandas.DataFrame`\n instance attributes.\n level : int, optional\n The number of prior stack frames to traverse and add to the current\n scope. Most users will **not** need to change this parameter.\n target : a target object for assignment, optional, default is None\n essentially this is a passed in resolver\n inplace : bool, default True\n If expression mutates, whether to modify object inplace or return\n copy with mutation.\n\n WARNING: inplace=None currently falls back to to True, but\n in a future version, will default to False. Use inplace=True\n explicitly rather than relying on the default.\n\n Returns\n -------\n ndarray, numeric scalar, DataFrame, Series\n\n Notes\n -----\n The ``dtype`` of any objects involved in an arithmetic ``%`` operation are\n recursively cast to ``float64``.\n\n See the :ref:`enhancing performance <enhancingperf.eval>` documentation for\n more details.\n\n See Also\n --------\n pandas.DataFrame.query\n pandas.DataFrame.eval\n \"\"\"\n first_expr = True\n if isinstance(expr, string_types):\n _check_expression(expr)\n exprs = [e for e in expr.splitlines() if e != '']\n else:\n exprs = [expr]\n multi_line = len(exprs) > 1\n\n if multi_line and target is None:\n raise ValueError(\"multi-line expressions are only valid in the \"\n \"context of data, use DataFrame.eval\")\n\n first_expr = True\n for expr in exprs:\n expr = _convert_expression(expr)\n engine = _check_engine(engine)\n _check_parser(parser)\n _check_resolvers(resolvers)\n _check_for_locals(expr, level, parser)\n\n # get our (possibly passed-in) scope\n level += 1\n env = _ensure_scope(level, global_dict=global_dict,\n local_dict=local_dict, resolvers=resolvers,\n target=target)\n\n parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,\n truediv=truediv)\n\n # construct the engine and evaluate the parsed expression\n eng = _engines[engine]\n eng_inst = eng(parsed_expr)\n ret = eng_inst.evaluate()\n\n if parsed_expr.assigner is None and multi_line:\n raise ValueError(\"Multi-line expressions are only valid\"\n \" if all expressions contain an assignment\")\n\n # assign if needed\n if env.target is not None and parsed_expr.assigner is not None:\n if inplace is None:\n warnings.warn(\n \"eval expressions containing an assignment currently\"\n \"default to operating inplace.\\nThis will change in \"\n \"a future version of pandas, use inplace=True to \"\n \"avoid this warning.\",\n FutureWarning, stacklevel=3)\n inplace = True\n\n # if returning a copy, copy only on the first assignment\n if not inplace and first_expr:\n target = env.target.copy()\n else:\n target = env.target\n\n target[parsed_expr.assigner] = ret\n\n if not resolvers:\n resolvers = ({parsed_expr.assigner: ret},)\n else:\n # existing resolver needs updated to handle\n # case of mutating existing column in copy\n for resolver in resolvers:\n if parsed_expr.assigner in resolver:\n resolver[parsed_expr.assigner] = ret\n break\n else:\n resolvers += ({parsed_expr.assigner: ret},)\n\n ret = None\n first_expr = False\n\n if not inplace and inplace is not None:\n return target\n\n return ret\n",
"\"\"\"\nRead SAS sas7bdat or xport files.\n\"\"\"\n\n\ndef read_sas(filepath_or_buffer, format=None, index=None, encoding=None,\n chunksize=None, iterator=False):\n \"\"\"\n Read SAS files stored as either XPORT or SAS7BDAT format files.\n\n Parameters\n ----------\n filepath_or_buffer : string or file-like object\n Path to the SAS file.\n format : string {'xport', 'sas7bdat'} or None\n If None, file format is inferred. If 'xport' or 'sas7bdat',\n uses the corresponding format.\n index : identifier of index column, defaults to None\n Identifier of column that should be used as index of the DataFrame.\n encoding : string, default is None\n Encoding for text data. If None, text data are stored as raw bytes.\n chunksize : int\n Read file `chunksize` lines at a time, returns iterator.\n iterator : bool, defaults to False\n If True, returns an iterator for reading the file incrementally.\n\n Returns\n -------\n DataFrame if iterator=False and chunksize=None, else SAS7BDATReader\n or XportReader\n \"\"\"\n\n if format is None:\n try:\n fname = filepath_or_buffer.lower()\n if fname.endswith(\".xpt\"):\n format = \"xport\"\n elif fname.endswith(\".sas7bdat\"):\n format = \"sas7bdat\"\n else:\n raise ValueError(\"unable to infer format of SAS file\")\n except:\n pass\n\n if format.lower() == 'xport':\n from pandas.io.sas.sas_xport import XportReader\n reader = XportReader(filepath_or_buffer, index=index,\n encoding=encoding,\n chunksize=chunksize)\n elif format.lower() == 'sas7bdat':\n from pandas.io.sas.sas7bdat import SAS7BDATReader\n reader = SAS7BDATReader(filepath_or_buffer, index=index,\n encoding=encoding,\n chunksize=chunksize)\n else:\n raise ValueError('unknown SAS format')\n\n if iterator or chunksize:\n return reader\n\n data = reader.read()\n reader.close()\n return data\n",
"\"\"\"\nDiscrete Fourier Transforms - helper.py\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport collections\nimport threading\n\nfrom numpy.compat import integer_types\nfrom numpy.core import (\n asarray, concatenate, arange, take, integer, empty\n )\n\n# Created by Pearu Peterson, September 2002\n\n__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']\n\ninteger_types = integer_types + (integer,)\n\n\ndef fftshift(x, axes=None):\n \"\"\"\n Shift the zero-frequency component to the center of the spectrum.\n\n This function swaps half-spaces for all axes listed (defaults to all).\n Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axes : int or shape tuple, optional\n Axes over which to shift. Default is None, which shifts all axes.\n\n Returns\n -------\n y : ndarray\n The shifted array.\n\n See Also\n --------\n ifftshift : The inverse of `fftshift`.\n\n Examples\n --------\n >>> freqs = np.fft.fftfreq(10, 0.1)\n >>> freqs\n array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])\n >>> np.fft.fftshift(freqs)\n array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])\n\n Shift the zero-frequency component only along the second axis:\n\n >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n >>> freqs\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n >>> np.fft.fftshift(freqs, axes=(1,))\n array([[ 2., 0., 1.],\n [-4., 3., 4.],\n [-1., -3., -2.]])\n\n \"\"\"\n tmp = asarray(x)\n ndim = tmp.ndim\n if axes is None:\n axes = list(range(ndim))\n elif isinstance(axes, integer_types):\n axes = (axes,)\n y = tmp\n for k in axes:\n n = tmp.shape[k]\n p2 = (n+1)//2\n mylist = concatenate((arange(p2, n), arange(p2)))\n y = take(y, mylist, k)\n return y\n\n\ndef ifftshift(x, axes=None):\n \"\"\"\n The inverse of `fftshift`. Although identical for even-length `x`, the\n functions differ by one sample for odd-length `x`.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axes : int or shape tuple, optional\n Axes over which to calculate. Defaults to None, which shifts all axes.\n\n Returns\n -------\n y : ndarray\n The shifted array.\n\n See Also\n --------\n fftshift : Shift zero-frequency component to the center of the spectrum.\n\n Examples\n --------\n >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n >>> freqs\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n >>> np.fft.ifftshift(np.fft.fftshift(freqs))\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n\n \"\"\"\n tmp = asarray(x)\n ndim = tmp.ndim\n if axes is None:\n axes = list(range(ndim))\n elif isinstance(axes, integer_types):\n axes = (axes,)\n y = tmp\n for k in axes:\n n = tmp.shape[k]\n p2 = n-(n+1)//2\n mylist = concatenate((arange(p2, n), arange(p2)))\n y = take(y, mylist, k)\n return y\n\n\ndef fftfreq(n, d=1.0):\n \"\"\"\n Return the Discrete Fourier Transform sample frequencies.\n\n The returned float array `f` contains the frequency bin centers in cycles\n per unit of the sample spacing (with zero at the start). For instance, if\n the sample spacing is in seconds, then the frequency unit is cycles/second.\n\n Given a window length `n` and a sample spacing `d`::\n\n f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even\n f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing (inverse of the sampling rate). 
Defaults to 1.\n\n Returns\n -------\n f : ndarray\n Array of length `n` containing the sample frequencies.\n\n Examples\n --------\n >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)\n >>> fourier = np.fft.fft(signal)\n >>> n = signal.size\n >>> timestep = 0.1\n >>> freq = np.fft.fftfreq(n, d=timestep)\n >>> freq\n array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])\n\n \"\"\"\n if not isinstance(n, integer_types):\n raise ValueError(\"n should be an integer\")\n val = 1.0 / (n * d)\n results = empty(n, int)\n N = (n-1)//2 + 1\n p1 = arange(0, N, dtype=int)\n results[:N] = p1\n p2 = arange(-(n//2), 0, dtype=int)\n results[N:] = p2\n return results * val\n #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)\n\n\ndef rfftfreq(n, d=1.0):\n \"\"\"\n Return the Discrete Fourier Transform sample frequencies\n (for usage with rfft, irfft).\n\n The returned float array `f` contains the frequency bin centers in cycles\n per unit of the sample spacing (with zero at the start). For instance, if\n the sample spacing is in seconds, then the frequency unit is cycles/second.\n\n Given a window length `n` and a sample spacing `d`::\n\n f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even\n f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd\n\n Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)\n the Nyquist frequency component is considered to be positive.\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing (inverse of the sampling rate). Defaults to 1.\n\n Returns\n -------\n f : ndarray\n Array of length ``n//2 + 1`` containing the sample frequencies.\n\n Examples\n --------\n >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)\n >>> fourier = np.fft.rfft(signal)\n >>> n = signal.size\n >>> sample_rate = 100\n >>> freq = np.fft.fftfreq(n, d=1./sample_rate)\n >>> freq\n array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.])\n >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)\n >>> freq\n array([ 0., 10., 20., 30., 40., 50.])\n\n \"\"\"\n if not isinstance(n, integer_types):\n raise ValueError(\"n should be an integer\")\n val = 1.0/(n*d)\n N = n//2 + 1\n results = arange(0, N, dtype=int)\n return results * val\n\n\nclass _FFTCache(object):\n \"\"\"\n Cache for the FFT twiddle factors as an LRU (least recently used) cache.\n\n Parameters\n ----------\n max_size_in_mb : int\n Maximum memory usage of the cache before items are being evicted.\n max_item_count : int\n Maximum item count of the cache before items are being evicted.\n\n Notes\n -----\n Items will be evicted if either limit has been reached upon getting and\n setting. The maximum memory usages is not strictly the given\n ``max_size_in_mb`` but rather\n ``max(max_size_in_mb, 1.5 * size_of_largest_item)``. 
Thus the cache will\n never be completely cleared - at least one item will remain and a single\n large item can cause the cache to retain several smaller items even if the\n given maximum cache size has been exceeded.\n \"\"\"\n def __init__(self, max_size_in_mb, max_item_count):\n self._max_size_in_bytes = max_size_in_mb * 1024 ** 2\n self._max_item_count = max_item_count\n self._dict = collections.OrderedDict()\n self._lock = threading.Lock()\n\n def put_twiddle_factors(self, n, factors):\n \"\"\"\n Store twiddle factors for an FFT of length n in the cache.\n\n Putting multiple twiddle factors for a certain n will store it multiple\n times.\n\n Parameters\n ----------\n n : int\n Data length for the FFT.\n factors : ndarray\n The actual twiddle values.\n \"\"\"\n with self._lock:\n # Pop + later add to move it to the end for LRU behavior.\n # Internally everything is stored in a dictionary whose values are\n # lists.\n try:\n value = self._dict.pop(n)\n except KeyError:\n value = []\n value.append(factors)\n self._dict[n] = value\n self._prune_cache()\n\n def pop_twiddle_factors(self, n):\n \"\"\"\n Pop twiddle factors for an FFT of length n from the cache.\n\n Will return None if the requested twiddle factors are not available in\n the cache.\n\n Parameters\n ----------\n n : int\n Data length for the FFT.\n\n Returns\n -------\n out : ndarray or None\n The retrieved twiddle factors if available, else None.\n \"\"\"\n with self._lock:\n if n not in self._dict or not self._dict[n]:\n return None\n # Pop + later add to move it to the end for LRU behavior.\n all_values = self._dict.pop(n)\n value = all_values.pop()\n # Only put pack if there are still some arrays left in the list.\n if all_values:\n self._dict[n] = all_values\n return value\n\n def _prune_cache(self):\n # Always keep at least one item.\n while len(self._dict) > 1 and (\n len(self._dict) > self._max_item_count or self._check_size()):\n self._dict.popitem(last=False)\n\n def _check_size(self):\n item_sizes = [sum(_j.nbytes for _j in _i)\n for _i in self._dict.values() if _i]\n if not item_sizes:\n return False\n max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))\n return sum(item_sizes) > max_size\n",
"# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nfrom collections import Iterable\nfrom datetime import datetime, timedelta\nimport operator\nfrom itertools import product, starmap\n\nfrom numpy import nan, inf\nimport numpy as np\nimport pandas as pd\n\nfrom pandas import (Index, Series, DataFrame, isnull, bdate_range,\n NaT, date_range, timedelta_range,\n _np_version_under1p8)\nfrom pandas.tseries.index import Timestamp\nfrom pandas.tseries.tdi import Timedelta\nimport pandas.core.nanops as nanops\n\nfrom pandas.compat import range, zip\nfrom pandas import compat\nfrom pandas.util.testing import (assert_series_equal, assert_almost_equal,\n assert_frame_equal, assert_index_equal)\nimport pandas.util.testing as tm\n\nfrom .common import TestData\n\n\nclass TestSeriesOperators(TestData, tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def test_comparisons(self):\n left = np.random.randn(10)\n right = np.random.randn(10)\n left[:3] = np.nan\n\n result = nanops.nangt(left, right)\n with np.errstate(invalid='ignore'):\n expected = (left > right).astype('O')\n expected[:3] = np.nan\n\n assert_almost_equal(result, expected)\n\n s = Series(['a', 'b', 'c'])\n s2 = Series([False, True, False])\n\n # it works!\n exp = Series([False, False, False])\n assert_series_equal(s == s2, exp)\n assert_series_equal(s2 == s, exp)\n\n def test_op_method(self):\n def check(series, other, check_reverse=False):\n simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']\n if not compat.PY3:\n simple_ops.append('div')\n\n for opname in simple_ops:\n op = getattr(Series, opname)\n\n if op == 'div':\n alt = operator.truediv\n else:\n alt = getattr(operator, opname)\n\n result = op(series, other)\n expected = alt(series, other)\n assert_almost_equal(result, expected)\n if check_reverse:\n rop = getattr(Series, \"r\" + opname)\n result = rop(series, other)\n expected = alt(other, series)\n assert_almost_equal(result, expected)\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts[::2])\n check(self.ts, 5, check_reverse=True)\n check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)\n\n def test_neg(self):\n assert_series_equal(-self.series, -1 * self.series)\n\n def test_invert(self):\n assert_series_equal(-(self.series < 0), ~(self.series < 0))\n\n def test_div(self):\n with np.errstate(all='ignore'):\n # no longer do integer div for any ops, but deal with the 0's\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] / p['second']\n expected = Series(\n p['first'].values.astype(float) / p['second'].values,\n dtype='float64')\n expected.iloc[0:3] = np.inf\n assert_series_equal(result, expected)\n\n result = p['first'] / 0\n expected = Series(np.inf, index=p.index, name='first')\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] / p['second']\n expected = Series(p['first'].values / p['second'].values)\n assert_series_equal(result, expected)\n\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})\n result = p['first'] / p['second']\n assert_series_equal(result, p['first'].astype('float64'),\n check_names=False)\n self.assertTrue(result.name is None)\n self.assertFalse(np.array_equal(result, p['second'] / p['first']))\n\n # inf signing\n s = Series([np.nan, 1., -1.])\n result = s / 0\n expected = Series([np.nan, np.inf, -np.inf])\n assert_series_equal(result, expected)\n\n # float/integer issue\n # GH 7785\n p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})\n expected = Series([-0.01, -np.inf])\n\n 
result = p['second'].div(p['first'])\n assert_series_equal(result, expected, check_names=False)\n\n result = p['second'] / p['first']\n assert_series_equal(result, expected)\n\n # GH 9144\n s = Series([-1, 0, 1])\n\n result = 0 / s\n expected = Series([0.0, nan, 0.0])\n assert_series_equal(result, expected)\n\n result = s / 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n result = s // 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n def test_operators(self):\n def _check_op(series, other, op, pos_only=False,\n check_dtype=True):\n left = np.abs(series) if pos_only else series\n right = np.abs(other) if pos_only else other\n\n cython_or_numpy = op(left, right)\n python = left.combine(right, op)\n assert_series_equal(cython_or_numpy, python,\n check_dtype=check_dtype)\n\n def check(series, other):\n simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']\n\n for opname in simple_ops:\n _check_op(series, other, getattr(operator, opname))\n\n _check_op(series, other, operator.pow, pos_only=True)\n\n _check_op(series, other, lambda x, y: operator.add(y, x))\n _check_op(series, other, lambda x, y: operator.sub(y, x))\n _check_op(series, other, lambda x, y: operator.truediv(y, x))\n _check_op(series, other, lambda x, y: operator.floordiv(y, x))\n _check_op(series, other, lambda x, y: operator.mul(y, x))\n _check_op(series, other, lambda x, y: operator.pow(y, x),\n pos_only=True)\n _check_op(series, other, lambda x, y: operator.mod(y, x))\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts * 0)\n check(self.ts, self.ts[::2])\n check(self.ts, 5)\n\n def check_comparators(series, other, check_dtype=True):\n _check_op(series, other, operator.gt, check_dtype=check_dtype)\n _check_op(series, other, operator.ge, check_dtype=check_dtype)\n _check_op(series, other, operator.eq, check_dtype=check_dtype)\n _check_op(series, other, operator.lt, check_dtype=check_dtype)\n _check_op(series, other, operator.le, check_dtype=check_dtype)\n\n check_comparators(self.ts, 5)\n check_comparators(self.ts, self.ts + 1, check_dtype=False)\n\n def test_divmod(self):\n def check(series, other):\n results = divmod(series, other)\n if isinstance(other, Iterable) and len(series) != len(other):\n # if the lengths don't match, this is the test where we use\n # `self.ts[::2]`. 
Pad every other value in `other_np` with nan.\n other_np = []\n for n in other:\n other_np.append(n)\n other_np.append(np.nan)\n else:\n other_np = other\n other_np = np.asarray(other_np)\n with np.errstate(all='ignore'):\n expecteds = divmod(series.values, np.asarray(other_np))\n\n for result, expected in zip(results, expecteds):\n # check the values, name, and index separatly\n assert_almost_equal(np.asarray(result), expected)\n\n self.assertEqual(result.name, series.name)\n assert_index_equal(result.index, series.index)\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts * 0)\n check(self.ts, self.ts[::2])\n check(self.ts, 5)\n\n def test_operators_empty_int_corner(self):\n s1 = Series([], [], dtype=np.int32)\n s2 = Series({'x': 0.})\n assert_series_equal(s1 * s2, Series([np.nan], index=['x']))\n\n def test_operators_timedelta64(self):\n\n # invalid ops\n self.assertRaises(Exception, self.objSeries.__add__, 1)\n self.assertRaises(Exception, self.objSeries.__add__,\n np.array(1, dtype=np.int64))\n self.assertRaises(Exception, self.objSeries.__sub__, 1)\n self.assertRaises(Exception, self.objSeries.__sub__,\n np.array(1, dtype=np.int64))\n\n # seriese ops\n v1 = date_range('2012-1-1', periods=3, freq='D')\n v2 = date_range('2012-1-2', periods=3, freq='D')\n rs = Series(v2) - Series(v1)\n xp = Series(1e9 * 3600 * 24,\n rs.index).astype('int64').astype('timedelta64[ns]')\n assert_series_equal(rs, xp)\n self.assertEqual(rs.dtype, 'timedelta64[ns]')\n\n df = DataFrame(dict(A=v1))\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # series on the rhs\n result = df['A'] - df['A'].shift()\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n result = df['A'] + td\n self.assertEqual(result.dtype, 'M8[ns]')\n\n # scalar Timestamp on rhs\n maxa = df['A'].max()\n tm.assertIsInstance(maxa, Timestamp)\n\n resultb = df['A'] - df['A'].max()\n self.assertEqual(resultb.dtype, 'timedelta64[ns]')\n\n # timestamp on lhs\n result = resultb + df['A']\n values = [Timestamp('20111230'), Timestamp('20120101'),\n Timestamp('20120103')]\n expected = Series(values, name='A')\n assert_series_equal(result, expected)\n\n # datetimes on rhs\n result = df['A'] - datetime(2001, 1, 1)\n expected = Series(\n [timedelta(days=4017 + i) for i in range(3)], name='A')\n assert_series_equal(result, expected)\n self.assertEqual(result.dtype, 'm8[ns]')\n\n d = datetime(2001, 1, 1, 3, 4)\n resulta = df['A'] - d\n self.assertEqual(resulta.dtype, 'm8[ns]')\n\n # roundtrip\n resultb = resulta + d\n assert_series_equal(df['A'], resultb)\n\n # timedeltas on rhs\n td = timedelta(days=1)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(resultb, df['A'])\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # roundtrip\n td = timedelta(minutes=5, seconds=3)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(df['A'], resultb)\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # inplace\n value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))\n rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))\n self.assertEqual(rs[2], value)\n\n def test_operator_series_comparison_zerorank(self):\n # GH 13006\n result = np.float64(0) > pd.Series([1, 2, 3])\n expected = 0.0 > pd.Series([1, 2, 3])\n self.assert_series_equal(result, expected)\n result = pd.Series([1, 2, 3]) < np.float64(0)\n expected = pd.Series([1, 2, 3]) < 0.0\n self.assert_series_equal(result, expected)\n result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])\n expected = 0.0 > 
pd.Series([1, 2, 3])\n self.assert_series_equal(result, expected)\n\n def test_timedeltas_with_DateOffset(self):\n\n # GH 4532\n # operate with pd.offsets\n s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])\n\n result = s + pd.offsets.Second(5)\n result2 = pd.offsets.Second(5) + s\n expected = Series([Timestamp('20130101 9:01:05'), Timestamp(\n '20130101 9:02:05')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s - pd.offsets.Second(5)\n result2 = -pd.offsets.Second(5) + s\n expected = Series([Timestamp('20130101 9:00:55'), Timestamp(\n '20130101 9:01:55')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(\n '20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(\n '20130101 9:07:00.005')])\n assert_series_equal(result, expected)\n\n # operate with np.timedelta64 correctly\n result = s + np.timedelta64(1, 's')\n result2 = np.timedelta64(1, 's') + s\n expected = Series([Timestamp('20130101 9:01:01'), Timestamp(\n '20130101 9:02:01')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + np.timedelta64(5, 'ms')\n result2 = np.timedelta64(5, 'ms') + s\n expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(\n '20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # valid DateOffsets\n for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',\n 'Nano']:\n op = getattr(pd.offsets, do)\n s + op(5)\n op(5) + s\n\n def test_timedelta_series_ops(self):\n # GH11925\n\n s = Series(timedelta_range('1 day', periods=3))\n ts = Timestamp('2012-01-01')\n expected = Series(date_range('2012-01-02', periods=3))\n assert_series_equal(ts + s, expected)\n assert_series_equal(s + ts, expected)\n\n expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))\n assert_series_equal(ts - s, expected2)\n assert_series_equal(ts + (-s), expected2)\n\n def test_timedelta64_operations_with_DateOffset(self):\n # GH 10699\n td = Series([timedelta(minutes=5, seconds=3)] * 3)\n result = td + pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=6, seconds=3)] * 3)\n assert_series_equal(result, expected)\n\n result = td - pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=4, seconds=3)] * 3)\n assert_series_equal(result, expected)\n\n result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),\n pd.offsets.Hour(2)])\n expected = Series([timedelta(minutes=6, seconds=3), timedelta(\n minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])\n assert_series_equal(result, expected)\n\n result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)\n expected = Series([timedelta(minutes=6, seconds=15)] * 3)\n assert_series_equal(result, expected)\n\n # valid DateOffsets\n for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',\n 'Nano']:\n op = getattr(pd.offsets, do)\n td + op(5)\n op(5) + td\n td - op(5)\n op(5) - td\n\n def test_timedelta64_operations_with_timedeltas(self):\n\n # td operate with td\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td2 = timedelta(minutes=5, seconds=4)\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) - 
Series([timedelta(\n seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(\n seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2, td1)\n\n # Now again, using pd.to_timedelta, which should build\n # a Series or a scalar, depending on input.\n td1 = Series(pd.to_timedelta(['00:05:03'] * 3))\n td2 = pd.to_timedelta('00:05:04')\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(\n seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(\n seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2, td1)\n\n def test_timedelta64_operations_with_integers(self):\n\n # GH 4521\n # divide/multiply by integers\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n s2 = Series([2, 3, 4])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result, expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result, expected)\n\n result = s1 / 2\n expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result, expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result, expected)\n\n for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',\n 'uint8']:\n s2 = Series([20, 30, 40], dtype=dtype)\n expected = Series(\n s1.values.astype(np.int64) * s2.astype(np.int64),\n dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result, expected)\n\n result = s1 * 2\n expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result, expected)\n\n result = s1 * -1\n expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result, expected)\n\n # invalid ops\n assert_series_equal(s1 / s2.astype(float),\n Series([Timedelta('2 days 22:48:00'), Timedelta(\n '1 days 23:12:00'), Timedelta('NaT')]))\n assert_series_equal(s1 / 2.0,\n Series([Timedelta('29 days 12:00:00'), Timedelta(\n '29 days 12:00:00'), Timedelta('NaT')]))\n\n for op in ['__add__', '__sub__']:\n sop = getattr(s1, op, None)\n if sop is not None:\n self.assertRaises(TypeError, sop, 1)\n self.assertRaises(TypeError, sop, s2.values)\n\n def test_timedelta64_conversions(self):\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n\n for m in [1, 3, 10]:\n for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:\n\n # op\n expected = s1.apply(lambda x: x / np.timedelta64(m, unit))\n result = s1 / np.timedelta64(m, unit)\n assert_series_equal(result, expected)\n\n if m == 1 and unit != 'ns':\n\n # astype\n result = s1.astype(\"timedelta64[{0}]\".format(unit))\n assert_series_equal(result, expected)\n\n # reverse op\n 
expected = s1.apply(\n lambda x: Timedelta(np.timedelta64(m, unit)) / x)\n result = np.timedelta64(m, unit) / s1\n\n # astype\n s = Series(date_range('20130101', periods=3))\n result = s.astype(object)\n self.assertIsInstance(result.iloc[0], datetime)\n self.assertTrue(result.dtype == np.object_)\n\n result = s1.astype(object)\n self.assertIsInstance(result.iloc[0], timedelta)\n self.assertTrue(result.dtype == np.object_)\n\n def test_timedelta64_equal_timedelta_supported_ops(self):\n ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),\n Timestamp('20130228 22:00:00'), Timestamp(\n '20130228 21:00:00')])\n\n intervals = 'D', 'h', 'm', 's', 'us'\n\n # TODO: unused\n # npy16_mappings = {'D': 24 * 60 * 60 * 1000000,\n # 'h': 60 * 60 * 1000000,\n # 'm': 60 * 1000000,\n # 's': 1000000,\n # 'us': 1}\n\n def timedelta64(*args):\n return sum(starmap(np.timedelta64, zip(args, intervals)))\n\n for op, d, h, m, s, us in product([operator.add, operator.sub],\n *([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,\n microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n try:\n assert_series_equal(lhs, rhs)\n except:\n raise AssertionError(\n \"invalid comparsion [op->{0},d->{1},h->{2},m->{3},\"\n \"s->{4},us->{5}]\\n{6}\\n{7}\\n\".format(op, d, h, m, s,\n us, lhs, rhs))\n\n def test_operators_datetimelike(self):\n def run_ops(ops, get_ser, test_ser):\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n for op_str in ops:\n op = getattr(get_ser, op_str, None)\n with tm.assertRaisesRegexp(TypeError, 'operate'):\n op(test_ser)\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n td2 = timedelta(minutes=5, seconds=4)\n ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',\n '__rfloordiv__', '__rpow__']\n run_ops(ops, td1, td2)\n td1 + td2\n td2 + td1\n td1 - td2\n td2 - td1\n td1 / td2\n td2 / td1\n\n # ## datetime64 ###\n dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),\n Timestamp('20120103')])\n dt1.iloc[2] = np.nan\n dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),\n Timestamp('20120104')])\n ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__radd__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, dt1, dt2)\n dt1 - dt2\n dt2 - dt1\n\n # ## datetime64 with timetimedelta ###\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n run_ops(ops, dt1, td1)\n dt1 + td1\n td1 + dt1\n dt1 - td1\n # TODO: Decide if this ought to work.\n # td1 - dt1\n\n # ## timetimedelta with datetime64 ###\n ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',\n '__rdiv__', '__rpow__']\n run_ops(ops, td1, dt1)\n td1 + dt1\n dt1 + td1\n\n # 8260, 10763\n # datetime64 with tz\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n\n tz = 'US/Eastern'\n dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,\n tz=tz), name='foo')\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n run_ops(ops, dt1, td1)\n\n result = dt1 + td1[0]\n exp = (dt1.dt.tz_localize(None) + 
td1[0]).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n result = dt2 + td2[0]\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n # odd numpy behavior with scalar timedeltas\n if not _np_version_under1p8:\n result = td1[0] + dt1\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n result = td2[0] + dt2\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n result = dt1 - td1[0]\n exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n self.assertRaises(TypeError, lambda: td1[0] - dt1)\n\n result = dt2 - td2[0]\n exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n self.assertRaises(TypeError, lambda: td2[0] - dt2)\n\n result = dt1 + td1\n exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n result = dt2 + td2\n exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n result = dt1 - td1\n exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n result = dt2 - td2\n exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)\n assert_series_equal(result, exp)\n\n self.assertRaises(TypeError, lambda: td1 - dt1)\n self.assertRaises(TypeError, lambda: td2 - dt2)\n\n def test_sub_datetime_compat(self):\n # GH 14088\n tm._skip_if_no_pytz()\n import pytz\n s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])\n dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)\n exp = Series([Timedelta('1 days'), pd.NaT])\n assert_series_equal(s - dt, exp)\n assert_series_equal(s - Timestamp(dt), exp)\n\n def test_sub_single_tz(self):\n # GH12290\n s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])\n s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])\n result = s1 - s2\n expected = Series([Timedelta('2days')])\n assert_series_equal(result, expected)\n result = s2 - s1\n expected = Series([Timedelta('-2days')])\n assert_series_equal(result, expected)\n\n def test_ops_nat(self):\n # GH 11349\n timedelta_series = Series([NaT, Timedelta('1s')])\n datetime_series = Series([NaT, Timestamp('19900315')])\n nat_series_dtype_timedelta = Series(\n [NaT, NaT], dtype='timedelta64[ns]')\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')\n single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')\n single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')\n\n # subtraction\n assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)\n assert_series_equal(-NaT + timedelta_series,\n nat_series_dtype_timedelta)\n\n assert_series_equal(timedelta_series - single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,\n nat_series_dtype_timedelta)\n\n assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)\n assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)\n\n assert_series_equal(datetime_series - single_nat_dtype_datetime,\n nat_series_dtype_timedelta)\n with tm.assertRaises(TypeError):\n -single_nat_dtype_datetime + datetime_series\n\n assert_series_equal(datetime_series - single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n assert_series_equal(-single_nat_dtype_timedelta + datetime_series,\n nat_series_dtype_timestamp)\n\n # without a Series wrapping the NaT, it is 
ambiguous\n # whether it is a datetime64 or timedelta64\n # defaults to interpreting it as timedelta64\n assert_series_equal(nat_series_dtype_timestamp - NaT,\n nat_series_dtype_timestamp)\n assert_series_equal(-NaT + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n\n assert_series_equal(nat_series_dtype_timestamp -\n single_nat_dtype_datetime,\n nat_series_dtype_timedelta)\n with tm.assertRaises(TypeError):\n -single_nat_dtype_datetime + nat_series_dtype_timestamp\n\n assert_series_equal(nat_series_dtype_timestamp -\n single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n assert_series_equal(-single_nat_dtype_timedelta +\n nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n\n with tm.assertRaises(TypeError):\n timedelta_series - single_nat_dtype_datetime\n\n # addition\n assert_series_equal(nat_series_dtype_timestamp + NaT,\n nat_series_dtype_timestamp)\n assert_series_equal(NaT + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n\n assert_series_equal(nat_series_dtype_timestamp +\n single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n assert_series_equal(single_nat_dtype_timedelta +\n nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n\n assert_series_equal(nat_series_dtype_timedelta + NaT,\n nat_series_dtype_timedelta)\n assert_series_equal(NaT + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n\n assert_series_equal(nat_series_dtype_timedelta +\n single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n assert_series_equal(single_nat_dtype_timedelta +\n nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n\n assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)\n assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)\n\n assert_series_equal(timedelta_series + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n assert_series_equal(single_nat_dtype_timedelta + timedelta_series,\n nat_series_dtype_timedelta)\n\n assert_series_equal(nat_series_dtype_timestamp + NaT,\n nat_series_dtype_timestamp)\n assert_series_equal(NaT + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n\n assert_series_equal(nat_series_dtype_timestamp +\n single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n assert_series_equal(single_nat_dtype_timedelta +\n nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n\n assert_series_equal(nat_series_dtype_timedelta + NaT,\n nat_series_dtype_timedelta)\n assert_series_equal(NaT + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n\n assert_series_equal(nat_series_dtype_timedelta +\n single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n assert_series_equal(single_nat_dtype_timedelta +\n nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n\n assert_series_equal(nat_series_dtype_timedelta +\n single_nat_dtype_datetime,\n nat_series_dtype_timestamp)\n assert_series_equal(single_nat_dtype_datetime +\n nat_series_dtype_timedelta,\n nat_series_dtype_timestamp)\n\n # multiplication\n assert_series_equal(nat_series_dtype_timedelta * 1.0,\n nat_series_dtype_timedelta)\n assert_series_equal(1.0 * nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n\n assert_series_equal(timedelta_series * 1, timedelta_series)\n assert_series_equal(1 * timedelta_series, timedelta_series)\n\n assert_series_equal(timedelta_series * 1.5,\n Series([NaT, Timedelta('1.5s')]))\n assert_series_equal(1.5 * timedelta_series,\n Series([NaT, Timedelta('1.5s')]))\n\n assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)\n assert_series_equal(nan 
* timedelta_series, nat_series_dtype_timedelta)\n\n with tm.assertRaises(TypeError):\n datetime_series * 1\n with tm.assertRaises(TypeError):\n nat_series_dtype_timestamp * 1\n with tm.assertRaises(TypeError):\n datetime_series * 1.0\n with tm.assertRaises(TypeError):\n nat_series_dtype_timestamp * 1.0\n\n # division\n assert_series_equal(timedelta_series / 2,\n Series([NaT, Timedelta('0.5s')]))\n assert_series_equal(timedelta_series / 2.0,\n Series([NaT, Timedelta('0.5s')]))\n assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)\n with tm.assertRaises(TypeError):\n nat_series_dtype_timestamp / 1.0\n with tm.assertRaises(TypeError):\n nat_series_dtype_timestamp / 1\n\n def test_ops_datetimelike_align(self):\n # GH 7500\n # datetimelike ops need to align\n dt = Series(date_range('2012-1-1', periods=3, freq='D'))\n dt.iloc[2] = np.nan\n dt2 = dt[::-1]\n\n expected = Series([timedelta(0), timedelta(0), pd.NaT])\n # name is reset\n result = dt2 - dt\n assert_series_equal(result, expected)\n\n expected = Series(expected, name=0)\n result = (dt2.to_frame() - dt.to_frame())[0]\n assert_series_equal(result, expected)\n\n def test_object_comparisons(self):\n s = Series(['a', 'b', np.nan, 'c', 'a'])\n\n result = s == 'a'\n expected = Series([True, False, False, False, True])\n assert_series_equal(result, expected)\n\n result = s < 'a'\n expected = Series([False, False, False, False, False])\n assert_series_equal(result, expected)\n\n result = s != 'a'\n expected = -(s == 'a')\n assert_series_equal(result, expected)\n\n def test_comparison_tuples(self):\n # GH11339\n # comparisons vs tuple\n s = Series([(1, 1), (1, 2)])\n\n result = s == (1, 2)\n expected = Series([False, True])\n assert_series_equal(result, expected)\n\n result = s != (1, 2)\n expected = Series([True, False])\n assert_series_equal(result, expected)\n\n result = s == (0, 0)\n expected = Series([False, False])\n assert_series_equal(result, expected)\n\n result = s != (0, 0)\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n s = Series([(1, 1), (1, 1)])\n\n result = s == (1, 1)\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n result = s != (1, 1)\n expected = Series([False, False])\n assert_series_equal(result, expected)\n\n s = Series([frozenset([1]), frozenset([1, 2])])\n\n result = s == frozenset([1])\n expected = Series([True, False])\n assert_series_equal(result, expected)\n\n def test_comparison_operators_with_nas(self):\n s = Series(bdate_range('1/1/2000', periods=10), dtype=object)\n s[::2] = np.nan\n\n # test that comparisons work\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n val = s[5]\n\n f = getattr(operator, op)\n result = f(s, val)\n\n expected = f(s.dropna(), val).reindex(s.index)\n\n if op == 'ne':\n expected = expected.fillna(True).astype(bool)\n else:\n expected = expected.fillna(False).astype(bool)\n\n assert_series_equal(result, expected)\n\n # fffffffuuuuuuuuuuuu\n # result = f(val, s)\n # expected = f(val, s.dropna()).reindex(s.index)\n # assert_series_equal(result, expected)\n\n # boolean &, |, ^ should work with object arrays and propagate NAs\n\n ops = ['and_', 'or_', 'xor']\n mask = s.isnull()\n for bool_op in ops:\n f = getattr(operator, bool_op)\n\n filled = s.fillna(s[0])\n\n result = f(s < s[9], s > s[3])\n\n expected = f(filled < filled[9], filled > filled[3])\n expected[mask] = False\n assert_series_equal(result, expected)\n\n def test_comparison_object_numeric_nas(self):\n s = Series(np.random.randn(10), 
dtype=object)\n shifted = s.shift(2)\n\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n f = getattr(operator, op)\n\n result = f(s, shifted)\n expected = f(s.astype(float), shifted.astype(float))\n assert_series_equal(result, expected)\n\n def test_comparison_invalid(self):\n\n # GH4968\n # invalid date/int comparisons\n s = Series(range(5))\n s2 = Series(date_range('20010101', periods=5))\n\n for (x, y) in [(s, s2), (s2, s)]:\n self.assertRaises(TypeError, lambda: x == y)\n self.assertRaises(TypeError, lambda: x != y)\n self.assertRaises(TypeError, lambda: x >= y)\n self.assertRaises(TypeError, lambda: x > y)\n self.assertRaises(TypeError, lambda: x < y)\n self.assertRaises(TypeError, lambda: x <= y)\n\n def test_more_na_comparisons(self):\n for dtype in [None, object]:\n left = Series(['a', np.nan, 'c'], dtype=dtype)\n right = Series(['a', np.nan, 'd'], dtype=dtype)\n\n result = left == right\n expected = Series([True, False, False])\n assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, True])\n assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n assert_series_equal(result, expected)\n\n result = left != np.nan\n expected = Series([True, True, True])\n assert_series_equal(result, expected)\n\n def test_nat_comparisons(self):\n data = [([pd.Timestamp('2011-01-01'), pd.NaT,\n pd.Timestamp('2011-01-03')],\n [pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),\n\n ([pd.Timedelta('1 days'), pd.NaT,\n pd.Timedelta('3 days')],\n [pd.NaT, pd.NaT, pd.Timedelta('3 days')]),\n\n ([pd.Period('2011-01', freq='M'), pd.NaT,\n pd.Period('2011-03', freq='M')],\n [pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]\n\n # add lhs / rhs switched data\n data = data + [(r, l) for l, r in data]\n\n for l, r in data:\n for dtype in [None, object]:\n left = Series(l, dtype=dtype)\n\n # Series, Index\n for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:\n expected = Series([False, False, True])\n assert_series_equal(left == right, expected)\n\n expected = Series([True, True, False])\n assert_series_equal(left != right, expected)\n\n expected = Series([False, False, False])\n assert_series_equal(left < right, expected)\n\n expected = Series([False, False, False])\n assert_series_equal(left > right, expected)\n\n expected = Series([False, False, True])\n assert_series_equal(left >= right, expected)\n\n expected = Series([False, False, True])\n assert_series_equal(left <= right, expected)\n\n def test_nat_comparisons_scalar(self):\n data = [[pd.Timestamp('2011-01-01'), pd.NaT,\n pd.Timestamp('2011-01-03')],\n\n [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],\n\n [pd.Period('2011-01', freq='M'), pd.NaT,\n pd.Period('2011-03', freq='M')]]\n\n for l in data:\n for dtype in [None, object]:\n left = Series(l, dtype=dtype)\n\n expected = Series([False, False, False])\n assert_series_equal(left == pd.NaT, expected)\n assert_series_equal(pd.NaT == left, expected)\n\n expected = Series([True, True, True])\n assert_series_equal(left != pd.NaT, expected)\n assert_series_equal(pd.NaT != left, expected)\n\n expected = Series([False, False, False])\n assert_series_equal(left < pd.NaT, expected)\n assert_series_equal(pd.NaT > left, expected)\n assert_series_equal(left <= pd.NaT, expected)\n assert_series_equal(pd.NaT >= left, expected)\n\n assert_series_equal(left > pd.NaT, expected)\n assert_series_equal(pd.NaT < left, expected)\n assert_series_equal(left >= pd.NaT, expected)\n 
assert_series_equal(pd.NaT <= left, expected)\n\n def test_comparison_different_length(self):\n a = Series(['a', 'b', 'c'])\n b = Series(['b', 'a'])\n self.assertRaises(ValueError, a.__lt__, b)\n\n a = Series([1, 2])\n b = Series([2, 3, 4])\n self.assertRaises(ValueError, a.__eq__, b)\n\n def test_comparison_label_based(self):\n\n # GH 4947\n # comparisons should be label based\n\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False], list('abc'))\n\n expected = Series([False, True, False], list('abc'))\n result = a & b\n assert_series_equal(result, expected)\n\n expected = Series([True, True, False], list('abc'))\n result = a | b\n assert_series_equal(result, expected)\n\n expected = Series([True, False, False], list('abc'))\n result = a ^ b\n assert_series_equal(result, expected)\n\n # rhs is bigger\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False, True], list('abcd'))\n\n expected = Series([False, True, False, False], list('abcd'))\n result = a & b\n assert_series_equal(result, expected)\n\n expected = Series([True, True, False, False], list('abcd'))\n result = a | b\n assert_series_equal(result, expected)\n\n # filling\n\n # vs empty\n result = a & Series([])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result, expected)\n\n result = a | Series([])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result, expected)\n\n # vs non-matching\n result = a & Series([1], ['z'])\n expected = Series([False, False, False, False], list('abcz'))\n assert_series_equal(result, expected)\n\n result = a | Series([1], ['z'])\n expected = Series([True, True, False, False], list('abcz'))\n assert_series_equal(result, expected)\n\n # identity\n # we would like s[s|e] == s to hold for any e, whether empty or not\n for e in [Series([]), Series([1], ['z']),\n Series(np.nan, b.index), Series(np.nan, a.index)]:\n result = a[a | e]\n assert_series_equal(result, a[a])\n\n for e in [Series(['z'])]:\n if compat.PY3:\n with tm.assert_produces_warning(RuntimeWarning):\n result = a[a | e]\n else:\n result = a[a | e]\n assert_series_equal(result, a[a])\n\n # vs scalars\n index = list('bca')\n t = Series([True, False, True])\n\n for v in [True, 1, 2]:\n result = Series([True, False, True], index=index) | v\n expected = Series([True, True, True], index=index)\n assert_series_equal(result, expected)\n\n for v in [np.nan, 'foo']:\n self.assertRaises(TypeError, lambda: t | v)\n\n for v in [False, 0]:\n result = Series([True, False, True], index=index) | v\n expected = Series([True, False, True], index=index)\n assert_series_equal(result, expected)\n\n for v in [True, 1]:\n result = Series([True, False, True], index=index) & v\n expected = Series([True, False, True], index=index)\n assert_series_equal(result, expected)\n\n for v in [False, 0]:\n result = Series([True, False, True], index=index) & v\n expected = Series([False, False, False], index=index)\n assert_series_equal(result, expected)\n for v in [np.nan]:\n self.assertRaises(TypeError, lambda: t & v)\n\n def test_comparison_flex_basic(self):\n left = pd.Series(np.random.randn(10))\n right = pd.Series(np.random.randn(10))\n\n assert_series_equal(left.eq(right), left == right)\n assert_series_equal(left.ne(right), left != right)\n assert_series_equal(left.le(right), left < right)\n assert_series_equal(left.lt(right), left <= right)\n assert_series_equal(left.gt(right), left > right)\n assert_series_equal(left.ge(right), left >= right)\n\n # axis\n for 
axis in [0, None, 'index']:\n assert_series_equal(left.eq(right, axis=axis), left == right)\n assert_series_equal(left.ne(right, axis=axis), left != right)\n assert_series_equal(left.le(right, axis=axis), left < right)\n assert_series_equal(left.lt(right, axis=axis), left <= right)\n assert_series_equal(left.gt(right, axis=axis), left > right)\n assert_series_equal(left.ge(right, axis=axis), left >= right)\n\n #\n msg = 'No axis named 1 for object type'\n for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']:\n with tm.assertRaisesRegexp(ValueError, msg):\n getattr(left, op)(right, axis=1)\n\n def test_comparison_flex_alignment(self):\n left = Series([1, 3, 2], index=list('abc'))\n right = Series([2, 2, 2], index=list('bcd'))\n\n exp = pd.Series([False, False, True, False], index=list('abcd'))\n assert_series_equal(left.eq(right), exp)\n\n exp = pd.Series([True, True, False, True], index=list('abcd'))\n assert_series_equal(left.ne(right), exp)\n\n exp = pd.Series([False, False, True, False], index=list('abcd'))\n assert_series_equal(left.le(right), exp)\n\n exp = pd.Series([False, False, False, False], index=list('abcd'))\n assert_series_equal(left.lt(right), exp)\n\n exp = pd.Series([False, True, True, False], index=list('abcd'))\n assert_series_equal(left.ge(right), exp)\n\n exp = pd.Series([False, True, False, False], index=list('abcd'))\n assert_series_equal(left.gt(right), exp)\n\n def test_comparison_flex_alignment_fill(self):\n left = Series([1, 3, 2], index=list('abc'))\n right = Series([2, 2, 2], index=list('bcd'))\n\n exp = pd.Series([False, False, True, True], index=list('abcd'))\n assert_series_equal(left.eq(right, fill_value=2), exp)\n\n exp = pd.Series([True, True, False, False], index=list('abcd'))\n assert_series_equal(left.ne(right, fill_value=2), exp)\n\n exp = pd.Series([False, False, True, True], index=list('abcd'))\n assert_series_equal(left.le(right, fill_value=0), exp)\n\n exp = pd.Series([False, False, False, True], index=list('abcd'))\n assert_series_equal(left.lt(right, fill_value=0), exp)\n\n exp = pd.Series([True, True, True, False], index=list('abcd'))\n assert_series_equal(left.ge(right, fill_value=0), exp)\n\n exp = pd.Series([True, True, False, False], index=list('abcd'))\n assert_series_equal(left.gt(right, fill_value=0), exp)\n\n def test_operators_bitwise(self):\n # GH 9016: support bitwise op for integer types\n index = list('bca')\n\n s_tft = Series([True, False, True], index=index)\n s_fff = Series([False, False, False], index=index)\n s_tff = Series([True, False, False], index=index)\n s_empty = Series([])\n\n # TODO: unused\n # s_0101 = Series([0, 1, 0, 1])\n\n s_0123 = Series(range(4), dtype='int64')\n s_3333 = Series([3] * 4)\n s_4444 = Series([4] * 4)\n\n res = s_tft & s_empty\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_tft | s_empty\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & s_3333\n expected = Series(range(4), dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123 | s_4444\n expected = Series(range(4, 8), dtype='int64')\n assert_series_equal(res, expected)\n\n s_a0b1c0 = Series([1], list('b'))\n\n res = s_tft & s_a0b1c0\n expected = s_tff.reindex(list('abc'))\n assert_series_equal(res, expected)\n\n res = s_tft | s_a0b1c0\n expected = s_tft.reindex(list('abc'))\n assert_series_equal(res, expected)\n\n n0 = 0\n res = s_tft & n0\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_0123 & n0\n expected = Series([0] * 4)\n assert_series_equal(res, expected)\n\n n1 = 1\n 
res = s_tft & n1\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & n1\n expected = Series([0, 1, 0, 1])\n assert_series_equal(res, expected)\n\n s_1111 = Series([1] * 4, dtype='int8')\n res = s_0123 & s_1111\n expected = Series([0, 1, 0, 1], dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123.astype(np.int16) | s_1111.astype(np.int32)\n expected = Series([1, 1, 3, 3], dtype='int32')\n assert_series_equal(res, expected)\n\n self.assertRaises(TypeError, lambda: s_1111 & 'a')\n self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])\n self.assertRaises(TypeError, lambda: s_0123 & np.NaN)\n self.assertRaises(TypeError, lambda: s_0123 & 3.14)\n self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])\n\n # s_0123 will be all false now because of reindexing like s_tft\n if compat.PY3:\n # unable to sort incompatible object via .union.\n exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])\n with tm.assert_produces_warning(RuntimeWarning):\n assert_series_equal(s_tft & s_0123, exp)\n else:\n exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])\n assert_series_equal(s_tft & s_0123, exp)\n\n # s_tft will be all false now because of reindexing like s_0123\n if compat.PY3:\n # unable to sort incompatible object via .union.\n exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])\n with tm.assert_produces_warning(RuntimeWarning):\n assert_series_equal(s_0123 & s_tft, exp)\n else:\n exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])\n assert_series_equal(s_0123 & s_tft, exp)\n\n assert_series_equal(s_0123 & False, Series([False] * 4))\n assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))\n assert_series_equal(s_0123 & [False], Series([False] * 4))\n assert_series_equal(s_0123 & (False), Series([False] * 4))\n assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),\n Series([False] * 4))\n\n s_ftft = Series([False, True, False, True])\n assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)\n\n s_abNd = Series(['a', 'b', np.NaN, 'd'])\n res = s_0123 & s_abNd\n expected = s_ftft\n assert_series_equal(res, expected)\n\n def test_scalar_na_cmp_corners(self):\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n def tester(a, b):\n return a & b\n\n self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))\n\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])\n s[::2] = np.nan\n\n expected = Series(True, index=s.index)\n expected[::2] = False\n assert_series_equal(tester(s, list(s)), expected)\n\n d = DataFrame({'A': s})\n # TODO: Fix this exception - needs to be fixed! 
(see GH5035)\n # (previously this was a TypeError because series returned\n # NotImplemented\n\n # this is an alignment issue; these are equivalent\n # https://github.com/pandas-dev/pandas/issues/5284\n\n self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))\n self.assertRaises(ValueError, tester, s, d)\n\n # this is wrong as its not a boolean result\n # result = d.__and__(s,axis='index')\n\n def test_operators_corner(self):\n series = self.ts\n\n empty = Series([], index=Index([]))\n\n result = series + empty\n self.assertTrue(np.isnan(result).all())\n\n result = empty + Series([], index=Index([]))\n self.assertEqual(len(result), 0)\n\n # TODO: this returned NotImplemented earlier, what to do?\n # deltas = Series([timedelta(1)] * 5, index=np.arange(5))\n # sub_deltas = deltas[::2]\n # deltas5 = deltas * 5\n # deltas = deltas + sub_deltas\n\n # float + int\n int_ts = self.ts.astype(int)[:-5]\n added = self.ts + int_ts\n expected = Series(self.ts.values[:-5] + int_ts.values,\n index=self.ts.index[:-5], name='ts')\n self.assert_series_equal(added[:-5], expected)\n\n def test_operators_reverse_object(self):\n # GH 56\n arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)\n\n def _check_op(arr, op):\n result = op(1., arr)\n expected = op(1., arr.astype(float))\n assert_series_equal(result.astype(float), expected)\n\n _check_op(arr, operator.add)\n _check_op(arr, operator.sub)\n _check_op(arr, operator.mul)\n _check_op(arr, operator.truediv)\n _check_op(arr, operator.floordiv)\n\n def test_arith_ops_df_compat(self):\n # GH 1134\n s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')\n s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')\n\n exp = pd.Series([3.0, 4.0, np.nan, np.nan],\n index=list('ABCD'), name='x')\n assert_series_equal(s1 + s2, exp)\n assert_series_equal(s2 + s1, exp)\n\n exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},\n index=list('ABCD'))\n assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)\n assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)\n\n # different length\n s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')\n s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')\n\n exp = pd.Series([3, 4, 5, np.nan],\n index=list('ABCD'), name='x')\n assert_series_equal(s3 + s4, exp)\n assert_series_equal(s4 + s3, exp)\n\n exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},\n index=list('ABCD'))\n assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)\n assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)\n\n def test_comp_ops_df_compat(self):\n # GH 1134\n s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')\n s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')\n\n s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')\n s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')\n\n for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:\n\n msg = \"Can only compare identically-labeled Series objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n l == r\n\n with tm.assertRaisesRegexp(ValueError, msg):\n l != r\n\n with tm.assertRaisesRegexp(ValueError, msg):\n l < r\n\n msg = \"Can only compare identically-labeled DataFrame objects\"\n with tm.assertRaisesRegexp(ValueError, msg):\n l.to_frame() == r.to_frame()\n\n with tm.assertRaisesRegexp(ValueError, msg):\n l.to_frame() != r.to_frame()\n\n with tm.assertRaisesRegexp(ValueError, msg):\n l.to_frame() < r.to_frame()\n\n def test_bool_ops_df_compat(self):\n # GH 1134\n s1 = pd.Series([True, False, True], index=list('ABC'), name='x')\n s2 = pd.Series([True, True, False], 
index=list('ABD'), name='x')\n\n exp = pd.Series([True, False, False, False],\n index=list('ABCD'), name='x')\n assert_series_equal(s1 & s2, exp)\n assert_series_equal(s2 & s1, exp)\n\n # True | np.nan => True\n exp = pd.Series([True, True, True, False],\n index=list('ABCD'), name='x')\n assert_series_equal(s1 | s2, exp)\n # np.nan | True => np.nan, filled with False\n exp = pd.Series([True, True, False, False],\n index=list('ABCD'), name='x')\n assert_series_equal(s2 | s1, exp)\n\n # DataFrame doesn't fill nan with False\n exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},\n index=list('ABCD'))\n assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)\n assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)\n\n exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},\n index=list('ABCD'))\n assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)\n assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)\n\n # different length\n s3 = pd.Series([True, False, True], index=list('ABC'), name='x')\n s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')\n\n exp = pd.Series([True, False, True, False],\n index=list('ABCD'), name='x')\n assert_series_equal(s3 & s4, exp)\n assert_series_equal(s4 & s3, exp)\n\n # np.nan | True => np.nan, filled with False\n exp = pd.Series([True, True, True, False],\n index=list('ABCD'), name='x')\n assert_series_equal(s3 | s4, exp)\n # True | np.nan => True\n exp = pd.Series([True, True, True, True],\n index=list('ABCD'), name='x')\n assert_series_equal(s4 | s3, exp)\n\n exp = pd.DataFrame({'x': [True, False, True, np.nan]},\n index=list('ABCD'))\n assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)\n assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)\n\n exp = pd.DataFrame({'x': [True, True, True, np.nan]},\n index=list('ABCD'))\n assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)\n assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)\n\n def test_series_frame_radd_bug(self):\n # GH 353\n vals = Series(tm.rands_array(5, 10))\n result = 'foo_' + vals\n expected = vals.map(lambda x: 'foo_' + x)\n assert_series_equal(result, expected)\n\n frame = DataFrame({'vals': vals})\n result = 'foo_' + frame\n expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})\n assert_frame_equal(result, expected)\n\n # really raise this time\n with tm.assertRaises(TypeError):\n datetime.now() + self.ts\n\n with tm.assertRaises(TypeError):\n self.ts + datetime.now()\n\n def test_series_radd_more(self):\n data = [[1, 2, 3],\n [1.1, 2.2, 3.3],\n [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),\n pd.NaT],\n ['x', 'y', 1]]\n\n for d in data:\n for dtype in [None, object]:\n s = Series(d, dtype=dtype)\n with tm.assertRaises(TypeError):\n 'foo_' + s\n\n for dtype in [None, object]:\n res = 1 + pd.Series([1, 2, 3], dtype=dtype)\n exp = pd.Series([2, 3, 4], dtype=dtype)\n assert_series_equal(res, exp)\n res = pd.Series([1, 2, 3], dtype=dtype) + 1\n assert_series_equal(res, exp)\n\n res = np.nan + pd.Series([1, 2, 3], dtype=dtype)\n exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)\n assert_series_equal(res, exp)\n res = pd.Series([1, 2, 3], dtype=dtype) + np.nan\n assert_series_equal(res, exp)\n\n s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),\n pd.Timedelta('3 days')], dtype=dtype)\n exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),\n pd.Timedelta('6 days')])\n assert_series_equal(pd.Timedelta('3 days') + s, exp)\n assert_series_equal(s + pd.Timedelta('3 days'), exp)\n\n s = pd.Series(['x', np.nan, 'x'])\n 
assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))\n assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))\n\n def test_frame_radd_more(self):\n data = [[1, 2, 3],\n [1.1, 2.2, 3.3],\n [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),\n pd.NaT],\n ['x', 'y', 1]]\n\n for d in data:\n for dtype in [None, object]:\n s = DataFrame(d, dtype=dtype)\n with tm.assertRaises(TypeError):\n 'foo_' + s\n\n for dtype in [None, object]:\n res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)\n exp = pd.DataFrame([2, 3, 4], dtype=dtype)\n assert_frame_equal(res, exp)\n res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1\n assert_frame_equal(res, exp)\n\n res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)\n exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)\n assert_frame_equal(res, exp)\n res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan\n assert_frame_equal(res, exp)\n\n df = pd.DataFrame(['x', np.nan, 'x'])\n assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))\n assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))\n\n def test_operators_frame(self):\n # rpow does not work with DataFrame\n df = DataFrame({'A': self.ts})\n\n assert_series_equal(self.ts + self.ts, self.ts + df['A'],\n check_names=False)\n assert_series_equal(self.ts ** self.ts, self.ts ** df['A'],\n check_names=False)\n assert_series_equal(self.ts < self.ts, self.ts < df['A'],\n check_names=False)\n assert_series_equal(self.ts / self.ts, self.ts / df['A'],\n check_names=False)\n\n def test_operators_combine(self):\n def _check_fill(meth, op, a, b, fill_value=0):\n exp_index = a.index.union(b.index)\n a = a.reindex(exp_index)\n b = b.reindex(exp_index)\n\n amask = isnull(a)\n bmask = isnull(b)\n\n exp_values = []\n for i in range(len(exp_index)):\n with np.errstate(all='ignore'):\n if amask[i]:\n if bmask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(fill_value, b[i]))\n elif bmask[i]:\n if amask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(a[i], fill_value))\n else:\n exp_values.append(op(a[i], b[i]))\n\n result = meth(a, b, fill_value=fill_value)\n expected = Series(exp_values, exp_index)\n assert_series_equal(result, expected)\n\n a = Series([nan, 1., 2., 3., nan], index=np.arange(5))\n b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))\n\n pairings = []\n for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:\n fv = 0\n lop = getattr(Series, op)\n lequiv = getattr(operator, op)\n rop = getattr(Series, 'r' + op)\n # bind op at definition time...\n requiv = lambda x, y, op=op: getattr(operator, op)(y, x)\n pairings.append((lop, lequiv, fv))\n pairings.append((rop, requiv, fv))\n\n if compat.PY3:\n pairings.append((Series.div, operator.truediv, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x),\n 1))\n else:\n pairings.append((Series.div, operator.div, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))\n\n for op, equiv_op, fv in pairings:\n result = op(a, b)\n exp = equiv_op(a, b)\n assert_series_equal(result, exp)\n _check_fill(op, equiv_op, a, b, fill_value=fv)\n # should accept axis=0 or axis='rows'\n op(a, b, axis=0)\n\n def test_ne(self):\n ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)\n expected = [True, True, False, True, True]\n self.assertTrue(tm.equalContents(ts.index != 5, expected))\n self.assertTrue(tm.equalContents(~(ts.index == 5), expected))\n\n def test_operators_na_handling(self):\n from decimal import Decimal\n from datetime import date\n s = Series([Decimal('1.3'), 
Decimal('2.3')],\n index=[date(2012, 1, 1), date(2012, 1, 2)])\n\n result = s + s.shift(1)\n result2 = s.shift(1) + s\n self.assertTrue(isnull(result[0]))\n self.assertTrue(isnull(result2[0]))\n\n s = Series(['foo', 'bar', 'baz', np.nan])\n result = 'prefix_' + s\n expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])\n assert_series_equal(result, expected)\n\n result = s + '_suffix'\n expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])\n assert_series_equal(result, expected)\n\n def test_divide_decimal(self):\n \"\"\" resolves issue #9787 \"\"\"\n from decimal import Decimal\n\n expected = Series([Decimal(5)])\n\n s = Series([Decimal(10)])\n s = s / Decimal(2)\n\n assert_series_equal(expected, s)\n\n s = Series([Decimal(10)])\n s = s // Decimal(2)\n\n assert_series_equal(expected, s)\n\n def test_datetime64_with_index(self):\n\n # arithmetic integer ops with an index\n s = Series(np.random.randn(5))\n expected = s - s.index.to_series()\n result = s - s.index\n assert_series_equal(result, expected)\n\n # GH 4629\n # arithmetic datetime64 ops with an index\n s = Series(date_range('20130101', periods=5),\n index=date_range('20130101', periods=5))\n expected = s - s.index.to_series()\n result = s - s.index\n assert_series_equal(result, expected)\n\n result = s - s.index.to_period()\n assert_series_equal(result, expected)\n\n df = DataFrame(np.random.randn(5, 2),\n index=date_range('20130101', periods=5))\n df['date'] = Timestamp('20130102')\n df['expected'] = df['date'] - df.index.to_series()\n df['result'] = df['date'] - df.index\n assert_series_equal(df['result'], df['expected'], check_names=False)\n\n def test_dti_tz_convert_to_utc(self):\n base = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],\n tz='UTC')\n idx1 = base.tz_convert('Asia/Tokyo')[:2]\n idx2 = base.tz_convert('US/Eastern')[1:]\n\n res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)\n assert_series_equal(res, Series([np.nan, 3, np.nan], index=base))\n\n def test_op_duplicate_index(self):\n # GH14227\n s1 = Series([1, 2], index=[1, 1])\n s2 = Series([10, 10], index=[1, 2])\n result = s1 + s2\n expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])\n assert_series_equal(result, expected)\n",
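# A minimal illustrative sketch (not part of the test suite above): it assumes
# pandas and numpy are installed and shows two behaviors the preceding Series
# operator tests exercise - NaT propagation through timedelta arithmetic and
# index-aligned flex comparisons with a fill_value.
import numpy as np
import pandas as pd

# NaT propagates through timedelta arithmetic and scalar multiplication.
td = pd.Series([pd.NaT, pd.Timedelta('1s')])
print(td * 1.5)                      # [NaT, 0 days 00:00:01.500000]
print(td + pd.Timedelta('1 days'))   # [NaT, 1 days 00:00:01]

# Flex comparisons align on the union of the indexes; fill_value stands in for
# labels missing on either side, mirroring test_comparison_flex_alignment_fill.
left = pd.Series([1, 3, 2], index=list('abc'))
right = pd.Series([2, 2, 2], index=list('bcd'))
print(left.eq(right, fill_value=2))  # a False, b False, c True, d True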
"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n# pylint: disable-msg=W0612,E1101\nfrom copy import deepcopy\nimport sys\nimport nose\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import range, lrange\nfrom pandas import compat\n\nfrom numpy.random import randn\nimport numpy as np\n\nfrom pandas import DataFrame, Series\nimport pandas as pd\n\nfrom pandas.util.testing import (assert_almost_equal,\n assert_series_equal,\n assert_frame_equal,\n assertRaisesRegexp)\n\nimport pandas.util.testing as tm\n\nfrom pandas.tests.frame.common import TestData\n\n\nclass SharedWithSparse(object):\n\n _multiprocess_can_split_ = True\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n for attr in ('index', 'columns'):\n ind = getattr(self.frame, attr)\n ind.name = None\n cp = self.frame.copy()\n getattr(cp, attr).name = 'foo'\n self.assertIsNone(getattr(self.frame, attr).name)\n\n def test_getitem_pop_assign_name(self):\n s = self.frame['A']\n self.assertEqual(s.name, 'A')\n\n s = self.frame.pop('A')\n self.assertEqual(s.name, 'A')\n\n s = self.frame.ix[:, 'B']\n self.assertEqual(s.name, 'B')\n\n s2 = s.ix[:]\n self.assertEqual(s2.name, 'B')\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n tm.assert_almost_equal(result, expected)\n\n def test_join_index(self):\n # left / right\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2)\n self.assert_index_equal(f.index, joined.index)\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='left')\n self.assert_index_equal(joined.index, f.index)\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='right')\n self.assert_index_equal(joined.index, f2.index)\n self.assertEqual(len(joined.columns), 4)\n\n # inner\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='inner')\n self.assert_index_equal(joined.index, f.index.intersection(f2.index))\n self.assertEqual(len(joined.columns), 4)\n\n # outer\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='outer')\n self.assertTrue(tm.equalContents(self.frame.index, joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')\n\n # corner case - overlapping columns\n for how in ('outer', 'left', 'inner'):\n with assertRaisesRegexp(ValueError, 'columns overlap but '\n 'no suffix'):\n self.frame.join(self.frame, how=how)\n\n def test_join_index_more(self):\n af = self.frame.ix[:, ['A', 'B']]\n bf = self.frame.ix[::2, ['C', 'D']]\n\n expected = af.copy()\n expected['C'] = self.frame['C'][::2]\n expected['D'] = self.frame['D'][::2]\n\n result = af.join(bf)\n assert_frame_equal(result, expected)\n\n result = af.join(bf, how='right')\n assert_frame_equal(result, expected[::2])\n\n result = bf.join(af, how='right')\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_join_index_series(self):\n df = self.frame.copy()\n s = df.pop(self.frame.columns[-1])\n joined = df.join(s)\n\n # TODO should this check_names ?\n assert_frame_equal(joined, self.frame, check_names=False)\n\n s.name = None\n assertRaisesRegexp(ValueError, 'must have a name', df.join, s)\n\n 
def test_join_overlap(self):\n df1 = self.frame.ix[:, ['A', 'B', 'C']]\n df2 = self.frame.ix[:, ['B', 'C', 'D']]\n\n joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')\n df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')\n df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')\n\n no_overlap = self.frame.ix[:, ['A', 'D']]\n expected = df1_suf.join(df2_suf).join(no_overlap)\n\n # column order not necessarily sorted\n assert_frame_equal(joined, expected.ix[:, joined.columns])\n\n def test_add_prefix_suffix(self):\n with_prefix = self.frame.add_prefix('foo#')\n expected = pd.Index(['foo#%s' % c for c in self.frame.columns])\n self.assert_index_equal(with_prefix.columns, expected)\n\n with_suffix = self.frame.add_suffix('#foo')\n expected = pd.Index(['%s#foo' % c for c in self.frame.columns])\n self.assert_index_equal(with_suffix.columns, expected)\n\n\nclass TestDataFrameMisc(tm.TestCase, SharedWithSparse, TestData):\n\n klass = DataFrame\n\n _multiprocess_can_split_ = True\n\n def test_get_axis(self):\n f = self.frame\n self.assertEqual(f._get_axis_number(0), 0)\n self.assertEqual(f._get_axis_number(1), 1)\n self.assertEqual(f._get_axis_number('index'), 0)\n self.assertEqual(f._get_axis_number('rows'), 0)\n self.assertEqual(f._get_axis_number('columns'), 1)\n\n self.assertEqual(f._get_axis_name(0), 'index')\n self.assertEqual(f._get_axis_name(1), 'columns')\n self.assertEqual(f._get_axis_name('index'), 'index')\n self.assertEqual(f._get_axis_name('rows'), 'index')\n self.assertEqual(f._get_axis_name('columns'), 'columns')\n\n self.assertIs(f._get_axis(0), f.index)\n self.assertIs(f._get_axis(1), f.columns)\n\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)\n assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')\n assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number,\n None)\n\n def test_keys(self):\n getkeys = self.frame.keys\n self.assertIs(getkeys(), self.frame.columns)\n\n def test_column_contains_typeerror(self):\n try:\n self.frame.columns in self.frame\n except TypeError:\n pass\n\n def test_not_hashable(self):\n df = pd.DataFrame([1])\n self.assertRaises(TypeError, hash, df)\n self.assertRaises(TypeError, hash, self.empty)\n\n def test_new_empty_index(self):\n df1 = DataFrame(randn(0, 3))\n df2 = DataFrame(randn(0, 3))\n df1.index.name = 'foo'\n self.assertIsNone(df2.index.name)\n\n def test_array_interface(self):\n with np.errstate(all='ignore'):\n result = np.sqrt(self.frame)\n tm.assertIsInstance(result, type(self.frame))\n self.assertIs(result.index, self.frame.index)\n self.assertIs(result.columns, self.frame.columns)\n\n assert_frame_equal(result, self.frame.apply(np.sqrt))\n\n def test_get_agg_axis(self):\n cols = self.frame._get_agg_axis(0)\n self.assertIs(cols, self.frame.columns)\n\n idx = self.frame._get_agg_axis(1)\n self.assertIs(idx, self.frame.index)\n\n self.assertRaises(ValueError, self.frame._get_agg_axis, 2)\n\n def test_nonzero(self):\n self.assertTrue(self.empty.empty)\n\n self.assertFalse(self.frame.empty)\n self.assertFalse(self.mixed_frame.empty)\n\n # corner case\n df = DataFrame({'A': [1., 2., 3.],\n 'B': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['A']\n self.assertFalse(df.empty)\n\n def test_iteritems(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n for k, v in compat.iteritems(df):\n self.assertEqual(type(v), Series)\n\n def test_iter(self):\n self.assertTrue(tm.equalContents(list(self.frame), 
self.frame.columns))\n\n def test_iterrows(self):\n for i, (k, v) in enumerate(self.frame.iterrows()):\n exp = self.frame.xs(self.frame.index[i])\n assert_series_equal(v, exp)\n\n for i, (k, v) in enumerate(self.mixed_frame.iterrows()):\n exp = self.mixed_frame.xs(self.mixed_frame.index[i])\n assert_series_equal(v, exp)\n\n def test_itertuples(self):\n for i, tup in enumerate(self.frame.itertuples()):\n s = Series(tup[1:])\n s.name = tup[0]\n expected = self.frame.ix[i, :].reset_index(drop=True)\n assert_series_equal(s, expected)\n\n df = DataFrame({'floats': np.random.randn(5),\n 'ints': lrange(5)}, columns=['floats', 'ints'])\n\n for tup in df.itertuples(index=False):\n tm.assertIsInstance(tup[1], np.integer)\n\n df = DataFrame(data={\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n dfaa = df[['a', 'a']]\n self.assertEqual(list(dfaa.itertuples()), [\n (0, 1, 1), (1, 2, 2), (2, 3, 3)])\n\n self.assertEqual(repr(list(df.itertuples(name=None))),\n '[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')\n\n tup = next(df.itertuples(name='TestName'))\n\n # no support for field renaming in Python 2.6, regular tuples are\n # returned\n if sys.version >= LooseVersion('2.7'):\n self.assertEqual(tup._fields, ('Index', 'a', 'b'))\n self.assertEqual((tup.Index, tup.a, tup.b), tup)\n self.assertEqual(type(tup).__name__, 'TestName')\n\n df.columns = ['def', 'return']\n tup2 = next(df.itertuples(name='TestName'))\n self.assertEqual(tup2, (0, 1, 4))\n\n if sys.version >= LooseVersion('2.7'):\n self.assertEqual(tup2._fields, ('Index', '_1', '_2'))\n\n df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))\n # will raise SyntaxError if trying to create namedtuple\n tup3 = next(df3.itertuples())\n self.assertFalse(hasattr(tup3, '_fields'))\n self.assertIsInstance(tup3, tuple)\n\n def test_len(self):\n self.assertEqual(len(self.frame), len(self.frame.index))\n\n def test_as_matrix(self):\n frame = self.frame\n mat = frame.as_matrix()\n\n frameCols = frame.columns\n for i, row in enumerate(mat):\n for j, value in enumerate(row):\n col = frameCols[j]\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][i]))\n else:\n self.assertEqual(value, frame[col][i])\n\n # mixed type\n mat = self.mixed_frame.as_matrix(['foo', 'A'])\n self.assertEqual(mat[0, 0], 'bar')\n\n df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})\n mat = df.as_matrix()\n self.assertEqual(mat[0, 0], 1j)\n\n # single block corner case\n mat = self.frame.as_matrix(['A', 'B'])\n expected = self.frame.reindex(columns=['A', 'B']).values\n assert_almost_equal(mat, expected)\n\n def test_values(self):\n self.frame.values[:, 0] = 5.\n self.assertTrue((self.frame.values[:, 0] == 5).all())\n\n def test_deepcopy(self):\n cp = deepcopy(self.frame)\n series = cp['A']\n series[:] = 10\n for idx, value in compat.iteritems(series):\n self.assertNotEqual(self.frame['A'][idx], value)\n\n # ---------------------------------------------------------------------\n # Transposing\n\n def test_transpose(self):\n frame = self.frame\n dft = frame.T\n for idx, series in compat.iteritems(dft):\n for col, value in compat.iteritems(series):\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][idx]))\n else:\n self.assertEqual(value, frame[col][idx])\n\n # mixed type\n index, data = tm.getMixedTypeDict()\n mixed = DataFrame(data, index=index)\n\n mixed_T = mixed.T\n for col, s in compat.iteritems(mixed_T):\n self.assertEqual(s.dtype, np.object_)\n\n def test_transpose_get_view(self):\n dft = self.frame.T\n dft.values[:, 5:10] = 5\n\n self.assertTrue((self.frame.values[5:10] 
== 5).all())\n\n def test_swapaxes(self):\n df = DataFrame(np.random.randn(10, 5))\n assert_frame_equal(df.T, df.swapaxes(0, 1))\n assert_frame_equal(df.T, df.swapaxes(1, 0))\n assert_frame_equal(df, df.swapaxes(0, 0))\n self.assertRaises(ValueError, df.swapaxes, 2, 5)\n\n def test_axis_aliases(self):\n f = self.frame\n\n # reg name\n expected = f.sum(axis=0)\n result = f.sum(axis='index')\n assert_series_equal(result, expected)\n\n expected = f.sum(axis=1)\n result = f.sum(axis='columns')\n assert_series_equal(result, expected)\n\n def test_more_asMatrix(self):\n values = self.mixed_frame.as_matrix()\n self.assertEqual(values.shape[1], len(self.mixed_frame.columns))\n\n def test_repr_with_mi_nat(self):\n df = DataFrame({'X': [1, 2]},\n index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])\n res = repr(df)\n exp = ' X\\nNaT a 1\\n2013-01-01 b 2'\n self.assertEqual(res, exp)\n\n def test_iterkv_deprecation(self):\n with tm.assert_produces_warning(FutureWarning):\n self.mixed_float.iterkv()\n\n def test_iterkv_names(self):\n for k, v in compat.iteritems(self.mixed_frame):\n self.assertEqual(v.name, k)\n\n def test_series_put_names(self):\n series = self.mixed_frame._series\n for k, v in compat.iteritems(series):\n self.assertEqual(v.name, k)\n\n def test_empty_nonzero(self):\n df = DataFrame([1, 2, 3])\n self.assertFalse(df.empty)\n df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()\n self.assertTrue(df.empty)\n self.assertTrue(df.T.empty)\n\n def test_inplace_return_self(self):\n # re #1893\n\n data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],\n 'b': [0, 0, 1, 1],\n 'c': [1, 2, 3, 4]})\n\n def _check_f(base, f):\n result = f(base)\n self.assertTrue(result is None)\n\n # -----DataFrame-----\n\n # set_index\n f = lambda x: x.set_index('a', inplace=True)\n _check_f(data.copy(), f)\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True)\n _check_f(data.set_index('a'), f)\n\n # drop_duplicates\n f = lambda x: x.drop_duplicates(inplace=True)\n _check_f(data.copy(), f)\n\n # sort\n f = lambda x: x.sort_values('b', inplace=True)\n _check_f(data.copy(), f)\n\n # sort_index\n f = lambda x: x.sort_index(inplace=True)\n _check_f(data.copy(), f)\n\n # sortlevel\n f = lambda x: x.sortlevel(0, inplace=True)\n _check_f(data.set_index(['a', 'b']), f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(data.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(data.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(data.copy(), f)\n\n # -----Series-----\n d = data.copy()['c']\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True, drop=True)\n _check_f(data.set_index('a')['c'], f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(d.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(d.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(d.copy(), f)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
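# A minimal illustrative sketch (not part of the test file above): assuming a
# recent pandas, it shows the DataFrame.itertuples behavior that
# test_itertuples checks, including the positional fallback field names used
# when column names are not valid Python identifiers.
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
tup = next(df.itertuples(name='TestName'))
print(tup._fields)               # ('Index', 'a', 'b')
print(tup.Index, tup.a, tup.b)   # 0 1 4

df.columns = ['def', 'return']   # reserved words cannot become field names
tup2 = next(df.itertuples(name='TestName'))
print(tup2._fields)              # ('Index', '_1', '_2')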
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nfrom matplotlib.tri import Triangulation\nimport matplotlib._tri as _tri\nimport numpy as np\n\n\nclass TriFinder(object):\n \"\"\"\n Abstract base class for classes used to find the triangles of a\n Triangulation in which (x,y) points lie.\n\n Rather than instantiate an object of a class derived from TriFinder, it is\n usually better to use the function\n :func:`matplotlib.tri.Triangulation.get_trifinder`.\n\n Derived classes implement __call__(x,y) where x,y are array_like point\n coordinates of the same shape.\n \"\"\"\n def __init__(self, triangulation):\n if not isinstance(triangulation, Triangulation):\n raise ValueError('Expected a Triangulation object')\n self._triangulation = triangulation\n\n\nclass TrapezoidMapTriFinder(TriFinder):\n \"\"\"\n :class:`~matplotlib.tri.TriFinder` class implemented using the trapezoid\n map algorithm from the book \"Computational Geometry, Algorithms and\n Applications\", second edition, by M. de Berg, M. van Kreveld, M. Overmars\n and O. Schwarzkopf.\n\n The triangulation must be valid, i.e. it must not have duplicate points,\n triangles formed from colinear points, or overlapping triangles. The\n algorithm has some tolerance to triangles formed from colinear points, but\n this should not be relied upon.\n \"\"\"\n def __init__(self, triangulation):\n TriFinder.__init__(self, triangulation)\n self._cpp_trifinder = _tri.TrapezoidMapTriFinder(\n triangulation.get_cpp_triangulation())\n self._initialize()\n\n def __call__(self, x, y):\n \"\"\"\n Return an array containing the indices of the triangles in which the\n specified x,y points lie, or -1 for points that do not lie within a\n triangle.\n\n *x*, *y* are array_like x and y coordinates of the same shape and any\n number of dimensions.\n\n Returns integer array with the same shape and *x* and *y*.\n \"\"\"\n x = np.asarray(x, dtype=np.float64)\n y = np.asarray(y, dtype=np.float64)\n if x.shape != y.shape:\n raise ValueError(\"x and y must be array-like with the same shape\")\n\n # C++ does the heavy lifting, and expects 1D arrays.\n indices = (self._cpp_trifinder.find_many(x.ravel(), y.ravel())\n .reshape(x.shape))\n return indices\n\n def _get_tree_stats(self):\n \"\"\"\n Return a python list containing the statistics about the node tree:\n 0: number of nodes (tree size)\n 1: number of unique nodes\n 2: number of trapezoids (tree leaf nodes)\n 3: number of unique trapezoids\n 4: maximum parent count (max number of times a node is repeated in\n tree)\n 5: maximum depth of tree (one more than the maximum number of\n comparisons needed to search through the tree)\n 6: mean of all trapezoid depths (one more than the average number\n of comparisons needed to search through the tree)\n \"\"\"\n return self._cpp_trifinder.get_tree_stats()\n\n def _initialize(self):\n \"\"\"\n Initialize the underlying C++ object. Can be called multiple times if,\n for example, the triangulation is modified.\n \"\"\"\n self._cpp_trifinder.initialize()\n\n def _print_tree(self):\n \"\"\"\n Print a text representation of the node tree, which is useful for\n debugging purposes.\n \"\"\"\n self._cpp_trifinder.print_tree()\n",
"\"\"\"Tests for hermite module.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nimport numpy.polynomial.hermite as herm\nfrom numpy.polynomial.polynomial import polyval\nfrom numpy.testing import (\n assert_almost_equal, assert_raises, assert_equal, assert_,\n run_module_suite\n )\n\nH0 = np.array([1])\nH1 = np.array([0, 2])\nH2 = np.array([-2, 0, 4])\nH3 = np.array([0, -12, 0, 8])\nH4 = np.array([12, 0, -48, 0, 16])\nH5 = np.array([0, 120, 0, -160, 0, 32])\nH6 = np.array([-120, 0, 720, 0, -480, 0, 64])\nH7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])\nH8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])\nH9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])\n\nHlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]\n\n\ndef trim(x):\n return herm.hermtrim(x, tol=1e-6)\n\n\nclass TestConstants(object):\n\n def test_hermdomain(self):\n assert_equal(herm.hermdomain, [-1, 1])\n\n def test_hermzero(self):\n assert_equal(herm.hermzero, [0])\n\n def test_hermone(self):\n assert_equal(herm.hermone, [1])\n\n def test_hermx(self):\n assert_equal(herm.hermx, [0, .5])\n\n\nclass TestArithmetic(object):\n x = np.linspace(-3, 3, 100)\n\n def test_hermadd(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(max(i, j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = herm.hermadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_hermsub(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(max(i, j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = herm.hermsub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_hermmulx(self):\n assert_equal(herm.hermmulx([0]), [0])\n assert_equal(herm.hermmulx([1]), [0, .5])\n for i in range(1, 5):\n ser = [0]*i + [1]\n tgt = [0]*(i - 1) + [i, 0, .5]\n assert_equal(herm.hermmulx(ser), tgt)\n\n def test_hermmul(self):\n # check values of result\n for i in range(5):\n pol1 = [0]*i + [1]\n val1 = herm.hermval(self.x, pol1)\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n pol2 = [0]*j + [1]\n val2 = herm.hermval(self.x, pol2)\n pol3 = herm.hermmul(pol1, pol2)\n val3 = herm.hermval(self.x, pol3)\n assert_(len(pol3) == i + j + 1, msg)\n assert_almost_equal(val3, val1*val2, err_msg=msg)\n\n def test_hermdiv(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n ci = [0]*i + [1]\n cj = [0]*j + [1]\n tgt = herm.hermadd(ci, cj)\n quo, rem = herm.hermdiv(tgt, ci)\n res = herm.hermadd(herm.hermmul(quo, ci), rem)\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n\nclass TestEvaluation(object):\n # coefficients of 1 + 2*x + 3*x**2\n c1d = np.array([2.5, 1., .75])\n c2d = np.einsum('i,j->ij', c1d, c1d)\n c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)\n\n # some random values in [-1, 1)\n x = np.random.random((3, 5))*2 - 1\n y = polyval(x, [1., 2., 3.])\n\n def test_hermval(self):\n #check empty input\n assert_equal(herm.hermval([], [1]).size, 0)\n\n #check normal input)\n x = np.linspace(-1, 1)\n y = [polyval(x, c) for c in Hlist]\n for i in range(10):\n msg = \"At i=%d\" % i\n tgt = y[i]\n res = herm.hermval(x, [0]*i + [1])\n assert_almost_equal(res, tgt, err_msg=msg)\n\n #check that shape is preserved\n for i in range(3):\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(herm.hermval(x, [1]).shape, dims)\n assert_equal(herm.hermval(x, [1, 0]).shape, dims)\n assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)\n\n def 
test_hermval2d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test exceptions\n assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)\n\n #test values\n tgt = y1*y2\n res = herm.hermval2d(x1, x2, self.c2d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = herm.hermval2d(z, z, self.c2d)\n assert_(res.shape == (2, 3))\n\n def test_hermval3d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test exceptions\n assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)\n\n #test values\n tgt = y1*y2*y3\n res = herm.hermval3d(x1, x2, x3, self.c3d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = herm.hermval3d(z, z, z, self.c3d)\n assert_(res.shape == (2, 3))\n\n def test_hermgrid2d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test values\n tgt = np.einsum('i,j->ij', y1, y2)\n res = herm.hermgrid2d(x1, x2, self.c2d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = herm.hermgrid2d(z, z, self.c2d)\n assert_(res.shape == (2, 3)*2)\n\n def test_hermgrid3d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test values\n tgt = np.einsum('i,j,k->ijk', y1, y2, y3)\n res = herm.hermgrid3d(x1, x2, x3, self.c3d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = herm.hermgrid3d(z, z, z, self.c3d)\n assert_(res.shape == (2, 3)*3)\n\n\nclass TestIntegral(object):\n\n def test_hermint(self):\n # check exceptions\n assert_raises(ValueError, herm.hermint, [0], .5)\n assert_raises(ValueError, herm.hermint, [0], -1)\n assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])\n assert_raises(ValueError, herm.hermint, [0], lbnd=[0])\n assert_raises(ValueError, herm.hermint, [0], scl=[0])\n assert_raises(ValueError, herm.hermint, [0], axis=.5)\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = herm.hermint([0], m=i, k=k)\n assert_almost_equal(res, [0, .5])\n\n # check single integration with integration constant\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n hermpol = herm.poly2herm(pol)\n hermint = herm.hermint(hermpol, m=1, k=[i])\n res = herm.herm2poly(hermint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n hermpol = herm.poly2herm(pol)\n hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(herm.hermval(-1, hermint), i)\n\n # check single integration with integration constant and scaling\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n hermpol = herm.poly2herm(pol)\n hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)\n res = herm.herm2poly(hermint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = herm.hermint(tgt, m=1)\n res = herm.hermint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = herm.hermint(tgt, m=1, k=[k])\n res = herm.hermint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = herm.hermint(tgt, m=1, 
k=[k], lbnd=-1)\n res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = herm.hermint(tgt, m=1, k=[k], scl=2)\n res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_hermint_axis(self):\n # check that axis keyword works\n c2d = np.random.random((3, 4))\n\n tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T\n res = herm.hermint(c2d, axis=0)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([herm.hermint(c) for c in c2d])\n res = herm.hermint(c2d, axis=1)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])\n res = herm.hermint(c2d, k=3, axis=1)\n assert_almost_equal(res, tgt)\n\n\nclass TestDerivative(object):\n\n def test_hermder(self):\n # check exceptions\n assert_raises(ValueError, herm.hermder, [0], .5)\n assert_raises(ValueError, herm.hermder, [0], -1)\n\n # check that zeroth derivative does nothing\n for i in range(5):\n tgt = [0]*i + [1]\n res = herm.hermder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5):\n for j in range(2, 5):\n tgt = [0]*i + [1]\n res = herm.hermder(herm.hermint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5):\n for j in range(2, 5):\n tgt = [0]*i + [1]\n res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_hermder_axis(self):\n # check that axis keyword works\n c2d = np.random.random((3, 4))\n\n tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T\n res = herm.hermder(c2d, axis=0)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([herm.hermder(c) for c in c2d])\n res = herm.hermder(c2d, axis=1)\n assert_almost_equal(res, tgt)\n\n\nclass TestVander(object):\n # some random values in [-1, 1)\n x = np.random.random((3, 5))*2 - 1\n\n def test_hermvander(self):\n # check for 1d x\n x = np.arange(3)\n v = herm.hermvander(x, 3)\n assert_(v.shape == (3, 4))\n for i in range(4):\n coef = [0]*i + [1]\n assert_almost_equal(v[..., i], herm.hermval(x, coef))\n\n # check for 2d x\n x = np.array([[1, 2], [3, 4], [5, 6]])\n v = herm.hermvander(x, 3)\n assert_(v.shape == (3, 2, 4))\n for i in range(4):\n coef = [0]*i + [1]\n assert_almost_equal(v[..., i], herm.hermval(x, coef))\n\n def test_hermvander2d(self):\n # also tests hermval2d for non-square coefficient array\n x1, x2, x3 = self.x\n c = np.random.random((2, 3))\n van = herm.hermvander2d(x1, x2, [1, 2])\n tgt = herm.hermval2d(x1, x2, c)\n res = np.dot(van, c.flat)\n assert_almost_equal(res, tgt)\n\n # check shape\n van = herm.hermvander2d([x1], [x2], [1, 2])\n assert_(van.shape == (1, 5, 6))\n\n def test_hermvander3d(self):\n # also tests hermval3d for non-square coefficient array\n x1, x2, x3 = self.x\n c = np.random.random((2, 3, 4))\n van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])\n tgt = herm.hermval3d(x1, x2, x3, c)\n res = np.dot(van, c.flat)\n assert_almost_equal(res, tgt)\n\n # check shape\n van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])\n assert_(van.shape == (1, 5, 24))\n\n\nclass TestFitting(object):\n\n def test_hermfit(self):\n def f(x):\n return x*(x - 1)*(x - 2)\n\n def f2(x):\n return x**4 + x**2 + 1\n\n # Test exceptions\n assert_raises(ValueError, herm.hermfit, [1], [1], -1)\n 
assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)\n assert_raises(TypeError, herm.hermfit, [], [1], 0)\n assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)\n assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)\n assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)\n assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])\n assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])\n assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])\n assert_raises(TypeError, herm.hermfit, [1], [1], [])\n\n # Test fit\n x = np.linspace(0, 2)\n y = f(x)\n #\n coef3 = herm.hermfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(herm.hermval(x, coef3), y)\n coef3 = herm.hermfit(x, y, [0, 1, 2, 3])\n assert_equal(len(coef3), 4)\n assert_almost_equal(herm.hermval(x, coef3), y)\n #\n coef4 = herm.hermfit(x, y, 4)\n assert_equal(len(coef4), 5)\n assert_almost_equal(herm.hermval(x, coef4), y)\n coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])\n assert_equal(len(coef4), 5)\n assert_almost_equal(herm.hermval(x, coef4), y)\n # check things still work if deg is not in strict increasing\n coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])\n assert_equal(len(coef4), 5)\n assert_almost_equal(herm.hermval(x, coef4), y)\n #\n coef2d = herm.hermfit(x, np.array([y, y]).T, 3)\n assert_almost_equal(coef2d, np.array([coef3, coef3]).T)\n coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])\n assert_almost_equal(coef2d, np.array([coef3, coef3]).T)\n # test weighting\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n y[0::2] = 0\n wcoef3 = herm.hermfit(x, yw, 3, w=w)\n assert_almost_equal(wcoef3, coef3)\n wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)\n assert_almost_equal(wcoef3, coef3)\n #\n wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)\n assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)\n wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)\n assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)\n # test scaling with complex values x points whose square\n # is zero when summed.\n x = [1, 1j, -1, -1j]\n assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])\n assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])\n # test fitting only even Legendre polynomials\n x = np.linspace(-1, 1)\n y = f2(x)\n coef1 = herm.hermfit(x, y, 4)\n assert_almost_equal(herm.hermval(x, coef1), y)\n coef2 = herm.hermfit(x, y, [0, 2, 4])\n assert_almost_equal(herm.hermval(x, coef2), y)\n assert_almost_equal(coef1, coef2)\n\n\nclass TestCompanion(object):\n\n def test_raises(self):\n assert_raises(ValueError, herm.hermcompanion, [])\n assert_raises(ValueError, herm.hermcompanion, [1])\n\n def test_dimensions(self):\n for i in range(1, 5):\n coef = [0]*i + [1]\n assert_(herm.hermcompanion(coef).shape == (i, i))\n\n def test_linear_root(self):\n assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)\n\n\nclass TestGauss(object):\n\n def test_100(self):\n x, w = herm.hermgauss(100)\n\n # test orthogonality. 
Note that the results need to be normalized,\n # otherwise the huge values that can arise from fast growing\n # functions like Laguerre can be very confusing.\n v = herm.hermvander(x, 99)\n vv = np.dot(v.T * w, v)\n vd = 1/np.sqrt(vv.diagonal())\n vv = vd[:, None] * vv * vd\n assert_almost_equal(vv, np.eye(100))\n\n # check that the integral of 1 is correct\n tgt = np.sqrt(np.pi)\n assert_almost_equal(w.sum(), tgt)\n\n\nclass TestMisc(object):\n\n def test_hermfromroots(self):\n res = herm.hermfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1, 5):\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n pol = herm.hermfromroots(roots)\n res = herm.hermval(roots, pol)\n tgt = 0\n assert_(len(pol) == i + 1)\n assert_almost_equal(herm.herm2poly(pol)[-1], 1)\n assert_almost_equal(res, tgt)\n\n def test_hermroots(self):\n assert_almost_equal(herm.hermroots([1]), [])\n assert_almost_equal(herm.hermroots([1, 1]), [-.5])\n for i in range(2, 5):\n tgt = np.linspace(-1, 1, i)\n res = herm.hermroots(herm.hermfromroots(tgt))\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_hermtrim(self):\n coef = [2, -1, 1, 0]\n\n # Test exceptions\n assert_raises(ValueError, herm.hermtrim, coef, -1)\n\n # Test results\n assert_equal(herm.hermtrim(coef), coef[:-1])\n assert_equal(herm.hermtrim(coef, 1), coef[:-3])\n assert_equal(herm.hermtrim(coef, 2), [0])\n\n def test_hermline(self):\n assert_equal(herm.hermline(3, 4), [3, 2])\n\n def test_herm2poly(self):\n for i in range(10):\n assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])\n\n def test_poly2herm(self):\n for i in range(10):\n assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])\n\n def test_weight(self):\n x = np.linspace(-5, 5, 11)\n tgt = np.exp(-x**2)\n res = herm.hermweight(x)\n assert_almost_equal(res, tgt)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
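# A minimal illustrative sketch (not part of the test file above): assuming
# numpy is installed, it shows the hermfit/hermval round trip that TestFitting
# checks, fitting an exact cubic in the Hermite basis.
import numpy as np
import numpy.polynomial.hermite as herm

x = np.linspace(0, 2, 50)
y = x * (x - 1) * (x - 2)                      # an exact cubic, so a degree-3 fit is exact
coef = herm.hermfit(x, y, 3)                   # coefficients in the Hermite basis
print(len(coef))                               # 4
print(np.allclose(herm.hermval(x, coef), y))   # True (up to rounding)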
"# -*- coding: utf-8 -*-\n\nfrom numpy import nan\nimport numpy as np\n\nfrom pandas.types.common import _ensure_int64\nfrom pandas import Index, isnull\nfrom pandas.util.testing import assert_almost_equal\nimport pandas.util.testing as tm\nimport pandas.lib as lib\nimport pandas.algos as algos\n\n\ndef test_series_grouper():\n from pandas import Series\n obj = Series(np.random.randn(10))\n dummy = obj[:0]\n\n labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)\n\n grouper = lib.SeriesGrouper(obj, np.mean, labels, 2, dummy)\n result, counts = grouper.get_result()\n\n expected = np.array([obj[3:6].mean(), obj[6:].mean()])\n assert_almost_equal(result, expected)\n\n exp_counts = np.array([3, 4], dtype=np.int64)\n assert_almost_equal(counts, exp_counts)\n\n\ndef test_series_bin_grouper():\n from pandas import Series\n obj = Series(np.random.randn(10))\n dummy = obj[:0]\n\n bins = np.array([3, 6])\n\n grouper = lib.SeriesBinGrouper(obj, np.mean, bins, dummy)\n result, counts = grouper.get_result()\n\n expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])\n assert_almost_equal(result, expected)\n\n exp_counts = np.array([3, 3, 4], dtype=np.int64)\n assert_almost_equal(counts, exp_counts)\n\n\nclass TestBinGroupers(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.obj = np.random.randn(10, 1)\n self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)\n self.bins = np.array([3, 6], dtype=np.int64)\n\n def test_generate_bins(self):\n from pandas.core.groupby import generate_bins_generic\n values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)\n binner = np.array([0, 3, 6, 9], dtype=np.int64)\n\n for func in [lib.generate_bins_dt64, generate_bins_generic]:\n bins = func(values, binner, closed='left')\n assert ((bins == np.array([2, 5, 6])).all())\n\n bins = func(values, binner, closed='right')\n assert ((bins == np.array([3, 6, 6])).all())\n\n for func in [lib.generate_bins_dt64, generate_bins_generic]:\n values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)\n binner = np.array([0, 3, 6], dtype=np.int64)\n\n bins = func(values, binner, closed='right')\n assert ((bins == np.array([3, 6])).all())\n\n self.assertRaises(ValueError, generate_bins_generic, values, [],\n 'right')\n self.assertRaises(ValueError, generate_bins_generic, values[:0],\n binner, 'right')\n\n self.assertRaises(ValueError, generate_bins_generic, values, [4],\n 'right')\n self.assertRaises(ValueError, generate_bins_generic, values, [-3, -1],\n 'right')\n\n\ndef test_group_ohlc():\n def _check(dtype):\n obj = np.array(np.random.randn(20), dtype=dtype)\n\n bins = np.array([6, 12, 20])\n out = np.zeros((3, 4), dtype)\n counts = np.zeros(len(out), dtype=np.int64)\n labels = _ensure_int64(np.repeat(np.arange(3),\n np.diff(np.r_[0, bins])))\n\n func = getattr(algos, 'group_ohlc_%s' % dtype)\n func(out, counts, obj[:, None], labels)\n\n def _ohlc(group):\n if isnull(group).all():\n return np.repeat(nan, 4)\n return [group[0], group.max(), group.min(), group[-1]]\n\n expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]),\n _ohlc(obj[12:])])\n\n assert_almost_equal(out, expected)\n tm.assert_numpy_array_equal(counts,\n np.array([6, 6, 8], dtype=np.int64))\n\n obj[:6] = nan\n func(out, counts, obj[:, None], labels)\n expected[0] = nan\n assert_almost_equal(out, expected)\n\n _check('float32')\n _check('float64')\n\n\nclass TestMoments(tm.TestCase):\n pass\n\n\nclass TestReducer(tm.TestCase):\n def test_int_index(self):\n from pandas.core.series import Series\n\n arr = 
np.random.randn(100, 4)\n result = lib.reduce(arr, np.sum, labels=Index(np.arange(4)))\n expected = arr.sum(0)\n assert_almost_equal(result, expected)\n\n result = lib.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100)))\n expected = arr.sum(1)\n assert_almost_equal(result, expected)\n\n dummy = Series(0., index=np.arange(100))\n result = lib.reduce(arr, np.sum, dummy=dummy,\n labels=Index(np.arange(4)))\n expected = arr.sum(0)\n assert_almost_equal(result, expected)\n\n dummy = Series(0., index=np.arange(4))\n result = lib.reduce(arr, np.sum, axis=1, dummy=dummy,\n labels=Index(np.arange(100)))\n expected = arr.sum(1)\n assert_almost_equal(result, expected)\n\n result = lib.reduce(arr, np.sum, axis=1, dummy=dummy,\n labels=Index(np.arange(100)))\n assert_almost_equal(result, expected)\n",
"# pylint: disable-msg=W0611, W0612, W0511,R0201\n\"\"\"Tests suite for mrecords.\n\n:author: Pierre Gerard-Marchant\n:contact: pierregm_at_uga_dot_edu\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport warnings\nimport pickle\n\nimport numpy as np\nimport numpy.ma as ma\nfrom numpy import recarray\nfrom numpy.ma import masked, nomask\nfrom numpy.testing import run_module_suite, temppath\nfrom numpy.core.records import (\n fromrecords as recfromrecords, fromarrays as recfromarrays\n )\nfrom numpy.ma.mrecords import (\n MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords,\n addfield\n )\nfrom numpy.ma.testutils import (\n assert_, assert_equal,\n assert_equal_records,\n )\n\n\nclass TestMRecords(object):\n\n ilist = [1, 2, 3, 4, 5]\n flist = [1.1, 2.2, 3.3, 4.4, 5.5]\n slist = [b'one', b'two', b'three', b'four', b'five']\n ddtype = [('a', int), ('b', float), ('c', '|S8')]\n mask = [0, 1, 0, 0, 1]\n base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)\n\n def test_byview(self):\n # Test creation by view\n base = self.base\n mbase = base.view(mrecarray)\n assert_equal(mbase.recordmask, base.recordmask)\n assert_equal_records(mbase._mask, base._mask)\n assert_(isinstance(mbase._data, recarray))\n assert_equal_records(mbase._data, base._data.view(recarray))\n for field in ('a', 'b', 'c'):\n assert_equal(base[field], mbase[field])\n assert_equal_records(mbase.view(mrecarray), mbase)\n\n def test_get(self):\n # Tests fields retrieval\n base = self.base.copy()\n mbase = base.view(mrecarray)\n # As fields..........\n for field in ('a', 'b', 'c'):\n assert_equal(getattr(mbase, field), mbase[field])\n assert_equal(base[field], mbase[field])\n # as elements .......\n mbase_first = mbase[0]\n assert_(isinstance(mbase_first, mrecarray))\n assert_equal(mbase_first.dtype, mbase.dtype)\n assert_equal(mbase_first.tolist(), (1, 1.1, b'one'))\n # Used to be mask, now it's recordmask\n assert_equal(mbase_first.recordmask, nomask)\n assert_equal(mbase_first._mask.item(), (False, False, False))\n assert_equal(mbase_first['a'], mbase['a'][0])\n mbase_last = mbase[-1]\n assert_(isinstance(mbase_last, mrecarray))\n assert_equal(mbase_last.dtype, mbase.dtype)\n assert_equal(mbase_last.tolist(), (None, None, None))\n # Used to be mask, now it's recordmask\n assert_equal(mbase_last.recordmask, True)\n assert_equal(mbase_last._mask.item(), (True, True, True))\n assert_equal(mbase_last['a'], mbase['a'][-1])\n assert_((mbase_last['a'] is masked))\n # as slice ..........\n mbase_sl = mbase[:2]\n assert_(isinstance(mbase_sl, mrecarray))\n assert_equal(mbase_sl.dtype, mbase.dtype)\n # Used to be mask, now it's recordmask\n assert_equal(mbase_sl.recordmask, [0, 1])\n assert_equal_records(mbase_sl.mask,\n np.array([(False, False, False),\n (True, True, True)],\n dtype=mbase._mask.dtype))\n assert_equal_records(mbase_sl, base[:2].view(mrecarray))\n for field in ('a', 'b', 'c'):\n assert_equal(getattr(mbase_sl, field), base[:2][field])\n\n def test_set_fields(self):\n # Tests setting fields.\n base = self.base.copy()\n mbase = base.view(mrecarray)\n mbase = mbase.copy()\n mbase.fill_value = (999999, 1e20, 'N/A')\n # Change the data, the mask should be conserved\n mbase.a._data[:] = 5\n assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])\n assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])\n # Change the elements, and the mask will follow\n mbase.a = 1\n assert_equal(mbase['a']._data, [1]*5)\n assert_equal(ma.getmaskarray(mbase['a']), [0]*5)\n # Use to be _mask, now 
it's recordmask\n assert_equal(mbase.recordmask, [False]*5)\n assert_equal(mbase._mask.tolist(),\n np.array([(0, 0, 0),\n (0, 1, 1),\n (0, 0, 0),\n (0, 0, 0),\n (0, 1, 1)],\n dtype=bool))\n # Set a field to mask ........................\n mbase.c = masked\n # Use to be mask, and now it's still mask !\n assert_equal(mbase.c.mask, [1]*5)\n assert_equal(mbase.c.recordmask, [1]*5)\n assert_equal(ma.getmaskarray(mbase['c']), [1]*5)\n assert_equal(ma.getdata(mbase['c']), [b'N/A']*5)\n assert_equal(mbase._mask.tolist(),\n np.array([(0, 0, 1),\n (0, 1, 1),\n (0, 0, 1),\n (0, 0, 1),\n (0, 1, 1)],\n dtype=bool))\n # Set fields by slices .......................\n mbase = base.view(mrecarray).copy()\n mbase.a[3:] = 5\n assert_equal(mbase.a, [1, 2, 3, 5, 5])\n assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])\n mbase.b[3:] = masked\n assert_equal(mbase.b, base['b'])\n assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])\n # Set fields globally..........................\n ndtype = [('alpha', '|S1'), ('num', int)]\n data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)\n rdata = data.view(MaskedRecords)\n val = ma.array([10, 20, 30], mask=[1, 0, 0])\n\n rdata['num'] = val\n assert_equal(rdata.num, val)\n assert_equal(rdata.num.mask, [1, 0, 0])\n\n def test_set_fields_mask(self):\n # Tests setting the mask of a field.\n base = self.base.copy()\n # This one has already a mask....\n mbase = base.view(mrecarray)\n mbase['a'][-2] = masked\n assert_equal(mbase.a, [1, 2, 3, 4, 5])\n assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])\n # This one has not yet\n mbase = fromarrays([np.arange(5), np.random.rand(5)],\n dtype=[('a', int), ('b', float)])\n mbase['a'][-2] = masked\n assert_equal(mbase.a, [0, 1, 2, 3, 4])\n assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])\n\n def test_set_mask(self):\n base = self.base.copy()\n mbase = base.view(mrecarray)\n # Set the mask to True .......................\n mbase.mask = masked\n assert_equal(ma.getmaskarray(mbase['b']), [1]*5)\n assert_equal(mbase['a']._mask, mbase['b']._mask)\n assert_equal(mbase['a']._mask, mbase['c']._mask)\n assert_equal(mbase._mask.tolist(),\n np.array([(1, 1, 1)]*5, dtype=bool))\n # Delete the mask ............................\n mbase.mask = nomask\n assert_equal(ma.getmaskarray(mbase['c']), [0]*5)\n assert_equal(mbase._mask.tolist(),\n np.array([(0, 0, 0)]*5, dtype=bool))\n\n def test_set_mask_fromarray(self):\n base = self.base.copy()\n mbase = base.view(mrecarray)\n # Sets the mask w/ an array\n mbase.mask = [1, 0, 0, 0, 1]\n assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])\n assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])\n assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])\n # Yay, once more !\n mbase.mask = [0, 0, 0, 0, 1]\n assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])\n assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])\n assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])\n\n def test_set_mask_fromfields(self):\n mbase = self.base.copy().view(mrecarray)\n\n nmask = np.array(\n [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],\n dtype=[('a', bool), ('b', bool), ('c', bool)])\n mbase.mask = nmask\n assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])\n assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])\n assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])\n # Reinitialize and redo\n mbase.mask = False\n mbase.fieldmask = nmask\n assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])\n assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])\n assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])\n\n def test_set_elements(self):\n base = self.base.copy()\n # Set an element to mask .....................\n mbase = 
base.view(mrecarray).copy()\n mbase[-2] = masked\n assert_equal(\n mbase._mask.tolist(),\n np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],\n dtype=bool))\n # Used to be mask, now it's recordmask!\n assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])\n # Set slices .................................\n mbase = base.view(mrecarray).copy()\n mbase[:2] = (5, 5, 5)\n assert_equal(mbase.a._data, [5, 5, 3, 4, 5])\n assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])\n assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])\n assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])\n assert_equal(mbase.c._data,\n [b'5', b'5', b'three', b'four', b'five'])\n assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])\n\n mbase = base.view(mrecarray).copy()\n mbase[:2] = masked\n assert_equal(mbase.a._data, [1, 2, 3, 4, 5])\n assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])\n assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])\n assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])\n assert_equal(mbase.c._data,\n [b'one', b'two', b'three', b'four', b'five'])\n assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])\n\n def test_setslices_hardmask(self):\n # Tests setting slices w/ hardmask.\n base = self.base.copy()\n mbase = base.view(mrecarray)\n mbase.harden_mask()\n try:\n mbase[-2:] = (5, 5, 5)\n assert_equal(mbase.a._data, [1, 2, 3, 5, 5])\n assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])\n assert_equal(mbase.c._data,\n [b'one', b'two', b'three', b'5', b'five'])\n assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])\n assert_equal(mbase.b._mask, mbase.a._mask)\n assert_equal(mbase.b._mask, mbase.c._mask)\n except NotImplementedError:\n # OK, not implemented yet...\n pass\n except AssertionError:\n raise\n else:\n raise Exception(\"Flexible hard masks should be supported !\")\n # Not using a tuple should crash\n try:\n mbase[-2:] = 3\n except (NotImplementedError, TypeError):\n pass\n else:\n raise TypeError(\"Should have expected a readable buffer object!\")\n\n def test_hardmask(self):\n # Test hardmask\n base = self.base.copy()\n mbase = base.view(mrecarray)\n mbase.harden_mask()\n assert_(mbase._hardmask)\n mbase.mask = nomask\n assert_equal_records(mbase._mask, base._mask)\n mbase.soften_mask()\n assert_(not mbase._hardmask)\n mbase.mask = nomask\n # So, the mask of a field is no longer set to nomask...\n assert_equal_records(mbase._mask,\n ma.make_mask_none(base.shape, base.dtype))\n assert_(ma.make_mask(mbase['b']._mask) is nomask)\n assert_equal(mbase['a']._mask, mbase['b']._mask)\n\n def test_pickling(self):\n # Test pickling\n base = self.base.copy()\n mrec = base.view(mrecarray)\n _ = pickle.dumps(mrec)\n mrec_ = pickle.loads(_)\n assert_equal(mrec_.dtype, mrec.dtype)\n assert_equal_records(mrec_._data, mrec._data)\n assert_equal(mrec_._mask, mrec._mask)\n assert_equal_records(mrec_._mask, mrec._mask)\n\n def test_filled(self):\n # Test filling the array\n _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)\n _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)\n _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')\n ddtype = [('a', int), ('b', float), ('c', '|S8')]\n mrec = fromarrays([_a, _b, _c], dtype=ddtype,\n fill_value=(99999, 99999., 'N/A'))\n mrecfilled = mrec.filled()\n assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))\n assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),\n dtype=float))\n assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),\n dtype='|S8'))\n\n def test_tolist(self):\n # Test tolist.\n _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)\n _b = 
ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)\n _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')\n ddtype = [('a', int), ('b', float), ('c', '|S8')]\n mrec = fromarrays([_a, _b, _c], dtype=ddtype,\n fill_value=(99999, 99999., 'N/A'))\n\n assert_equal(mrec.tolist(),\n [(1, 1.1, None), (2, 2.2, b'two'),\n (None, None, b'three')])\n\n def test_withnames(self):\n # Test the creation w/ format and names\n x = mrecarray(1, formats=float, names='base')\n x[0]['base'] = 10\n assert_equal(x['base'][0], 10)\n\n def test_exotic_formats(self):\n # Test that 'exotic' formats are processed properly\n easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])\n easy[0] = masked\n assert_equal(easy.filled(1).item(), (1, b'1', 1.))\n\n solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])\n solo[0] = masked\n assert_equal(solo.filled(1).item(),\n np.array((1,), dtype=solo.dtype).item())\n\n mult = mrecarray(2, dtype=\"i4, (2,3)float, float\")\n mult[0] = masked\n mult[1] = (1, 1, 1)\n mult.filled(0)\n assert_equal_records(mult.filled(0),\n np.array([(0, 0, 0), (1, 1, 1)],\n dtype=mult.dtype))\n\n\nclass TestView(object):\n\n def setup(self):\n (a, b) = (np.arange(10), np.random.rand(10))\n ndtype = [('a', float), ('b', float)]\n arr = np.array(list(zip(a, b)), dtype=ndtype)\n\n mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))\n mrec.mask[3] = (False, True)\n self.data = (mrec, a, b, arr)\n\n def test_view_by_itself(self):\n (mrec, a, b, arr) = self.data\n test = mrec.view()\n assert_(isinstance(test, MaskedRecords))\n assert_equal_records(test, mrec)\n assert_equal_records(test._mask, mrec._mask)\n\n def test_view_simple_dtype(self):\n (mrec, a, b, arr) = self.data\n ntype = (float, 2)\n test = mrec.view(ntype)\n assert_(isinstance(test, ma.MaskedArray))\n assert_equal(test, np.array(list(zip(a, b)), dtype=float))\n assert_(test[3, 1] is ma.masked)\n\n def test_view_flexible_type(self):\n (mrec, a, b, arr) = self.data\n alttype = [('A', float), ('B', float)]\n test = mrec.view(alttype)\n assert_(isinstance(test, MaskedRecords))\n assert_equal_records(test, arr.view(alttype))\n assert_(test['B'][3] is masked)\n assert_equal(test.dtype, np.dtype(alttype))\n assert_(test._fill_value is None)\n\n\n##############################################################################\nclass TestMRecordsImport(object):\n\n _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)\n _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)\n _c = ma.array([b'one', b'two', b'three'],\n mask=[0, 0, 1], dtype='|S8')\n ddtype = [('a', int), ('b', float), ('c', '|S8')]\n mrec = fromarrays([_a, _b, _c], dtype=ddtype,\n fill_value=(b'99999', b'99999.',\n b'N/A'))\n nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)\n data = (mrec, nrec, ddtype)\n\n def test_fromarrays(self):\n _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)\n _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)\n _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')\n (mrec, nrec, _) = self.data\n for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):\n assert_equal(getattr(mrec, f)._mask, l._mask)\n # One record only\n _x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],)\n assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])\n\n def test_fromrecords(self):\n # Test construction from records.\n (mrec, nrec, ddtype) = self.data\n #......\n palist = [(1, 'abc', 3.7000002861022949, 0),\n (2, 'xy', 6.6999998092651367, 1),\n (0, ' ', 0.40000000596046448, 0)]\n pa = 
recfromrecords(palist, names='c1, c2, c3, c4')\n mpa = fromrecords(palist, names='c1, c2, c3, c4')\n assert_equal_records(pa, mpa)\n #.....\n _mrec = fromrecords(nrec)\n assert_equal(_mrec.dtype, mrec.dtype)\n for field in _mrec.dtype.names:\n assert_equal(getattr(_mrec, field), getattr(mrec._data, field))\n\n _mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')\n assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])\n for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):\n assert_equal(getattr(_mrec, f), getattr(mrec._data, n))\n\n _mrec = fromrecords(mrec)\n assert_equal(_mrec.dtype, mrec.dtype)\n assert_equal_records(_mrec._data, mrec.filled())\n assert_equal_records(_mrec._mask, mrec._mask)\n\n def test_fromrecords_wmask(self):\n # Tests construction from records w/ mask.\n (mrec, nrec, ddtype) = self.data\n\n _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])\n assert_equal_records(_mrec._data, mrec._data)\n assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])\n\n _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)\n assert_equal_records(_mrec._data, mrec._data)\n assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])\n\n _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)\n assert_equal_records(_mrec._data, mrec._data)\n assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())\n\n _mrec = fromrecords(nrec.tolist(), dtype=ddtype,\n mask=mrec._mask.tolist())\n assert_equal_records(_mrec._data, mrec._data)\n assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())\n\n def test_fromtextfile(self):\n # Tests reading from a text file.\n fcontent = (\n\"\"\"#\n'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'\n'strings',1,1.0,'mixed column',,1\n'with embedded \"double quotes\"',2,2.0,1.0,,1\n'strings',3,3.0E5,3,,1\n'strings',4,-1e-10,,,1\n\"\"\")\n with temppath() as path:\n with open(path, 'w') as f:\n f.write(fcontent)\n mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')\n assert_(isinstance(mrectxt, MaskedRecords))\n assert_equal(mrectxt.F, [1, 1, 1, 1])\n assert_equal(mrectxt.E._mask, [1, 1, 1, 1])\n assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])\n\n def test_addfield(self):\n # Tests addfield\n (mrec, nrec, ddtype) = self.data\n (d, m) = ([100, 200, 300], [1, 0, 0])\n mrec = addfield(mrec, ma.array(d, mask=m))\n assert_equal(mrec.f3, d)\n assert_equal(mrec.f3._mask, m)\n\n\ndef test_record_array_with_object_field():\n # Trac #1839\n y = ma.masked_array(\n [(1, '2'), (3, '4')],\n mask=[(0, 0), (0, 1)],\n dtype=[('a', int), ('b', object)])\n # getting an item used to fail\n y[1]\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"from __future__ import division, absolute_import, print_function\n\nimport sys\nimport os\nimport shutil\nfrom tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp\nimport mmap\n\nfrom numpy import (\n memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)\nfrom numpy.compat import Path\n\nfrom numpy import arange, allclose, asarray\nfrom numpy.testing import (\n run_module_suite, assert_, assert_equal, assert_array_equal,\n dec, suppress_warnings\n)\n\nclass TestMemmap(object):\n def setup(self):\n self.tmpfp = NamedTemporaryFile(prefix='mmap')\n self.tempdir = mkdtemp()\n self.shape = (3, 4)\n self.dtype = 'float32'\n self.data = arange(12, dtype=self.dtype)\n self.data.resize(self.shape)\n\n def teardown(self):\n self.tmpfp.close()\n shutil.rmtree(self.tempdir)\n\n def test_roundtrip(self):\n # Write data to file\n fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',\n shape=self.shape)\n fp[:] = self.data[:]\n del fp # Test __del__ machinery, which handles cleanup\n\n # Read data back from file\n newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',\n shape=self.shape)\n assert_(allclose(self.data, newfp))\n assert_array_equal(self.data, newfp)\n assert_equal(newfp.flags.writeable, False)\n\n def test_open_with_filename(self):\n tmpname = mktemp('', 'mmap', dir=self.tempdir)\n fp = memmap(tmpname, dtype=self.dtype, mode='w+',\n shape=self.shape)\n fp[:] = self.data[:]\n del fp\n\n def test_unnamed_file(self):\n with TemporaryFile() as f:\n fp = memmap(f, dtype=self.dtype, shape=self.shape)\n del fp\n\n def test_attributes(self):\n offset = 1\n mode = \"w+\"\n fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,\n shape=self.shape, offset=offset)\n assert_equal(offset, fp.offset)\n assert_equal(mode, fp.mode)\n del fp\n\n def test_filename(self):\n tmpname = mktemp('', 'mmap', dir=self.tempdir)\n fp = memmap(tmpname, dtype=self.dtype, mode='w+',\n shape=self.shape)\n abspath = os.path.abspath(tmpname)\n fp[:] = self.data[:]\n assert_equal(abspath, fp.filename)\n b = fp[:1]\n assert_equal(abspath, b.filename)\n del b\n del fp\n\n @dec.skipif(Path is None, \"No pathlib.Path\")\n def test_path(self):\n tmpname = mktemp('', 'mmap', dir=self.tempdir)\n fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',\n shape=self.shape)\n abspath = os.path.realpath(os.path.abspath(tmpname))\n fp[:] = self.data[:]\n assert_equal(abspath, str(fp.filename.resolve()))\n b = fp[:1]\n assert_equal(abspath, str(b.filename.resolve()))\n del b\n del fp\n\n def test_filename_fileobj(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, mode=\"w+\",\n shape=self.shape)\n assert_equal(fp.filename, self.tmpfp.name)\n\n @dec.knownfailureif(sys.platform == 'gnu0', \"This test is known to fail on hurd\")\n def test_flush(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',\n shape=self.shape)\n fp[:] = self.data[:]\n assert_equal(fp[0], self.data[0])\n fp.flush()\n\n def test_del(self):\n # Make sure a view does not delete the underlying mmap\n fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',\n shape=self.shape)\n fp_base[0] = 5\n fp_view = fp_base[0:1]\n assert_equal(fp_view[0], 5)\n del fp_view\n # Should still be able to access and assign values after\n # deleting the view\n assert_equal(fp_base[0], 5)\n fp_base[0] = 6\n assert_equal(fp_base[0], 6)\n\n def test_arithmetic_drops_references(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',\n shape=self.shape)\n tmp = (fp + 10)\n if isinstance(tmp, memmap):\n assert_(tmp._mmap is not fp._mmap)\n\n def 
test_indexing_drops_references(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',\n shape=self.shape)\n tmp = fp[[(1, 2), (2, 3)]]\n if isinstance(tmp, memmap):\n assert_(tmp._mmap is not fp._mmap)\n\n def test_slicing_keeps_references(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',\n shape=self.shape)\n assert_(fp[:2, :2]._mmap is fp._mmap)\n\n def test_view(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)\n new1 = fp.view()\n new2 = new1.view()\n assert_(new1.base is fp)\n assert_(new2.base is fp)\n new_array = asarray(fp)\n assert_(new_array.base is fp)\n\n def test_ufunc_return_ndarray(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)\n fp[:] = self.data\n\n with suppress_warnings() as sup:\n sup.filter(FutureWarning, \"np.average currently does not preserve\")\n for unary_op in [sum, average, product]:\n result = unary_op(fp)\n assert_(isscalar(result))\n assert_(result.__class__ is self.data[0, 0].__class__)\n\n assert_(unary_op(fp, axis=0).__class__ is ndarray)\n assert_(unary_op(fp, axis=1).__class__ is ndarray)\n\n for binary_op in [add, subtract, multiply]:\n assert_(binary_op(fp, self.data).__class__ is ndarray)\n assert_(binary_op(self.data, fp).__class__ is ndarray)\n assert_(binary_op(fp, fp).__class__ is ndarray)\n\n fp += 1\n assert(fp.__class__ is memmap)\n add(fp, 1, out=fp)\n assert(fp.__class__ is memmap)\n\n def test_getitem(self):\n fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)\n fp[:] = self.data\n\n assert_(fp[1:, :-1].__class__ is memmap)\n # Fancy indexing returns a copy that is not memmapped\n assert_(fp[[0, 1]].__class__ is ndarray)\n\n def test_memmap_subclass(self):\n class MemmapSubClass(memmap):\n pass\n\n fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)\n fp[:] = self.data\n\n # We keep previous behavior for subclasses of memmap, i.e. the\n # ufunc and __getitem__ output is never turned into a ndarray\n assert_(sum(fp, axis=0).__class__ is MemmapSubClass)\n assert_(sum(fp).__class__ is MemmapSubClass)\n assert_(fp[1:, :-1].__class__ is MemmapSubClass)\n assert(fp[[0, 1]].__class__ is MemmapSubClass)\n\n def test_mmap_offset_greater_than_allocation_granularity(self):\n size = 5 * mmap.ALLOCATIONGRANULARITY\n offset = mmap.ALLOCATIONGRANULARITY + 1\n fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)\n assert_(fp.offset == offset)\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"pandas.core.generic.NDFrame.__init__",
"pandas.types.cast._infer_dtype_from_scalar",
"numpy.asarray",
"pandas.compat.numpy.function.validate_round",
"pandas.compat.range",
"pandas.compat.map",
"pandas.core.internals.create_block_manager_from_blocks",
"pandas.core.generic.NDFrame._set_item",
"pandas.compat.iteritems",
"pandas.core.frame.DataFrame",
"pandas.core.common._default_index",
"pandas.core.groupby.PanelGroupBy",
"pandas.compat.OrderedDefaultdict",
"pandas.compat.OrderedDict",
"pandas.core.series.Series",
"numpy.unique",
"pandas.core.ops.add_special_arithmetic_methods",
"numpy.arange",
"pandas.types.missing.notnull",
"pandas.core.index._ensure_index",
"pandas.core.missing.fill_zeros",
"pandas.util.decorators.deprecate",
"numpy.apply_along_axis",
"pandas.types.common.is_list_like",
"pandas.types.common.is_integer",
"numpy.zeros",
"pandas.core.index.MultiIndex",
"pandas.util.decorators.Appender",
"pandas.types.common.is_scalar",
"pandas.core.index._get_combined_index",
"pandas.core.common._try_sort",
"pandas.core.common.PandasError",
"pandas.compat.u",
"pandas.core.index.MultiIndex.from_arrays",
"pandas.tools.util.cartesian_product",
"numpy.errstate",
"pandas.compat.itervalues",
"numpy.array",
"pandas.computation.expressions.evaluate",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.io.excel.ExcelWriter",
"numpy.isfinite",
"pandas.core.internals.create_block_manager_from_arrays",
"numpy.array_equal",
"pandas.formats.printing.pprint_thing",
"numpy.empty",
"pandas.core.common._apply_if_callable",
"pandas.types.common.is_string_like",
"pandas.compat.zip",
"numpy.prod",
"pandas.types.cast._possibly_cast_item",
"pandas.core.indexing.maybe_droplevels",
"numpy.vstack",
"pandas.core.index.Index"
],
[
"numpy.testing.assert_equal",
"numpy.log",
"numpy.linalg.solve",
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.random.seed",
"numpy.arange",
"scipy.optimize.root",
"numpy.ones",
"numpy.errstate",
"numpy.testing.assert_",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"pandas.core.api.DataFrame",
"pandas.stats.common._get_window_type",
"pandas.compat.StringIO",
"pandas.util.decorators.cache_readonly",
"pandas.compat.range",
"pandas.stats.common.banner",
"pandas.core.api.Series",
"numpy.array",
"pandas.stats.plm.MovingPanelOLS"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.offsetbox.DrawingArea",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.offsetbox.AnchoredOffsetbox",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
],
[
"numpy.linalg._umath_linalg.eig",
"numpy.core.zeros",
"numpy.core.asanyarray",
"numpy.core.multiarray.normalize_axis_index",
"numpy.core.geterrobj",
"numpy.core.add.reduce",
"numpy.core.all",
"numpy.core.product",
"numpy.core.count_nonzero",
"numpy.linalg._umath_linalg.inv",
"numpy.linalg._umath_linalg.slogdet",
"numpy.core.amax",
"numpy.core.sum",
"numpy.core.array",
"numpy.lib.triu",
"numpy.core.empty",
"numpy.core.dot",
"numpy.core.finfo",
"numpy.core.isfinite",
"numpy.linalg._umath_linalg.det",
"numpy.core.abs",
"numpy.linalg._umath_linalg.eigvals",
"numpy.core.divide",
"numpy.core.atleast_2d",
"numpy.core.swapaxes",
"numpy.core.moveaxis",
"numpy.core.asarray",
"numpy.core.sqrt",
"numpy.core.errstate"
],
[
"matplotlib.pyplot.yticks",
"matplotlib.colors.LogNorm",
"numpy.abs",
"numpy.random.seed",
"numpy.arange",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.testing.decorators.image_comparison",
"numpy.random.randn",
"matplotlib.rcParams.update",
"numpy.random.rand",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.cbook.get_sample_data",
"matplotlib.pyplot.xticks",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"pandas.msgpack.packb"
],
[
"numpy.dot",
"numpy.resize",
"scipy.optimize._numdiff.group_columns",
"numpy.arctan",
"numpy.asarray",
"numpy.all",
"numpy.zeros_like",
"numpy.any",
"numpy.iscomplexobj",
"scipy.optimize.OptimizeResult",
"scipy.sparse.issparse",
"numpy.atleast_1d",
"numpy.log1p",
"scipy.optimize._minpack._lmdif",
"scipy.optimize._numdiff.approx_derivative",
"scipy.sparse.csr_matrix",
"numpy.atleast_2d",
"numpy.sum",
"scipy.optimize._minpack._lmder",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.empty"
],
[
"numpy.distutils.misc_util.Configuration"
],
[
"pandas.io.pytables.read_hdf",
"pandas.util.testing.skip_if_no_package",
"pandas.io.pytables._tables",
"pandas.io.pytables.HDFStore",
"pandas.Series",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.io.pytables.get_store",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.makePanel",
"numpy.random.randn",
"pandas.util.testing.makeDataFrame",
"numpy.random.randint",
"pandas.util.testing.makeTimeDataFrame",
"pandas.util.testing.makePanel4D",
"pandas.offsets.CustomBusinessDay",
"pandas.io.pytables.Term",
"numpy.arange",
"pandas.Panel",
"pandas.tslib.maybe_get_tz",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.Index",
"pandas.util.testing.assert_panel4d_equal",
"pandas.util.testing.makeDateIndex",
"pandas.util.testing.assert_panel_equal",
"pandas.Int64Index",
"pandas.Panel4D",
"pandas.util.testing._skip_if_no_pathlib",
"pandas.util.testing.set_timezone",
"pandas.set_option",
"pandas.util.testing.reset_testing_mode",
"numpy.zeros",
"pandas.util.testing.rands_array",
"pandas.concat",
"pandas.read_hdf",
"pandas.compat.u",
"pandas.compat.is_platform_windows",
"pandas.MultiIndex",
"pandas.bdate_range",
"pandas.Categorical",
"pandas.util.testing.assert_almost_equal",
"pandas.Timedelta",
"numpy.random.rand",
"pandas.date_range",
"numpy.random.binomial",
"numpy.array",
"pandas.util.testing._skip_if_no_dateutil",
"pandas.timedelta_range",
"pandas.util.testing._skip_if_no_localpath",
"pandas.util.testing.makeTimeSeries",
"numpy.random.seed",
"pandas.util.testing.assertRaisesRegexp",
"pandas.isnull",
"pandas.util.testing.makeMixedDataFrame",
"pandas.util.testing.get_data_path",
"pandas.util.testing.set_testing_mode",
"pandas.formats.printing.pprint_thing",
"numpy.datetime64",
"pandas.util.testing.makeStringSeries",
"pandas.util.testing.assertRaises",
"numpy.random.normal",
"pandas.util.testing.rands",
"numpy.float64",
"pandas.compat.lrange",
"pandas.Timestamp",
"pandas.util.testing.makePeriodIndex",
"pandas.compat.range"
],
[
"numpy.dot",
"scipy._lib._numpy_compat.suppress_warnings",
"scipy.interpolate._bsplines._augknt",
"scipy.interpolate._fitpack_impl.splrep",
"numpy.linspace",
"scipy.interpolate.sproot",
"numpy.asarray",
"numpy.nan_to_num",
"scipy.interpolate.PPoly.from_spline",
"scipy.interpolate.splder",
"numpy.searchsorted",
"numpy.where",
"scipy.interpolate._fitpack_impl.splantider",
"scipy.interpolate._fitpack._splint",
"numpy.testing.assert_equal",
"numpy.ones_like",
"scipy.interpolate.make_interp_spline",
"numpy.unique",
"numpy.arange",
"numpy.sin",
"numpy.atleast_1d",
"scipy.interpolate._fitpack_impl.splprep",
"scipy.interpolate._fitpack_impl.splder",
"scipy.linalg.solve",
"numpy.zeros",
"numpy.isnan",
"scipy.interpolate.splprep",
"scipy.interpolate._bsplines._not_a_knot",
"scipy.interpolate.insert",
"scipy.interpolate.splev",
"scipy.interpolate.make_lsq_spline",
"numpy.linalg.lstsq",
"scipy.interpolate.splantider",
"numpy.testing.assert_allclose",
"numpy.errstate",
"numpy.array",
"scipy.interpolate.splrep",
"numpy.random.random",
"numpy.random.seed",
"scipy.interpolate.BSpline.construct_fast",
"numpy.dstack",
"numpy.cos",
"numpy.piecewise",
"scipy.interpolate.splint",
"scipy.interpolate.BSpline.basis_element",
"scipy.interpolate.BSpline",
"scipy.interpolate._bspl.evaluate_all_bspl"
],
[
"numpy.dot",
"numpy.hstack",
"numpy.log2",
"numpy.asarray",
"numpy.identity",
"numpy.zeros_like",
"numpy.outer",
"numpy.vstack",
"numpy.asarray_chkfinite"
],
[
"matplotlib.backend_bases.NavigationToolbar2.__init__",
"matplotlib.backend_bases.ToolContainerBase.__init__",
"matplotlib.widgets.SubplotTool",
"matplotlib.backend_bases.FigureCanvasBase.scroll_event",
"matplotlib.backend_bases.NavigationToolbar2.update",
"matplotlib.backend_tools.add_tools_to_manager",
"matplotlib.backend_tools.RubberbandBase.__init__",
"matplotlib._pylab_helpers.Gcf.destroy",
"matplotlib.backend_bases.FigureCanvasBase.button_release_event",
"matplotlib.backend_bases.TimerBase.__init__",
"matplotlib.is_interactive",
"matplotlib.backend_managers.ToolManager",
"matplotlib.backend_tools.ConfigureSubplotsBase.__init__",
"matplotlib.cbook.deprecated",
"matplotlib.backend_bases.FigureCanvasBase.key_press_event",
"matplotlib._pylab_helpers.Gcf.get_num_fig_managers",
"matplotlib.backend_tools.add_tools_to_container",
"matplotlib.figure.Figure",
"matplotlib.backend_bases.FigureCanvasBase.motion_notify_event",
"matplotlib.cbook.warn_deprecated",
"matplotlib.backend_bases.FigureManagerBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.key_release_event",
"matplotlib.backend_bases.StatusbarBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.button_press_event",
"matplotlib.backend_bases.TimerBase._on_timer",
"matplotlib.backends.windowing.FocusManager"
],
[
"scipy.special._precompute.gammainc_asy.compute_g",
"numpy.intp",
"scipy.special._testutils.check_version",
"scipy.special._precompute.gammainc_asy.compute_d",
"scipy.special._mptestutils.Arg",
"scipy.special._mptestutils.IntArg",
"scipy.special._testutils.MissingModule",
"scipy.special._precompute.gammainc_asy.compute_alpha",
"scipy.special._precompute.gammainc_data.gammaincc",
"scipy.special._mptestutils.mp_assert_allclose"
],
[
"matplotlib.transforms.Bbox",
"numpy.asarray",
"numpy.concatenate",
"matplotlib.pyplot.axes",
"numpy.all",
"matplotlib.patches.Polygon",
"numpy.roll",
"matplotlib.patches.PathPatch",
"numpy.sin",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.pyplot.Circle",
"matplotlib.path.Path.unit_circle",
"matplotlib.collections.PathCollection",
"matplotlib.pyplot.figure",
"matplotlib.patches.Arc",
"matplotlib.path.Path.make_compound_path",
"matplotlib.patches.Rectangle",
"matplotlib.path.Path",
"matplotlib.patches.Circle",
"matplotlib.testing.jpl_units.register",
"matplotlib.patches.Wedge",
"matplotlib.patches.Ellipse",
"matplotlib.path.Path.unit_regular_star",
"matplotlib.pyplot.subplots",
"numpy.cos",
"matplotlib.patches.ConnectionPatch"
],
[
"numpy.abs",
"numpy.sqrt",
"numpy.asarray",
"matplotlib.font_manager.FontProperties",
"numpy.cos",
"numpy.empty",
"numpy.vstack"
],
[
"scipy._lib._numpy_compat.suppress_warnings",
"scipy.special.spence",
"scipy.special.loggamma",
"scipy.special.seterr",
"numpy.testing.assert_",
"scipy.special.geterr",
"scipy.special.gammaln",
"scipy.special.errprint",
"scipy.special.wrightomega",
"scipy.special.errstate"
],
[
"pandas.Series",
"pandas.util.testing.assert_produces_warning",
"pandas.DataFrame",
"pandas.util.testing._skip_if_windows",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"numpy.random.randint",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_series_equal",
"numpy.sin",
"pandas.util.testing.randbool",
"pandas.util.testing.assert_produces_warnings",
"pandas.types.common.is_list_like",
"pandas.types.common.is_scalar",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"numpy.log10",
"pandas.util.testing.skip_if_no_ne",
"numpy.random.rand",
"pandas.util.testing.assert_equal",
"pandas.date_range",
"numpy.errstate",
"pandas.core.common.difference",
"numpy.array",
"pandas.util.testing.makeCustomDataframe",
"pandas.eval",
"pandas.util.testing.assertRaisesRegexp",
"pandas.util.testing.assertRaises",
"numpy.float64"
],
[
"numpy.sqrt",
"scipy.signal.get_window",
"scipy.signal.freqz",
"numpy.exp",
"scipy.signal.cspline1d",
"scipy.signal.medfilt",
"numpy.sin",
"scipy.signal.tf2sos",
"numpy.real",
"numpy.zeros",
"numpy.log",
"scipy.signal.fftconvolve",
"numpy.random.choice",
"scipy.signal.windows.hann",
"scipy.signal.zpk2sos",
"numpy.log10",
"scipy.signal.signaltools._filtfilt_gust",
"numpy.array",
"scipy.signal.convolve",
"numpy.sum",
"numpy.nditer",
"numpy.testing.assert_array_equal",
"scipy.signal.choose_conv_method",
"numpy.angle",
"scipy.signal.cspline1d_eval",
"numpy.vstack",
"scipy._lib._numpy_compat.suppress_warnings",
"numpy.asarray",
"numpy.concatenate",
"scipy.signal.firwin",
"numpy.sctypes.items",
"numpy.allclose",
"scipy.signal.lfiltic",
"scipy.signal.resample",
"scipy.signal.lfilter",
"scipy.signal.deconvolve",
"scipy.signal.resample_poly",
"scipy.signal.convolve2d",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.corrcoef",
"numpy.random.RandomState",
"numpy.testing.assert_warns",
"scipy.signal.filtfilt",
"scipy.signal.order_filter",
"numpy.ones",
"scipy.signal.windows.tukey",
"numpy.empty",
"scipy.signal.sosfilt",
"numpy.linspace",
"scipy.signal.wiener",
"scipy.signal.correlate2d",
"scipy.optimize.fmin",
"scipy.ndimage.filters.correlate1d",
"scipy.signal.hilbert",
"numpy.random.randint",
"numpy.testing.assert_equal",
"numpy.swapaxes",
"scipy.signal.invresz",
"scipy.signal.invres",
"scipy.signal.ellip",
"numpy.interp",
"numpy.testing.assert_array_almost_equal",
"scipy.signal.decimate",
"numpy.testing.assert_allclose",
"numpy.correlate",
"numpy.convolve",
"scipy.signal.cheby1",
"numpy.cos",
"numpy.linalg.norm",
"scipy.signal.correlate",
"numpy.dtype",
"scipy.signal.sosfilt_zi",
"numpy.random.randn",
"scipy.signal.zpk2tf",
"numpy.ones_like",
"numpy.empty_like",
"numpy.arange",
"numpy.finfo",
"numpy.testing.assert_almost_equal",
"scipy.signal.butter",
"numpy.apply_along_axis",
"scipy.signal.vectorstrength",
"scipy.signal.lfilter_zi",
"scipy.signal.tf2zpk",
"numpy.random.random",
"numpy.abs",
"numpy.random.seed",
"scipy.signal.sosfiltfilt",
"numpy.prod"
],
[
"pandas.computation.expr._parsers.keys",
"pandas.formats.printing.pprint_thing",
"pandas.computation.scope._ensure_scope",
"pandas.computation.expr.tokenize_string",
"pandas.computation.expr.Expr",
"pandas.computation.engines._engines.keys"
],
[
"pandas.io.sas.sas_xport.XportReader",
"pandas.io.sas.sas7bdat.SAS7BDATReader"
],
[
"numpy.core.arange",
"numpy.core.take",
"numpy.core.empty",
"numpy.core.asarray"
],
[
"pandas.util.testing.assertIsInstance",
"pandas.Series",
"numpy.asarray",
"pandas.util.testing._skip_if_no_pytz",
"pandas.util.testing.assert_produces_warning",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"pandas.offsets.Hour",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.Index",
"pandas.util.testing.equalContents",
"pandas.util.testing.rands_array",
"pandas.tseries.index.Timestamp",
"pandas.bdate_range",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"pandas.offsets.Milli",
"pandas.core.nanops.nangt",
"numpy.timedelta64",
"pandas.Timedelta",
"pandas.date_range",
"numpy.errstate",
"pandas.offsets.Second",
"pandas.tseries.tdi.Timedelta",
"numpy.array",
"pandas.timedelta_range",
"numpy.abs",
"pandas.isnull",
"numpy.array_equal",
"pandas.util.testing.makeFloatSeries",
"pandas.util.testing.assertRaisesRegexp",
"pandas.util.testing.assertRaises",
"pandas.offsets.Minute",
"pandas.compat.zip",
"numpy.float64",
"pandas.Period",
"pandas.to_timedelta",
"pandas.Timestamp",
"pandas.compat.range"
],
[
"pandas.util.testing.assertIsInstance",
"numpy.sqrt",
"pandas.Series",
"pandas.util.testing.assert_produces_warning",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"pandas.compat.iteritems",
"numpy.arange",
"pandas.Index",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.equalContents",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.getMixedTypeDict",
"numpy.errstate",
"pandas.util.testing.assertRaisesRegexp",
"pandas.Timestamp",
"pandas.compat.lrange",
"pandas.compat.range"
],
[
"numpy.asarray"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.einsum",
"numpy.polynomial.hermite.hermmulx",
"numpy.polynomial.hermite.poly2herm",
"numpy.polynomial.hermite.hermval2d",
"numpy.zeros_like",
"numpy.exp",
"numpy.polynomial.hermite.hermcompanion",
"numpy.polynomial.hermite.herm2poly",
"numpy.testing.assert_equal",
"numpy.polynomial.hermite.hermadd",
"numpy.arange",
"numpy.eye",
"numpy.polynomial.hermite.hermvander3d",
"numpy.testing.assert_almost_equal",
"numpy.polynomial.hermite.hermfromroots",
"numpy.polynomial.hermite.hermgauss",
"numpy.zeros",
"numpy.polynomial.hermite.hermsub",
"numpy.polynomial.hermite.hermmul",
"numpy.polynomial.hermite.hermtrim",
"numpy.polynomial.hermite.hermgrid3d",
"numpy.polynomial.hermite.hermint",
"numpy.polynomial.hermite.hermweight",
"numpy.polynomial.hermite.hermdiv",
"numpy.polynomial.hermite.hermval3d",
"numpy.polynomial.polynomial.polyval",
"numpy.polynomial.hermite.hermroots",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.array",
"numpy.polynomial.hermite.hermder",
"numpy.polynomial.hermite.hermval",
"numpy.testing.run_module_suite",
"numpy.polynomial.hermite.hermgrid2d",
"numpy.random.random",
"numpy.polynomial.hermite.hermvander2d",
"numpy.polynomial.hermite.hermfit",
"numpy.polynomial.hermite.hermline",
"numpy.ones",
"numpy.polynomial.hermite.hermvander"
],
[
"pandas.isnull",
"numpy.arange",
"pandas.util.testing.assert_almost_equal",
"numpy.random.randn",
"numpy.diff",
"pandas.lib.SeriesGrouper",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"pandas.lib.SeriesBinGrouper"
],
[
"numpy.ma.testutils.assert_",
"numpy.ma.testutils.assert_equal_records",
"numpy.ma.getdata",
"numpy.dtype",
"numpy.core.records.fromarrays",
"numpy.ma.array",
"numpy.core.records.fromrecords",
"numpy.ma.getmaskarray",
"numpy.arange",
"numpy.ma.mrecords.fromtextfile",
"numpy.ma.make_mask_none",
"numpy.testing.temppath",
"numpy.ma.mrecords.mrecarray",
"numpy.random.rand",
"numpy.array",
"numpy.ma.mrecords.fromarrays",
"numpy.testing.run_module_suite",
"numpy.ma.make_mask",
"numpy.ma.masked_array",
"numpy.ma.mrecords.fromrecords",
"numpy.ma.testutils.assert_equal"
],
[
"numpy.testing.dec.skipif",
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite",
"numpy.allclose",
"numpy.testing.dec.knownfailureif",
"numpy.asarray",
"numpy.arange",
"numpy.memmap",
"numpy.testing.suppress_warnings",
"numpy.testing.assert_array_equal",
"numpy.compat.Path",
"numpy.testing.assert_",
"numpy.isscalar",
"numpy.add",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.13",
"1.16",
"1.9",
"1.7",
"1.15",
"1.14",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.13",
"1.16",
"1.9",
"1.7",
"1.15",
"1.14",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eric-czech/FaST-LMM | [
"497ac732f0cb25e328282cff42045afb70a99076"
] | [
"fastlmm/inference/fastlmm_predictor.py"
] | [
"from __future__ import print_function #Python 2 & 3 compatibility\nfrom __future__ import absolute_import\nimport numpy as np\nimport logging\nimport unittest\nimport os\nimport scipy.linalg as LA\nimport time\n\nfrom pysnptools.snpreader import Bed,Pheno\nfrom pysnptools.snpreader import SnpData,SnpReader\nfrom pysnptools.kernelreader import KernelNpz\nfrom pysnptools.kernelreader import SnpKernel\nfrom pysnptools.kernelreader import KernelReader\nfrom pysnptools.kernelreader import Identity as KernelIdentity\nimport pysnptools.util as pstutil\nfrom pysnptools.standardizer import DiagKtoN,UnitTrained\nfrom pysnptools.standardizer import Unit\nfrom pysnptools.util import intersect_apply\nfrom pysnptools.standardizer import Standardizer\nfrom fastlmm.inference.lmm import LMM\nfrom pysnptools.standardizer import Identity as StandardizerIdentity\nfrom scipy.stats import multivariate_normal\nfrom fastlmm.util.pickle_io import load, save\nfrom pysnptools.pstreader import PstReader\nfrom six.moves import range\n\nclass _SnpWholeTest(KernelReader):\n '''\n Warning: Assumes that if train and test contains the same iid, they have the same value.\n '''\n def __init__(self,train,test,standardizer,block_size,iid0=None):\n self.train = train\n self.test = test\n self.standardizer = standardizer\n assert standardizer.is_constant, \"Expect standardizer to be constant\"\n self.block_size = block_size\n if iid0 is not None:\n _row = iid0\n\n @property\n def row(self):\n if not hasattr(self,'_row'):\n assert np.array_equal(self.train.sid,self.test.sid), \"Expect train and test to have same sid in same order\"\n train_set = set(tuple(item) for item in self.train.iid)\n test_unique = [item2 for item2 in (tuple(item) for item in self.test.iid) if item2 not in train_set]\n self._row = np.r_[self.train.iid,np.array(test_unique,dtype='str').reshape(-1,2)]\n return self._row\n\n\n @property\n def col(self):\n return self.test.iid\n\n def __getitem__(self, iid_indexer_and_snp_indexer):\n if isinstance(iid_indexer_and_snp_indexer,tuple):\n iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer\n else:\n iid0_indexer = iid_indexer_and_snp_indexer\n iid1_indexer = iid0_indexer\n\n row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.row_count, iid0_indexer)\n col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.col_count, iid1_indexer)\n\n if row_index_or_none is None:\n row_index_or_none = list(range(self.row_count))\n\n assert not isinstance(row_index_or_none,str), \"row_index_or_none should not be a string\"\n iid = self.row[row_index_or_none]\n\n if col_index_or_none is None or np.array_equal(col_index_or_none,list(range(self.col_count))):\n test = self.test\n else:\n test = self.test[col_index_or_none]\n \n try: #case 1: asking for train x test\n train = self.train[self.train.iid_to_index(iid),:]\n is_ok = True\n except:\n is_ok = False\n if is_ok:\n return _SnpTrainTest(train=train,test=test,standardizer=self.standardizer,block_size=self.block_size)\n\n #case 2: asking for train x test\n if np.array_equal(test.iid,iid):\n return SnpKernel(test,standardizer=self.standardizer,block_size=self.block_size)\n\n #case 3: Just re-reordering the iids\n if len(row_index_or_none) == self.row_count and (col_index_or_none is None or len(col_index_or_none) == self.col_count):\n result = _SnpWholeTest(train=self.train,test=test,standardizer=self.standardizer,block_size=self.block_size,iid0=iid)\n return result\n\n \n raise Exception(\"When reading from a _SnpWholeTest, can only ask to 
reorder iids or to access from train x test or test x test\")\n\n\n #!!! does it make sense to read from disk in to parts?\n def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):\n result = self[row_index_or_none,col_index_or_none]._read(row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok)\n return result\n\n def __repr__(self):\n s = \"_SnpWholeTest(train={0},test={1},standardizer={2}\".format(self.train,self.test,self.standardizer)\n if self.block_size is not None:\n s += \",block_size={0}\".format(self.block_size)\n s += \")\"\n return s\n\n def copyinputs(self, copier):\n #Doesn't need run_once\n copier.input(self.train)\n copier.input(self.test)\n copier.input(self.standardizer)\n\nclass _SnpTrainTest(KernelReader):\n def __init__(self,train,test,standardizer,block_size):\n self.train = train\n self.test = test\n self.standardizer = standardizer\n assert standardizer.is_constant, \"Expect standardizer to be constant\"\n self.block_size = block_size\n if np.array_equal(train.iid,test.iid):\n self._col = train.iid\n else:\n self._col = test.iid\n\n @property\n def row(self):\n return self.train.iid\n\n @property\n def col(self):\n return self._col\n\n def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):\n assert self.train.sid_count == self.test.sid_count, \"real assert\"\n #case 1: asking for all of train x test\n if (row_index_or_none is None or np.array_equal(row_index_or_none,np.arange(self.row_count))\n and col_index_or_none is None or np.array_equal(col_index_or_none,np.arange(self.col_count))):\n\n #Do all-at-once (not in blocks) if 1. No block size is given or 2. The #ofSNPs < Min(block_size,iid_count) #similar code elsewhere\n if self.block_size is None or (self.train.sid_count <= self.block_size or self.train.sid_count <= self.train.iid_count+self.test.iid_count):\n train_snps = self.train.read(dtype=dtype).standardize(self.standardizer)\n test_snps = self.test.read(dtype=dtype).standardize(self.standardizer)\n if order == 'F': #numpy's 'dot' always returns 'C' order\n k_val = test_snps.val.dot(train_snps.val.T).T\n else:\n k_val = train_snps.val.dot(test_snps.val.T)\n return k_val\n else: #Do in blocks\n #Set the default order to 'C' because with kernels any order is fine and the Python .dot method likes 'C' best.\n if order=='A':\n order = 'C'\n k_val = np.zeros([self.train.iid_count,self.test.iid_count],dtype=dtype,order=order)\n ct = 0\n ts = time.time()\n\n for start in range(0, self.train.sid_count, self.block_size):\n ct += self.block_size\n train_snps = self.train[:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)\n test_snps = self.test [:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)\n if order == 'F': #numpy's 'dot' always returns 'C' order\n k_val += test_snps.val.dot(train_snps.val.T).T\n else:\n k_val += train_snps.val.dot(test_snps.val.T)\n if ct % self.block_size==0:\n diff = time.time()-ts\n if diff > 1: logging.info(\"read %s SNPs in %.2f seconds\" % (ct, diff))\n return k_val\n else:\n raise Exception(\"_SnpTrainTest currently only has code for reading all of train x test\")\n\n\n def __repr__(self):\n s = \"_SnpTrainTest(train={0},test={1},standardizer={2}\".format(self.train,self.test,self.standardizer)\n if self.block_size is not None:\n s += \",block_size={0}\".format(self.block_size)\n s += \")\"\n return s\n\n def copyinputs(self, copier):\n #Doesn't need run_once\n 
copier.input(self.train)\n copier.input(self.test)\n copier.input(self.standardizer)\n\ndef _snps_fixup(snp_input, iid_if_none=None,count_A1=None):\n from pysnptools.snpreader import _snps_fixup as pst_snps_fixup\n return pst_snps_fixup(snp_input,iid_if_none,count_A1)\n\ndef _pheno_fixup(pheno_input, iid_if_none=None, missing ='NaN',count_A1=None):\n\n try:\n ret = Pheno(pheno_input, iid_if_none, missing=missing)\n ret.iid #doing this just to force file load\n return ret\n except:\n return _snps_fixup(pheno_input, iid_if_none=iid_if_none,count_A1=count_A1)\n\ndef _kernel_fixup(input, iid_if_none, standardizer, test=None, test_iid_if_none=None, block_size=None, train_snps=None, count_A1=None):\n if test is not None and input is None:\n input = test\n test = None\n\n if isinstance(input, str) and input.endswith(\".npz\"):\n return KernelNpz(input)\n\n if isinstance(input, str):\n input = Bed(input, count_A1=count_A1) #Note that we don't return here. Processing continues\n if isinstance(test, str):\n test = Bed(test, count_A1=count_A1) #Note that we don't return here. Processing continues\n\n if isinstance(input,SnpReader):\n if test is not None:\n return _SnpWholeTest(train=train_snps,test=test,standardizer=standardizer,block_size=block_size)\n else:\n return SnpKernel(input,standardizer=standardizer, block_size=block_size)\n \n \n if input is None:\n return KernelIdentity(iid=iid_if_none,test=test_iid_if_none)\n\n return input\n\n\nclass FastLMM(object):\n '''\n A predictor, somewhat in the style of scikit-learn, for learning and predicting with linear mixed models.\n\n **Constructor:**\n :Parameters: * **GB_goal** (int) -- gigabytes of memory the run should use, optional. If not given, will read the test_snps in blocks the same size as the kernel, which is memory efficient with little overhead on computation time.\n * **force_full_rank** (bool) -- Even if kernels are defined with fewer SNPs than IIDs, create an explicit iid_count x iid_count kernel. Cannot be True if force_low_rank is True.\n * **force_low_rank** (bool) -- Even if kernels are defined with fewer IIDs than SNPs, create a low-rank iid_count x sid_count kernel. Cannot be True if force_full_rank is True.\n * **snp_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to SNP data. Choices include :class:`Standardizer.Unit` (Default. Makes values for each SNP have mean zero and standard deviation 1.0, then fills missing with zero) and :class:`Standardizer.Identity` (Do nothing)\n * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing)\n * **kernel_standardizer** (:class:`KernelStandardizer`) -- The PySnpTools kernel standardizer to be apply to the kernels. Some choices include :class:`KernelStandardizer.DiagKToN` (Default. 
Make the diagonal sum to iid_count) and :class:`KernelStandardizer.Identity` (Do nothing)\n\n :Example:\n\n >>> from __future__ import print_function #Python 2 & 3 compatibility\n >>> import numpy as np\n >>> import logging\n >>> from pysnptools.snpreader import Bed, Pheno\n >>> from fastlmm.inference import FastLMM\n >>> logging.basicConfig(level=logging.INFO)\n >>> snpreader = Bed('../feature_selection/examples/toydata.bed',count_A1=False)\n >>> cov_fn = \"../feature_selection/examples/toydata.cov\"\n >>> pheno_fn = \"../feature_selection/examples/toydata.phe\"\n >>> train_idx = np.r_[10:snpreader.iid_count] # iids 10 and on\n >>> test_idx = np.r_[0:10] # the first 10 iids\n >>> fastlmm = FastLMM(GB_goal=2)\n >>> #We give it phenotype and covariate information for extra examples, but it reorders and intersects the examples, so only training examples are used. \n >>> _ = fastlmm.fit(K0_train=snpreader[train_idx,:],X=cov_fn,y=pheno_fn) \n >>> mean, covariance = fastlmm.predict(K0_whole_test=snpreader[test_idx,:],X=cov_fn,count_A1=False)\n >>> print(list(mean.iid[0]), round(mean.val[0,0],7), round(covariance.val[0,0],7))\n ['per0', 'per0'] 0.1791958 0.8995209\n >>> nll = fastlmm.score(K0_whole_test=snpreader[test_idx,:],X=cov_fn,y=pheno_fn,count_A1=False)\n >>> print(round(nll,7))\n 13.4623234\n\n\n '''\n\n def __init__(self, GB_goal=None, force_full_rank=False, force_low_rank=False, snp_standardizer=Unit(), covariate_standardizer=Unit(), kernel_standardizer=DiagKtoN()):\n self.GB_goal = GB_goal\n self.force_full_rank = force_full_rank\n self.force_low_rank = force_low_rank\n self.snp_standardizer = snp_standardizer\n self.covariate_standardizer = covariate_standardizer\n self.kernel_standardizer = kernel_standardizer\n self.is_fitted = False\n\n #!!!update doc to explain h2raw w.r.t h2\n def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2raw=None, mixing=None,count_A1=None):#!!!is this h2 or h2corr????\n \"\"\"\n Method for training a :class:`FastLMM` predictor. 
If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected.\n\n :param X: training covariate information, optional: \n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__\n (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param y: training phenotype:\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ \n (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param K0_train: A similarity matrix or SNPs from which to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__.\n If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.\n If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K0_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or\n `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param K1_train: A second similarity matrix or SNPs from which to construct such a second similarity matrix. (Also, see 'mixing').\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.\n If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or\n `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param h2raw: A parameter to LMM learning that tells how much weight to give the K's vs. the identity matrix, optional \n If not given will search for best value.\n If mixing is unspecified, then h2 must also be unspecified.\n :type h2raw: number\n\n :param mixing: Weight between 0.0 (inclusive, default) and 1.0 (inclusive) given to K1_train relative to K0_train.\n If you give no mixing number and a K1_train is given, the best weight will be learned.\n :type mixing: number\n\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\n :type count_A1: bool\n\n :rtype: self, the fitted FastLMM predictor\n \"\"\"\n self.is_fitted = True\n # should this have a cache file like 'single_snp'?\n #!!!later what happens if missing values in pheno_train?\n #!!!later add code so that X, y, etc can be array-like objects without iid information. 
In that case, make up iid info\n\n assert y is not None, \"y must be given\"\n\n y = _pheno_fixup(y,count_A1=count_A1)\n assert y.sid_count == 1, \"Expect y to be just one variable\"\n X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)\n\n K0_train = _kernel_fixup(K0_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)\n K1_train = _kernel_fixup(K1_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)\n\n K0_train, K1_train, X, y = intersect_apply([K0_train, K1_train, X, y],intersect_before_standardize=True) #!!! test this on both K's as None\n from fastlmm.association.single_snp import _set_block_size\n K0_train, K1_train, block_size = _set_block_size(K0_train, K1_train, mixing, self.GB_goal, self.force_full_rank, self.force_low_rank)\n\n X = X.read()\n # If possible, unit standardize train and test together. If that is not possible, unit standardize only train and later apply\n # the same linear transformation to test. Unit standardization is necessary for FastLMM to work correctly.\n #!!!later is the calculation of the training data's stats done twice???\n X, covar_unit_trained = X.standardize(self.covariate_standardizer,block_size=block_size,return_trained=True) #This also fills missing with the mean\n\n # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset\n X = SnpData(iid=X.iid,\n sid=self._new_snp_name(X),\n val=np.c_[X.val,np.ones((X.iid_count,1))],\n name =\"covariate_train w/ 1's\")\n\n y0 = y.read().val #!!!later would view_ok=True,order='A' be ok because this code already did a fresh read to look for any missing values \n\n from fastlmm.association.single_snp import _Mixer #!!!move _combine_the_best_way to another file (e.g. this one)\n K_train, h2raw, mixer = _Mixer.combine_the_best_way(K0_train,K1_train,X.val,y0,mixing,h2raw,force_full_rank=self.force_full_rank,force_low_rank=self.force_low_rank,kernel_standardizer=self.kernel_standardizer,block_size=block_size)\n\n # do final prediction using lmm.py\n lmm = LMM()\n\n #Special case: The K kernel is defined implicitly with SNP data\n if mixer.do_g:\n assert isinstance(K_train.standardizer,StandardizerIdentity), \"Expect Identity standardizer\"\n G_train = K_train.snpreader\n lmm.setG(G0=K_train.snpreader.val)\n else:\n lmm.setK(K0=K_train.val)\n\n lmm.setX(X.val)\n lmm.sety(y0[:,0])\n\n # Find the best h2 and also on covariates (not given from new model)\n if h2raw is None:\n res = lmm.findH2() #!!!why is REML true in the return???\n else:\n res = lmm.nLLeval(h2=h2raw)\n\n\n #We compute sigma2 instead of using res['sigma2'] because res['sigma2'] is only the pure noise.\n full_sigma2 = float(sum((np.dot(X.val,res['beta']).reshape(-1,1)-y0)**2))/y.iid_count #!!! this is non REML. 
Is that right?\n\n ###### all references to 'fastlmm_model' should be here so that we don't forget any\n self.block_size = block_size\n self.beta = res['beta']\n self.h2raw = res['h2']\n self.sigma2 = full_sigma2\n self.U = lmm.U\n self.S = lmm.S\n self.K = lmm.K\n self.G = lmm.G\n self.y = lmm.y\n self.Uy = lmm.Uy\n self.X = lmm.X\n self.UX = lmm.UX\n self.mixer = mixer\n self.covar_unit_trained = covar_unit_trained\n self.K_train_iid = K_train.iid\n self.covar_sid = X.sid\n self.pheno_sid = y.sid\n self.G0_train = K0_train.snpreader if isinstance(K0_train,SnpKernel) else None #!!!later expensive?\n self.G1_train = K1_train.snpreader if isinstance(K1_train,SnpKernel) else None #!!!later expensive?\n return self\n\n @staticmethod\n def _new_snp_name(snpreader):\n new_snp = \"always1\"\n while True:\n if not new_snp in snpreader.sid:\n return np.r_[snpreader.sid,[new_snp]]\n new_snp += \"_\"\n \n\n def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, return_per_iid=False, count_A1=None):\n \"\"\"\n Method for calculating the negative log likelihood of testing examples.\n If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.\n\n :param X: testing covariate information, optional: \n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param y: testing phenotype:\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. 
If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.\n :type iid_if_none: an ndarray of two strings\n\n :param return_mse_too: If true, will also return the mean squared error.\n :type return_mse_too: bool\n\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\n :type count_A1: bool\n\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\n :type count_A1: bool\n\n :rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error.\n \"\"\"\n mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)\n y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)\n mean, covar, y = intersect_apply([mean0, covar0, y])\n mean = mean.read(order='A',view_ok=True).val\n covar = covar.read(order='A',view_ok=True).val\n y_actual = y.read().val\n if not return_per_iid:\n var = multivariate_normal(mean=mean.reshape(-1), cov=covar)\n nll = -np.log(var.pdf(y_actual.reshape(-1)))\n if not return_mse_too:\n return nll\n else:\n mse = ((y_actual-mean)**2).sum()\n return nll, mse\n else:\n if not return_mse_too:\n result = SnpData(iid=y.iid,sid=['nLL'],val=np.empty((y.iid_count,1)),name=\"nLL\")\n for iid_index in range(y.iid_count):\n var = multivariate_normal(mean=mean[iid_index], cov=covar[iid_index,iid_index])\n nll = -np.log(var.pdf(y_actual[iid_index]))\n result.val[iid_index,0] = nll\n return result\n else:\n raise Exception(\"need code for mse_too\") \n\n\n def _extract_fixup(kernel):\n assert kernel.iid0_count >= kernel.iid1_count, \"Expect iid0 to be at least as long as iid1\"\n\n\n def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None, count_A1=None):\n \"\"\"\n Method for predicting from a fitted :class:`FastLMM` predictor.\n If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.\n\n :param X: testing covariate information, optional: \n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. 
If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.\n :type iid_if_none: an ndarray of two strings\n\n :rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance\n \"\"\"\n\n assert self.is_fitted, \"Can only predict after predictor has been fitted\"\n #assert K0_whole_test is not None, \"K0_whole_test must be given\"\n #!!!later is it too wasteful to keep both G0_train, G1_train, and lmm.G when storing to disk?\n #!!!later all _kernel_fixup's should use block_size input\n\n K0_whole_test_b = _kernel_fixup(K0_whole_test, train_snps=self.G0_train, iid_if_none=iid_if_none, standardizer=self.mixer.snp_trained0, test=K0_whole_test, test_iid_if_none=None, block_size=self.block_size,count_A1=count_A1)\n K1_whole_test = _kernel_fixup(K1_whole_test, train_snps=self.G1_train, iid_if_none=K0_whole_test_b.iid0, standardizer=self.mixer.snp_trained1, test=K1_whole_test, test_iid_if_none=K0_whole_test_b.iid1, block_size=self.block_size,count_A1=count_A1)\n X = _pheno_fixup(X,iid_if_none=K0_whole_test_b.iid1,count_A1=count_A1)\n K0_whole_test_c, K1_whole_test, X = intersect_apply([K0_whole_test_b, K1_whole_test, X],intersect_before_standardize=True,is_test=True)\n X = X.read().standardize(self.covar_unit_trained)\n # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset\n X = SnpData(iid=X.iid,\n sid=self._new_snp_name(X),\n val=np.c_[X.read().val,np.ones((X.iid_count,1))])\n assert np.array_equal(X.sid,self.covar_sid), \"Expect covar sids to be the same in train and test.\"\n\n train_idx0 = K0_whole_test_c.iid0_to_index(self.K_train_iid)\n K0_train_test = K0_whole_test_c[train_idx0,:]\n train_idx1 = K1_whole_test.iid0_to_index(self.K_train_iid)\n K1_train_test = K1_whole_test[train_idx1,:]\n test_idx0 = K0_whole_test_c.iid0_to_index(K0_whole_test_c.iid1)\n K0_test_test = K0_whole_test_c[test_idx0,:]\n if K0_test_test.iid0 is not K0_test_test.iid1:\n raise Exception(\"real assert\")\n test_idx1 = K1_whole_test.iid0_to_index(K0_whole_test_c.iid1)\n K1_test_test = K1_whole_test[test_idx1,:]\n\n if self.mixer.do_g:\n ###################################################\n # low rank from Rasmussen eq 2.9 + noise term added to covar\n ###################################################\n Gstar = self.mixer.g_mix(K0_train_test,K1_train_test)\n varg = self.h2raw * self.sigma2\n vare = 
(1.-self.h2raw) * self.sigma2\n Ainv = LA.inv((1./vare) * np.dot(self.G.T,self.G) + (1./varg)*np.eye(self.G.shape[1]))\n testAinv = np.dot(Gstar.test.val, Ainv)\n pheno_predicted = np.dot(X.val,self.beta) + (1./vare) * np.dot(np.dot(testAinv,self.G.T),self.y-np.dot(self.X,self.beta))\n pheno_predicted = pheno_predicted.reshape(-1,1)\n covar = np.dot(testAinv,Gstar.test.val.T) + vare * np.eye(Gstar.test.val.shape[0])\n\n else:\n lmm = LMM()\n lmm.U = self.U\n lmm.S = self.S\n lmm.G = self.G\n lmm.y = self.y\n lmm.Uy = self.Uy\n lmm.X = self.X\n lmm.UX = self.UX\n\n Kstar = self.mixer.k_mix(K0_train_test,K1_train_test) #!!!later do we need/want reads here? how about view_OK?\n lmm.setTestData(Xstar=X.val, K0star=Kstar.val.T)\n\n Kstar_star = self.mixer.k_mix(K0_test_test,K1_test_test) #!!!later do we need/want reads here?how about view_OK?\n pheno_predicted, covar = lmm.predict_mean_and_variance(beta=self.beta, h2=self.h2raw,sigma2=self.sigma2, Kstar_star=Kstar_star.val)\n\n #pheno_predicted = lmm.predictMean(beta=self.beta, h2=self.h2,scale=self.sigma2).reshape(-1,1)\n ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name=\"lmm Prediction\")\n\n from pysnptools.kernelreader import KernelData\n ret1 = KernelData(iid=K0_test_test.iid,val=covar)\n return ret0, ret1\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n import doctest\n doctest.testmod()\n"
] | [
[
"numpy.dot",
"numpy.array_equal",
"numpy.arange",
"numpy.eye",
"numpy.ones",
"scipy.stats.multivariate_normal",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
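The _SnpTrainTest._read method in the fastlmm entry above accumulates the train-by-test kernel in SNP blocks so that only block_size SNP columns need to be in memory at once. A minimal NumPy sketch of that block-accumulation pattern, with standardization omitted and all array names illustrative rather than part of fastlmm's API:

import numpy as np

def block_kernel(g_train, g_test, block_size=1000):
    """Accumulate K = g_train @ g_test.T over SNP column blocks to bound memory."""
    assert g_train.shape[1] == g_test.shape[1], "train and test must share the same SNPs"
    k_val = np.zeros((g_train.shape[0], g_test.shape[0]))
    for start in range(0, g_train.shape[1], block_size):
        stop = start + block_size
        k_val += g_train[:, start:stop].dot(g_test[:, start:stop].T)
    return k_val

# tiny check: the blocked sum equals the all-at-once product
rng = np.random.RandomState(0)
g_tr, g_te = rng.randn(5, 2500), rng.randn(3, 2500)
assert np.allclose(block_kernel(g_tr, g_te), g_tr.dot(g_te.T))

The blocked sum equals the single dot product because matrix multiplication distributes over column blocks, which is why the original code can trade memory for a loop without changing the result.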
jnettels/reegis | [
"fe50c124aa041b9faa494611cba6b833675115e4",
"fe50c124aa041b9faa494611cba6b833675115e4"
] | [
"reegis/mobility.py",
"reegis/entsoe.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Calculate the mobility demand.\n\nSPDX-FileCopyrightText: 2016-2019 Uwe Krien <[email protected]>\n\nSPDX-License-Identifier: MIT\n\"\"\"\n__copyright__ = \"Uwe Krien <[email protected]>\"\n__license__ = \"MIT\"\n\n\nimport os\nimport pandas as pd\nfrom collections import namedtuple\n\nfrom reegis import geometries, config as cfg, tools, energy_balance\n\n\ndef format_kba_table(filename, sheet):\n \"\"\"\n Clean the layout of the table.\n\n The tables are made for human readability and not for automatic processing.\n Lines with subtotals and format-strings of the column names are removed.\n A valid MultiIndex is created to make it easier to filter the table by the\n index.\n\n Parameters\n ----------\n filename : str\n Path and name of the excel file.\n sheet : str\n Name of the sheet of the excel table.\n\n Returns\n -------\n pandas.DataFrame\n\n \"\"\"\n\n # Read table\n df = pd.read_excel(filename, sheet, skiprows=7, header=[0, 1])\n\n # Drop empty column\n df = df.drop([(\"Unnamed: 0_level_0\", \"Unnamed: 0_level_1\")], axis=1)\n\n idx1 = df.columns[0]\n idx2 = df.columns[1]\n idx3 = df.columns[2]\n\n # Remove lines with subtotal\n df.loc[(df[idx1] == \"SONSTIGE\"), idx2] = \"SONSTIGE\"\n df.loc[(df[idx1] == \"SONSTIGE\"), idx3] = \"00000 SONSTIGE\"\n df = df.drop(df.loc[df[idx3].isnull()].index)\n df[df.columns[[0, 1, 2]]] = df[df.columns[[0, 1, 2]]].fillna(\n method=\"ffill\"\n )\n\n # Add column with name of subregion and remove name from index\n df[df.columns[2]] = df[df.columns[2]].str[:5]\n\n # set MultiIndex\n df.set_index(list(df.columns[[0, 1, 2]]), inplace=True)\n df.index = df.index.set_names([\"state\", \"region\", \"subregion\"])\n\n # Remove format-strings from column names\n level1 = (\n df.columns.get_level_values(1)\n .str.replace(\"\\n\", \" \")\n .str.replace(\"- \", \"\")\n .str.replace(\":\", \"\")\n )\n level0 = (\n df.columns.get_level_values(0)\n .str.replace(\"\\n\", \" \")\n .str.replace(\"- \", \"\")\n .str.replace(\":\", \"\")\n )\n df.columns = pd.MultiIndex.from_arrays([level0, level1])\n\n return df\n\n\ndef get_kba_table():\n \"\"\"\n Get the \"kfz\" table for all vehicles and the \"pkw\" table for more\n statistics about passenger cars.\n\n Returns\n -------\n namedtuple\n\n Examples\n --------\n >>> table = get_kba_table()\n >>> kfz = table.kfz\n >>> print(type(kfz))\n <class 'pandas.core.frame.DataFrame'>\n \"\"\"\n kba_table = namedtuple(\"kba_table\", \"kfz pkw\")\n kba_filename = os.path.join(\n cfg.get(\"paths\", \"general\"), cfg.get(\"mobility\", \"table_kba\")\n )\n\n # Download table if it does not exit\n if not os.path.isfile(kba_filename):\n tools.download_file(kba_filename, cfg.get(\"mobility\", \"url_kba\"))\n\n return kba_table(\n kfz=format_kba_table(kba_filename, \"Kfz_u_Kfz_Anh\"),\n pkw=format_kba_table(kba_filename, \"Pkw\"),\n )\n\n\ndef get_mileage_table():\n \"\"\"\n Download mileage table from the KBA (Kraftfahrtbundesamt) and store it\n locally.\n \"\"\"\n url = (\n \"https://www.kba.de/SharedDocs/Publikationen/DE/Statistik/\"\n \"Kraftverkehr/VK/2018/vk_2018_xlsx.xlsx?__blob=publicationFile&v=22\"\n )\n\n mileage_filename = os.path.join(\n cfg.get(\"paths\", \"general\"), \"mileage_table_kba.xlsx\"\n )\n\n # Download table if it does not exit\n if not os.path.isfile(mileage_filename):\n tools.download_file(mileage_filename, url)\n return mileage_filename\n\n\ndef get_sheet_from_mileage_table(sheet):\n \"\"\"Load given sheet from the mileage file.\"\"\"\n fn = get_mileage_table()\n df = 
pd.read_excel(\n fn, sheet, skiprows=7, index_col=[0, 1, 2], skipfooter=9\n )\n df.index = df.index.droplevel(0).set_names([\"\", \"\"])\n\n return df.drop(\n df.loc[pd.IndexSlice[slice(None), \"Insgesamt\"], slice(None)].index\n )\n\n\ndef get_mileage_by_type_and_fuel(year=2018):\n \"\"\"\n Get mileage by type and fuel from mileage table and other sources.\n\n See mobility.ini file for more information.\n \"\"\"\n # get km per year and type\n total = (\n get_sheet_from_mileage_table(\"VK 1.1\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n passenger = (\n get_sheet_from_mileage_table(\"VK 1.7\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n small_trucks = (\n get_sheet_from_mileage_table(\"VK 1.17\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n medium_trucks = (\n get_sheet_from_mileage_table(\"VK 1.20\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n big_trucks_diesel = (\n get_sheet_from_mileage_table(\"VK 1.23\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n .sum()\n )\n df = pd.DataFrame(index=total.index, columns=[\"diesel\", \"petrol\", \"other\"])\n\n vt_dict = cfg.get_dict(\"vehicle_types_dictionary\")\n df.rename(vt_dict, axis=0, inplace=True)\n total.rename(vt_dict, axis=0, inplace=True)\n\n dc = cfg.get_dict(\"fuel_dictionary\")\n\n # add km by fuel for passenger cars\n df.loc[\"passenger car\"] = passenger.rename(dc, axis=0)\n\n # add km by fuel for small trucks (<= 3.5 tons)\n df.loc[\"small truck (max. 3.5 tons)\"] = small_trucks.rename(dc, axis=0)\n\n # add km by fuel for medium trucks (3.5 < weight <= 7.5 tons)\n df.loc[\"medium truck (3.5 to 7.5 tons)\"] = medium_trucks.rename(dc, axis=0)\n\n # add km by fuel for big trucks (> 7.5 tons)\n # assuming that non-diesel engines are 50% petrol and 50% other\n n = \"big truck (over 7.5 tons)\"\n df.loc[n, \"diesel\"] = big_trucks_diesel\n df.loc[n, [\"petrol\", \"other\"]] = (total[n] - big_trucks_diesel) / 2\n\n fuel_share = pd.DataFrame(\n cfg.get_dict_list(\"fuel share\"), index=[\"diesel\", \"petrol\", \"other\"]\n ).astype(float)\n\n for col in fuel_share.columns:\n df.loc[col] = fuel_share[col].mul(total[col])\n\n return df\n\n\ndef create_grouped_table_kfz():\n \"\"\"Group the kfz-table by main groups.\"\"\"\n df = get_kba_table().kfz\n df.index = df.index.droplevel([0, 1])\n df.columns = [\" \".join(col).strip() for col in df.columns]\n kfz_dict = cfg.get_dict(\"KFZ\")\n for col in df.columns:\n df[col] = pd.to_numeric(df[col].replace(\"-\", \"\"))\n df = df.groupby(by=kfz_dict, axis=1).sum()\n df[\"traction engine, general\"] = (\n df[\"traction engine\"] - df[\"traction engine, agriculture and forestry\"]\n )\n df.drop(\"traction engine\", axis=1, inplace=True)\n df.drop(\"ignore\", axis=1, inplace=True)\n return df\n\n\ndef create_grouped_table_pkw():\n \"\"\"\n Extract fuel groups of passenger cars\n\n Examples\n --------\n >>> pkw = create_grouped_table_pkw()\n >>> pkw['petrol'].sum()\n 31031021.0\n >>> pkw['diesel'].sum()\n 15153364.0\n \"\"\"\n df = get_kba_table().pkw\n df.index = df.index.droplevel([0, 1])\n df = df[\"Nach Kraftstoffarten\"]\n df = df.groupby(by=cfg.get_dict(\"PKW\"), axis=1).sum()\n df.drop(\"ignore\", axis=1, inplace=True)\n return df\n\n\ndef get_admin_by_region(region):\n \"\"\"\n Allocate admin keys to the given regions.\n\n Parameters\n ----------\n region : geopandas.GeoDataFrame\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n fn = 
os.path.join(cfg.get(\"paths\", \"geometry\"), \"vg1000_geodata.geojson\")\n vg = geometries.load(fullname=fn)\n vg.set_index(\"RS\", inplace=True)\n\n reg2vg = geometries.spatial_join_with_buffer(\n vg.representative_point(), region, \"fs\", limit=0\n )\n\n return pd.DataFrame(reg2vg.drop(\"geometry\", axis=1))\n\n\ndef get_grouped_kfz_by_region(region):\n \"\"\"\n Get the main vehicle groups by region.\n\n Parameters\n ----------\n region : geopandas.GeoDataFrame\n\n Returns\n -------\n pd.DataFrame\n\n Examples\n --------\n >>> fs = geometries.get_federal_states_polygon()\n >>> total = get_grouped_kfz_by_region(fs).sum()\n >>> int(total[\"passenger car\"])\n 47095784\n >>> int(total[\"lorry, > 7500\"])\n 295826\n \"\"\"\n df = create_grouped_table_kfz()\n reg2vg = get_admin_by_region(region)\n df2reg = df.merge(reg2vg, left_index=True, right_index=True, how=\"left\")\n df2reg[\"fs\"] = df2reg[\"fs\"].fillna(\"unknown\")\n return df2reg.groupby(\"fs\").sum()\n\n\ndef get_traffic_fuel_energy(year):\n \"\"\"\n\n Parameters\n ----------\n year : int\n\n Returns\n -------\n\n Examples\n --------\n >>> fuel_energy = get_traffic_fuel_energy(2017)\n >>> int(fuel_energy[\"Ottokraftstoffe\"])\n 719580\n >>> fuel_share = fuel_energy.div(fuel_energy.sum()) * 100\n >>> round(fuel_share[\"Dieselkraftstoffe\"], 1)\n 62.7\n \"\"\"\n fuel_energy = energy_balance.get_de_balance(year).loc[\"Straßenverkehr\"]\n fuel_energy = fuel_energy[fuel_energy != 0]\n fuel_energy.drop(\n [\"primär (gesamt)\", \"sekundär (gesamt)\", \"Row\", \"gesamt\"], inplace=True\n )\n return fuel_energy\n\n\ndef calculate_mobility_energy_use(year):\n \"\"\"\n\n Parameters\n ----------\n year\n\n Returns\n -------\n\n Examples\n --------\n >>> mobility_balance = get_traffic_fuel_energy(2017)\n >>> energy_use = calculate_mobility_energy_use(2017)\n >>> p = \"Petrol usage [TJ]\"\n >>> d = \"Diesel usage [TJ]\"\n >>> o = \"Overall fuel usage [TJ]\"\n >>> print(p, \"(energy balance):\", int(mobility_balance[\"Ottokraftstoffe\"]))\n Petrol usage [TJ] (energy balance): 719580\n >>> print(p, \"(calculated):\", int(energy_use[\"petrol\"].sum()))\n Petrol usage [TJ] (calculated): 803603\n >>> print(d, \"(energy balance):\",\n ... int(mobility_balance[\"Dieselkraftstoffe\"]))\n Diesel usage [TJ] (energy balance): 1425424\n >>> print(d, \"(calculated):\", int(energy_use[\"diesel\"].sum()))\n Diesel usage [TJ] (calculated): 1636199\n >>> print(o, \"(energy balance):\", int(mobility_balance.sum()))\n Overall fuel usage [TJ] (energy balance): 2275143\n >>> print(o, \"(calculated):\", int(energy_use.sum().sum()))\n Overall fuel usage [TJ] (calculated): 2439803\n \"\"\"\n # fetch table of mileage by fuel and vehicle type\n mileage = get_mileage_by_type_and_fuel(year)\n\n # fetch table of specific demand by fuel and vehicle type (from 2011)\n spec_demand = (\n pd.DataFrame(\n cfg.get_dict_list(\"fuel consumption\"),\n index=[\"diesel\", \"petrol\", \"other\"],\n )\n .astype(float)\n .transpose()\n )\n\n # fetch the energy content of the different fuel types\n energy_content = pd.Series(cfg.get_dict(\"energy_per_liter\"))[\n [\"diesel\", \"petrol\", \"other\"]\n ]\n\n return mileage.mul(spec_demand).mul(energy_content) / 10 ** 6\n\n\nif __name__ == \"__main__\":\n pass\n",
"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\n\"\"\" Download and prepare entsoe load profile from opsd data portal.\n\nSPDX-FileCopyrightText: 2016-2019 Uwe Krien <[email protected]>\n\nSPDX-License-Identifier: MIT\n\"\"\"\n__copyright__ = \"Uwe Krien <[email protected]>\"\n__license__ = \"MIT\"\n\n\n# Python libraries\nimport os\nimport logging\nimport datetime\nfrom collections import namedtuple\n\n# internal modules\nfrom reegis import config as cfg\n\n# External packages\nimport pandas as pd\nimport requests\nimport pytz\nimport dateutil\n\n\ndef read_original_timeseries_file(orig_csv_file=None, overwrite=False):\n \"\"\"Read timeseries file if it exists. Otherwise download it from opsd.\n \"\"\"\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n\n if orig_csv_file is None:\n orig_csv_file = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"original_file\")\n ).format(version=version)\n readme = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"readme_file\")\n ).format(version=version)\n json = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"json_file\")\n ).format(version=version)\n\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n\n if not os.path.isfile(orig_csv_file) or overwrite:\n req = requests.get(\n cfg.get(\"entsoe\", \"timeseries_data\").format(version=version)\n )\n\n if not overwrite:\n logging.warning(\"File not found. Try to download it from server.\")\n else:\n logging.warning(\n \"Will download file from server and overwrite\" \"existing ones\"\n )\n logging.warning(\"Check URL if download does not work.\")\n with open(orig_csv_file, \"wb\") as fout:\n fout.write(req.content)\n logging.warning(\n \"Downloaded from {0} and copied to '{1}'.\".format(\n cfg.get(\"entsoe\", \"timeseries_data\").format(version=version),\n orig_csv_file,\n )\n )\n req = requests.get(\n cfg.get(\"entsoe\", \"timeseries_readme\").format(version=version)\n )\n with open(readme, \"wb\") as fout:\n fout.write(req.content)\n req = requests.get(\n cfg.get(\"entsoe\", \"timeseries_json\").format(version=version)\n )\n with open(json, \"wb\") as fout:\n fout.write(req.content)\n logging.debug(\"Reading file: {0}\".format(orig_csv_file))\n orig = pd.read_csv(orig_csv_file, index_col=[0], parse_dates=True,\n date_parser=lambda col: pd.to_datetime(col, utc=True))\n orig = orig.tz_convert(\"Europe/Berlin\")\n return orig\n\n\ndef prepare_de_file(filename=None, overwrite=False):\n \"\"\"Convert demand file. 
CET index and Germany's load only.\"\"\"\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n if filename is None:\n filename = os.path.join(\n cfg.get(\"paths\", \"entsoe\"),\n cfg.get(\"entsoe\", \"de_file\").format(version=version),\n )\n if not os.path.isfile(filename) or overwrite:\n ts = read_original_timeseries_file(overwrite=overwrite)\n for col in ts.columns:\n if \"DE\" not in col:\n ts.drop(col, 1, inplace=True)\n\n ts.to_csv(filename)\n\n\ndef split_timeseries_file(filename=None, overwrite=False):\n \"\"\"Split table into load and renewables.\"\"\"\n entsoe_ts = namedtuple(\"entsoe\", [\"load\", \"renewables\"])\n logging.info(\"Splitting time series.\")\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n path_pattern = os.path.join(cfg.get(\"paths\", \"entsoe\"), \"{0}\")\n if filename is None:\n filename = path_pattern.format(\n cfg.get(\"entsoe\", \"de_file\").format(version=version)\n )\n\n if not os.path.isfile(filename) or overwrite:\n prepare_de_file(filename, overwrite)\n\n de_ts = pd.read_csv(\n filename,\n index_col=\"utc_timestamp\",\n parse_dates=True,\n date_parser=lambda col: pd.to_datetime(col, utc=True),\n )\n de_ts.index = de_ts.index.tz_convert(\"Europe/Berlin\")\n berlin = pytz.timezone(\"Europe/Berlin\")\n end_date = berlin.localize(datetime.datetime(2015, 1, 1, 0, 0, 0))\n\n de_ts.loc[de_ts.index < end_date, \"DE_load_\"] = de_ts.loc[\n de_ts.index < end_date, \"DE_load_actual_entsoe_power_statistics\"\n ]\n de_ts.loc[de_ts.index >= end_date, \"DE_load_\"] = de_ts.loc[\n de_ts.index >= end_date, \"DE_load_actual_entsoe_transparency\"\n ]\n\n load = pd.DataFrame(\n de_ts[pd.notnull(de_ts[\"DE_load_\"])][\"DE_load_\"], columns=[\"DE_load_\"]\n )\n\n re_columns = [\n \"DE_solar_capacity\",\n \"DE_solar_generation_actual\",\n \"DE_solar_profile\",\n \"DE_wind_capacity\",\n \"DE_wind_generation_actual\",\n \"DE_wind_profile\",\n \"DE_wind_offshore_capacity\",\n \"DE_wind_offshore_generation_actual\",\n \"DE_wind_offshore_profile\",\n \"DE_wind_onshore_capacity\",\n \"DE_wind_onshore_generation_actual\",\n \"DE_wind_onshore_profile\",\n ]\n re_subset = [\n \"DE_solar_capacity\",\n \"DE_solar_generation_actual\",\n \"DE_solar_profile\",\n \"DE_wind_capacity\",\n \"DE_wind_generation_actual\",\n \"DE_wind_profile\",\n ]\n\n renewables = de_ts.dropna(subset=re_subset, how=\"any\")[re_columns]\n\n return entsoe_ts(load=load, renewables=renewables)\n\n\ndef get_entsoe_load(year):\n \"\"\"\n\n Parameters\n ----------\n year\n\n Returns\n -------\n\n Examples\n --------\n >>> entsoe=get_entsoe_load(2015)\n >>> int(entsoe.sum())\n 477923089\n \"\"\"\n filename = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"load_file\")\n )\n if not os.path.isfile(filename):\n load = split_timeseries_file().load\n load.to_hdf(filename, \"entsoe\")\n\n # Read entsoe time series for the given year\n f = datetime.datetime(year, 1, 1, 0)\n t = datetime.datetime(year, 12, 31, 23)\n f = f.astimezone(pytz.timezone(\"Europe/Berlin\"))\n t = t.astimezone(pytz.timezone(\"Europe/Berlin\"))\n logging.info(\"Read entsoe load series from {0} to {1}\".format(f, t))\n df = pd.DataFrame(pd.read_hdf(filename, \"entsoe\"))\n return df.loc[f:t]\n\n\ndef get_entsoe_renewable_data():\n \"\"\"\n\n Returns\n -------\n\n Examples\n --------\n >>> re=get_entsoe_renewable_data()\n >>> int(re['DE_solar_generation_actual'].sum())\n 237214558\n \"\"\"\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n path_pattern = os.path.join(cfg.get(\"paths\", \"entsoe\"), \"{0}\")\n fn = 
path_pattern.format(\n cfg.get(\"entsoe\", \"renewables_file_csv\").format(version=version)\n )\n if not os.path.isfile(fn):\n renewables = split_timeseries_file().renewables\n renewables.to_csv(fn)\n re = pd.read_csv(fn, index_col=[0], parse_dates=True)\n return re\n\n\nif __name__ == \"__main__\":\n pass\n"
] | [
[
"pandas.read_excel",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame"
],
[
"pandas.notnull",
"pandas.read_hdf",
"pandas.read_csv",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
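The split_timeseries_file function in the entsoe.py entry above parses the OPSD timestamps as UTC, converts the index to Europe/Berlin, and stitches DE_load_ together from the power-statistics series before 2015-01-01 and the transparency series from then on. A minimal pandas sketch of that timezone and switch-over handling, using a toy frame and illustrative column names in place of the downloaded CSV:

import datetime
import pandas as pd
import pytz

# toy frame standing in for the OPSD download (the real code reads a CSV with utc=True)
idx = pd.date_range("2014-12-31 22:00", periods=6, freq="H", tz="UTC")
de_ts = pd.DataFrame({"stat": range(6), "transp": range(10, 16)}, index=idx)
de_ts.index = de_ts.index.tz_convert("Europe/Berlin")

# cut-over instant localized in the same timezone as the converted index
end_date = pytz.timezone("Europe/Berlin").localize(datetime.datetime(2015, 1, 1, 0, 0, 0))
de_ts.loc[de_ts.index < end_date, "DE_load_"] = de_ts["stat"]
de_ts.loc[de_ts.index >= end_date, "DE_load_"] = de_ts["transp"]
print(de_ts["DE_load_"])

Localizing the cut-over instant in Europe/Berlin keeps the comparison consistent with the converted index, mirroring the pytz handling in the original function.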
MoriZSJ/GVB | [
"9b954660ef377ead81c8e631c4a0f4a17075b2ea"
] | [
"CDAN-GD/pre_process.py"
] | [
"import numpy as np\nfrom torchvision import transforms\nimport os\nfrom PIL import Image, ImageOps\nimport numbers\nimport torch\n\n\nclass ResizeImage():\n def __init__(self, size):\n if isinstance(size, int):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n th, tw = self.size\n return img.resize((th, tw))\n\n\nclass RandomSizedCrop(object):\n \"\"\"Crop the given PIL.Image to random size and aspect ratio.\n A crop of random size of (0.08 to 1.0) of the original size and a random\n aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n Args:\n size: size of the smaller edge\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, img):\n h_off = random.randint(0, img.shape[1]-self.size)\n w_off = random.randint(0, img.shape[2]-self.size)\n img = img[:, h_off:h_off+self.size, w_off:w_off+self.size]\n return img\n\n\nclass Normalize(object):\n \"\"\"Normalize an tensor image with mean and standard deviation.\n Given mean: (R, G, B),\n will normalize each channel of the torch.*Tensor, i.e.\n channel = channel - mean\n Args:\n mean (sequence): Sequence of means for R, G, B channels respecitvely.\n \"\"\"\n\n def __init__(self, mean=None, meanfile=None):\n if mean:\n self.mean = mean\n else:\n arr = np.load(meanfile)\n self.mean = torch.from_numpy(arr.astype('float32')/255.0)[[2, 1, 0], :, :]\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n # TODO: make efficient\n for t, m in zip(tensor, self.mean):\n t.sub_(m)\n return tensor\n\n\nclass PlaceCrop(object):\n \"\"\"Crops the given PIL.Image at the particular index.\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (w, h), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size, start_x, start_y):\n if isinstance(size, int):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.start_x = start_x\n self.start_y = start_y\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be cropped.\n Returns:\n PIL.Image: Cropped image.\n \"\"\"\n th, tw = self.size\n return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))\n\n\nclass ForceFlip(object):\n \"\"\"Horizontally flip the given PIL.Image randomly with a probability of 0.5.\"\"\"\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be flipped.\n Returns:\n PIL.Image: Randomly flipped image.\n \"\"\"\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given PIL.Image at the center.\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be cropped.\n Returns:\n PIL.Image: Cropped image.\n \"\"\"\n w, h = (img.shape[1], img.shape[2])\n th, tw = self.size\n w_off = int((w - tw) / 2.)\n h_off = int((h - th) / 2.)\n img = img[:, h_off:h_off+th, w_off:w_off+tw]\n return img\n\n\ndef image_train(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.RandomResizedCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_target(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_test(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n start_first = 0\n start_center = (resize_size - crop_size - 1) / 2\n start_last = resize_size - crop_size - 1\n\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_test_10crop(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n start_first = 0\n start_center = (resize_size - crop_size - 1) / 2\n start_last = resize_size - crop_size - 1\n data_transforms = [\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_first, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_last, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_last, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_first, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_center, start_center),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_first, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_last, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_last, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n 
transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_first, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_center, start_center),\n transforms.ToTensor(),\n normalize\n ])\n ]\n return data_transforms\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
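image_test_10crop in the pre_process.py entry above returns ten deterministic crop/flip pipelines for test-time augmentation; a common way to use them is to average the class probabilities over the ten views. A minimal sketch of that averaging step, assuming a hypothetical classifier model and a PIL image img (neither is defined in this repo):

import torch

def predict_10crop(model, img, transforms_10):
    """Average softmax outputs over the ten crop/flip views of one PIL image."""
    model.eval()
    with torch.no_grad():
        views = torch.stack([t(img) for t in transforms_10])  # (10, C, H, W)
        logits = model(views)                                  # (10, num_classes)
        return torch.nn.functional.softmax(logits, dim=1).mean(dim=0)

# hypothetical usage: probs = predict_10crop(net, pil_image, image_test_10crop())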
Halimaz/tensorflow-1 | [
"3437fba39d5bca77fd7627aad15ba76fb75f5731"
] | [
"tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for rnn module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.contrib import rnn as rnn_lib\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.util import nest\n\n\nclass Plus1RNNCell(rnn_lib.RNNCell):\n \"\"\"RNN Cell generating (output, new_state) = (input + 1, state + 1).\"\"\"\n\n @property\n def output_size(self):\n return 5\n\n @property\n def state_size(self):\n return 5\n\n def __call__(self, input_, state, scope=None):\n return (input_ + 1, state + 1)\n\n\nclass DummyMultiDimensionalLSTM(rnn_lib.RNNCell):\n \"\"\"LSTM Cell generating (output, new_state) = (input + 1, state + 1).\n\n The input to this cell may have an arbitrary number of dimensions that follow\n the preceding 'Time' and 'Batch' dimensions.\n \"\"\"\n\n def __init__(self, dims):\n \"\"\"Initialize the Multi-dimensional LSTM cell.\n\n Args:\n dims: tuple that contains the dimensions of the output of the cell,\n without including 'Time' or 'Batch' dimensions.\n \"\"\"\n if not isinstance(dims, tuple):\n raise TypeError(\"The dimensions passed to DummyMultiDimensionalLSTM \"\n \"should be a tuple of ints.\")\n self._dims = dims\n self._output_size = tensor_shape.TensorShape(self._dims)\n self._state_size = (tensor_shape.TensorShape(self._dims),\n tensor_shape.TensorShape(self._dims))\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def __call__(self, input_, state, scope=None):\n h, c = state\n return (input_ + 1, (h + 1, c + 1))\n\n\nclass NestedRNNCell(rnn_lib.RNNCell):\n \"\"\"RNN Cell generating (output, new_state) = (input + 1, state + 1).\n\n The input, output and state of this cell is a tuple of 
two tensors.\n \"\"\"\n\n @property\n def output_size(self):\n return (5, 5)\n\n @property\n def state_size(self):\n return (6, 6)\n\n def __call__(self, input_, state, scope=None):\n h, c = state\n x, y = input_\n return ((x + 1, y + 1), (h + 1, c + 1))\n\n\nclass TestStateSaver(object):\n\n def __init__(self, batch_size, state_size):\n self._batch_size = batch_size\n self._state_size = state_size\n self.saved_state = {}\n\n def state(self, name):\n\n if isinstance(self._state_size, dict):\n state_size = self._state_size[name]\n else:\n state_size = self._state_size\n if isinstance(state_size, int):\n state_size = (state_size,)\n elif isinstance(state_size, tuple):\n pass\n else:\n raise TypeError(\"state_size should either be an int or a tuple\")\n\n return array_ops.zeros((self._batch_size,) + state_size)\n\n def save_state(self, name, state):\n self.saved_state[name] = state\n return array_ops.identity(state)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def state_size(self):\n return self._state_size\n\n\nclass TestStateSaverWithCounters(TestStateSaver):\n \"\"\"Class wrapper around TestStateSaver.\n\n A dummy class used for testing of static_state_saving_rnn. It helps test if\n save_state and state functions got called same number of time when we\n evaluate output of rnn cell and state or either of them separately. It\n inherits from the TestStateSaver and adds the counters for calls of functions.\n \"\"\"\n\n def __init__(self, batch_size, state_size):\n super(TestStateSaverWithCounters, self).__init__(batch_size, state_size)\n self._num_state_calls = variables_lib.Variable(0)\n self._num_save_state_calls = variables_lib.Variable(0)\n\n def state(self, name):\n with ops_lib.control_dependencies(\n [state_ops.assign_add(self._num_state_calls, 1)]):\n return super(TestStateSaverWithCounters, self).state(name)\n\n def save_state(self, name, state):\n with ops_lib.control_dependencies([state_ops.assign_add(\n self._num_save_state_calls, 1)]):\n return super(TestStateSaverWithCounters, self).save_state(name, state)\n\n @property\n def num_state_calls(self):\n return self._num_state_calls\n\n @property\n def num_save_state_calls(self):\n return self._num_save_state_calls\n\n\nclass RNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testInvalidSequenceLengthShape(self):\n cell = Plus1RNNCell()\n inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]\n with self.assertRaisesRegexp(ValueError, \"must be a vector\"):\n rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)\n\n def testRNN(self):\n cell = Plus1RNNCell()\n batch_size = 2\n input_size = 5\n max_length = 8 # unrolled up to this length\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out, inp in zip(outputs, inputs):\n self.assertEqual(out.get_shape(), inp.get_shape())\n self.assertEqual(out.dtype, inp.dtype)\n\n with self.test_session(use_gpu=True) as sess:\n input_value = np.random.randn(batch_size, input_size)\n values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})\n\n # Outputs\n for v in values[:-1]:\n self.assertAllClose(v, input_value + 1.0)\n\n # Final state\n self.assertAllClose(values[-1],\n max_length * np.ones(\n (batch_size, input_size), dtype=np.float32))\n\n def testDropout(self):\n cell = Plus1RNNCell()\n 
full_dropout_cell = rnn_cell.DropoutWrapper(\n cell, input_keep_prob=1e-12, seed=0)\n (name, dep), = full_dropout_cell._checkpoint_dependencies\n self.assertIs(dep, cell)\n self.assertEqual(\"cell\", name)\n batch_size = 2\n input_size = 5\n max_length = 8\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"drop_scope\"):\n dropped_outputs, _ = rnn.static_rnn(\n full_dropout_cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out, inp in zip(outputs, inputs):\n self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())\n self.assertEqual(out.dtype, inp.dtype)\n\n with self.test_session(use_gpu=True) as sess:\n input_value = np.random.randn(batch_size, input_size)\n values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})\n full_dropout_values = sess.run(\n dropped_outputs, feed_dict={\n inputs[0]: input_value\n })\n\n for v in values[:-1]:\n self.assertAllClose(v, input_value + 1.0)\n for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros)\n self.assertAllClose(d_v, np.ones_like(input_value))\n\n def testDynamicCalculation(self):\n cell = Plus1RNNCell()\n sequence_length = array_ops.placeholder(dtypes.int64)\n batch_size = 2\n input_size = 5\n max_length = 8\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"drop_scope\"):\n dynamic_outputs, dynamic_state = rnn.static_rnn(\n cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)\n self.assertEqual(len(dynamic_outputs), len(inputs))\n\n with self.test_session(use_gpu=True) as sess:\n input_value = np.random.randn(batch_size, input_size)\n dynamic_values = sess.run(\n dynamic_outputs,\n feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n dynamic_state_value = sess.run(\n [dynamic_state],\n feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n\n # outputs are fully calculated for t = 0, 1\n for v in dynamic_values[:2]:\n self.assertAllClose(v, input_value + 1.0)\n\n # outputs at t = 2 are zero for entry 0, calculated for entry 1\n self.assertAllClose(dynamic_values[2],\n np.vstack((np.zeros((input_size)),\n 1.0 + input_value[1, :])))\n\n # outputs at t = 3+ are zero\n for v in dynamic_values[3:]:\n self.assertAllEqual(v, np.zeros_like(input_value))\n\n # the final states are:\n # entry 0: the values from the calculation at t=1\n # entry 1: the values from the calculation at t=2\n self.assertAllEqual(dynamic_state_value[0],\n np.vstack((1.0 * (1 + 1) * np.ones((input_size)),\n 1.0 * (2 + 1) * np.ones((input_size)))))\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n\n # check that all the variables names starts\n # with the proper scope.\n variables_lib.global_variables_initializer()\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n 
self.assertEqual(len(scope_vars), len(all_vars))\n\n def testScope(self):\n\n def factory(scope):\n cell = Plus1RNNCell()\n batch_size = 2\n input_size = 5\n max_length = 8 # unrolled up to this length\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n\nclass LSTMTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testDType(self):\n # Test case for GitHub issue 16228\n # Not passing dtype in constructor results in default float32\n lstm = rnn_cell.LSTMCell(10)\n input_tensor = array_ops.ones([10, 50])\n lstm.build(input_tensor.get_shape())\n self.assertEqual(lstm._bias.dtype, dtypes.float32_ref)\n\n # Explicitly pass dtype in constructor\n for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:\n lstm = rnn_cell.LSTMCell(10, dtype=dtype)\n input_tensor = array_ops.ones([10, 50])\n lstm.build(input_tensor.get_shape())\n self.assertEqual(lstm._bias.dtype, dtype._as_ref)\n\n def testNoProjNoSharding(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units, initializer=initializer, state_is_tuple=False)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n def testCellClipping(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n cell_clip=0.0,\n initializer=initializer,\n state_is_tuple=False)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n values = sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n for value in values:\n # if cell c is clipped to 0, tanh(c) = 0 => m==0\n self.assertAllEqual(value, np.zeros((batch_size, num_units)))\n\n def testNoProjNoShardingSimpleStateSaver(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n state_saver = TestStateSaver(batch_size, 2 * num_units)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=False)\n inputs = max_length * [\n 
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_state_saving_rnn(\n cell, inputs, state_saver=state_saver, state_name=\"save_lstm\")\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n (last_state_value, saved_state_value) = sess.run(\n [state, state_saver.saved_state[\"save_lstm\"]],\n feed_dict={\n inputs[0]: input_value\n })\n self.assertAllEqual(last_state_value, saved_state_value)\n\n def testNoProjNoShardingTupleStateSaver(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n state_saver = TestStateSaver(batch_size, num_units)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=True)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_state_saving_rnn(\n cell, inputs, state_saver=state_saver, state_name=(\"c\", \"m\"))\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n last_and_saved_states = sess.run(\n state + (state_saver.saved_state[\"c\"], state_saver.saved_state[\"m\"]),\n feed_dict={\n inputs[0]: input_value\n })\n self.assertEqual(4, len(last_and_saved_states))\n self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])\n\n def testNoProjNoShardingNestedTupleStateSaver(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n state_saver = TestStateSaver(\n batch_size, {\n \"c0\": num_units,\n \"m0\": num_units,\n \"c1\": num_units + 1,\n \"m1\": num_units + 1,\n \"c2\": num_units + 2,\n \"m2\": num_units + 2,\n \"c3\": num_units + 3,\n \"m3\": num_units + 3\n })\n\n def _cell(i):\n return rnn_cell.LSTMCell(\n num_units + i,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=True)\n\n # This creates a state tuple which has 4 sub-tuples of length 2 each.\n cell = rnn_cell.MultiRNNCell(\n [_cell(i) for i in range(4)], state_is_tuple=True)\n\n self.assertEqual(len(cell.state_size), 4)\n for i in range(4):\n self.assertEqual(len(cell.state_size[i]), 2)\n\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n\n state_names = ((\"c0\", \"m0\"), (\"c1\", \"m1\"), (\"c2\", \"m2\"), (\"c3\", \"m3\"))\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_state_saving_rnn(\n cell, inputs, state_saver=state_saver, state_name=state_names)\n self.assertEqual(len(outputs), len(inputs))\n\n # Final output comes from _cell(3) which has state size num_units + 3\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n 
last_states = sess.run(\n list(nest.flatten(state)), feed_dict={\n inputs[0]: input_value\n })\n saved_states = sess.run(\n list(state_saver.saved_state.values()),\n feed_dict={\n inputs[0]: input_value\n })\n self.assertEqual(8, len(last_states))\n self.assertEqual(8, len(saved_states))\n flat_state_names = nest.flatten(state_names)\n named_saved_states = dict(\n zip(state_saver.saved_state.keys(), saved_states))\n\n for i in range(8):\n self.assertAllEqual(last_states[i],\n named_saved_states[flat_state_names[i]])\n\n def testProjNoSharding(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=False)\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n def _testStateTupleWithProjAndSequenceLength(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n sequence_length = [4, 6]\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell_notuple = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=False)\n cell_tuple = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=True)\n with variable_scope.variable_scope(\"root\") as scope:\n outputs_notuple, state_notuple = rnn.static_rnn(\n cell_notuple,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n scope.reuse_variables()\n # TODO(ebrevdo): For this test, we ensure values are identical and\n # therefore the weights here are tied. In the future, we may consider\n # making the state_is_tuple property mutable so we can avoid\n # having to do this - especially if users ever need to reuse\n # the parameters from different RNNCell instances. 
Right now,\n # this seems an unrealistic use case except for testing.\n cell_tuple._scope = cell_notuple._scope # pylint: disable=protected-access\n outputs_tuple, state_tuple = rnn.static_rnn(\n cell_tuple,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n self.assertEqual(len(outputs_notuple), len(inputs))\n self.assertEqual(len(outputs_tuple), len(inputs))\n self.assertTrue(isinstance(state_tuple, tuple))\n self.assertTrue(isinstance(state_notuple, ops_lib.Tensor))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n outputs_notuple_v = sess.run(\n outputs_notuple, feed_dict={\n inputs[0]: input_value\n })\n outputs_tuple_v = sess.run(\n outputs_tuple, feed_dict={\n inputs[0]: input_value\n })\n self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)\n\n (state_notuple_v,) = sess.run(\n (state_notuple,), feed_dict={\n inputs[0]: input_value\n })\n state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})\n self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))\n\n def testProjSharding(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n initializer=initializer,\n state_is_tuple=False)\n\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n def testDoubleInput(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float64, shape=(None, input_size))\n ]\n\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n initializer=initializer,\n state_is_tuple=False)\n\n outputs, _ = rnn.static_rnn(\n cell,\n inputs,\n initial_state=cell.zero_state(batch_size, dtypes.float64))\n\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.asarray(\n np.random.randn(batch_size, input_size), dtype=np.float64)\n values = sess.run(outputs, feed_dict={inputs[0]: input_value})\n self.assertEqual(values[0].dtype, input_value.dtype)\n\n def testShardNoShardEquivalentOutput(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n initializer = init_ops.constant_initializer(0.001)\n\n cell_noshard = rnn_cell.LSTMCell(\n num_units,\n num_proj=num_proj,\n use_peepholes=True,\n initializer=initializer,\n 
num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n state_is_tuple=False)\n\n cell_shard = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"noshard_scope\"):\n outputs_noshard, state_noshard = rnn.static_rnn(\n cell_noshard, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"shard_scope\"):\n outputs_shard, state_shard = rnn.static_rnn(\n cell_shard, inputs, dtype=dtypes.float32)\n\n self.assertEqual(len(outputs_noshard), len(inputs))\n self.assertEqual(len(outputs_noshard), len(outputs_shard))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n feeds = dict((x, input_value) for x in inputs)\n values_noshard = sess.run(outputs_noshard, feed_dict=feeds)\n values_shard = sess.run(outputs_shard, feed_dict=feeds)\n state_values_noshard = sess.run([state_noshard], feed_dict=feeds)\n state_values_shard = sess.run([state_shard], feed_dict=feeds)\n self.assertEqual(len(values_noshard), len(values_shard))\n self.assertEqual(len(state_values_noshard), len(state_values_shard))\n for (v_noshard, v_shard) in zip(values_noshard, values_shard):\n self.assertAllClose(v_noshard, v_shard, atol=1e-3)\n for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):\n self.assertAllClose(s_noshard, s_shard, atol=1e-3)\n\n def testDoubleInputWithDropoutAndDynamicCalculation(self):\n \"\"\"Smoke test for using LSTM with doubles, dropout, dynamic calculation.\"\"\"\n\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n sequence_length = array_ops.placeholder(dtypes.int64)\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float64, shape=(None, input_size))\n ]\n\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n initializer=initializer,\n state_is_tuple=False)\n dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)\n\n outputs, state = rnn.static_rnn(\n dropout_cell,\n inputs,\n sequence_length=sequence_length,\n initial_state=cell.zero_state(batch_size, dtypes.float64))\n\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run(feed_dict={\n sequence_length: [2, 3]\n })\n input_value = np.asarray(\n np.random.randn(batch_size, input_size), dtype=np.float64)\n values = sess.run(\n outputs, feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n state_value = sess.run(\n [state], feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n self.assertEqual(values[0].dtype, input_value.dtype)\n self.assertEqual(state_value[0].dtype, input_value.dtype)\n\n def testSharingWeightsWithReuse(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)\n initializer_d = init_ops.random_uniform_initializer(\n -1, 1, seed=self._seed + 1)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n 
initializer=initializer,\n state_is_tuple=False)\n cell_d = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer_d,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"share_scope\"):\n outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"share_scope\", reuse=True):\n outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"diff_scope\"):\n outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n output_values = sess.run(\n outputs0 + outputs1 + outputs2, feed_dict={\n inputs[0]: input_value\n })\n outputs0_values = output_values[:max_length]\n outputs1_values = output_values[max_length:2 * max_length]\n outputs2_values = output_values[2 * max_length:]\n self.assertEqual(len(outputs0_values), len(outputs1_values))\n self.assertEqual(len(outputs0_values), len(outputs2_values))\n for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):\n # Same weights used by both RNNs so outputs should be the same.\n self.assertAllEqual(o1, o2)\n # Different weights used so outputs should be different.\n self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)\n\n def testSharingWeightsWithDifferentNamescope(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=False)\n\n with ops_lib.name_scope(\"scope0\"):\n with variable_scope.variable_scope(\"share_scope\"):\n outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with ops_lib.name_scope(\"scope1\"):\n with variable_scope.variable_scope(\"share_scope\", reuse=True):\n outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n output_values = sess.run(\n outputs0 + outputs1, feed_dict={\n inputs[0]: input_value\n })\n outputs0_values = output_values[:max_length]\n outputs1_values = output_values[max_length:]\n self.assertEqual(len(outputs0_values), len(outputs1_values))\n for out0, out1 in zip(outputs0_values, outputs1_values):\n self.assertAllEqual(out0, out1)\n\n def testDynamicRNNAllowsUnknownTimeDimension(self):\n inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])\n cell = rnn_cell.GRUCell(30)\n # Smoke test, this should not raise an error\n rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)\n\n @test_util.run_in_graph_and_eager_modes\n def testDynamicRNNWithTupleStates(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n sequence_length = [4, 6]\n in_graph_mode = not context.executing_eagerly()\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n if in_graph_mode:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n else:\n inputs = max_length * [\n constant_op.constant(\n np.random.randn(batch_size, input_size).astype(np.float32))\n ]\n inputs_c = 
array_ops.stack(inputs)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=True)\n with variable_scope.variable_scope(\"root\") as scope:\n outputs_static, state_static = rnn.static_rnn(\n cell,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n scope.reuse_variables()\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length,\n scope=scope)\n self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))\n self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))\n self.assertEqual(state_static[0], state_static.c)\n self.assertEqual(state_static[1], state_static.h)\n self.assertEqual(state_dynamic[0], state_dynamic.c)\n self.assertEqual(state_dynamic[1], state_dynamic.h)\n\n if in_graph_mode:\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n outputs_static = sess.run(\n outputs_static, feed_dict={\n inputs[0]: input_value\n })\n outputs_dynamic = sess.run(\n outputs_dynamic, feed_dict={\n inputs[0]: input_value\n })\n state_static = sess.run(\n state_static, feed_dict={\n inputs[0]: input_value\n })\n state_dynamic = sess.run(\n state_dynamic, feed_dict={\n inputs[0]: input_value\n })\n\n if in_graph_mode:\n self.assertAllEqual(outputs_static, outputs_dynamic)\n else:\n self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)\n self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))\n\n @test_util.run_in_graph_and_eager_modes\n def testDynamicRNNWithNestedTupleStates(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n sequence_length = [4, 6]\n in_graph_mode = not context.executing_eagerly()\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n if in_graph_mode:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n else:\n inputs = max_length * [\n constant_op.constant(\n np.random.randn(batch_size, input_size).astype(np.float32))\n ]\n inputs_c = array_ops.stack(inputs)\n\n def _cell(i):\n return rnn_cell.LSTMCell(\n num_units + i,\n use_peepholes=True,\n num_proj=num_proj + i,\n initializer=initializer,\n state_is_tuple=True)\n\n # This creates a state tuple which has 4 sub-tuples of length 2 each.\n cell = rnn_cell.MultiRNNCell(\n [_cell(i) for i in range(4)], state_is_tuple=True)\n\n self.assertEqual(len(cell.state_size), 4)\n for i in range(4):\n self.assertEqual(len(cell.state_size[i]), 2)\n\n test_zero = cell.zero_state(1, dtypes.float32)\n self.assertEqual(len(test_zero), 4)\n for i in range(4):\n self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])\n self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])\n\n with variable_scope.variable_scope(\"root\") as scope:\n outputs_static, state_static = rnn.static_rnn(\n cell,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n scope.reuse_variables()\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length,\n scope=scope)\n\n if in_graph_mode:\n input_value = np.random.randn(batch_size, input_size)\n variables_lib.global_variables_initializer().run()\n outputs_static = sess.run(\n outputs_static, 
feed_dict={\n inputs[0]: input_value\n })\n outputs_dynamic = sess.run(\n outputs_dynamic, feed_dict={\n inputs[0]: input_value\n })\n state_static = sess.run(\n nest.flatten(state_static), feed_dict={\n inputs[0]: input_value\n })\n state_dynamic = sess.run(\n nest.flatten(state_dynamic), feed_dict={\n inputs[0]: input_value\n })\n\n if in_graph_mode:\n self.assertAllEqual(outputs_static, outputs_dynamic)\n else:\n self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)\n state_static = nest.flatten(state_static)\n state_dynamic = nest.flatten(state_dynamic)\n self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))\n\n def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):\n time_steps = 8\n num_units = 3\n num_proj = 4\n input_size = 5\n batch_size = 2\n\n input_values = np.random.randn(time_steps, batch_size, input_size).astype(\n np.float32)\n\n if use_sequence_length:\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n else:\n sequence_length = None\n\n in_graph_mode = not context.executing_eagerly()\n\n # TODO(b/68017812): Eager ignores operation seeds, so we need to create a\n # single cell and reuse it across the static and dynamic RNNs. Remove this\n # special case once is fixed.\n if not in_graph_mode:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n ########### Step 1: Run static graph and generate readouts\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n if in_graph_mode:\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n else:\n concat_inputs = constant_op.constant(input_values)\n inputs = array_ops.unstack(concat_inputs)\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n\n # TODO(akshayka): Remove special case once b/68017812 is fixed.\n if in_graph_mode:\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"dynamic_scope\"):\n outputs_static, state_static = rnn.static_rnn(\n cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)\n\n if in_graph_mode:\n # Generate gradients and run sessions to obtain outputs\n feeds = {concat_inputs: input_values}\n # Initialize\n variables_lib.global_variables_initializer().run(feed_dict=feeds)\n # Generate gradients of sum of outputs w.r.t. inputs\n static_gradients = gradients_impl.gradients(\n outputs_static + [state_static], [concat_inputs])\n # Generate gradients of individual outputs w.r.t. inputs\n static_individual_gradients = nest.flatten([\n gradients_impl.gradients(y, [concat_inputs])\n for y in [outputs_static[0], outputs_static[-1], state_static]\n ])\n # Generate gradients of individual variables w.r.t. 
inputs\n trainable_variables = ops_lib.get_collection(\n ops_lib.GraphKeys.TRAINABLE_VARIABLES)\n assert len(trainable_variables) > 1, (\n \"Count of trainable variables: %d\" % len(trainable_variables))\n # pylint: disable=bad-builtin\n static_individual_variable_gradients = nest.flatten([\n gradients_impl.gradients(y, trainable_variables)\n for y in [outputs_static[0], outputs_static[-1], state_static]\n ])\n # Test forward pass\n values_static = sess.run(outputs_static, feed_dict=feeds)\n (state_value_static,) = sess.run((state_static,), feed_dict=feeds)\n\n # Test gradients to inputs and variables w.r.t. outputs & final state\n static_grad_values = sess.run(static_gradients, feed_dict=feeds)\n\n static_individual_grad_values = sess.run(\n static_individual_gradients, feed_dict=feeds)\n\n static_individual_var_grad_values = sess.run(\n static_individual_variable_gradients, feed_dict=feeds)\n\n ########## Step 2: Run dynamic graph and generate readouts\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n if in_graph_mode:\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n else:\n concat_inputs = constant_op.constant(input_values)\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n\n # TODO(akshayka): Remove this special case once b/68017812 is\n # fixed.\n if in_graph_mode:\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"dynamic_scope\"):\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32)\n split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)\n\n if in_graph_mode:\n feeds = {concat_inputs: input_values}\n\n # Initialize\n variables_lib.global_variables_initializer().run(feed_dict=feeds)\n\n # Generate gradients of sum of outputs w.r.t. inputs\n dynamic_gradients = gradients_impl.gradients(\n split_outputs_dynamic + [state_dynamic], [concat_inputs])\n\n # Generate gradients of several individual outputs w.r.t. inputs\n dynamic_individual_gradients = nest.flatten([\n gradients_impl.gradients(y, [concat_inputs])\n for y in [\n split_outputs_dynamic[0], split_outputs_dynamic[-1],\n state_dynamic\n ]\n ])\n\n # Generate gradients of individual variables w.r.t. inputs\n trainable_variables = ops_lib.get_collection(\n ops_lib.GraphKeys.TRAINABLE_VARIABLES)\n assert len(trainable_variables) > 1, (\n \"Count of trainable variables: %d\" % len(trainable_variables))\n dynamic_individual_variable_gradients = nest.flatten([\n gradients_impl.gradients(y, trainable_variables)\n for y in [\n split_outputs_dynamic[0], split_outputs_dynamic[-1],\n state_dynamic\n ]\n ])\n\n # Test forward pass\n values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)\n (state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)\n\n # Test gradients to inputs and variables w.r.t. 
outputs & final state\n dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)\n\n dynamic_individual_grad_values = sess.run(\n dynamic_individual_gradients, feed_dict=feeds)\n\n dynamic_individual_var_grad_values = sess.run(\n dynamic_individual_variable_gradients, feed_dict=feeds)\n\n ######### Step 3: Comparisons\n if not in_graph_mode:\n values_static = outputs_static\n values_dynamic = split_outputs_dynamic\n state_value_static = state_static\n state_value_dynamic = state_dynamic\n\n self.assertEqual(len(values_static), len(values_dynamic))\n for (value_static, value_dynamic) in zip(values_static, values_dynamic):\n self.assertAllEqual(value_static, value_dynamic)\n self.assertAllEqual(state_value_static, state_value_dynamic)\n\n if in_graph_mode:\n\n self.assertAllEqual(static_grad_values, dynamic_grad_values)\n\n self.assertEqual(\n len(static_individual_grad_values),\n len(dynamic_individual_grad_values))\n self.assertEqual(\n len(static_individual_var_grad_values),\n len(dynamic_individual_var_grad_values))\n\n for i, (a, b) in enumerate(\n zip(static_individual_grad_values, dynamic_individual_grad_values)):\n tf_logging.info(\"Comparing individual gradients iteration %d\" % i)\n self.assertAllEqual(a, b)\n\n for i, (a, b) in enumerate(\n zip(static_individual_var_grad_values,\n dynamic_individual_var_grad_values)):\n tf_logging.info(\n \"Comparing individual variable gradients iteration %d\" % i)\n self.assertAllEqual(a, b)\n\n @test_util.run_in_graph_and_eager_modes\n def testDynamicEquivalentToStaticRNN(self):\n self._testDynamicEquivalentToStaticRNN(use_sequence_length=True)\n self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)\n\n\nclass BidirectionalRNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n sequence_length = array_ops.placeholder(\n dtypes.int64) if use_sequence_length else None\n cell_fw = rnn_cell.LSTMCell(\n num_units, input_size, initializer=initializer, state_is_tuple=False)\n cell_bw = rnn_cell.LSTMCell(\n num_units, input_size, initializer=initializer, state_is_tuple=False)\n inputs = max_length * [\n array_ops.placeholder(\n dtypes.float32,\n shape=(batch_size, input_size) if use_shape else (None, input_size))\n ]\n outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(\n cell_fw,\n cell_bw,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(),\n [batch_size if use_shape else None, 2 * num_units])\n\n input_value = np.random.randn(batch_size, input_size)\n outputs = array_ops.stack(outputs)\n\n return input_value, inputs, outputs, state_fw, state_bw, sequence_length\n\n def _testBidirectionalRNN(self, use_shape):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n input_value, inputs, outputs, state_fw, state_bw, sequence_length = (\n self._createBidirectionalRNN(use_shape, True))\n variables_lib.global_variables_initializer().run()\n # Run with pre-specified sequence length of 2, 3\n out, s_fw, s_bw = sess.run(\n [outputs, state_fw, state_bw],\n feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n\n # Since the forward and backward LSTM cells were initialized with the\n 
# same parameters, the forward and backward output has to be the same,\n # but reversed in time. The format is output[time][batch][depth], and\n # due to depth concatenation (as num_units=3 for both RNNs):\n # - forward output: out[][][depth] for 0 <= depth < 3\n # - backward output: out[][][depth] for 4 <= depth < 6\n #\n # First sequence in batch is length=2\n # Check that the time=0 forward output is equal to time=1 backward output\n self.assertEqual(out[0][0][0], out[1][0][3])\n self.assertEqual(out[0][0][1], out[1][0][4])\n self.assertEqual(out[0][0][2], out[1][0][5])\n # Check that the time=1 forward output is equal to time=0 backward output\n self.assertEqual(out[1][0][0], out[0][0][3])\n self.assertEqual(out[1][0][1], out[0][0][4])\n self.assertEqual(out[1][0][2], out[0][0][5])\n\n # Second sequence in batch is length=3\n # Check that the time=0 forward output is equal to time=2 backward output\n self.assertEqual(out[0][1][0], out[2][1][3])\n self.assertEqual(out[0][1][1], out[2][1][4])\n self.assertEqual(out[0][1][2], out[2][1][5])\n # Check that the time=1 forward output is equal to time=1 backward output\n self.assertEqual(out[1][1][0], out[1][1][3])\n self.assertEqual(out[1][1][1], out[1][1][4])\n self.assertEqual(out[1][1][2], out[1][1][5])\n # Check that the time=2 forward output is equal to time=0 backward output\n self.assertEqual(out[2][1][0], out[0][1][3])\n self.assertEqual(out[2][1][1], out[0][1][4])\n self.assertEqual(out[2][1][2], out[0][1][5])\n # Via the reasoning above, the forward and backward final state should be\n # exactly the same\n self.assertAllClose(s_fw, s_bw)\n\n def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n input_value, inputs, outputs, state_fw, state_bw, _ = (\n self._createBidirectionalRNN(use_shape, False))\n variables_lib.global_variables_initializer().run()\n out, s_fw, s_bw = sess.run(\n [outputs, state_fw, state_bw], feed_dict={\n inputs[0]: input_value\n })\n\n # Since the forward and backward LSTM cells were initialized with the\n # same parameters, the forward and backward output has to be the same,\n # but reversed in time. The format is output[time][batch][depth], and\n # due to depth concatenation (as num_units=3 for both RNNs):\n # - forward output: out[][][depth] for 0 <= depth < 3\n # - backward output: out[][][depth] for 4 <= depth < 6\n #\n # Both sequences in batch are length=8. 
Check that the time=i\n # forward output is equal to time=8-1-i backward output\n for i in xrange(8):\n self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])\n self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])\n self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])\n for i in xrange(8):\n self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])\n self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])\n self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])\n # Via the reasoning above, the forward and backward final state should be\n # exactly the same\n self.assertAllClose(s_fw, s_bw)\n\n def testBidirectionalRNN(self):\n self._testBidirectionalRNN(use_shape=False)\n self._testBidirectionalRNN(use_shape=True)\n\n def testBidirectionalRNNWithoutSequenceLength(self):\n self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)\n self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)\n\n def _createBidirectionalDynamicRNN(self,\n use_shape,\n use_state_tuple,\n use_time_major,\n use_sequence_length,\n scope=None):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n sequence_length = (\n array_ops.placeholder(dtypes.int64) if use_sequence_length else None)\n cell_fw = rnn_cell.LSTMCell(\n num_units, initializer=initializer, state_is_tuple=use_state_tuple)\n cell_bw = rnn_cell.LSTMCell(\n num_units, initializer=initializer, state_is_tuple=use_state_tuple)\n inputs = max_length * [\n array_ops.placeholder(\n dtypes.float32,\n shape=(batch_size if use_shape else None, input_size))\n ]\n inputs_c = array_ops.stack(inputs)\n if not use_time_major:\n inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])\n outputs, states = rnn.bidirectional_dynamic_rnn(\n cell_fw,\n cell_bw,\n inputs_c,\n sequence_length,\n dtype=dtypes.float32,\n time_major=use_time_major,\n scope=scope)\n outputs = array_ops.concat(outputs, 2)\n state_fw, state_bw = states\n outputs_shape = [None, max_length, 2 * num_units]\n if use_shape:\n outputs_shape[0] = batch_size\n if use_time_major:\n outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]\n self.assertEqual(outputs.get_shape().as_list(), outputs_shape)\n\n input_value = np.random.randn(batch_size, input_size)\n\n return input_value, inputs, outputs, state_fw, state_bw, sequence_length\n\n def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,\n use_time_major, use_sequence_length):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n input_value, inputs, outputs, state_fw, state_bw, sequence_length = (\n self._createBidirectionalDynamicRNN(\n use_shape, use_state_tuple, use_time_major, use_sequence_length))\n variables_lib.global_variables_initializer().run()\n # Run with pre-specified sequence length of 2, 3\n feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})\n feed_dict.update({inputs[0]: input_value})\n if use_state_tuple:\n out, c_fw, m_fw, c_bw, m_bw = sess.run(\n [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],\n feed_dict=feed_dict)\n s_fw = (c_fw, m_fw)\n s_bw = (c_bw, m_bw)\n else:\n feed_dict.update({inputs[0]: input_value})\n out, s_fw, s_bw = sess.run(\n [outputs, state_fw, state_bw], feed_dict=feed_dict)\n\n # Since the forward and backward LSTM cells were initialized with the\n # same parameters, the forward and backward output has to be the same,\n # but reversed in time. 
The format is output[time][batch][depth], and\n # due to depth concatenation (as num_units=3 for both RNNs):\n # - forward output: out[][][depth] for 0 <= depth < 3\n # - backward output: out[][][depth] for 4 <= depth < 6\n #\n if not use_time_major:\n out = np.swapaxes(out, 0, 1)\n\n if use_sequence_length:\n # First sequence in batch is length=2\n # Check that the t=0 forward output is equal to t=1 backward output\n self.assertEqual(out[0][0][0], out[1][0][3])\n self.assertEqual(out[0][0][1], out[1][0][4])\n self.assertEqual(out[0][0][2], out[1][0][5])\n # Check that the t=1 forward output is equal to t=0 backward output\n self.assertEqual(out[1][0][0], out[0][0][3])\n self.assertEqual(out[1][0][1], out[0][0][4])\n self.assertEqual(out[1][0][2], out[0][0][5])\n\n # Second sequence in batch is length=3\n # Check that the t=0 forward output is equal to t=2 backward output\n self.assertEqual(out[0][1][0], out[2][1][3])\n self.assertEqual(out[0][1][1], out[2][1][4])\n self.assertEqual(out[0][1][2], out[2][1][5])\n # Check that the t=1 forward output is equal to t=1 backward output\n self.assertEqual(out[1][1][0], out[1][1][3])\n self.assertEqual(out[1][1][1], out[1][1][4])\n self.assertEqual(out[1][1][2], out[1][1][5])\n # Check that the t=2 forward output is equal to t=0 backward output\n self.assertEqual(out[2][1][0], out[0][1][3])\n self.assertEqual(out[2][1][1], out[0][1][4])\n self.assertEqual(out[2][1][2], out[0][1][5])\n # Via the reasoning above, the forward and backward final state should\n # be exactly the same\n self.assertAllClose(s_fw, s_bw)\n else: # not use_sequence_length\n max_length = 8 # from createBidirectionalDynamicRNN\n for t in range(max_length):\n self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])\n self.assertAllClose(s_fw, s_bw)\n\n def testBidirectionalDynamicRNN(self):\n # Generate 2^5 option values\n # from [True, True, True, True, True] to [False, False, False, False, False]\n options = itertools.product([True, False], repeat=4)\n for option in options:\n self._testBidirectionalDynamicRNN(\n use_shape=option[0],\n use_state_tuple=option[1],\n use_time_major=option[2],\n use_sequence_length=option[3])\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n # REMARKS: factory(scope) is a function accepting a scope\n # as an argument, such scope can be None, a string\n # or a VariableScope instance.\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n\n # check that all the variables names starts\n # with the proper scope.\n variables_lib.global_variables_initializer()\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"bidirectional_rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"BiRNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testBidirectionalRNNScope(self):\n\n def factory(scope):\n return self._createBidirectionalRNN(\n use_shape=True, use_sequence_length=True, scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n def testBidirectionalDynamicRNNScope(self):\n\n def get_factory(use_time_major):\n\n def factory(scope):\n return 
self._createBidirectionalDynamicRNN(\n use_shape=True,\n use_state_tuple=True,\n use_sequence_length=True,\n use_time_major=use_time_major,\n scope=scope)\n\n return factory\n\n self._testScope(get_factory(True), use_outer_scope=True)\n self._testScope(get_factory(True), use_outer_scope=False)\n self._testScope(get_factory(True), prefix=None, use_outer_scope=False)\n self._testScope(get_factory(False), use_outer_scope=True)\n self._testScope(get_factory(False), use_outer_scope=False)\n self._testScope(get_factory(False), prefix=None, use_outer_scope=False)\n\n\nclass MultiDimensionalLSTMTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testMultiDimensionalLSTMAllRNNContainers(self):\n feature_dims = (3, 4, 5)\n input_size = feature_dims\n batch_size = 2\n max_length = 8\n sequence_length = [4, 6]\n with self.test_session(graph=ops_lib.Graph()) as sess:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)\n ]\n inputs_using_dim = max_length * [\n array_ops.placeholder(\n dtypes.float32, shape=(batch_size,) + input_size)\n ]\n inputs_c = array_ops.stack(inputs)\n # Create a cell for the whole test. This is fine because the cell has no\n # variables.\n cell = DummyMultiDimensionalLSTM(feature_dims)\n state_saver = TestStateSaver(batch_size, input_size)\n outputs_static, state_static = rnn.static_rnn(\n cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length)\n outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(\n cell,\n cell,\n inputs_using_dim,\n dtype=dtypes.float32,\n sequence_length=sequence_length)\n outputs_sav, state_sav = rnn.static_state_saving_rnn(\n cell,\n inputs_using_dim,\n sequence_length=sequence_length,\n state_saver=state_saver,\n state_name=(\"h\", \"c\"))\n\n self.assertEqual(outputs_dynamic.get_shape().as_list(),\n inputs_c.get_shape().as_list())\n for out, inp in zip(outputs_static, inputs):\n self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())\n for out, inp in zip(outputs_bid, inputs_using_dim):\n input_shape_list = inp.get_shape().as_list()\n # fwd and bwd activations are concatenated along the second dim.\n input_shape_list[1] *= 2\n self.assertEqual(out.get_shape().as_list(), input_shape_list)\n\n variables_lib.global_variables_initializer().run()\n\n input_total_size = (batch_size,) + input_size\n input_value = np.random.randn(*input_total_size)\n outputs_static_v = sess.run(\n outputs_static, feed_dict={\n inputs[0]: input_value\n })\n outputs_dynamic_v = sess.run(\n outputs_dynamic, feed_dict={\n inputs[0]: input_value\n })\n outputs_bid_v = sess.run(\n outputs_bid, feed_dict={\n inputs_using_dim[0]: input_value\n })\n outputs_sav_v = sess.run(\n outputs_sav, feed_dict={\n inputs_using_dim[0]: input_value\n })\n\n self.assertAllEqual(outputs_static_v, outputs_dynamic_v)\n self.assertAllEqual(outputs_static_v, outputs_sav_v)\n outputs_static_array = np.array(outputs_static_v)\n outputs_static_array_double = np.concatenate(\n (outputs_static_array, outputs_static_array), axis=2)\n outputs_bid_array = np.array(outputs_bid_v)\n self.assertAllEqual(outputs_static_array_double, outputs_bid_array)\n\n state_static_v = sess.run(\n state_static, feed_dict={\n inputs[0]: input_value\n })\n state_dynamic_v = sess.run(\n state_dynamic, feed_dict={\n inputs[0]: input_value\n })\n state_bid_fw_v = 
sess.run(\n state_fw, feed_dict={\n inputs_using_dim[0]: input_value\n })\n state_bid_bw_v = sess.run(\n state_bw, feed_dict={\n inputs_using_dim[0]: input_value\n })\n state_sav_v = sess.run(\n state_sav, feed_dict={\n inputs_using_dim[0]: input_value\n })\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))\n\n\nclass NestedLSTMTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testNestedIOLSTMAllRNNContainers(self):\n input_size = 5\n batch_size = 2\n state_size = 6\n max_length = 8\n sequence_length = [4, 6]\n with self.test_session(graph=ops_lib.Graph()) as sess:\n state_saver = TestStateSaver(batch_size, state_size)\n single_input = (array_ops.placeholder(\n dtypes.float32, shape=(None, input_size)),\n array_ops.placeholder(\n dtypes.float32, shape=(None, input_size)))\n inputs = max_length * [single_input]\n inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),\n array_ops.stack([input_[1] for input_ in inputs]))\n single_input_using_dim = (array_ops.placeholder(\n dtypes.float32, shape=(batch_size, input_size)),\n array_ops.placeholder(\n dtypes.float32,\n shape=(batch_size, input_size)))\n inputs_using_dim = max_length * [single_input_using_dim]\n\n # Create a cell for the whole test. This is fine because the cell has no\n # variables.\n cell = NestedRNNCell()\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length)\n outputs_static, state_static = rnn.static_rnn(\n cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)\n outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(\n cell,\n cell,\n inputs_using_dim,\n dtype=dtypes.float32,\n sequence_length=sequence_length)\n outputs_sav, state_sav = rnn.static_state_saving_rnn(\n cell,\n inputs_using_dim,\n sequence_length=sequence_length,\n state_saver=state_saver,\n state_name=(\"h\", \"c\"))\n\n def _assert_same_shape(input1, input2, double=False):\n flat_input1 = nest.flatten(input1)\n flat_input2 = nest.flatten(input2)\n for inp1, inp2 in zip(flat_input1, flat_input2):\n input_shape = inp1.get_shape().as_list()\n if double:\n input_shape[1] *= 2\n self.assertEqual(input_shape, inp2.get_shape().as_list())\n\n _assert_same_shape(inputs_c, outputs_dynamic)\n _assert_same_shape(inputs, outputs_static)\n _assert_same_shape(inputs_using_dim, outputs_sav)\n _assert_same_shape(inputs_using_dim, outputs_bid, double=True)\n\n variables_lib.global_variables_initializer().run()\n\n input_total_size = (batch_size, input_size)\n input_value = (np.random.randn(*input_total_size),\n np.random.randn(*input_total_size))\n outputs_dynamic_v = sess.run(\n outputs_dynamic, feed_dict={\n single_input: input_value\n })\n outputs_static_v = sess.run(\n outputs_static, feed_dict={\n single_input: input_value\n })\n outputs_sav_v = sess.run(\n outputs_sav, feed_dict={\n single_input_using_dim: input_value\n })\n outputs_bid_v = sess.run(\n outputs_bid, feed_dict={\n single_input_using_dim: input_value\n })\n\n self.assertAllEqual(outputs_static_v,\n np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))\n self.assertAllEqual(outputs_static_v, outputs_sav_v)\n outputs_static_array = np.array(outputs_static_v)\n outputs_static_array_double = 
np.concatenate(\n (outputs_static_array, outputs_static_array), axis=3)\n outputs_bid_array = np.array(outputs_bid_v)\n self.assertAllEqual(outputs_static_array_double, outputs_bid_array)\n\n state_dynamic_v = sess.run(\n state_dynamic, feed_dict={\n single_input: input_value\n })\n state_static_v = sess.run(\n state_static, feed_dict={\n single_input: input_value\n })\n state_bid_fw_v = sess.run(\n state_fw, feed_dict={\n single_input_using_dim: input_value\n })\n state_bid_bw_v = sess.run(\n state_bw, feed_dict={\n single_input_using_dim: input_value\n })\n state_sav_v = sess.run(\n state_sav, feed_dict={\n single_input_using_dim: input_value\n })\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))\n\n\nclass StateSaverRNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def _factory(self, scope, state_saver):\n num_units = state_saver.state_size // 2\n batch_size = state_saver.batch_size\n input_size = 5\n max_length = 8\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=False)\n inputs = max_length * [\n array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))\n ]\n out, state = rnn.static_state_saving_rnn(\n cell,\n inputs,\n state_saver=state_saver,\n state_name=\"save_lstm\",\n scope=scope)\n return out, state, state_saver\n\n def _testScope(self, prefix=\"prefix\", use_outer_scope=True):\n num_units = 3\n batch_size = 2\n state_saver = TestStateSaver(batch_size, 2 * num_units)\n\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n self._factory(scope=scope, state_saver=state_saver)\n else:\n self._factory(scope=prefix, state_saver=state_saver)\n variables_lib.global_variables_initializer()\n\n # check that all the variables names starts\n # with the proper scope.\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testStateSaverRNNScope(self):\n self._testScope(use_outer_scope=True)\n self._testScope(use_outer_scope=False)\n self._testScope(prefix=None, use_outer_scope=False)\n\n def testStateSaverCallsSaveState(self):\n \"\"\"Test that number of calls to state and save_state is equal.\n\n Test if the order of actual evaluating or skipping evaluation of out,\n state tensors, which are the output tensors from static_state_saving_rnn,\n have influence on number of calls to save_state and state methods of\n state_saver object (the number of calls should be same.)\n \"\"\"\n\n num_units = 3\n batch_size = 2\n state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)\n out, state, state_saver = self._factory(scope=None, state_saver=state_saver)\n\n with self.test_session() as sess:\n sess.run(variables_lib.global_variables_initializer())\n sess.run(variables_lib.local_variables_initializer())\n\n _, _, num_state_calls, num_save_state_calls = 
sess.run([\n out,\n state,\n state_saver.num_state_calls,\n state_saver.num_save_state_calls])\n self.assertEqual(num_state_calls, num_save_state_calls)\n\n _, num_state_calls, num_save_state_calls = sess.run([\n out,\n state_saver.num_state_calls,\n state_saver.num_save_state_calls])\n self.assertEqual(num_state_calls, num_save_state_calls)\n\n _, num_state_calls, num_save_state_calls = sess.run([\n state,\n state_saver.num_state_calls,\n state_saver.num_save_state_calls])\n self.assertEqual(num_state_calls, num_save_state_calls)\n\nclass GRUTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testDynamic(self):\n time_steps = 8\n num_units = 3\n input_size = 5\n batch_size = 2\n\n input_values = np.random.randn(time_steps, batch_size, input_size)\n\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n\n cell = rnn_cell.GRUCell(num_units=num_units)\n\n with variable_scope.variable_scope(\"dynamic_scope\"):\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32)\n\n feeds = {concat_inputs: input_values}\n\n # Initialize\n variables_lib.global_variables_initializer().run(feed_dict=feeds)\n\n sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n variables_lib.global_variables_initializer()\n\n # check that all the variables names starts\n # with the proper scope.\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testDynamicScope(self):\n time_steps = 8\n num_units = 3\n input_size = 5\n batch_size = 2\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n\n def factory(scope):\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n cell = rnn_cell.GRUCell(num_units=num_units)\n return rnn.dynamic_rnn(\n cell,\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32,\n scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n\nclass RawRNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def _testRawRNN(self, max_time):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = array_ops.placeholder(\n shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)\n sequence_length = array_ops.placeholder(\n shape=(batch_size,), dtype=dtypes.int32)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, 
state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, unused_loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n next_state = cell_state # copy state through\n elements_finished = (time_ >= sequence_length)\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output, None)\n\n reuse_scope = variable_scope.get_variable_scope()\n\n outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)\n outputs = outputs_ta.stack()\n\n reuse_scope.reuse_variables()\n outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(\n cell,\n inputs,\n time_major=True,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=reuse_scope)\n\n variables = variables_lib.trainable_variables()\n gradients = gradients_impl.gradients([outputs, final_state],\n [inputs] + variables)\n gradients_dynamic_rnn = gradients_impl.gradients(\n [outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)\n\n variables_lib.global_variables_initializer().run()\n\n rand_input = np.random.randn(max_time, batch_size, input_depth)\n if max_time == 0:\n rand_seq_len = np.zeros(batch_size)\n else:\n rand_seq_len = np.random.randint(max_time, size=batch_size)\n\n # To ensure same output lengths for dynamic_rnn and raw_rnn\n rand_seq_len[0] = max_time\n\n (outputs_val, outputs_dynamic_rnn_val, final_state_val,\n final_state_dynamic_rnn_val) = sess.run(\n [outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],\n feed_dict={\n inputs: rand_input,\n sequence_length: rand_seq_len\n })\n\n self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)\n self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)\n\n # NOTE: Because with 0 time steps, raw_rnn does not have shape\n # information about the input, it is impossible to perform\n # gradients comparisons as the gradients eval will fail. So\n # this case skips the gradients test.\n if max_time > 0:\n self.assertEqual(len(gradients), len(gradients_dynamic_rnn))\n gradients_val = sess.run(\n gradients,\n feed_dict={\n inputs: rand_input,\n sequence_length: rand_seq_len\n })\n gradients_dynamic_rnn_val = sess.run(\n gradients_dynamic_rnn,\n feed_dict={\n inputs: rand_input,\n sequence_length: rand_seq_len\n })\n self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))\n input_gradients_val = gradients_val[0]\n input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]\n self.assertAllClose(input_gradients_val,\n input_gradients_dynamic_rnn_val)\n for i in range(1, len(gradients_val)):\n self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])\n\n def testRawRNNZeroLength(self):\n # NOTE: Because with 0 time steps, raw_rnn does not have shape\n # information about the input, it is impossible to perform\n # gradients comparisons as the gradients eval will fail. 
So this\n # case skips the gradients test.\n self._testRawRNN(max_time=0)\n\n def testRawRNN(self):\n self._testRawRNN(max_time=10)\n\n def testLoopState(self):\n with self.test_session(graph=ops_lib.Graph()):\n max_time = 10\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = np.random.randn(max_time, batch_size, input_depth)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, loop_state):\n if cell_output is None:\n loop_state = constant_op.constant([0])\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])\n next_state = cell_state\n emit_output = cell_output # == None for time == 0\n elements_finished = array_ops.tile([time_ >= max_time], [batch_size])\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output,\n loop_state)\n\n r = rnn.raw_rnn(cell, loop_fn)\n loop_state = r[-1]\n self.assertEqual([10], loop_state.eval())\n\n def testLoopStateWithTensorArray(self):\n with self.test_session(graph=ops_lib.Graph()):\n max_time = 4\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = np.random.randn(max_time, batch_size, input_depth)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, loop_state):\n if cell_output is None:\n loop_state = tensor_array_ops.TensorArray(\n dynamic_size=True,\n size=0,\n dtype=dtypes.int32,\n clear_after_read=False)\n loop_state = loop_state.write(0, 1)\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n loop_state = loop_state.write(time_,\n loop_state.read(time_ - 1) + time_)\n next_state = cell_state\n emit_output = cell_output # == None for time == 0\n elements_finished = array_ops.tile([time_ >= max_time], [batch_size])\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output,\n loop_state)\n\n r = rnn.raw_rnn(cell, loop_fn)\n loop_state = r[-1]\n loop_state = loop_state.stack()\n self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())\n\n def testEmitDifferentStructureThanCellOutput(self):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n max_time = 10\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = np.random.randn(max_time, batch_size, input_depth)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n # Verify emit shapes may be unknown by feeding a placeholder that\n # determines an emit shape.\n unknown_dim = array_ops.placeholder(dtype=dtypes.int32)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, _):\n if 
cell_output is None:\n emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),\n array_ops.zeros([unknown_dim], dtype=dtypes.int64))\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),\n array_ops.ones(\n [batch_size, unknown_dim], dtype=dtypes.int64))\n next_state = cell_state\n elements_finished = array_ops.tile([time_ >= max_time], [batch_size])\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output, None)\n\n r = rnn.raw_rnn(cell, loop_fn)\n output_ta = r[0]\n self.assertEqual(2, len(output_ta))\n self.assertEqual([dtypes.int32, dtypes.int64],\n [ta.dtype for ta in output_ta])\n output = [ta.stack() for ta in output_ta]\n output_vals = sess.run(output, feed_dict={unknown_dim: 1})\n self.assertAllEqual(\n np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])\n self.assertAllEqual(\n np.ones((max_time, batch_size, 1), np.int64), output_vals[1])\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n variables_lib.global_variables_initializer()\n\n # check that all the variables names starts\n # with the proper scope.\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testRawRNNScope(self):\n max_time = 10\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n def factory(scope):\n inputs = array_ops.placeholder(\n shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)\n sequence_length = array_ops.placeholder(\n shape=(batch_size,), dtype=dtypes.int32)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, unused_loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n next_state = cell_state\n\n elements_finished = (time_ >= sequence_length)\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output, None)\n\n return rnn.raw_rnn(cell, loop_fn, scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n\nclass DeviceWrapperCell(rnn_cell.RNNCell):\n \"\"\"Class to ensure cell calculation happens on a specific device.\"\"\"\n\n def __init__(self, cell, device):\n self._cell = cell\n self._device = 
device\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n def __call__(self, input_, state, scope=None):\n if self._device is not None:\n with ops_lib.device(self._device):\n return self._cell(input_, state, scope=scope)\n else:\n return self._cell(input_, state, scope=scope)\n\n\nclass TensorArrayOnCorrectDeviceTest(test.TestCase):\n\n def _execute_rnn_on(self,\n rnn_device=None,\n cell_device=None,\n input_device=None):\n batch_size = 3\n time_steps = 7\n input_size = 5\n num_units = 10\n\n cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)\n gpu_cell = DeviceWrapperCell(cell, cell_device)\n inputs = np.random.randn(batch_size, time_steps, input_size).astype(\n np.float32)\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n\n if input_device is not None:\n with ops_lib.device(input_device):\n inputs = constant_op.constant(inputs)\n\n if rnn_device is not None:\n with ops_lib.device(rnn_device):\n outputs, _ = rnn.dynamic_rnn(\n gpu_cell,\n inputs,\n sequence_length=sequence_length,\n dtype=dtypes.float32)\n else:\n outputs, _ = rnn.dynamic_rnn(\n gpu_cell,\n inputs,\n sequence_length=sequence_length,\n dtype=dtypes.float32)\n\n with self.test_session(use_gpu=True) as sess:\n opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n variables_lib.global_variables_initializer().run()\n sess.run(outputs, options=opts, run_metadata=run_metadata)\n\n return run_metadata\n\n def _retrieve_cpu_gpu_stats(self, run_metadata):\n cpu_stats = None\n gpu_stats = None\n step_stats = run_metadata.step_stats\n for ds in step_stats.dev_stats:\n if \"cpu:0\" in ds.device[-5:].lower():\n cpu_stats = ds.node_stats\n if \"gpu:0\" == ds.device[-5:].lower():\n gpu_stats = ds.node_stats\n return cpu_stats, gpu_stats\n\n def testRNNOnCPUCellOnGPU(self):\n if not test.is_gpu_available():\n return # Test requires access to a GPU\n\n gpu_dev = test.gpu_device_name()\n run_metadata = self._execute_rnn_on(\n rnn_device=\"/cpu:0\", cell_device=gpu_dev)\n cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)\n\n def _assert_in(op_str, in_stats, out_stats):\n self.assertTrue(any(op_str in s.node_name for s in in_stats))\n self.assertFalse(any(op_str in s.node_name for s in out_stats))\n\n # Writes happen at output of RNN cell\n _assert_in(\"TensorArrayWrite\", gpu_stats, cpu_stats)\n # Gather happens on final TensorArray\n _assert_in(\"TensorArrayGather\", gpu_stats, cpu_stats)\n # Reads happen at input to RNN cell\n _assert_in(\"TensorArrayRead\", cpu_stats, gpu_stats)\n # Scatters happen to get initial input into TensorArray\n _assert_in(\"TensorArrayScatter\", cpu_stats, gpu_stats)\n\n def testRNNOnCPUCellOnCPU(self):\n if not test.is_gpu_available():\n return # Test requires access to a GPU\n\n gpu_dev = test.gpu_device_name()\n run_metadata = self._execute_rnn_on(\n rnn_device=\"/cpu:0\", cell_device=\"/cpu:0\", input_device=gpu_dev)\n cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)\n\n def _assert_in(op_str, in_stats, out_stats):\n self.assertTrue(any(op_str in s.node_name for s in in_stats))\n self.assertFalse(any(op_str in s.node_name for s in out_stats))\n\n # All TensorArray operations happen on CPU\n _assert_in(\"TensorArray\", cpu_stats, gpu_stats)\n\n def testInputOnGPUCellNotDeclared(self):\n if not test.is_gpu_available():\n return # Test requires access to a GPU\n\n gpu_dev = 
test.gpu_device_name()\n run_metadata = self._execute_rnn_on(input_device=gpu_dev)\n cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)\n\n def _assert_in(op_str, in_stats, out_stats):\n self.assertTrue(any(op_str in s.node_name for s in in_stats))\n self.assertFalse(any(op_str in s.node_name for s in out_stats))\n\n # Everything happens on GPU\n _assert_in(\"TensorArray\", gpu_stats, cpu_stats)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.rnn_cell.GRUCell",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.init_ops.random_uniform_initializer",
"tensorflow.python.ops.array_ops.zeros",
"numpy.random.randn",
"numpy.concatenate",
"numpy.zeros_like",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.random.randint",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.rnn.raw_rnn",
"numpy.hstack",
"numpy.swapaxes",
"tensorflow.python.ops.rnn_cell.LSTMCell",
"tensorflow.python.ops.math_ops.reduce_all",
"numpy.ones_like",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.rnn.bidirectional_dynamic_rnn",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.array_ops.ones",
"numpy.zeros",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.ops.variables.global_variables",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.rnn.static_rnn",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.rnn_cell.DropoutWrapper",
"tensorflow.python.ops.rnn.dynamic_rnn",
"tensorflow.python.ops.rnn.static_bidirectional_rnn",
"numpy.transpose",
"numpy.array",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"numpy.random.seed",
"tensorflow.python.ops.rnn.static_state_saving_rnn",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.tf_logging.info",
"numpy.linalg.norm",
"numpy.ones",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.platform.test.gpu_device_name",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.variables.local_variables_initializer",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.7",
"2.2"
]
}
] |
oricou/pandas | [
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49"
] | [
"pandas/tests/indexing/common.py",
"pandas/tests/indexing/multiindex/test_chaining_and_caching.py",
"pandas/tests/series/methods/test_convert_dtypes.py",
"pandas/tests/arrays/sparse/test_arithmetics.py",
"pandas/tests/io/pytables/test_round_trip.py",
"pandas/tests/indexes/multi/test_integrity.py",
"pandas/tests/series/methods/test_dropna.py"
] | [
"\"\"\" common utilities \"\"\"\nimport itertools\n\nimport numpy as np\n\nfrom pandas import (\n DataFrame,\n Float64Index,\n MultiIndex,\n Series,\n UInt64Index,\n date_range,\n)\nimport pandas._testing as tm\n\n\ndef _mklbl(prefix, n):\n return [f\"{prefix}{i}\" for i in range(n)]\n\n\ndef _axify(obj, key, axis):\n # create a tuple accessor\n axes = [slice(None)] * obj.ndim\n axes[axis] = key\n return tuple(axes)\n\n\nclass Base:\n \"\"\" indexing comprehensive base class \"\"\"\n\n _kinds = {\"series\", \"frame\"}\n _typs = {\n \"ints\",\n \"uints\",\n \"labels\",\n \"mixed\",\n \"ts\",\n \"floats\",\n \"empty\",\n \"ts_rev\",\n \"multi\",\n }\n\n def setup_method(self, method):\n\n self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))\n self.frame_ints = DataFrame(\n np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)\n )\n\n self.series_uints = Series(\n np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))\n )\n self.frame_uints = DataFrame(\n np.random.randn(4, 4),\n index=UInt64Index(range(0, 8, 2)),\n columns=UInt64Index(range(0, 12, 3)),\n )\n\n self.series_floats = Series(\n np.random.rand(4), index=Float64Index(range(0, 8, 2))\n )\n self.frame_floats = DataFrame(\n np.random.randn(4, 4),\n index=Float64Index(range(0, 8, 2)),\n columns=Float64Index(range(0, 12, 3)),\n )\n\n m_idces = [\n MultiIndex.from_product([[1, 2], [3, 4]]),\n MultiIndex.from_product([[5, 6], [7, 8]]),\n MultiIndex.from_product([[9, 10], [11, 12]]),\n ]\n\n self.series_multi = Series(np.random.rand(4), index=m_idces[0])\n self.frame_multi = DataFrame(\n np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]\n )\n\n self.series_labels = Series(np.random.randn(4), index=list(\"abcd\"))\n self.frame_labels = DataFrame(\n np.random.randn(4, 4), index=list(\"abcd\"), columns=list(\"ABCD\")\n )\n\n self.series_mixed = Series(np.random.randn(4), index=[2, 4, \"null\", 8])\n self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, \"null\", 8])\n\n self.series_ts = Series(\n np.random.randn(4), index=date_range(\"20130101\", periods=4)\n )\n self.frame_ts = DataFrame(\n np.random.randn(4, 4), index=date_range(\"20130101\", periods=4)\n )\n\n dates_rev = date_range(\"20130101\", periods=4).sort_values(ascending=False)\n self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)\n self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)\n\n self.frame_empty = DataFrame()\n self.series_empty = Series(dtype=object)\n\n # form agglomerates\n for kind in self._kinds:\n d = {}\n for typ in self._typs:\n d[typ] = getattr(self, f\"{kind}_{typ}\")\n\n setattr(self, kind, d)\n\n def generate_indices(self, f, values=False):\n \"\"\"\n generate the indices\n if values is True , use the axis values\n is False, use the range\n \"\"\"\n axes = f.axes\n if values:\n axes = (list(range(len(ax))) for ax in axes)\n\n return itertools.product(*axes)\n\n def get_value(self, name, f, i, values=False):\n \"\"\" return the value for the location i \"\"\"\n # check against values\n if values:\n return f.values[i]\n\n elif name == \"iat\":\n return f.iloc[i]\n else:\n assert name == \"at\"\n return f.loc[i]\n\n def check_values(self, f, func, values=False):\n\n if f is None:\n return\n axes = f.axes\n indicies = itertools.product(*axes)\n\n for i in indicies:\n result = getattr(f, func)[i]\n\n # check against values\n if values:\n expected = f.values[i]\n else:\n expected = f\n for a in reversed(i):\n expected = expected.__getitem__(a)\n\n 
tm.assert_almost_equal(result, expected)\n\n def check_result(self, method, key, typs=None, axes=None, fails=None):\n def _eq(axis, obj, key):\n \"\"\" compare equal for these 2 keys \"\"\"\n axified = _axify(obj, key, axis)\n try:\n getattr(obj, method).__getitem__(axified)\n\n except (IndexError, TypeError, KeyError) as detail:\n\n # if we are in fails, the ok, otherwise raise it\n if fails is not None:\n if isinstance(detail, fails):\n return\n raise\n\n if typs is None:\n typs = self._typs\n\n if axes is None:\n axes = [0, 1]\n else:\n assert axes in [0, 1]\n axes = [axes]\n\n # check\n for kind in self._kinds:\n\n d = getattr(self, kind)\n for ax in axes:\n for typ in typs:\n assert typ in self._typs\n\n obj = d[typ]\n if ax < obj.ndim:\n _eq(axis=ax, obj=obj, key=key)\n",
"import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\nimport pandas.core.common as com\n\n\ndef test_detect_chained_assignment():\n # Inplace ops, originally from:\n # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug\n a = [12, 23]\n b = [123, None]\n c = [1234, 2345]\n d = [12345, 23456]\n tuples = [(\"eyes\", \"left\"), (\"eyes\", \"right\"), (\"ears\", \"left\"), (\"ears\", \"right\")]\n events = {\n (\"eyes\", \"left\"): a,\n (\"eyes\", \"right\"): b,\n (\"ears\", \"left\"): c,\n (\"ears\", \"right\"): d,\n }\n multiind = MultiIndex.from_tuples(tuples, names=[\"part\", \"side\"])\n zed = DataFrame(events, index=[\"a\", \"b\"], columns=multiind)\n\n msg = \"A value is trying to be set on a copy of a slice from a DataFrame\"\n with pytest.raises(com.SettingWithCopyError, match=msg):\n zed[\"eyes\"][\"right\"].fillna(value=555, inplace=True)\n\n\ndef test_cache_updating():\n # 5216\n # make sure that we don't try to set a dead cache\n a = np.random.rand(10, 3)\n df = DataFrame(a, columns=[\"x\", \"y\", \"z\"])\n tuples = [(i, j) for i in range(5) for j in range(2)]\n index = MultiIndex.from_tuples(tuples)\n df.index = index\n\n # setting via chained assignment\n # but actually works, since everything is a view\n df.loc[0][\"z\"].iloc[0] = 1.0\n result = df.loc[(0, 0), \"z\"]\n assert result == 1\n\n # correct setting\n df.loc[(0, 0), \"z\"] = 2\n result = df.loc[(0, 0), \"z\"]\n assert result == 2\n\n\[email protected]_slow\ndef test_indexer_caching():\n # GH5727\n # make sure that indexers are in the _internal_names_set\n n = 1000001\n arrays = (range(n), range(n))\n index = MultiIndex.from_tuples(zip(*arrays))\n s = Series(np.zeros(n), index=index)\n str(s)\n\n # setitem\n expected = Series(np.ones(n), index=index)\n s = Series(np.zeros(n), index=index)\n s[s == 0] = 1\n tm.assert_series_equal(s, expected)\n",
"from itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_interval_dtype\n\nimport pandas as pd\nimport pandas._testing as tm\n\n# Each test case consists of a tuple with the data and dtype to create the\n# test Series, the default dtype for the expected result (which is valid\n# for most cases), and the specific cases where the result deviates from\n# this default. Those overrides are defined as a dict with (keyword, val) as\n# dictionary key. In case of multiple items, the last override takes precendence.\ntest_cases = [\n (\n # data\n [1, 2, 3],\n # original dtype\n np.dtype(\"int32\"),\n # default expected dtype\n \"Int32\",\n # exceptions on expected dtype\n {(\"convert_integer\", False): np.dtype(\"int32\")},\n ),\n (\n [1, 2, 3],\n np.dtype(\"int64\"),\n \"Int64\",\n {(\"convert_integer\", False): np.dtype(\"int64\")},\n ),\n (\n [\"x\", \"y\", \"z\"],\n np.dtype(\"O\"),\n pd.StringDtype(),\n {(\"convert_string\", False): np.dtype(\"O\")},\n ),\n (\n [True, False, np.nan],\n np.dtype(\"O\"),\n pd.BooleanDtype(),\n {(\"convert_boolean\", False): np.dtype(\"O\")},\n ),\n (\n [\"h\", \"i\", np.nan],\n np.dtype(\"O\"),\n pd.StringDtype(),\n {(\"convert_string\", False): np.dtype(\"O\")},\n ),\n ( # GH32117\n [\"h\", \"i\", 1],\n np.dtype(\"O\"),\n np.dtype(\"O\"),\n {},\n ),\n (\n [10, np.nan, 20],\n np.dtype(\"float\"),\n \"Int64\",\n {\n (\"convert_integer\", False, \"convert_floating\", True): \"Float64\",\n (\"convert_integer\", False, \"convert_floating\", False): np.dtype(\"float\"),\n },\n ),\n (\n [np.nan, 100.5, 200],\n np.dtype(\"float\"),\n \"Float64\",\n {(\"convert_floating\", False): np.dtype(\"float\")},\n ),\n (\n [3, 4, 5],\n \"Int8\",\n \"Int8\",\n {},\n ),\n (\n [[1, 2], [3, 4], [5]],\n None,\n np.dtype(\"O\"),\n {},\n ),\n (\n [4, 5, 6],\n np.dtype(\"uint32\"),\n \"UInt32\",\n {(\"convert_integer\", False): np.dtype(\"uint32\")},\n ),\n (\n [-10, 12, 13],\n np.dtype(\"i1\"),\n \"Int8\",\n {(\"convert_integer\", False): np.dtype(\"i1\")},\n ),\n (\n [1.2, 1.3],\n np.dtype(\"float32\"),\n \"Float32\",\n {(\"convert_floating\", False): np.dtype(\"float32\")},\n ),\n (\n [1, 2.0],\n object,\n \"Int64\",\n {\n (\"convert_integer\", False): \"Float64\",\n (\"convert_integer\", False, \"convert_floating\", False): np.dtype(\"float\"),\n (\"infer_objects\", False): np.dtype(\"object\"),\n },\n ),\n (\n [1, 2.5],\n object,\n \"Float64\",\n {\n (\"convert_floating\", False): np.dtype(\"float\"),\n (\"infer_objects\", False): np.dtype(\"object\"),\n },\n ),\n ([\"a\", \"b\"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),\n (\n pd.to_datetime([\"2020-01-14 10:00\", \"2020-01-15 11:11\"]),\n pd.DatetimeTZDtype(tz=\"UTC\"),\n pd.DatetimeTZDtype(tz=\"UTC\"),\n {},\n ),\n (\n pd.to_datetime([\"2020-01-14 10:00\", \"2020-01-15 11:11\"]),\n \"datetime64[ns]\",\n np.dtype(\"datetime64[ns]\"),\n {},\n ),\n (\n pd.to_datetime([\"2020-01-14 10:00\", \"2020-01-15 11:11\"]),\n object,\n np.dtype(\"datetime64[ns]\"),\n {(\"infer_objects\", False): np.dtype(\"object\")},\n ),\n (pd.period_range(\"1/1/2011\", freq=\"M\", periods=3), None, pd.PeriodDtype(\"M\"), {}),\n (\n pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),\n None,\n pd.IntervalDtype(\"int64\", \"right\"),\n {},\n ),\n]\n\n\nclass TestSeriesConvertDtypes:\n @pytest.mark.parametrize(\n \"data, maindtype, expected_default, expected_other\",\n test_cases,\n )\n @pytest.mark.parametrize(\"params\", product(*[(True, False)] * 5))\n def test_convert_dtypes(\n 
self, data, maindtype, params, expected_default, expected_other\n ):\n warn = None\n if (\n hasattr(data, \"dtype\")\n and data.dtype == \"M8[ns]\"\n and isinstance(maindtype, pd.DatetimeTZDtype)\n ):\n # this astype is deprecated in favor of tz_localize\n warn = FutureWarning\n\n if maindtype is not None:\n with tm.assert_produces_warning(warn):\n series = pd.Series(data, dtype=maindtype)\n else:\n series = pd.Series(data)\n\n result = series.convert_dtypes(*params)\n\n param_names = [\n \"infer_objects\",\n \"convert_string\",\n \"convert_integer\",\n \"convert_boolean\",\n \"convert_floating\",\n ]\n params_dict = dict(zip(param_names, params))\n\n expected_dtype = expected_default\n for spec, dtype in expected_other.items():\n if all(params_dict[key] is val for key, val in zip(spec[::2], spec[1::2])):\n expected_dtype = dtype\n\n warn2 = None\n if (\n hasattr(data, \"dtype\")\n and data.dtype == \"M8[ns]\"\n and isinstance(expected_dtype, pd.DatetimeTZDtype)\n ):\n # this astype is deprecated in favor of tz_localize\n warn2 = FutureWarning\n\n with tm.assert_produces_warning(warn2):\n expected = pd.Series(data, dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n # Test that it is a copy\n copy = series.copy(deep=True)\n if is_interval_dtype(result.dtype) and result.dtype.subtype.kind in [\"i\", \"u\"]:\n msg = \"Cannot set float NaN to integer-backed IntervalArray\"\n with pytest.raises(ValueError, match=msg):\n result[result.notna()] = np.nan\n else:\n result[result.notna()] = np.nan\n\n # Make sure original not changed\n tm.assert_series_equal(series, copy)\n\n def test_convert_string_dtype(self):\n # https://github.com/pandas-dev/pandas/issues/31731 -> converting columns\n # that are already string dtype\n df = pd.DataFrame(\n {\"A\": [\"a\", \"b\", pd.NA], \"B\": [\"ä\", \"ö\", \"ü\"]}, dtype=\"string\"\n )\n result = df.convert_dtypes()\n tm.assert_frame_equal(df, result)\n\n def test_convert_bool_dtype(self):\n # GH32287\n df = pd.DataFrame({\"A\": pd.array([True])})\n tm.assert_frame_equal(df, df.convert_dtypes())\n",
"import operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import np_version_under1p20\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.arrays.sparse import (\n SparseArray,\n SparseDtype,\n)\n\n\[email protected](params=[\"integer\", \"block\"])\ndef kind(request):\n \"\"\"kind kwarg to pass to SparseArray/SparseSeries\"\"\"\n return request.param\n\n\[email protected](params=[True, False])\ndef mix(request):\n # whether to operate op(sparse, dense) instead of op(sparse, sparse)\n return request.param\n\n\nclass TestSparseArrayArithmetics:\n\n _base = np.array\n _klass = SparseArray\n\n def _assert(self, a, b):\n tm.assert_numpy_array_equal(a, b)\n\n def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n if mix:\n result = op(a, b_dense).to_dense()\n else:\n result = op(a, b).to_dense()\n\n if op in [operator.truediv, ops.rtruediv]:\n # pandas uses future division\n expected = op(a_dense * 1.0, b_dense)\n else:\n expected = op(a_dense, b_dense)\n\n if op in [operator.floordiv, ops.rfloordiv]:\n # Series sets 1//0 to np.inf, which SparseArray does not do (yet)\n mask = np.isinf(expected)\n if mask.any():\n expected[mask] = np.nan\n\n self._assert(result, expected)\n\n def _check_bool_result(self, res):\n assert isinstance(res, self._klass)\n assert isinstance(res.dtype, SparseDtype)\n assert res.dtype.subtype == np.bool_\n assert isinstance(res.fill_value, bool)\n\n def _check_comparison_ops(self, a, b, a_dense, b_dense):\n with np.errstate(invalid=\"ignore\"):\n # Unfortunately, trying to wrap the computation of each expected\n # value is with np.errstate() is too tedious.\n #\n # sparse & sparse\n self._check_bool_result(a == b)\n self._assert((a == b).to_dense(), a_dense == b_dense)\n\n self._check_bool_result(a != b)\n self._assert((a != b).to_dense(), a_dense != b_dense)\n\n self._check_bool_result(a >= b)\n self._assert((a >= b).to_dense(), a_dense >= b_dense)\n\n self._check_bool_result(a <= b)\n self._assert((a <= b).to_dense(), a_dense <= b_dense)\n\n self._check_bool_result(a > b)\n self._assert((a > b).to_dense(), a_dense > b_dense)\n\n self._check_bool_result(a < b)\n self._assert((a < b).to_dense(), a_dense < b_dense)\n\n # sparse & dense\n self._check_bool_result(a == b_dense)\n self._assert((a == b_dense).to_dense(), a_dense == b_dense)\n\n self._check_bool_result(a != b_dense)\n self._assert((a != b_dense).to_dense(), a_dense != b_dense)\n\n self._check_bool_result(a >= b_dense)\n self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)\n\n self._check_bool_result(a <= b_dense)\n self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)\n\n self._check_bool_result(a > b_dense)\n self._assert((a > b_dense).to_dense(), a_dense > b_dense)\n\n self._check_bool_result(a < b_dense)\n self._assert((a < b_dense).to_dense(), a_dense < b_dense)\n\n def _check_logical_ops(self, a, b, a_dense, b_dense):\n # sparse & sparse\n self._check_bool_result(a & b)\n self._assert((a & b).to_dense(), a_dense & b_dense)\n\n self._check_bool_result(a | b)\n self._assert((a | b).to_dense(), a_dense | b_dense)\n # sparse & dense\n self._check_bool_result(a & b_dense)\n self._assert((a & b_dense).to_dense(), a_dense & b_dense)\n\n self._check_bool_result(a | b_dense)\n self._assert((a | b_dense).to_dense(), a_dense | b_dense)\n\n @pytest.mark.parametrize(\"scalar\", [0, 1, 3])\n @pytest.mark.parametrize(\"fill_value\", [None, 0, 2])\n def test_float_scalar(\n 
self, kind, mix, all_arithmetic_functions, fill_value, scalar, request\n ):\n op = all_arithmetic_functions\n\n if not np_version_under1p20:\n if op in [operator.floordiv, ops.rfloordiv]:\n if op is operator.floordiv and scalar != 0:\n pass\n elif op is ops.rfloordiv and scalar == 0:\n pass\n else:\n mark = pytest.mark.xfail(reason=\"GH#38172\")\n request.node.add_marker(mark)\n\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n\n a = self._klass(values, kind=kind, fill_value=fill_value)\n self._check_numeric_ops(a, scalar, values, scalar, mix, op)\n\n def test_float_scalar_comparison(self, kind):\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n\n a = self._klass(values, kind=kind)\n self._check_comparison_ops(a, 1, values, 1)\n self._check_comparison_ops(a, 0, values, 0)\n self._check_comparison_ops(a, 3, values, 3)\n\n a = self._klass(values, kind=kind, fill_value=0)\n self._check_comparison_ops(a, 1, values, 1)\n self._check_comparison_ops(a, 0, values, 0)\n self._check_comparison_ops(a, 3, values, 3)\n\n a = self._klass(values, kind=kind, fill_value=2)\n self._check_comparison_ops(a, 1, values, 1)\n self._check_comparison_ops(a, 0, values, 0)\n self._check_comparison_ops(a, 3, values, 3)\n\n def test_float_same_index_without_nans(\n self, kind, mix, all_arithmetic_functions, request\n ):\n # when sp_index are the same\n op = all_arithmetic_functions\n\n values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])\n rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind, fill_value=0)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n def test_float_same_index_with_nans(\n self, kind, mix, all_arithmetic_functions, request\n ):\n # when sp_index are the same\n op = all_arithmetic_functions\n\n if not np_version_under1p20:\n if op is ops.rfloordiv:\n if not (mix and kind == \"block\"):\n mark = pytest.mark.xfail(reason=\"GH#38172\")\n request.node.add_marker(mark)\n\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])\n\n a = self._klass(values, kind=kind)\n b = self._klass(rvalues, kind=kind)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n def test_float_same_index_comparison(self, kind):\n # when sp_index are the same\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])\n\n a = self._klass(values, kind=kind)\n b = self._klass(rvalues, kind=kind)\n self._check_comparison_ops(a, b, values, rvalues)\n\n values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])\n rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind, fill_value=0)\n self._check_comparison_ops(a, b, values, rvalues)\n\n def test_float_array(self, kind, mix, all_arithmetic_functions):\n op = all_arithmetic_functions\n\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])\n\n a = self._klass(values, kind=kind)\n b = self._klass(rvalues, kind=kind)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind)\n 
self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind, fill_value=0)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, kind=kind, fill_value=1)\n b = self._klass(rvalues, kind=kind, fill_value=2)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n def test_float_array_different_kind(self, mix, all_arithmetic_functions):\n op = all_arithmetic_functions\n\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])\n\n a = self._klass(values, kind=\"integer\")\n b = self._klass(rvalues, kind=\"block\")\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)\n\n a = self._klass(values, kind=\"integer\", fill_value=0)\n b = self._klass(rvalues, kind=\"block\")\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, kind=\"integer\", fill_value=0)\n b = self._klass(rvalues, kind=\"block\", fill_value=0)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, kind=\"integer\", fill_value=1)\n b = self._klass(rvalues, kind=\"block\", fill_value=2)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n def test_float_array_comparison(self, kind):\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])\n\n a = self._klass(values, kind=kind)\n b = self._klass(rvalues, kind=kind)\n self._check_comparison_ops(a, b, values, rvalues)\n self._check_comparison_ops(a, b * 0, values, rvalues * 0)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind)\n self._check_comparison_ops(a, b, values, rvalues)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind, fill_value=0)\n self._check_comparison_ops(a, b, values, rvalues)\n\n a = self._klass(values, kind=kind, fill_value=1)\n b = self._klass(rvalues, kind=kind, fill_value=2)\n self._check_comparison_ops(a, b, values, rvalues)\n\n def test_int_array(self, kind, mix, all_arithmetic_functions):\n op = all_arithmetic_functions\n\n # have to specify dtype explicitly until fixing GH 667\n dtype = np.int64\n\n values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)\n rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)\n\n a = self._klass(values, dtype=dtype, kind=kind)\n assert a.dtype == SparseDtype(dtype)\n b = self._klass(rvalues, dtype=dtype, kind=kind)\n assert b.dtype == SparseDtype(dtype)\n\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)\n\n a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)\n assert a.dtype == SparseDtype(dtype)\n b = self._klass(rvalues, dtype=dtype, kind=kind)\n assert b.dtype == SparseDtype(dtype)\n\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)\n assert a.dtype == SparseDtype(dtype)\n b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)\n assert b.dtype == SparseDtype(dtype)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)\n assert a.dtype == SparseDtype(dtype, fill_value=1)\n b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)\n assert b.dtype == 
SparseDtype(dtype, fill_value=2)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n def test_int_array_comparison(self, kind):\n dtype = \"int64\"\n # int32 NI ATM\n\n values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)\n rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)\n\n a = self._klass(values, dtype=dtype, kind=kind)\n b = self._klass(rvalues, dtype=dtype, kind=kind)\n self._check_comparison_ops(a, b, values, rvalues)\n self._check_comparison_ops(a, b * 0, values, rvalues * 0)\n\n a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)\n b = self._klass(rvalues, dtype=dtype, kind=kind)\n self._check_comparison_ops(a, b, values, rvalues)\n\n a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)\n b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0)\n self._check_comparison_ops(a, b, values, rvalues)\n\n a = self._klass(values, dtype=dtype, kind=kind, fill_value=1)\n b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2)\n self._check_comparison_ops(a, b, values, rvalues)\n\n @pytest.mark.parametrize(\"fill_value\", [True, False, np.nan])\n def test_bool_same_index(self, kind, fill_value):\n # GH 14000\n # when sp_index are the same\n values = self._base([True, False, True, True], dtype=np.bool_)\n rvalues = self._base([True, False, True, True], dtype=np.bool_)\n\n a = self._klass(values, kind=kind, dtype=np.bool_, fill_value=fill_value)\n b = self._klass(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value)\n self._check_logical_ops(a, b, values, rvalues)\n\n @pytest.mark.parametrize(\"fill_value\", [True, False, np.nan])\n def test_bool_array_logical(self, kind, fill_value):\n # GH 14000\n # when sp_index are the same\n values = self._base([True, False, True, False, True, True], dtype=np.bool_)\n rvalues = self._base([True, False, False, True, False, True], dtype=np.bool_)\n\n a = self._klass(values, kind=kind, dtype=np.bool_, fill_value=fill_value)\n b = self._klass(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value)\n self._check_logical_ops(a, b, values, rvalues)\n\n def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request):\n op = all_arithmetic_functions\n\n if not np_version_under1p20:\n if op in [operator.floordiv, ops.rfloordiv] and mix:\n mark = pytest.mark.xfail(reason=\"GH#38172\")\n request.node.add_marker(mark)\n\n rdtype = \"int64\"\n\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)\n\n a = self._klass(values, kind=kind)\n b = self._klass(rvalues, kind=kind)\n assert b.dtype == SparseDtype(rdtype)\n\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind)\n assert b.dtype == SparseDtype(rdtype)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind, fill_value=0)\n assert b.dtype == SparseDtype(rdtype)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n a = self._klass(values, kind=kind, fill_value=1)\n b = self._klass(rvalues, kind=kind, fill_value=2)\n assert b.dtype == SparseDtype(rdtype, fill_value=2)\n self._check_numeric_ops(a, b, values, rvalues, mix, op)\n\n def test_mixed_array_comparison(self, kind):\n rdtype = \"int64\"\n # int32 NI ATM\n\n values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n 
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)\n\n a = self._klass(values, kind=kind)\n b = self._klass(rvalues, kind=kind)\n assert b.dtype == SparseDtype(rdtype)\n\n self._check_comparison_ops(a, b, values, rvalues)\n self._check_comparison_ops(a, b * 0, values, rvalues * 0)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind)\n assert b.dtype == SparseDtype(rdtype)\n self._check_comparison_ops(a, b, values, rvalues)\n\n a = self._klass(values, kind=kind, fill_value=0)\n b = self._klass(rvalues, kind=kind, fill_value=0)\n assert b.dtype == SparseDtype(rdtype)\n self._check_comparison_ops(a, b, values, rvalues)\n\n a = self._klass(values, kind=kind, fill_value=1)\n b = self._klass(rvalues, kind=kind, fill_value=2)\n assert b.dtype == SparseDtype(rdtype, fill_value=2)\n self._check_comparison_ops(a, b, values, rvalues)\n\n def test_xor(self):\n s = SparseArray([True, True, False, False])\n t = SparseArray([True, False, True, False])\n result = s ^ t\n sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype=\"int32\"))\n expected = SparseArray([False, True, True], sparse_index=sp_index)\n tm.assert_sp_array_equal(result, expected)\n\n\[email protected](\"op\", [operator.eq, operator.add])\ndef test_with_list(op):\n arr = SparseArray([0, 1], fill_value=0)\n result = op(arr, [0, 1])\n expected = op(arr, SparseArray([0, 1]))\n tm.assert_sp_array_equal(result, expected)\n\n\ndef test_with_dataframe():\n # GH#27910\n arr = SparseArray([0, 1], fill_value=0)\n df = pd.DataFrame([[1, 2], [3, 4]])\n result = arr.__add__(df)\n assert result is NotImplemented\n\n\ndef test_with_zerodim_ndarray():\n # GH#27910\n arr = SparseArray([0, 1], fill_value=0)\n\n result = arr * np.array(2)\n expected = arr * 2\n tm.assert_sp_array_equal(result, expected)\n\n\[email protected](\"ufunc\", [np.abs, np.exp])\[email protected](\n \"arr\", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])]\n)\ndef test_ufuncs(ufunc, arr):\n result = ufunc(arr)\n fill_value = ufunc(arr.fill_value)\n expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value)\n tm.assert_sp_array_equal(result, expected)\n\n\[email protected](\n \"a, b\",\n [\n (SparseArray([0, 0, 0]), np.array([0, 1, 2])),\n (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),\n (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),\n (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),\n (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),\n ],\n)\[email protected](\"ufunc\", [np.add, np.greater])\ndef test_binary_ufuncs(ufunc, a, b):\n # can't say anything about fill value here.\n result = ufunc(a, b)\n expected = ufunc(np.asarray(a), np.asarray(b))\n assert isinstance(result, SparseArray)\n tm.assert_numpy_array_equal(np.asarray(result), expected)\n\n\ndef test_ndarray_inplace():\n sparray = SparseArray([0, 2, 0, 0])\n ndarray = np.array([0, 1, 2, 3])\n ndarray += sparray\n expected = np.array([0, 3, 2, 3])\n tm.assert_numpy_array_equal(ndarray, expected)\n\n\ndef test_sparray_inplace():\n sparray = SparseArray([0, 2, 0, 0])\n ndarray = np.array([0, 1, 2, 3])\n sparray += ndarray\n expected = SparseArray([0, 3, 2, 3], fill_value=0)\n tm.assert_sp_array_equal(sparray, expected)\n\n\[email protected](\"fill_value\", [True, False])\ndef test_invert(fill_value):\n arr = np.array([True, False, False, True])\n sparray = SparseArray(arr, fill_value=fill_value)\n result = ~sparray\n expected = SparseArray(~arr, fill_value=not fill_value)\n 
tm.assert_sp_array_equal(result, expected)\n\n result = ~pd.Series(sparray)\n expected = pd.Series(expected)\n tm.assert_series_equal(result, expected)\n\n result = ~pd.DataFrame({\"A\": sparray})\n expected = pd.DataFrame({\"A\": expected})\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"fill_value\", [0, np.nan])\[email protected](\"op\", [operator.pos, operator.neg])\ndef test_unary_op(op, fill_value):\n arr = np.array([0, 1, np.nan, 2])\n sparray = SparseArray(arr, fill_value=fill_value)\n result = op(sparray)\n expected = SparseArray(op(arr), fill_value=op(fill_value))\n tm.assert_sp_array_equal(result, expected)\n",
"import datetime\nimport re\nfrom warnings import (\n catch_warnings,\n simplefilter,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import Timestamp\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _testing as tm,\n bdate_range,\n read_hdf,\n)\nfrom pandas.tests.io.pytables.common import (\n _maybe_remove,\n ensure_clean_path,\n ensure_clean_store,\n)\nfrom pandas.util import _test_decorators as td\n\n_default_compressor = \"blosc\"\n\n\npytestmark = pytest.mark.single\n\n\ndef test_conv_read_write(setup_path):\n with tm.ensure_clean() as path:\n\n def roundtrip(key, obj, **kwargs):\n obj.to_hdf(path, key, **kwargs)\n return read_hdf(path, key)\n\n o = tm.makeTimeSeries()\n tm.assert_series_equal(o, roundtrip(\"series\", o))\n\n o = tm.makeStringSeries()\n tm.assert_series_equal(o, roundtrip(\"string_series\", o))\n\n o = tm.makeDataFrame()\n tm.assert_frame_equal(o, roundtrip(\"frame\", o))\n\n # table\n df = DataFrame({\"A\": range(5), \"B\": range(5)})\n df.to_hdf(path, \"table\", append=True)\n result = read_hdf(path, \"table\", where=[\"index>2\"])\n tm.assert_frame_equal(df[df.index > 2], result)\n\n\ndef test_long_strings(setup_path):\n\n # GH6166\n df = DataFrame(\n {\"a\": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)\n )\n\n with ensure_clean_store(setup_path) as store:\n store.append(\"df\", df, data_columns=[\"a\"])\n\n result = store.select(\"df\")\n tm.assert_frame_equal(df, result)\n\n\ndef test_api(setup_path):\n\n # GH4584\n # API issue when to_hdf doesn't accept append AND format args\n with ensure_clean_path(setup_path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path, \"df\", append=True, format=\"table\")\n df.iloc[10:].to_hdf(path, \"df\", append=True, format=\"table\")\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n # append to False\n df.iloc[:10].to_hdf(path, \"df\", append=False, format=\"table\")\n df.iloc[10:].to_hdf(path, \"df\", append=True, format=\"table\")\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n with ensure_clean_path(setup_path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path, \"df\", append=True)\n df.iloc[10:].to_hdf(path, \"df\", append=True, format=\"table\")\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n # append to False\n df.iloc[:10].to_hdf(path, \"df\", append=False, format=\"table\")\n df.iloc[10:].to_hdf(path, \"df\", append=True)\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n with ensure_clean_path(setup_path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, \"df\", append=False, format=\"fixed\")\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n df.to_hdf(path, \"df\", append=False, format=\"f\")\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n df.to_hdf(path, \"df\", append=False)\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n df.to_hdf(path, \"df\")\n tm.assert_frame_equal(read_hdf(path, \"df\"), df)\n\n with ensure_clean_store(setup_path) as store:\n\n df = tm.makeDataFrame()\n\n _maybe_remove(store, \"df\")\n store.append(\"df\", df.iloc[:10], append=True, format=\"table\")\n store.append(\"df\", df.iloc[10:], append=True, format=\"table\")\n tm.assert_frame_equal(store.select(\"df\"), df)\n\n # append to False\n _maybe_remove(store, \"df\")\n store.append(\"df\", df.iloc[:10], append=False, format=\"table\")\n store.append(\"df\", df.iloc[10:], append=True, format=\"table\")\n tm.assert_frame_equal(store.select(\"df\"), df)\n\n # formats\n 
_maybe_remove(store, \"df\")\n store.append(\"df\", df.iloc[:10], append=False, format=\"table\")\n store.append(\"df\", df.iloc[10:], append=True, format=\"table\")\n tm.assert_frame_equal(store.select(\"df\"), df)\n\n _maybe_remove(store, \"df\")\n store.append(\"df\", df.iloc[:10], append=False, format=\"table\")\n store.append(\"df\", df.iloc[10:], append=True, format=None)\n tm.assert_frame_equal(store.select(\"df\"), df)\n\n with ensure_clean_path(setup_path) as path:\n # Invalid.\n df = tm.makeDataFrame()\n\n msg = \"Can only append to Tables\"\n\n with pytest.raises(ValueError, match=msg):\n df.to_hdf(path, \"df\", append=True, format=\"f\")\n\n with pytest.raises(ValueError, match=msg):\n df.to_hdf(path, \"df\", append=True, format=\"fixed\")\n\n msg = r\"invalid HDFStore format specified \\[foo\\]\"\n\n with pytest.raises(TypeError, match=msg):\n df.to_hdf(path, \"df\", append=True, format=\"foo\")\n\n with pytest.raises(TypeError, match=msg):\n df.to_hdf(path, \"df\", append=False, format=\"foo\")\n\n # File path doesn't exist\n path = \"\"\n msg = f\"File {path} does not exist\"\n\n with pytest.raises(FileNotFoundError, match=msg):\n read_hdf(path, \"df\")\n\n\ndef test_get(setup_path):\n\n with ensure_clean_store(setup_path) as store:\n store[\"a\"] = tm.makeTimeSeries()\n left = store.get(\"a\")\n right = store[\"a\"]\n tm.assert_series_equal(left, right)\n\n left = store.get(\"/a\")\n right = store[\"/a\"]\n tm.assert_series_equal(left, right)\n\n with pytest.raises(KeyError, match=\"'No object named b in the file'\"):\n store.get(\"b\")\n\n\ndef test_put_integer(setup_path):\n # non-date, non-string index\n df = DataFrame(np.random.randn(50, 100))\n _check_roundtrip(df, tm.assert_frame_equal, setup_path)\n\n\ndef test_table_values_dtypes_roundtrip(setup_path):\n\n with ensure_clean_store(setup_path) as store:\n df1 = DataFrame({\"a\": [1, 2, 3]}, dtype=\"f8\")\n store.append(\"df_f8\", df1)\n tm.assert_series_equal(df1.dtypes, store[\"df_f8\"].dtypes)\n\n df2 = DataFrame({\"a\": [1, 2, 3]}, dtype=\"i8\")\n store.append(\"df_i8\", df2)\n tm.assert_series_equal(df2.dtypes, store[\"df_i8\"].dtypes)\n\n # incompatible dtype\n msg = re.escape(\n \"invalid combination of [values_axes] on appending data \"\n \"[name->values_block_0,cname->values_block_0,\"\n \"dtype->float64,kind->float,shape->(1, 3)] vs \"\n \"current table [name->values_block_0,\"\n \"cname->values_block_0,dtype->int64,kind->integer,\"\n \"shape->None]\"\n )\n with pytest.raises(ValueError, match=msg):\n store.append(\"df_i8\", df1)\n\n # check creation/storage/retrieval of float32 (a bit hacky to\n # actually create them thought)\n df1 = DataFrame(np.array([[1], [2], [3]], dtype=\"f4\"), columns=[\"A\"])\n store.append(\"df_f4\", df1)\n tm.assert_series_equal(df1.dtypes, store[\"df_f4\"].dtypes)\n assert df1.dtypes[0] == \"float32\"\n\n # check with mixed dtypes\n df1 = DataFrame(\n {\n c: Series(np.random.randint(5), dtype=c)\n for c in [\"float32\", \"float64\", \"int32\", \"int64\", \"int16\", \"int8\"]\n }\n )\n df1[\"string\"] = \"foo\"\n df1[\"float322\"] = 1.0\n df1[\"float322\"] = df1[\"float322\"].astype(\"float32\")\n df1[\"bool\"] = df1[\"float32\"] > 0\n df1[\"time1\"] = Timestamp(\"20130101\")\n df1[\"time2\"] = Timestamp(\"20130102\")\n\n store.append(\"df_mixed_dtypes1\", df1)\n result = store.select(\"df_mixed_dtypes1\").dtypes.value_counts()\n result.index = [str(i) for i in result.index]\n expected = Series(\n {\n \"float32\": 2,\n \"float64\": 1,\n \"int32\": 1,\n \"bool\": 1,\n \"int16\": 
1,\n \"int8\": 1,\n \"int64\": 1,\n \"object\": 1,\n \"datetime64[ns]\": 2,\n }\n )\n result = result.sort_index()\n expected = expected.sort_index()\n tm.assert_series_equal(result, expected)\n\n\ndef test_series(setup_path):\n\n s = tm.makeStringSeries()\n _check_roundtrip(s, tm.assert_series_equal, path=setup_path)\n\n ts = tm.makeTimeSeries()\n _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)\n\n ts2 = Series(ts.index, Index(ts.index, dtype=object))\n _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)\n\n ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))\n _check_roundtrip(\n ts3, tm.assert_series_equal, path=setup_path, check_index_type=False\n )\n\n\ndef test_float_index(setup_path):\n\n # GH #454\n index = np.random.randn(10)\n s = Series(np.random.randn(10), index=index)\n _check_roundtrip(s, tm.assert_series_equal, path=setup_path)\n\n\ndef test_tuple_index(setup_path):\n\n # GH #492\n col = np.arange(10)\n idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]\n data = np.random.randn(30).reshape((3, 10))\n DF = DataFrame(data, index=idx, columns=col)\n\n with catch_warnings(record=True):\n simplefilter(\"ignore\", pd.errors.PerformanceWarning)\n _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)\n\n\[email protected](\"ignore::pandas.errors.PerformanceWarning\")\ndef test_index_types(setup_path):\n with catch_warnings(record=True):\n values = np.random.randn(2)\n\n func = lambda l, r: tm.assert_series_equal(l, r, check_index_type=True)\n\n with catch_warnings(record=True):\n ser = Series(values, [0, \"y\"])\n _check_roundtrip(ser, func, path=setup_path)\n\n with catch_warnings(record=True):\n ser = Series(values, [datetime.datetime.today(), 0])\n _check_roundtrip(ser, func, path=setup_path)\n\n with catch_warnings(record=True):\n ser = Series(values, [\"y\", 0])\n _check_roundtrip(ser, func, path=setup_path)\n\n with catch_warnings(record=True):\n ser = Series(values, [datetime.date.today(), \"a\"])\n _check_roundtrip(ser, func, path=setup_path)\n\n with catch_warnings(record=True):\n ser = Series(values, [0, \"y\"])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(values, [datetime.datetime.today(), 0])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(values, [\"y\", 0])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(values, [datetime.date.today(), \"a\"])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(values, [1.23, \"b\"])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(values, [1, 1.53])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(values, [1, 5])\n _check_roundtrip(ser, func, path=setup_path)\n\n ser = Series(\n values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]\n )\n _check_roundtrip(ser, func, path=setup_path)\n\n\ndef test_timeseries_preepoch(setup_path):\n\n dr = bdate_range(\"1/1/1940\", \"1/1/1960\")\n ts = Series(np.random.randn(len(dr)), index=dr)\n try:\n _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)\n except OverflowError:\n pytest.skip(\"known failer on some windows platforms\")\n\n\[email protected](\n \"compression\", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]\n)\ndef test_frame(compression, setup_path):\n\n df = tm.makeDataFrame()\n\n # put in some random NAs\n df.values[0, 0] = np.nan\n df.values[5, 3] = np.nan\n\n _check_roundtrip_table(\n df, tm.assert_frame_equal, path=setup_path, compression=compression\n )\n _check_roundtrip(\n df, 
tm.assert_frame_equal, path=setup_path, compression=compression\n )\n\n tdf = tm.makeTimeDataFrame()\n _check_roundtrip(\n tdf, tm.assert_frame_equal, path=setup_path, compression=compression\n )\n\n with ensure_clean_store(setup_path) as store:\n # not consolidated\n df[\"foo\"] = np.random.randn(len(df))\n store[\"df\"] = df\n recons = store[\"df\"]\n assert recons._mgr.is_consolidated()\n\n # empty\n _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)\n\n\ndef test_empty_series_frame(setup_path):\n s0 = Series(dtype=object)\n s1 = Series(name=\"myseries\", dtype=object)\n df0 = DataFrame()\n df1 = DataFrame(index=[\"a\", \"b\", \"c\"])\n df2 = DataFrame(columns=[\"d\", \"e\", \"f\"])\n\n _check_roundtrip(s0, tm.assert_series_equal, path=setup_path)\n _check_roundtrip(s1, tm.assert_series_equal, path=setup_path)\n _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)\n _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)\n _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)\n\n\[email protected](\"dtype\", [np.int64, np.float64, object, \"m8[ns]\", \"M8[ns]\"])\ndef test_empty_series(dtype, setup_path):\n s = Series(dtype=dtype)\n _check_roundtrip(s, tm.assert_series_equal, path=setup_path)\n\n\ndef test_can_serialize_dates(setup_path):\n\n rng = [x.date() for x in bdate_range(\"1/1/2000\", \"1/30/2000\")]\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)\n\n\ndef test_store_hierarchical(setup_path):\n index = MultiIndex(\n levels=[[\"foo\", \"bar\", \"baz\", \"qux\"], [\"one\", \"two\", \"three\"]],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=[\"foo\", \"bar\"],\n )\n frame = DataFrame(np.random.randn(10, 3), index=index, columns=[\"A\", \"B\", \"C\"])\n\n _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)\n _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)\n _check_roundtrip(frame[\"A\"], tm.assert_series_equal, path=setup_path)\n\n # check that the names are stored\n with ensure_clean_store(setup_path) as store:\n store[\"frame\"] = frame\n recons = store[\"frame\"]\n tm.assert_frame_equal(recons, frame)\n\n\[email protected](\n \"compression\", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]\n)\ndef test_store_mixed(compression, setup_path):\n def _make_one():\n df = tm.makeDataFrame()\n df[\"obj1\"] = \"foo\"\n df[\"obj2\"] = \"bar\"\n df[\"bool1\"] = df[\"A\"] > 0\n df[\"bool2\"] = df[\"B\"] > 0\n df[\"int1\"] = 1\n df[\"int2\"] = 2\n return df._consolidate()\n\n df1 = _make_one()\n df2 = _make_one()\n\n _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)\n _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)\n\n with ensure_clean_store(setup_path) as store:\n store[\"obj\"] = df1\n tm.assert_frame_equal(store[\"obj\"], df1)\n store[\"obj\"] = df2\n tm.assert_frame_equal(store[\"obj\"], df2)\n\n # check that can store Series of all of these types\n _check_roundtrip(\n df1[\"obj1\"],\n tm.assert_series_equal,\n path=setup_path,\n compression=compression,\n )\n _check_roundtrip(\n df1[\"bool1\"],\n tm.assert_series_equal,\n path=setup_path,\n compression=compression,\n )\n _check_roundtrip(\n df1[\"int1\"],\n tm.assert_series_equal,\n path=setup_path,\n compression=compression,\n )\n\n\ndef _check_roundtrip(obj, comparator, path, compression=False, **kwargs):\n\n options = {}\n if compression:\n options[\"complib\"] = _default_compressor\n\n with 
ensure_clean_store(path, \"w\", **options) as store:\n store[\"obj\"] = obj\n retrieved = store[\"obj\"]\n comparator(retrieved, obj, **kwargs)\n\n\ndef _check_double_roundtrip(self, obj, comparator, path, compression=False, **kwargs):\n options = {}\n if compression:\n options[\"complib\"] = compression or _default_compressor\n\n with ensure_clean_store(path, \"w\", **options) as store:\n store[\"obj\"] = obj\n retrieved = store[\"obj\"]\n comparator(retrieved, obj, **kwargs)\n store[\"obj\"] = retrieved\n again = store[\"obj\"]\n comparator(again, obj, **kwargs)\n\n\ndef _check_roundtrip_table(obj, comparator, path, compression=False):\n options = {}\n if compression:\n options[\"complib\"] = _default_compressor\n\n with ensure_clean_store(path, \"w\", **options) as store:\n store.put(\"obj\", obj, format=\"table\")\n retrieved = store[\"obj\"]\n\n comparator(retrieved, obj)\n\n\ndef test_unicode_index(setup_path):\n\n unicode_values = [\"\\u03c3\", \"\\u03c3\\u03c3\"]\n\n # PerformanceWarning\n with catch_warnings(record=True):\n simplefilter(\"ignore\", pd.errors.PerformanceWarning)\n s = Series(np.random.randn(len(unicode_values)), unicode_values)\n _check_roundtrip(s, tm.assert_series_equal, path=setup_path)\n\n\ndef test_unicode_longer_encoded(setup_path):\n # GH 11234\n char = \"\\u0394\"\n df = DataFrame({\"A\": [char]})\n with ensure_clean_store(setup_path) as store:\n store.put(\"df\", df, format=\"table\", encoding=\"utf-8\")\n result = store.get(\"df\")\n tm.assert_frame_equal(result, df)\n\n df = DataFrame({\"A\": [\"a\", char], \"B\": [\"b\", \"b\"]})\n with ensure_clean_store(setup_path) as store:\n store.put(\"df\", df, format=\"table\", encoding=\"utf-8\")\n result = store.get(\"df\")\n tm.assert_frame_equal(result, df)\n\n\ndef test_store_datetime_mixed(setup_path):\n\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [1.0, 2.0, 3.0], \"c\": [\"a\", \"b\", \"c\"]})\n ts = tm.makeTimeSeries()\n df[\"d\"] = ts.index[:3]\n _check_roundtrip(df, tm.assert_frame_equal, path=setup_path)\n\n\ndef test_round_trip_equals(setup_path):\n # GH 9330\n df = DataFrame({\"B\": [1, 2], \"A\": [\"x\", \"y\"]})\n\n with ensure_clean_path(setup_path) as path:\n df.to_hdf(path, \"df\", format=\"table\")\n other = read_hdf(path, \"df\")\n tm.assert_frame_equal(df, other)\n assert df.equals(other)\n assert other.equals(df)\n",
"import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.cast import construct_1d_object_array_from_listlike\n\nimport pandas as pd\nfrom pandas import (\n IntervalIndex,\n MultiIndex,\n RangeIndex,\n)\nimport pandas._testing as tm\n\n\ndef test_labels_dtypes():\n\n # GH 8456\n i = MultiIndex.from_tuples([(\"A\", 1), (\"A\", 2)])\n assert i.codes[0].dtype == \"int8\"\n assert i.codes[1].dtype == \"int8\"\n\n i = MultiIndex.from_product([[\"a\"], range(40)])\n assert i.codes[1].dtype == \"int8\"\n i = MultiIndex.from_product([[\"a\"], range(400)])\n assert i.codes[1].dtype == \"int16\"\n i = MultiIndex.from_product([[\"a\"], range(40000)])\n assert i.codes[1].dtype == \"int32\"\n\n i = MultiIndex.from_product([[\"a\"], range(1000)])\n assert (i.codes[0] >= 0).all()\n assert (i.codes[1] >= 0).all()\n\n\ndef test_values_boxed():\n tuples = [\n (1, pd.Timestamp(\"2000-01-01\")),\n (2, pd.NaT),\n (3, pd.Timestamp(\"2000-01-03\")),\n (1, pd.Timestamp(\"2000-01-04\")),\n (2, pd.Timestamp(\"2000-01-02\")),\n (3, pd.Timestamp(\"2000-01-03\")),\n ]\n result = MultiIndex.from_tuples(tuples)\n expected = construct_1d_object_array_from_listlike(tuples)\n tm.assert_numpy_array_equal(result.values, expected)\n # Check that code branches for boxed values produce identical results\n tm.assert_numpy_array_equal(result.values[:4], result[:4].values)\n\n\ndef test_values_multiindex_datetimeindex():\n # Test to ensure we hit the boxing / nobox part of MI.values\n ints = np.arange(10 ** 18, 10 ** 18 + 5)\n naive = pd.DatetimeIndex(ints)\n\n aware = pd.DatetimeIndex(ints, tz=\"US/Central\")\n\n idx = MultiIndex.from_arrays([naive, aware])\n result = idx.values\n\n outer = pd.DatetimeIndex([x[0] for x in result])\n tm.assert_index_equal(outer, naive)\n\n inner = pd.DatetimeIndex([x[1] for x in result])\n tm.assert_index_equal(inner, aware)\n\n # n_lev > n_lab\n result = idx[:2].values\n\n outer = pd.DatetimeIndex([x[0] for x in result])\n tm.assert_index_equal(outer, naive[:2])\n\n inner = pd.DatetimeIndex([x[1] for x in result])\n tm.assert_index_equal(inner, aware[:2])\n\n\ndef test_values_multiindex_periodindex():\n # Test to ensure we hit the boxing / nobox part of MI.values\n ints = np.arange(2007, 2012)\n pidx = pd.PeriodIndex(ints, freq=\"D\")\n\n idx = MultiIndex.from_arrays([ints, pidx])\n result = idx.values\n\n outer = pd.Int64Index([x[0] for x in result])\n tm.assert_index_equal(outer, pd.Int64Index(ints))\n\n inner = pd.PeriodIndex([x[1] for x in result])\n tm.assert_index_equal(inner, pidx)\n\n # n_lev > n_lab\n result = idx[:2].values\n\n outer = pd.Int64Index([x[0] for x in result])\n tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))\n\n inner = pd.PeriodIndex([x[1] for x in result])\n tm.assert_index_equal(inner, pidx[:2])\n\n\ndef test_consistency():\n # need to construct an overflow\n major_axis = list(range(70000))\n minor_axis = list(range(10))\n\n major_codes = np.arange(70000)\n minor_codes = np.repeat(range(10), 7000)\n\n # the fact that is works means it's consistent\n index = MultiIndex(\n levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]\n )\n\n # inconsistent\n major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])\n minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])\n index = MultiIndex(\n levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]\n )\n\n assert index.is_unique is False\n\n\[email protected]_slow\ndef test_hash_collisions():\n # non-smoke test that we don't get hash collisions\n\n index = MultiIndex.from_product(\n 
[np.arange(1000), np.arange(1000)], names=[\"one\", \"two\"]\n )\n result = index.get_indexer(index.values)\n tm.assert_numpy_array_equal(result, np.arange(len(index), dtype=\"intp\"))\n\n for i in [0, 1, len(index) - 2, len(index) - 1]:\n result = index.get_loc(index[i])\n assert result == i\n\n\ndef test_dims():\n pass\n\n\ndef test_take_invalid_kwargs():\n vals = [[\"A\", \"B\"], [pd.Timestamp(\"2011-01-01\"), pd.Timestamp(\"2011-01-02\")]]\n idx = MultiIndex.from_product(vals, names=[\"str\", \"dt\"])\n indices = [1, 2]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n idx.take(indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, mode=\"clip\")\n\n\ndef test_isna_behavior(idx):\n # should not segfault GH5123\n # NOTE: if MI representation changes, may make sense to allow\n # isna(MI)\n msg = \"isna is not defined for MultiIndex\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.isna(idx)\n\n\ndef test_large_multiindex_error():\n # GH12527\n df_below_1000000 = pd.DataFrame(\n 1, index=MultiIndex.from_product([[1, 2], range(499999)]), columns=[\"dest\"]\n )\n with pytest.raises(KeyError, match=r\"^\\(-1, 0\\)$\"):\n df_below_1000000.loc[(-1, 0), \"dest\"]\n with pytest.raises(KeyError, match=r\"^\\(3, 0\\)$\"):\n df_below_1000000.loc[(3, 0), \"dest\"]\n df_above_1000000 = pd.DataFrame(\n 1, index=MultiIndex.from_product([[1, 2], range(500001)]), columns=[\"dest\"]\n )\n with pytest.raises(KeyError, match=r\"^\\(-1, 0\\)$\"):\n df_above_1000000.loc[(-1, 0), \"dest\"]\n with pytest.raises(KeyError, match=r\"^\\(3, 0\\)$\"):\n df_above_1000000.loc[(3, 0), \"dest\"]\n\n\ndef test_million_record_attribute_error():\n # GH 18165\n r = list(range(1000000))\n df = pd.DataFrame(\n {\"a\": r, \"b\": r}, index=MultiIndex.from_tuples([(x, x) for x in r])\n )\n\n msg = \"'Series' object has no attribute 'foo'\"\n with pytest.raises(AttributeError, match=msg):\n df[\"a\"].foo()\n\n\ndef test_can_hold_identifiers(idx):\n key = idx[0]\n assert idx._can_hold_identifiers_and_holds_name(key) is True\n\n\ndef test_metadata_immutable(idx):\n levels, codes = idx.levels, idx.codes\n # shouldn't be able to set at either the top level or base level\n mutable_regex = re.compile(\"does not support mutable operations\")\n with pytest.raises(TypeError, match=mutable_regex):\n levels[0] = levels[0]\n with pytest.raises(TypeError, match=mutable_regex):\n levels[0][0] = levels[0][0]\n # ditto for labels\n with pytest.raises(TypeError, match=mutable_regex):\n codes[0] = codes[0]\n with pytest.raises(ValueError, match=\"assignment destination is read-only\"):\n codes[0][0] = codes[0][0]\n # and for names\n names = idx.names\n with pytest.raises(TypeError, match=mutable_regex):\n names[0] = names[0]\n\n\ndef test_level_setting_resets_attributes():\n ind = MultiIndex.from_arrays([[\"A\", \"A\", \"B\", \"B\", \"B\"], [1, 2, 1, 2, 3]])\n assert ind.is_monotonic\n with tm.assert_produces_warning(FutureWarning):\n ind.set_levels([[\"A\", \"B\"], [1, 3, 2]], inplace=True)\n # if this fails, probably didn't reset the cache correctly.\n assert not ind.is_monotonic\n\n\ndef test_rangeindex_fallback_coercion_bug():\n # GH 12893\n foo = pd.DataFrame(np.arange(100).reshape((10, 10)))\n bar = pd.DataFrame(np.arange(100).reshape((10, 10)))\n df = 
pd.concat({\"foo\": foo.stack(), \"bar\": bar.stack()}, axis=1)\n df.index.names = [\"fizz\", \"buzz\"]\n\n str(df)\n expected = pd.DataFrame(\n {\"bar\": np.arange(100), \"foo\": np.arange(100)},\n index=MultiIndex.from_product([range(10), range(10)], names=[\"fizz\", \"buzz\"]),\n )\n tm.assert_frame_equal(df, expected, check_like=True)\n\n result = df.index.get_level_values(\"fizz\")\n expected = pd.Int64Index(np.arange(10), name=\"fizz\").repeat(10)\n tm.assert_index_equal(result, expected)\n\n result = df.index.get_level_values(\"buzz\")\n expected = pd.Int64Index(np.tile(np.arange(10), 10), name=\"buzz\")\n tm.assert_index_equal(result, expected)\n\n\ndef test_memory_usage(idx):\n result = idx.memory_usage()\n if len(idx):\n idx.get_loc(idx[0])\n result2 = idx.memory_usage()\n result3 = idx.memory_usage(deep=True)\n\n # RangeIndex, IntervalIndex\n # don't have engines\n if not isinstance(idx, (RangeIndex, IntervalIndex)):\n assert result2 > result\n\n if idx.inferred_type == \"object\":\n assert result3 > result2\n\n else:\n\n # we report 0 for no-length\n assert result == 0\n\n\ndef test_nlevels(idx):\n assert idx.nlevels == 2\n",
"import numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n IntervalIndex,\n NaT,\n Period,\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\n\n\nclass TestDropna:\n def test_dropna_empty(self):\n ser = Series([], dtype=object)\n\n assert len(ser.dropna()) == 0\n return_value = ser.dropna(inplace=True)\n assert return_value is None\n assert len(ser) == 0\n\n # invalid axis\n msg = \"No axis named 1 for object type Series\"\n with pytest.raises(ValueError, match=msg):\n ser.dropna(axis=1)\n\n def test_dropna_preserve_name(self, datetime_series):\n datetime_series[:5] = np.nan\n result = datetime_series.dropna()\n assert result.name == datetime_series.name\n name = datetime_series.name\n ts = datetime_series.copy()\n return_value = ts.dropna(inplace=True)\n assert return_value is None\n assert ts.name == name\n\n def test_dropna_no_nan(self):\n for ser in [\n Series([1, 2, 3], name=\"x\"),\n Series([False, True, False], name=\"x\"),\n ]:\n\n result = ser.dropna()\n tm.assert_series_equal(result, ser)\n assert result is not ser\n\n s2 = ser.copy()\n return_value = s2.dropna(inplace=True)\n assert return_value is None\n tm.assert_series_equal(s2, ser)\n\n def test_dropna_intervals(self):\n ser = Series(\n [np.nan, 1, 2, 3],\n IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),\n )\n\n result = ser.dropna()\n expected = ser.iloc[1:]\n tm.assert_series_equal(result, expected)\n\n def test_dropna_period_dtype(self):\n # GH#13737\n ser = Series([Period(\"2011-01\", freq=\"M\"), Period(\"NaT\", freq=\"M\")])\n result = ser.dropna()\n expected = Series([Period(\"2011-01\", freq=\"M\")])\n\n tm.assert_series_equal(result, expected)\n\n def test_datetime64_tz_dropna(self):\n # DatetimeBlock\n ser = Series(\n [\n Timestamp(\"2011-01-01 10:00\"),\n NaT,\n Timestamp(\"2011-01-03 10:00\"),\n NaT,\n ]\n )\n result = ser.dropna()\n expected = Series(\n [Timestamp(\"2011-01-01 10:00\"), Timestamp(\"2011-01-03 10:00\")], index=[0, 2]\n )\n tm.assert_series_equal(result, expected)\n\n # DatetimeBlockTZ\n idx = DatetimeIndex(\n [\"2011-01-01 10:00\", NaT, \"2011-01-03 10:00\", NaT], tz=\"Asia/Tokyo\"\n )\n ser = Series(idx)\n assert ser.dtype == \"datetime64[ns, Asia/Tokyo]\"\n result = ser.dropna()\n expected = Series(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-03 10:00\", tz=\"Asia/Tokyo\"),\n ],\n index=[0, 2],\n )\n assert result.dtype == \"datetime64[ns, Asia/Tokyo]\"\n tm.assert_series_equal(result, expected)\n"
] | [
[
"pandas._testing.assert_almost_equal",
"pandas.Series",
"numpy.arange",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.rand",
"pandas.MultiIndex.from_product",
"pandas.date_range"
],
[
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.ones",
"numpy.random.rand",
"pandas._testing.assert_series_equal",
"numpy.zeros"
],
[
"pandas._testing.assert_produces_warning",
"pandas.to_datetime",
"pandas.CategoricalDtype",
"pandas.Series",
"pandas.period_range",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.StringDtype",
"pandas.array",
"numpy.dtype",
"pandas.DataFrame",
"pandas.BooleanDtype",
"pandas.PeriodDtype",
"pandas.Interval",
"pandas.DatetimeTZDtype",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal",
"pandas.IntervalDtype"
],
[
"pandas.core.arrays.sparse.SparseDtype",
"pandas._testing.assert_numpy_array_equal",
"pandas._testing.assert_sp_array_equal",
"pandas.Series",
"numpy.asarray",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal",
"pandas.core.arrays.sparse.SparseArray",
"numpy.errstate",
"pandas._testing.assert_series_equal",
"numpy.array",
"numpy.isinf"
],
[
"pandas.Series",
"pandas._libs.tslibs.Timestamp",
"numpy.asarray",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"pandas._testing.makeStringSeries",
"numpy.random.randint",
"pandas.tests.io.pytables.common._maybe_remove",
"numpy.arange",
"pandas._testing.makeTimeSeries",
"pandas.Index",
"pandas._testing.makeDataFrame",
"pandas._testing.assert_series_equal",
"pandas.read_hdf",
"pandas.bdate_range",
"pandas.MultiIndex",
"pandas.tests.io.pytables.common.ensure_clean_store",
"pandas._testing.rands_array",
"numpy.array",
"pandas._testing.ensure_clean",
"pandas._testing.makeTimeDataFrame",
"pandas.tests.io.pytables.common.ensure_clean_path"
],
[
"pandas._testing.assert_produces_warning",
"pandas._testing.assert_numpy_array_equal",
"pandas.PeriodIndex",
"pandas.MultiIndex",
"pandas.Timestamp",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.DatetimeIndex",
"pandas.MultiIndex.from_arrays",
"pandas.core.dtypes.cast.construct_1d_object_array_from_listlike",
"pandas._testing.assert_frame_equal",
"pandas.Int64Index",
"pandas.MultiIndex.from_product",
"pandas.isna",
"numpy.array",
"pandas._testing.assert_index_equal"
],
[
"pandas.IntervalIndex.from_arrays",
"pandas.Series",
"pandas.DatetimeIndex",
"pandas.Period",
"pandas._testing.assert_series_equal",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
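Editorial note (not part of the dataset records): the record above bundles pandas test modules whose HDF5 tests repeatedly call a private `_check_roundtrip` helper (write an object into a store, read it back, compare). A minimal, self-contained sketch of that round-trip pattern, using only the public `pd.HDFStore` API, is given below. The helper name `check_roundtrip`, the file name, and the sample objects are illustrative assumptions, not taken from the record; running it requires pandas with PyTables (`tables`) installed.

    import os

    import numpy as np
    import pandas as pd
    import pandas._testing as tm


    def check_roundtrip(obj, comparator, path="roundtrip_sketch.h5"):
        """Write obj to an HDF5 store, read it back, and compare the two."""
        try:
            with pd.HDFStore(path, mode="w") as store:
                store["obj"] = obj          # fixed-format write under a single key
                retrieved = store["obj"]    # read back from the same open store
            comparator(retrieved, obj)
        finally:
            if os.path.exists(path):
                os.remove(path)             # keep the working directory clean


    check_roundtrip(pd.Series(np.arange(5.0)), tm.assert_series_equal)
    check_roundtrip(pd.DataFrame({"A": [1, 2], "B": ["x", "y"]}), tm.assert_frame_equal)

The embedded tests follow the same shape but route store creation through test fixtures (`ensure_clean_store`, `setup_path`) and optional compression options; this sketch only mirrors the write/read/compare core.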
Napkin-DL/PyTorch-GAN | [
"4668fb434a74a4e4771631944e4abfb0ec1c8795",
"4668fb434a74a4e4771631944e4abfb0ec1c8795",
"4668fb434a74a4e4771631944e4abfb0ec1c8795",
"4668fb434a74a4e4771631944e4abfb0ec1c8795"
] | [
".history/implementations/pixelda/pixelda_20190101201505.py",
".history/implementations/pixelda/pixelda_20190101224024.py",
".history/implementations/pixelda/pixelda_try_20190106200949.py",
".history/implementations/pixelda/pixelda_try_20190106200556.py"
] | [
"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom mnistm import MNISTM\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs('images', exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\nparser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\nparser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\nparser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')\nparser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')\nparser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')\nparser.add_argument('--channels', type=int, default=3, help='number of image channels')\nparser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')\nparser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')\nopt = parser.parse_args()\nprint(opt)\n\n# Calculate output of image discriminator (PatchGAN)\npatch = int(opt.img_size / 2**4)\npatch = (1, patch, patch)\n\ncuda = True if torch.cuda.is_available() else False\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n print(\"classname : {}\".format(classname))\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nclass ResidualBlock_back(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(ResidualBlock, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features)\n )\n\n def forward(self, x):\n return x + self.block(x)\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, in_features=64, out_features=64):\n super(ResidualBlock, self).__init__()\n \n # calculate same padding:\n # (w - k + 2*p)/s + 1 = o\n # => p = (s(o-1) - w + k)/2\n (2(128-1)-64 +3)/2\n ### ENCODER\n self.encode_block = nn.Sequential(\n nn.Conv2d(in_channels=1*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(2*in_features),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_channels=2*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=2),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True)\n )\n print(\"self.encode_block : {}\".format(self.encode_block))\n\n self.decode_block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=4*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2), padding=2),\n nn.BatchNorm2d(2*in_features),\n nn.LeakyReLU(inplace=True),\n 
nn.ConvTranspose2d(in_channels=2*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(1*in_features),\n nn.LeakyReLU(inplace=True)\n )\n\n print(\"self.decode_block : {}\".format(self.decode_block))\n\n def forward(self, x):\n encode_x = self.encode_block(x)\n decode_x = self.decode_block(encode_x)\n # decode_x = decode_x[:, :, :-1, :-1]\n # decode_x = F.sigmoid(decode_x)\n return x + decode_x \n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n\n self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n # resblocks.append(ResidualBlock())\n resblocks.append(ResidualBlock())\n self.resblocks = nn.Sequential(*resblocks)\n\n self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())\n\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)\n out = self.l1(gen_input)\n out = self.resblocks(out)\n img_ = self.l2(out)\n\n return img_\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, img):\n validity = self.model(img)\n\n return validity\n\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(512*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\n# Loss function\nadversarial_loss = torch.nn.MSELoss()\ntask_loss = torch.nn.CrossEntropyLoss()\n\n# Loss weights\nlambda_adv = 1\nlambda_task = 0.1\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\nclassifier = Classifier()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n classifier.cuda()\n adversarial_loss.cuda()\n task_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\nclassifier.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs('../../data/mnist', exist_ok=True)\ndataloader_A = torch.utils.data.DataLoader(\n datasets.MNIST('../../data/mnist', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n 
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\nos.makedirs('../../data/mnistm', exist_ok=True)\ndataloader_B = torch.utils.data.DataLoader(\n MNISTM('../../data/mnistm', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\noptimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()),\n lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n# ----------\n# Training\n# ----------\n\n# Keeps 100 accuracy measurements\ntask_performance = []\ntarget_performance = []\n\nfor epoch in range(opt.n_epochs):\n for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):\n\n batch_size = imgs_A.size(0)\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n\n # Configure input\n imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))\n labels_A = Variable(labels_A.type(LongTensor))\n imgs_B = Variable(imgs_B.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise\n z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))\n\n # Generate a batch of images\n fake_B = generator(imgs_A, z)\n\n # Perform task on translated source image\n label_pred = classifier(fake_B)\n\n # Calculate the task loss\n task_loss_ = (task_loss(label_pred, labels_A) + \\\n task_loss(classifier(imgs_A), labels_A)) / 2\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \\\n lambda_task * task_loss_\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n real_loss = adversarial_loss(discriminator(imgs_B), valid)\n fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n\n # ---------------------------------------\n # Evaluate Performance on target domain\n # ---------------------------------------\n\n # Evaluate performance on translated Domain A\n acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())\n task_performance.append(acc)\n if len(task_performance) > 100:\n task_performance.pop(0)\n\n # Evaluate performance on Domain B\n pred_B = classifier(imgs_B)\n target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())\n target_performance.append(target_acc)\n if len(target_performance) > 100:\n target_performance.pop(0)\n\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]\" %\n (epoch, opt.n_epochs,\n i, len(dataloader_A),\n d_loss.item(), g_loss.item(),\n 100*acc, 100*np.mean(task_performance),\n 100*target_acc, 
100*np.mean(target_performance)))\n\n batches_done = len(dataloader_A) * epoch + i\n if batches_done % opt.sample_interval == 0:\n sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)\n save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)\n",
"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom mnistm import MNISTM\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs('images', exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\nparser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\nparser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\nparser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')\nparser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')\nparser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')\nparser.add_argument('--channels', type=int, default=3, help='number of image channels')\nparser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')\nparser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')\nopt = parser.parse_args()\nprint(opt)\n\n# Calculate output of image discriminator (PatchGAN)\npatch = int(opt.img_size / 2**4)\npatch = (1, patch, patch)\n\ncuda = True if torch.cuda.is_available() else False\n\nprint(\"cuda : {}\".format(cuda))\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n print(\"classname : {}\".format(classname))\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nclass ResidualBlock_back(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(ResidualBlock, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features)\n )\n\n def forward(self, x):\n return x + self.block(x)\n\nclass sencode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(sencode_ResidualBlock, self).__init__()\n \n ### ENCODER\n self.sencode_block = nn.Sequential(\n nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(8*in_features),\n nn.LeakyReLU(inplace=True)\n )\n \n \n def forward(self, x):\n encode_x = self.sencode_block(x)\n return x, encode_x \n\nclass sdecode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(sdecode_ResidualBlock, self).__init__()\n\n self.sdecode_block = nn.Sequential(\n 
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(1*in_features),\n nn.LeakyReLU(inplace=True),\n \n )\n\n def forward(self, encode_x):\n decode_x = self.sdecode_block(encode_x)\n decode_x = decode_x[:, :, :-1, :-1]\n decode_x = F.sigmoid(decode_x)\n return decode_x \n\nclass tencode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(tencode_ResidualBlock, self).__init__()\n \n ### ENCODER\n self.tencode_block = nn.Sequential(\n nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(8*in_features),\n nn.LeakyReLU(inplace=True)\n )\n \n \n def forward(self, x):\n encode_x = self.tencode_block(x)\n return x, encode_x \n\nclass tdecode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(tdecode_ResidualBlock, self).__init__()\n\n self.tdecode_block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(1*in_features),\n nn.LeakyReLU(inplace=True),\n \n )\n\n def forward(self, encode_x):\n decode_x = self.tdecode_block(encode_x)\n decode_x = decode_x[:, :, :-1, :-1]\n decode_x = F.sigmoid(decode_x)\n return decode_x \n\n\nclass target_encode_Generator(nn.Module):\n def __init__(self):\n super(target_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.tfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.tl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(tencode_ResidualBlock())\n self.tencode_resblocks = nn.Sequential(*resblocks)\n\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.tfc(z).view(*img.shape)), 1)\n out = self.tl1(gen_input)\n x, encode_out = self.tencode_resblocks(out)\n\n\n return x, encode_out\n\n\nclass source_encode_Generator(nn.Module):\n def __init__(self):\n super(source_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.sfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.sl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(sencode_ResidualBlock())\n self.sencode_resblocks = nn.Sequential(*resblocks)\n\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.sfc(z).view(*img.shape)), 1)\n out = self.sl1(gen_input)\n x, encode_out = self.sencode_resblocks(out)\n\n\n return x, encode_out\n\nclass target_decode_Generator(nn.Module):\n def __init__(self):\n super(target_decode_Generator, self).__init__()\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(tdecode_ResidualBlock())\n 
self.target_decode_resblocks = nn.Sequential(*resblocks)\n\n self.tl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())\n\n\n def forward(self, img, encode_out):\n out = img + self.target_decode_resblocks(encode_out)\n img_ = self.tl2(out)\n\n return img_\n\nclass source_decode_Generator(nn.Module):\n def __init__(self):\n super(source_decode_Generator, self).__init__()\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(sdecode_ResidualBlock())\n self.source_decode_resblocks = nn.Sequential(*resblocks)\n\n self.sl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())\n\n\n def forward(self, img, encode_out):\n out = img + self.source_decode_resblocks(encode_out)\n img_ = self.sl2(out)\n\n return img_\n\n\nclass encode_Discriminator(nn.Module):\n def __init__(self):\n super(encode_Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(256, 512, normalization=False),\n *block(512, 1024),\n nn.Conv2d(1024, 1, 3, 1, 1)\n )\n\n def forward(self, encode_x):\n validity = self.model(encode_x)\n\n return validity\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, img):\n validity = self.model(img)\n\n return validity\n\nclass encode_Classifier(nn.Module):\n def __init__(self):\n super(encode_Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(256, 512, normalization=False),\n *block(512, 1024)\n *block(1024, 2048)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(2048*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(512*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n 
feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\n# Loss function\nadversarial_loss = torch.nn.MSELoss()\nencode_adversarial_loss = torch.nn.MSELoss()\ntask_loss = torch.nn.CrossEntropyLoss()\n\n# Loss weights\nlambda_adv = 1\nlambda_task = 0.1\n\n# Initialize generator and discriminator\ntarget_encode_generator = target_encode_Generator()\ntarget_decode_generator = target_decode_Generator()\nsource_encode_generator = source_encode_Generator()\nsource_decode_generator = source_decode_Generator()\nencode_discriminator = encode_Discriminator()\ndiscriminator = Discriminator()\nclassifier = Classifier()\n\nif cuda:\n target_encode_generator.cuda()\n target_decode_generator.cuda()\n source_encode_generator.cuda()\n source_decode_generator.cuda()\n encode_discriminator.cuda()\n discriminator.cuda()\n classifier.cuda()\n adversarial_loss.cuda()\n encode_adversarial_loss.cuda()\n task_loss.cuda()\n\n# Initialize weights\ntarget_encode_generator.apply(weights_init_normal)\ntarget_decode_generator.apply(weights_init_normal)\nsource_encode_generator.apply(weights_init_normal)\nsource_decode_generator.apply(weights_init_normal)\nencode_discriminator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\nclassifier.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs('../../data/mnist', exist_ok=True)\ndataloader_A = torch.utils.data.DataLoader(\n datasets.MNIST('../../data/mnist', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\nos.makedirs('../../data/mnistm', exist_ok=True)\ndataloader_B = torch.utils.data.DataLoader(\n MNISTM('../../data/mnistm', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\n\noptimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(), \n source_encode_generator.parameters(), target_decode_generator.parameters(), \n source_decode_generator.parameters(),\n classifier.parameters()),\n lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n# ----------\n# Training\n# ----------\n\n# Keeps 100 accuracy measurements\ntask_performance = []\ntarget_performance = []\n\nfor epoch in range(opt.n_epochs):\n for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):\n\n batch_size = imgs_A.size(0)\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n\n # Configure input\n imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))\n labels_A = Variable(labels_A.type(LongTensor))\n imgs_B = Variable(imgs_B.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise\n z = Variable(FloatTensor(np.random.uniform(-1, 1, 
(batch_size, opt.latent_dim))))\n\n # Generate a batch of images\n imgs_A_x, encode_fake_B = source_encode_generator(imgs_A, z)\n decode_fake_B = source_decode_generator(imgs_A_x, encode_fake_B)\n\n # Perform task on translated source image\n label_pred = classifier(decode_fake_B)\n\n # Calculate the task loss\n task_loss_ = (task_loss(label_pred, labels_A) + \\\n task_loss(classifier(imgs_A), labels_A)) / 2\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \\\n 0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), valid) + \\\n lambda_task * task_loss_\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n imgs_B_x, encode_real_B = target_encode_generator(imgs_B, z)\n decode_real_B = target_decode_generator(imgs_B_x, encode_real_B)\n # Measure discriminator's ability to classify real from generated samples\n encode_real_loss = adversarial_loss(encode_discriminator(encode_real_B), valid)\n encode_fake_loss = adversarial_loss(encode_discriminator(encode_fake_B.detach()), fake)\n decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)\n decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)\n encode_d_loss = (encode_real_loss + encode_fake_loss) / 2\n decode_d_loss = (decode_real_loss + decode_fake_loss) / 2\n d_loss = encode_d_loss + decode_d_loss\n\n d_loss.backward()\n optimizer_D.step()\n\n # ---------------------------------------\n # Evaluate Performance on target domain\n # ---------------------------------------\n\n # Evaluate performance on translated Domain A\n acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())\n task_performance.append(acc)\n if len(task_performance) > 100:\n task_performance.pop(0)\n\n # Evaluate performance on Domain B\n pred_B = classifier(imgs_B)\n target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())\n target_performance.append(target_acc)\n if len(target_performance) > 100:\n target_performance.pop(0)\n\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]\" %\n (epoch, opt.n_epochs,\n i, len(dataloader_A),\n d_loss.item(), g_loss.item(),\n 100*acc, 100*np.mean(task_performance),\n 100*target_acc, 100*np.mean(target_performance)))\n\n batches_done = len(dataloader_A) * epoch + i\n if batches_done % opt.sample_interval == 0:\n sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)\n save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)\n",
"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom mnistm import MNISTM\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs('images', exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\nparser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\nparser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\nparser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')\nparser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')\nparser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')\nparser.add_argument('--channels', type=int, default=3, help='number of image channels')\nparser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')\nparser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')\nopt = parser.parse_args()\nprint(opt)\n\n# Calculate output of image discriminator (PatchGAN)\npatch = int(opt.img_size / 2**4)\npatch = (1, patch, patch)\n\ncuda = True if torch.cuda.is_available() else False\n\nprint(\"cuda : {}\".format(cuda))\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n print(\"classname : {}\".format(classname))\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nclass encode_ResidualBlock1(nn.Module):\n def __init__(self, in_features=32, out_features=64, kernel_size=3, stride=2, padding=1):\n super(encode_ResidualBlock1, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, x):\n encode_x = self.block(x)\n return x, encode_x\n\n\nclass encode_ResidualBlock2(nn.Module):\n def __init__(self, in_features=64, out_features=128, kernel_size=3, stride=2, padding=1):\n super(encode_ResidualBlock2, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, x):\n encode_x = self.block(x)\n return encode_x\n\n\nclass encode_ResidualBlock3(nn.Module):\n def __init__(self, in_features=128, out_features=256, kernel_size=3, stride=2, padding=1):\n super(encode_ResidualBlock3, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, 
padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, x):\n encode_x = self.block(x)\n return encode_x\n\n\nclass decode_ResidualBlock1(nn.Module):\n def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):\n super(decode_ResidualBlock1, self).__init__()\n\n self.block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, encode_x):\n decode_x = self.block(encode_x)\n decode_x = decode_x[:,:,:-1,:-1]\n return decode_x\n\n\nclass decode_ResidualBlock2(nn.Module):\n def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):\n super(decode_ResidualBlock2, self).__init__()\n\n self.block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, encode_x):\n decode_x = self.block(encode_x)\n decode_x = decode_x[:,:,:-1,:-1]\n return decode_x\n\n\nclass decode_ResidualBlock3(nn.Module):\n def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):\n super(decode_ResidualBlock3, self).__init__()\n\n self.block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, encode_x):\n decode_x = self.block(encode_x)\n decode_x = decode_x[:,:,:-1,:-1]\n return decode_x\n\n\nclass source_encode_Generator(nn.Module):\n def __init__(self):\n super(source_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))\n\n self.encode_resblocks1 = encode_ResidualBlock1()\n self.encode_resblocks2 = encode_ResidualBlock2()\n self.encode_resblocks3 = encode_ResidualBlock3()\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)\n encode_x = self.l1(gen_input)\n x, encode_out1 = self.encode_resblocks1(encode_x)\n encode_out2 = self.encode_resblocks2(encode_out1)\n encode_out3 = self.encode_resblocks3(encode_out2)\n\n return x, encode_out1, encode_out2, encode_out3\n\n\nclass target_encode_Generator(nn.Module):\n def __init__(self):\n super(target_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))\n\n self.encode_resblocks1 = encode_ResidualBlock1()\n self.encode_resblocks2 = encode_ResidualBlock2()\n self.encode_resblocks3 = encode_ResidualBlock3()\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)\n encode_x = self.l1(gen_input)\n x, encode_out1 = self.encode_resblocks1(encode_x)\n encode_out2 = self.encode_resblocks2(encode_out1)\n encode_out3 = self.encode_resblocks3(encode_out2)\n\n return x, encode_out1, encode_out2, encode_out3\n\n\nclass decode_Generator(nn.Module):\n def __init__(self):\n 
super(decode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.decode_resblocks1 = decode_ResidualBlock1()\n self.decode_resblocks2 = decode_ResidualBlock2()\n self.decode_resblocks3 = decode_ResidualBlock3()\n\n self.l2 = nn.Sequential(nn.Conv2d(32, opt.channels, 3, 1, 1), nn.Tanh())\n\n def forward(self, x, encode_out1, encode_out2, encode_out3):\n print(x.size(),encode_out1.size(), encode_out2.size(), encode_out3.size() )\n\n decode_out1 = self.decode_resblocks1(encode_out3)\n print(decode_out1.size())\n decode_out2 = self.decode_resblocks2(torch.cat([decode_out1,encode_out2], dim=1))\n print(decode_out2.size())\n decode_out3 = self.decode_resblocks3(torch.cat([decode_out2,encode_out1], dim=1))\n print(decode_out3.size())\n decode_x = F.sigmoid(decode_out3)\n decode_x = decode_x[:, :, :-1, :-1]\n out = x + decode_x\n img_ = self.l2(out)\n\n return img_\n\n\nclass encode_Discriminator(nn.Module):\n def __init__(self):\n super(encode_Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(512, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, encode_x):\n validity = self.model(encode_x)\n\n return validity\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, img):\n validity = self.model(img)\n\n return validity\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(512*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\n# Loss function\nadversarial_loss = torch.nn.MSELoss()\nencode_adversarial_loss = torch.nn.MSELoss()\ntask_loss = torch.nn.CrossEntropyLoss()\n\n# Loss weights\nlambda_adv = 1\nlambda_task = 0.1\n\n# Initialize generator and discriminator\ntarget_encode_generator = target_encode_Generator()\nsource_encode_generator = source_encode_Generator()\ndecode_generator = decode_Generator()\n\nencode_discriminator = encode_Discriminator()\ndiscriminator = 
Discriminator()\nclassifier = Classifier()\n\nif cuda:\n target_encode_generator.cuda()\n source_encode_generator.cuda()\n decode_generator.cuda()\n\n encode_discriminator.cuda()\n discriminator.cuda()\n classifier.cuda()\n adversarial_loss.cuda()\n encode_adversarial_loss.cuda()\n task_loss.cuda()\n\n# Initialize weights\ntarget_encode_generator.apply(weights_init_normal)\nsource_encode_generator.apply(weights_init_normal)\ndecode_generator.apply(weights_init_normal)\nencode_discriminator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\nclassifier.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs('../../data/mnist', exist_ok=True)\ndataloader_A = torch.utils.data.DataLoader(\n datasets.MNIST('../../data/mnist', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\nos.makedirs('../../data/mnistm', exist_ok=True)\ndataloader_B = torch.utils.data.DataLoader(\n MNISTM('../../data/mnistm', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\n\noptimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(), \n source_encode_generator.parameters(),\n decode_generator.parameters(),\n classifier.parameters()),\n lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n# ----------\n# Training\n# ----------\n\n# Keeps 100 accuracy measurements\ntask_performance = []\ntarget_performance = []\n\nfor epoch in range(opt.n_epochs):\n for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):\n\n batch_size = imgs_A.size(0)\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n encode_valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n encode_fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n\n # Configure input\n imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))\n labels_A = Variable(labels_A.type(LongTensor))\n imgs_B = Variable(imgs_B.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise\n z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))\n\n # Generate a batch of images\n imgs_A_x, sencode_1, sencode_2, encode_fake_B = source_encode_generator(imgs_A, z)\n decode_fake_B = decode_generator(imgs_A_x, sencode_1, sencode_2, encode_fake_B)\n\n # Perform task on translated source image\n label_pred = classifier(decode_fake_B)\n\n # Calculate the task loss\n task_loss_ = (task_loss(label_pred, labels_A) + \\\n task_loss(classifier(imgs_A), labels_A)) / 2\n \n # Loss measures generator's ability to fool the discriminator\n g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \\\n 0.1 * 
encode_adversarial_loss(encode_discriminator(encode_fake_B), encode_valid) + \\\n lambda_task * task_loss_\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n optimizer_D.zero_grad()\n\n imgs_B_x, tencode_1, tencode_2, encode_real_B = target_encode_generator(imgs_B, z)\n decode_real_B = decode_generator(imgs_B_x, tencode_1, tencode_2, encode_real_B)\n\n # Measure discriminator's ability to classify real from generated samples\n encode_real_loss = encode_adversarial_loss(encode_discriminator(encode_real_B), encode_valid)\n encode_fake_loss = encode_adversarial_loss(encode_discriminator(encode_fake_B.detach()), encode_fake)\n decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)\n decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)\n encode_d_loss = (encode_real_loss + encode_fake_loss) / 2\n decode_d_loss = (decode_real_loss + decode_fake_loss) / 2\n d_loss = encode_d_loss + decode_d_loss\n\n d_loss.backward()\n optimizer_D.step()\n\n # ---------------------------------------\n # Evaluate Performance on target domain\n # ---------------------------------------\n\n # Evaluate performance on translated Domain A\n acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())\n task_performance.append(acc)\n if len(task_performance) > 100:\n task_performance.pop(0)\n\n # Evaluate performance on Domain B\n pred_B = classifier(imgs_B)\n target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())\n target_performance.append(target_acc)\n if len(target_performance) > 100:\n target_performance.pop(0)\n\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]\" %\n (epoch, opt.n_epochs,\n i, len(dataloader_A),\n d_loss.item(), g_loss.item(),\n 100*acc, 100*np.mean(task_performance),\n 100*target_acc, 100*np.mean(target_performance)))\n\n batches_done = len(dataloader_A) * epoch + i\n if batches_done % opt.sample_interval == 0:\n sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)\n save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)\n",
"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom mnistm import MNISTM\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs('images', exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\nparser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\nparser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\nparser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')\nparser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')\nparser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')\nparser.add_argument('--channels', type=int, default=3, help='number of image channels')\nparser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')\nparser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')\nopt = parser.parse_args()\nprint(opt)\n\n# Calculate output of image discriminator (PatchGAN)\npatch = int(opt.img_size / 2**4)\npatch = (1, patch, patch)\n\ncuda = True if torch.cuda.is_available() else False\n\nprint(\"cuda : {}\".format(cuda))\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n print(\"classname : {}\".format(classname))\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nclass encode_ResidualBlock1(nn.Module):\n def __init__(self, in_features=32, out_features=64, kernel_size=3, stride=2, padding=1):\n super(encode_ResidualBlock1, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, x):\n encode_x = self.block(x)\n return x, encode_x\n\n\nclass encode_ResidualBlock2(nn.Module):\n def __init__(self, in_features=64, out_features=128, kernel_size=3, stride=2, padding=1):\n super(encode_ResidualBlock2, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, x):\n encode_x = self.block(x)\n return encode_x\n\n\nclass encode_ResidualBlock3(nn.Module):\n def __init__(self, in_features=128, out_features=256, kernel_size=3, stride=2, padding=1):\n super(encode_ResidualBlock3, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, 
padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, x):\n encode_x = self.block(x)\n return encode_x\n\n\nclass decode_ResidualBlock1(nn.Module):\n def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):\n super(decode_ResidualBlock1, self).__init__()\n\n self.block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, encode_x):\n decode_x = self.block(encode_x)\n decode_x = decode_x[:,:,:-1,:-1]\n return decode_x\n\n\nclass decode_ResidualBlock2(nn.Module):\n def __init__(self, in_features=128, out_features=64, kernel_size=3, stride=2, padding=0):\n super(decode_ResidualBlock2, self).__init__()\n\n self.block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, encode_x):\n decode_x = self.block(encode_x)\n decode_x = decode_x[:,:,:-1,:-1]\n return decode_x\n\n\nclass decode_ResidualBlock3(nn.Module):\n def __init__(self, in_features=64, out_features=32, kernel_size=3, stride=2, padding=1):\n super(decode_ResidualBlock3, self).__init__()\n\n self.block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.BatchNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, encode_x):\n decode_x = self.decode_block(encode_x)\n return decode_x\n\n\nclass source_encode_Generator(nn.Module):\n def __init__(self):\n super(source_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))\n\n self.encode_resblocks1 = encode_ResidualBlock1()\n self.encode_resblocks2 = encode_ResidualBlock2()\n self.encode_resblocks3 = encode_ResidualBlock3()\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)\n encode_x = self.l1(gen_input)\n x, encode_out1 = self.encode_resblocks1(encode_x)\n encode_out2 = self.encode_resblocks2(encode_out1)\n encode_out3 = self.encode_resblocks3(encode_out2)\n\n return x, encode_out1, encode_out2, encode_out3\n\n\nclass target_encode_Generator(nn.Module):\n def __init__(self):\n super(target_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))\n\n self.encode_resblocks1 = encode_ResidualBlock1()\n self.encode_resblocks2 = encode_ResidualBlock2()\n self.encode_resblocks3 = encode_ResidualBlock3()\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)\n encode_x = self.l1(gen_input)\n x, encode_out1 = self.encode_resblocks1(encode_x)\n encode_out2 = self.encode_resblocks2(encode_out1)\n encode_out3 = self.encode_resblocks3(encode_out2)\n\n return x, encode_out1, encode_out2, encode_out3\n\n\nclass decode_Generator(nn.Module):\n def __init__(self):\n super(decode_Generator, self).__init__()\n\n # 
Fully-connected layer which constructs image channel shaped output from noise\n self.decode_resblocks1 = decode_ResidualBlock1()\n self.decode_resblocks2 = decode_ResidualBlock2()\n self.decode_resblocks3 = decode_ResidualBlock3()\n\n self.l2 = nn.Sequential(nn.Conv2d(32, opt.channels, 3, 1, 1), nn.Tanh())\n\n def forward(self, x, encode_out1, encode_out2, encode_out3):\n print(x.size(),encode_out1.size(), encode_out2.size(), encode_out3.size() )\n\n decode_out1 = self.decode_resblocks1(encode_out3)\n print(decode_out1.size())\n decode_out2 = self.decode_resblocks2(torch.cat([decode_out1,encode_out2], dim=1))\n decode_out3 = self.decode_resblocks3(torch.cat([decode_out2+encode_out1], dim=1))\n decode_x = F.sigmoid(decode_out3)\n decode_x = decode_x[:, :, :-1, :-1]\n out = x + decode_x\n img_ = self.l2(out)\n\n return img_\n\n\nclass encode_Discriminator(nn.Module):\n def __init__(self):\n super(encode_Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(512, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, encode_x):\n validity = self.model(encode_x)\n\n return validity\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, img):\n validity = self.model(img)\n\n return validity\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(512*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\n# Loss function\nadversarial_loss = torch.nn.MSELoss()\nencode_adversarial_loss = torch.nn.MSELoss()\ntask_loss = torch.nn.CrossEntropyLoss()\n\n# Loss weights\nlambda_adv = 1\nlambda_task = 0.1\n\n# Initialize generator and discriminator\ntarget_encode_generator = target_encode_Generator()\nsource_encode_generator = source_encode_Generator()\ndecode_generator = decode_Generator()\n\nencode_discriminator = encode_Discriminator()\ndiscriminator = Discriminator()\nclassifier = Classifier()\n\nif cuda:\n target_encode_generator.cuda()\n source_encode_generator.cuda()\n 
decode_generator.cuda()\n\n encode_discriminator.cuda()\n discriminator.cuda()\n classifier.cuda()\n adversarial_loss.cuda()\n encode_adversarial_loss.cuda()\n task_loss.cuda()\n\n# Initialize weights\ntarget_encode_generator.apply(weights_init_normal)\nsource_encode_generator.apply(weights_init_normal)\ndecode_generator.apply(weights_init_normal)\nencode_discriminator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\nclassifier.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs('../../data/mnist', exist_ok=True)\ndataloader_A = torch.utils.data.DataLoader(\n datasets.MNIST('../../data/mnist', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\nos.makedirs('../../data/mnistm', exist_ok=True)\ndataloader_B = torch.utils.data.DataLoader(\n MNISTM('../../data/mnistm', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\n\noptimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(), \n source_encode_generator.parameters(),\n decode_generator.parameters(),\n classifier.parameters()),\n lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n# ----------\n# Training\n# ----------\n\n# Keeps 100 accuracy measurements\ntask_performance = []\ntarget_performance = []\n\nfor epoch in range(opt.n_epochs):\n for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):\n\n batch_size = imgs_A.size(0)\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n encode_valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n encode_fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n\n # Configure input\n imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))\n labels_A = Variable(labels_A.type(LongTensor))\n imgs_B = Variable(imgs_B.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise\n z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))\n\n # Generate a batch of images\n imgs_A_x, sencode_1, sencode_2, encode_fake_B = source_encode_generator(imgs_A, z)\n decode_fake_B = decode_generator(imgs_A_x, sencode_1, sencode_2, encode_fake_B)\n\n # Perform task on translated source image\n label_pred = classifier(decode_fake_B)\n\n # Calculate the task loss\n task_loss_ = (task_loss(label_pred, labels_A) + \\\n task_loss(classifier(imgs_A), labels_A)) / 2\n \n # Loss measures generator's ability to fool the discriminator\n g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \\\n 0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), encode_valid) + \\\n lambda_task * task_loss_\n\n g_loss.backward()\n 
optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n optimizer_D.zero_grad()\n\n imgs_B_x, tencode_1, tencode_2, encode_real_B = target_encode_generator(imgs_B, z)\n decode_real_B = decode_generator(imgs_B_x, tencode_1, tencode_2, encode_real_B)\n\n # Measure discriminator's ability to classify real from generated samples\n encode_real_loss = encode_adversarial_loss(encode_discriminator(encode_real_B), encode_valid)\n encode_fake_loss = encode_adversarial_loss(encode_discriminator(encode_fake_B.detach()), encode_fake)\n decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)\n decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)\n encode_d_loss = (encode_real_loss + encode_fake_loss) / 2\n decode_d_loss = (decode_real_loss + decode_fake_loss) / 2\n d_loss = encode_d_loss + decode_d_loss\n\n d_loss.backward()\n optimizer_D.step()\n\n # ---------------------------------------\n # Evaluate Performance on target domain\n # ---------------------------------------\n\n # Evaluate performance on translated Domain A\n acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())\n task_performance.append(acc)\n if len(task_performance) > 100:\n task_performance.pop(0)\n\n # Evaluate performance on Domain B\n pred_B = classifier(imgs_B)\n target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())\n target_performance.append(target_acc)\n if len(target_performance) > 100:\n target_performance.pop(0)\n\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]\" %\n (epoch, opt.n_epochs,\n i, len(dataloader_A),\n d_loss.item(), g_loss.item(),\n 100*acc, 100*np.mean(task_performance),\n 100*target_acc, 100*np.mean(target_performance)))\n\n batches_done = len(dataloader_A) * epoch + i\n if batches_done % opt.sample_interval == 0:\n sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)\n save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.CrossEntropyLoss",
"torch.nn.Softmax",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.InstanceNorm2d",
"numpy.random.uniform",
"torch.nn.ReLU",
"numpy.mean",
"torch.nn.MSELoss"
],
[
"torch.nn.Softmax",
"torch.cat",
"numpy.mean",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.sigmoid",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.InstanceNorm2d",
"torch.nn.BatchNorm2d",
"torch.nn.Tanh",
"numpy.random.uniform",
"torch.nn.ReLU",
"torch.nn.MSELoss"
],
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.InstanceNorm2d",
"numpy.random.uniform",
"torch.nn.ReLU",
"numpy.mean",
"torch.nn.MSELoss"
],
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.InstanceNorm2d",
"numpy.random.uniform",
"torch.nn.ReLU",
"numpy.mean",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hrayatnia/SciPy | [
"a50dcbb6b8adffbc526eec93f5009f09943786e3"
] | [
"plotting-beginner-plotting-cookbook/pltcp.py"
] | [
"import numpy as np\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nax = plt.axes(polar = True)\ntheta = np.linspace(0, 2 * np.pi, 8, endpoint = False)\nradius = .25 + .75 * np.random.random(size = len(theta))\npoints = np.vstack((theta, radius)).transpose()\nplt.gca().add_patch(patches.Polygon(points, color = '.75'))\nplt.show()"
] | [
[
"matplotlib.pyplot.gca",
"numpy.linspace",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.show",
"matplotlib.patches.Polygon",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mitchellgordon95/lottery-ticket-hypothesis | [
"3b2abee4b1e9ba00fe8501ac86652e2604736405",
"3b2abee4b1e9ba00fe8501ac86652e2604736405",
"3b2abee4b1e9ba00fe8501ac86652e2604736405"
] | [
"lottery_ticket/foundations/trainer.py",
"lottery_ticket/mnist_fc/big_two_layer_exp.py",
"lottery_ticket/mnist_fc/one_layer_exp.py"
] | [
"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A function that trains a network on a dataset.\"\"\"\n\nfrom lottery_ticket.foundations import paths\nfrom lottery_ticket.foundations import save_restore\nimport tensorflow as tf\n\n\ndef train(sess, dataset, model, optimizer_fn, training_len, output_dir,\n **params):\n \"\"\"Train a model on a dataset.\n\n Training continues until training_len iterations or epochs have taken place.\n\n Args:\n sess: A tensorflow session\n dataset: The dataset on which to train (a child of dataset_base.DatasetBase)\n model: The model to train (a child of model_base.ModelBase)\n optimizer_fn: A function that, when called, returns an instance of an\n optimizer object to be used to optimize the network.\n training_len: A tuple whose first value is the unit of measure\n (\"epochs\" or \"iterations\") and whose second value is the number of\n units for which the network should be trained.\n output_dir: The directory to which any output should be saved.\n **params: Other parameters.\n save_summaries is whether to save summary data.\n save_network is whether to save the network before and after training.\n test_interval is None if the test set should not be evaluated; otherwise,\n frequency (in iterations) at which the test set should be run.\n validate_interval is analogous to test_interval.\n\n Returns:\n A dictionary containing the weights before training and the weights after\n training, as well as the trained model.\n \"\"\"\n # Create initial session parameters.\n optimize = optimizer_fn().minimize(model.loss)\n sess.run(tf.global_variables_initializer())\n initial_weights = model.get_current_weights(sess)\n\n train_handle = dataset.get_train_handle(sess)\n test_handle = dataset.get_test_handle(sess)\n validate_handle = dataset.get_validate_handle(sess)\n\n # Optional operations to perform before training.\n if params.get('save_summaries', False):\n writer = tf.summary.FileWriter(paths.summaries(output_dir))\n train_file = tf.gfile.GFile(paths.log(output_dir, 'train'), 'w')\n test_file = tf.gfile.GFile(paths.log(output_dir, 'test'), 'w')\n validate_file = tf.gfile.GFile(paths.log(output_dir, 'validate'), 'w')\n\n if params.get('save_network', False):\n save_restore.save_network(paths.initial(output_dir), initial_weights)\n save_restore.save_network(paths.masks(output_dir), model.masks)\n\n # Helper functions to collect and record summaries.\n def record_summaries(iteration, records, fp):\n \"\"\"Records summaries obtained from evaluating the network.\n\n Args:\n iteration: The current training iteration as an integer.\n records: A list of records to be written.\n fp: A file to which the records should be logged in an easier-to-parse\n format than the tensorflow summary files.\n \"\"\"\n if params.get('save_summaries', False):\n log = ['iteration', str(iteration)]\n for record in records:\n # Log to tensorflow summaries for tensorboard.\n writer.add_summary(record, iteration)\n\n # Log to text file for convenience.\n 
summary_proto = tf.Summary()\n summary_proto.ParseFromString(record)\n value = summary_proto.value[0]\n log += [value.tag, str(value.simple_value)]\n fp.write(','.join(log) + '\\n')\n\n def collect_test_summaries(iteration):\n if (params.get('save_summaries', False) and\n 'test_interval' in params and\n iteration % params['test_interval'] == 0):\n sess.run(dataset.test_initializer)\n records = sess.run(model.test_summaries, {dataset.handle: test_handle})\n record_summaries(iteration, records, test_file)\n\n def collect_validate_summaries(iteration):\n if (params.get('save_summaries', False) and\n 'validate_interval' in params and\n iteration % params['validate_interval'] == 0):\n sess.run(dataset.validate_initializer)\n records = sess.run(model.validate_summaries,\n {dataset.handle: validate_handle})\n record_summaries(iteration, records, validate_file)\n\n # Train for the specified number of epochs. This behavior is encapsulated\n # in a function so that it is possible to break out of multiple loops\n # simultaneously.\n def training_loop():\n \"\"\"The main training loop encapsulated in a function.\"\"\"\n iteration = 0\n epoch = 0\n last_train_acc = None\n while True:\n sess.run(dataset.train_initializer)\n epoch += 1\n\n # End training if we have passed the epoch limit.\n if training_len[0] == 'epochs' and epoch > training_len[1]:\n return last_train_acc\n\n # One training epoch.\n while True:\n try:\n iteration += 1\n\n # End training if we have passed the iteration limit.\n if training_len[0] == 'iterations' and iteration > training_len[1]:\n return last_train_acc\n\n # Train.\n results = sess.run([optimize, model.accuracy] + model.train_summaries,\n {dataset.handle: train_handle})\n last_train_acc = results[1]\n records = results[2:]\n record_summaries(iteration, records, train_file)\n\n # Collect test and validation data if applicable.\n collect_test_summaries(iteration)\n collect_validate_summaries(iteration)\n\n # End of epoch handling.\n except tf.errors.OutOfRangeError:\n break\n\n # Run the training loop.\n final_train_acc = training_loop()\n\n # Clean up.\n if params.get('save_summaries', False):\n train_file.close()\n test_file.close()\n validate_file.close()\n\n # Retrieve the final weights of the model.\n final_weights = model.get_current_weights(sess)\n if params.get('save_network', False):\n save_restore.save_network(paths.final(output_dir), final_weights)\n\n return initial_weights, final_weights, final_train_acc\n",
"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Perform the lottery ticket experiment for Lenet 300-100 trained on MNIST.\n\nThe output of each experiment will be stored in a directory called:\n{output_dir}/{pruning level}/{experiment_name} as defined in the\nfoundations.paths module.\n\nArgs:\n output_dir: Parent directory for all output files.\n mnist_location: The path to the NPZ file containing MNIST.\n training_len: How long to train on each iteration.\n iterations: How many iterative pruning steps to perform.\n experiment_name: The name of this specific experiment\n presets: The initial weights for the network, if any. Presets can come in\n one of three forms:\n * A dictionary of numpy arrays. Each dictionary key is the name of the\n corresponding tensor that is to be initialized. Each value is a numpy\n array containing the initializations.\n * The string name of a directory containing one file for each\n set of weights that is to be initialized (in the form of\n foundations.save_restore).\n * None, meaning the network should be randomly initialized.\n permute_labels: Whether to permute the labels on the dataset.\n train_order_seed: The random seed, if any, to be used to determine the\n order in which training examples are shuffled before being presented\n to the network.\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport fire\nimport tensorflow as tf\nfrom lottery_ticket.datasets import dataset_mnist\nfrom lottery_ticket.foundations import experiment\nfrom lottery_ticket.foundations import model_fc\nfrom lottery_ticket.foundations import paths\nfrom lottery_ticket.foundations import pruning\nfrom lottery_ticket.foundations import save_restore\nfrom lottery_ticket.foundations import trainer\nfrom lottery_ticket.foundations.experiment_base import ExperimentBase\nfrom lottery_ticket.mnist_fc import constants\n\nclass Experiment(ExperimentBase):\n def __init__(self, trial):\n self.output_dir = paths.trial(paths.experiment(constants.EXPERIMENT_PATH, 'big_two_layer'), trial)\n\n def train_once(self, iteration, presets=None, masks=None):\n tf.reset_default_graph()\n sess = tf.Session()\n dataset = dataset_mnist.DatasetMnist(\n constants.MNIST_LOCATION,\n permute_labels=False,\n train_order_seed=None)\n input_tensor, label_tensor = dataset.placeholders\n hyperparameters = {'layers': [(1000, tf.nn.relu), (500, tf.nn.relu), (10, None)]}\n model = model_fc.ModelFc(hyperparameters, input_tensor, label_tensor, presets=presets, masks=masks)\n params = {\n 'test_interval': 100,\n 'save_summaries': True,\n 'save_network': True,\n }\n return trainer.train(\n sess,\n dataset,\n model,\n functools.partial(tf.train.GradientDescentOptimizer, .1),\n ('iterations', 50000),\n output_dir=paths.run(self.output_dir, iteration),\n **params)\n\n def prune_masks(self, masks, final_weights):\n return pruning.prune_holistically(.50, masks, final_weights)\n\n def 
stop_pruning(self, train_acc):\n return train_acc < 0.95\n\ndef main():\n for trial in range(1, 21):\n mnist_experiment = Experiment(trial)\n experiment.run_experiment(\n mnist_experiment,\n max_prune_iterations=30,\n presets=save_restore.standardize(None))\n\nif __name__ == '__main__':\n fire.Fire(main)\n",
"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Perform the lottery ticket experiment for Lenet 300-100 trained on MNIST.\n\nThe output of each experiment will be stored in a directory called:\n{output_dir}/{pruning level}/{experiment_name} as defined in the\nfoundations.paths module.\n\nArgs:\n output_dir: Parent directory for all output files.\n mnist_location: The path to the NPZ file containing MNIST.\n training_len: How long to train on each iteration.\n iterations: How many iterative pruning steps to perform.\n experiment_name: The name of this specific experiment\n presets: The initial weights for the network, if any. Presets can come in\n one of three forms:\n * A dictionary of numpy arrays. Each dictionary key is the name of the\n corresponding tensor that is to be initialized. Each value is a numpy\n array containing the initializations.\n * The string name of a directory containing one file for each\n set of weights that is to be initialized (in the form of\n foundations.save_restore).\n * None, meaning the network should be randomly initialized.\n permute_labels: Whether to permute the labels on the dataset.\n train_order_seed: The random seed, if any, to be used to determine the\n order in which training examples are shuffled before being presented\n to the network.\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport fire\nimport tensorflow as tf\nfrom lottery_ticket.datasets import dataset_mnist\nfrom lottery_ticket.foundations import experiment\nfrom lottery_ticket.foundations import model_fc\nfrom lottery_ticket.foundations import paths\nfrom lottery_ticket.foundations import pruning\nfrom lottery_ticket.foundations import save_restore\nfrom lottery_ticket.foundations import trainer\nfrom lottery_ticket.foundations.experiment_base import ExperimentBase\nfrom lottery_ticket.mnist_fc import constants\n\nclass Experiment(ExperimentBase):\n def __init__(self, trial):\n self.output_dir = paths.trial(paths.experiment(constants.EXPERIMENT_PATH, 'one_layer'), trial)\n\n def train_once(self, iteration, presets=None, masks=None):\n tf.reset_default_graph()\n sess = tf.Session()\n dataset = dataset_mnist.DatasetMnist(\n constants.MNIST_LOCATION,\n permute_labels=False,\n train_order_seed=None)\n input_tensor, label_tensor = dataset.placeholders\n hyperparameters = {'layers': [(3000, tf.nn.relu), (10, None)]}\n model = model_fc.ModelFc(hyperparameters, input_tensor, label_tensor, presets=presets, masks=masks)\n params = {\n 'test_interval': 100,\n 'save_summaries': True,\n 'save_network': True,\n }\n return trainer.train(\n sess,\n dataset,\n model,\n functools.partial(tf.train.GradientDescentOptimizer, .1),\n ('iterations', 50000),\n output_dir=paths.run(self.output_dir, iteration),\n **params)\n\n def prune_masks(self, masks, final_weights):\n return pruning.prune_holistically(.75, masks, final_weights)\n\n def stop_pruning(self, train_acc):\n 
return train_acc < 0.95\n\ndef main():\n for trial in range(1, 21):\n mnist_experiment = Experiment(trial)\n experiment.run_experiment(\n mnist_experiment,\n max_prune_iterations=30,\n presets=save_restore.standardize(None))\n\nif __name__ == '__main__':\n fire.Fire(main)\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.Summary"
],
[
"tensorflow.reset_default_graph",
"tensorflow.Session"
],
[
"tensorflow.reset_default_graph",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
jakee417/probability-1 | [
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde"
] | [
"tensorflow_probability/python/experimental/mcmc/windowed_sampling_test.py",
"tensorflow_probability/python/distributions/student_t_process_regression_model_test.py",
"tensorflow_probability/python/experimental/mcmc/gradient_based_trajectory_length_adaptation_test.py",
"tensorflow_probability/python/distributions/exponential.py",
"tensorflow_probability/python/distributions/plackett_luce.py",
"tensorflow_probability/python/distributions/probit_bernoulli.py",
"tensorflow_probability/python/distributions/markov_chain.py",
"tensorflow_probability/python/bijectors/soft_clip.py",
"tensorflow_probability/python/mcmc/internal/util.py",
"tensorflow_probability/python/distributions/relaxed_onehot_categorical.py",
"tensorflow_probability/python/experimental/mcmc/potential_scale_reduction_reducer_test.py",
"tensorflow_probability/python/internal/backend/numpy/gen/linear_operator.py",
"tensorflow_probability/python/math/ode/util.py",
"tensorflow_probability/python/bijectors/scale_matvec_linear_operator.py",
"tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py",
"tensorflow_probability/python/internal/dtype_util.py",
"tensorflow_probability/python/experimental/joint_distribution_layers/layers_test.py",
"tensorflow_probability/python/internal/tensorshape_util.py",
"tensorflow_probability/python/distributions/pixel_cnn.py",
"tensorflow_probability/python/experimental/sequential/iterated_filter.py",
"tensorflow_probability/python/internal/vectorization_util_test.py",
"tensorflow_probability/python/bijectors/matrix_inverse_tril_test.py",
"tensorflow_probability/python/stats/sample_stats.py"
] | [
"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the _License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for windowed sampling.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.experimental import distribute\nfrom tensorflow_probability.python.experimental.mcmc import windowed_sampling\nfrom tensorflow_probability.python.internal import callable_util\nfrom tensorflow_probability.python.internal import distribute_test_lib\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.internal import unnest\n\nJAX_MODE = False\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\nRoot = tfd.JointDistributionCoroutine.Root\n\nNUM_SCHOOLS = 8 # number of schools\nTREATMENT_EFFECTS = [28., 8, -3, 7, -1, 1, 18, 12]\nTREATMENT_STDDEVS = [15., 10, 16, 11, 9, 11, 10, 18]\n\n\ndef eight_schools_coroutine():\n\n @tfd.JointDistributionCoroutine\n def model():\n avg_effect = yield Root(tfd.Normal(0., 5., name='avg_effect'))\n avg_stddev = yield Root(tfd.HalfNormal(5., name='avg_stddev'))\n school_effects_std = yield Root(\n tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'))\n yield tfd.Independent(\n tfd.Normal(loc=(avg_effect[..., tf.newaxis] +\n avg_stddev[..., tf.newaxis] * school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')\n return model\n\n\ndef eight_schools_sequential():\n model = tfd.JointDistributionSequential([\n tfd.Normal(0., 5., name='avg_effect'),\n tfd.HalfNormal(5., name='avg_stddev'),\n tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),\n # pylint: disable=g-long-lambda\n lambda school_effects_std, avg_stddev, avg_effect: tfd.Independent(\n tfd.Normal(loc=(avg_effect[..., tf.newaxis] +\n avg_stddev[..., tf.newaxis] * school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')])\n # pylint: enable=g-long-lambda\n return model\n\n\ndef eight_schools_named():\n model = tfd.JointDistributionNamed(\n dict(\n avg_effect=tfd.Normal(0., 5., name='avg_effect'),\n avg_stddev=tfd.HalfNormal(5., name='avg_stddev'),\n school_effects_std=tfd.Sample(\n tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),\n # pylint: disable=g-long-lambda\n treatment_effects=lambda school_effects_std, avg_stddev, avg_effect:\n tfd.Independent(\n tfd.Normal(loc=(avg_effect[..., tf.newaxis] +\n avg_stddev[..., tf.newaxis] * school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')))\n # pylint: enable=g-long-lambda\n return model\n\n\ndef eight_schools_nested():\n model = tfd.JointDistributionNamed(\n 
dict(\n effect_and_stddev=tfd.JointDistributionSequential([\n tfd.Normal(0., 5., name='avg_effect'),\n tfd.HalfNormal(5., name='avg_stddev')], name='effect_and_stddev'),\n school_effects_std=tfd.Sample(\n tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),\n # pylint: disable=g-long-lambda\n treatment_effects=lambda school_effects_std, effect_and_stddev:\n tfd.Independent(\n tfd.Normal(loc=(effect_and_stddev[0][..., tf.newaxis] +\n effect_and_stddev[1][..., tf.newaxis] *\n school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')))\n # pylint: enable=g-long-lambda\n return model\n\n\ndef _gen_gaussian_updating_example(x_dim, y_dim, seed):\n \"\"\"An implementation of section 2.3.3 from [1].\n\n We initialize a joint distribution\n\n x ~ N(mu, Lambda^{-1})\n y ~ N(Ax, L^{-1})\n\n Then condition the model on an observation for y. We can test to confirm that\n Cov(p(x | y_obs)) is near to\n\n Sigma = (Lambda + A^T L A)^{-1}\n\n This test can actually check whether the posterior samples have the proper\n covariance, and whether the windowed tuning recovers 1 / diag(Sigma) as the\n diagonal scaling factor.\n\n References:\n [1] Bishop, Christopher M. Pattern Recognition and Machine Learning.\n Springer, 2006.\n\n Args:\n x_dim: int\n y_dim: int\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n Returns:\n (tfd.JointDistribution, tf.Tensor), representing the joint distribution\n above, and the posterior variance.\n \"\"\"\n seeds = samplers.split_seed(seed, 6)\n x_mean = samplers.normal((x_dim,), seed=seeds[0])\n x_scale_diag = samplers.normal((x_dim,), seed=seeds[1])\n y_scale_diag = samplers.normal((y_dim,), seed=seeds[2])\n scale_mat = samplers.normal((y_dim, x_dim), seed=seeds[3])\n y_shift = samplers.normal((y_dim,), seed=seeds[4])\n\n @tfd.JointDistributionCoroutine\n def model():\n x = yield Root(tfd.MultivariateNormalDiag(\n x_mean, scale_diag=x_scale_diag, name='x'))\n yield tfd.MultivariateNormalDiag(\n tf.linalg.matvec(scale_mat, x) + y_shift,\n scale_diag=y_scale_diag,\n name='y')\n\n dists, _ = model.sample_distributions(seed=seeds[5])\n precision_x = tf.linalg.inv(dists.x.covariance())\n precision_y = tf.linalg.inv(dists.y.covariance())\n true_cov = tf.linalg.inv(precision_x +\n tf.linalg.matmul(\n tf.linalg.matmul(scale_mat, precision_y,\n transpose_a=True),\n scale_mat))\n return model, tf.linalg.diag_part(true_cov)\n\n\n@test_util.test_graph_and_eager_modes\nclass WindowedSamplingTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in\n [eight_schools_coroutine, eight_schools_named, eight_schools_sequential,\n eight_schools_nested])\n def test_hmc_type_checks(self, model_fn):\n model = model_fn()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function(autograph=False)\n def do_sample(seed):\n return tfp.experimental.mcmc.windowed_adaptive_hmc(\n 3, model, num_leapfrog_steps=2, num_adaptation_steps=21,\n seed=seed, **pins)\n\n draws, _ = do_sample(test_util.test_seed())\n self.evaluate(draws)\n\n @parameterized.named_parameters(\n dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in\n [eight_schools_coroutine, eight_schools_named, eight_schools_sequential,\n eight_schools_nested])\n def test_nuts_type_checks(self, model_fn):\n model = model_fn()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function\n def do_sample(seed):\n return 
tfp.experimental.mcmc.windowed_adaptive_nuts(\n 3, model, max_tree_depth=2, num_adaptation_steps=50,\n seed=seed, **pins)\n\n draws, _ = do_sample(test_util.test_seed())\n self.evaluate(draws)\n\n def test_hmc_samples_well(self):\n model = eight_schools_named()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function\n def do_sample(seed):\n return tfp.experimental.mcmc.windowed_adaptive_hmc(\n 400, model, num_leapfrog_steps=12, seed=seed,\n **pins)\n\n draws, _ = do_sample(test_util.test_seed())\n flat_draws = tf.nest.flatten(\n model.experimental_pin(**pins)._model_flatten(draws))\n max_scale_reduction = tf.reduce_max(\n tf.nest.map_structure(tf.reduce_max,\n tfp.mcmc.potential_scale_reduction(flat_draws)))\n self.assertLess(self.evaluate(max_scale_reduction), 1.5)\n\n def test_nuts_samples_well(self):\n model = eight_schools_named()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function\n def do_sample():\n return tfp.experimental.mcmc.windowed_adaptive_nuts(\n 200, model, max_tree_depth=5, seed=test_util.test_seed(),\n **pins)\n\n draws, _ = do_sample()\n flat_draws = tf.nest.flatten(\n model.experimental_pin(**pins)._model_flatten(draws))\n max_scale_reduction = tf.reduce_max(\n tf.nest.map_structure(tf.reduce_max,\n tfp.mcmc.potential_scale_reduction(flat_draws)))\n self.assertLess(self.evaluate(max_scale_reduction), 1.05)\n\n @parameterized.named_parameters(\n dict(testcase_name=f'_{num_draws}', num_draws=num_draws)\n for num_draws in [0, 1, 500, 499, 100, 10000])\n def test_get_window_sizes(self, num_draws):\n [first_window,\n slow_window,\n last_window] = windowed_sampling._get_window_sizes(num_draws)\n self.assertEqual(first_window +\n slow_window +\n 2 * slow_window +\n 4 * slow_window +\n 8 * slow_window +\n last_window, num_draws)\n if num_draws == 500:\n self.assertEqual(slow_window, 25)\n self.assertEqual(first_window, 75)\n self.assertEqual(last_window, 50)\n\n def test_explicit_init(self):\n sample_dist = tfd.JointDistributionSequential(\n [tfd.HalfNormal(1., name=f'dist_{idx}') for idx in range(4)])\n\n explicit_init = [tf.ones(20) for _ in range(3)]\n _, init, bijector, _, _, _ = windowed_sampling._setup_mcmc(\n model=sample_dist,\n n_chains=[20],\n init_position=explicit_init,\n seed=test_util.test_seed(),\n dist_3=1.)\n\n self.assertAllEqual(self.evaluate(init),\n tf.convert_to_tensor(bijector(explicit_init)))\n\n def test_explicit_init_samples(self):\n stream = test_util.test_seed_stream()\n\n # Compute everything in a function so it is consistent in graph mode\n @tf.function\n def do_sample():\n jd_model = tfd.JointDistributionNamed({\n 'x': tfd.HalfNormal(1.),\n 'y': lambda x: tfd.Normal(0., x)})\n init = {'x': tf.ones(64)}\n return tfp.experimental.mcmc.windowed_adaptive_hmc(\n 10,\n jd_model,\n num_adaptation_steps=200,\n current_state=init,\n num_leapfrog_steps=5,\n discard_tuning=False,\n y=tf.constant(1.),\n seed=stream(),\n trace_fn=None)\n\n self.evaluate(do_sample())\n\n def test_valid_init(self):\n\n class _HalfNormal(tfd.HalfNormal):\n\n def _default_event_space_bijector(self):\n # This bijector is intentionally mis-specified so that ~50% of\n # initialiations will fail.\n return tfb.Identity(validate_args=self.validate_args)\n\n tough_dist = tfd.JointDistributionSequential(\n [_HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])\n\n # Twenty chains with three parameters gives a 1 / 2^60 chance of\n # initializing with a finite log probability by chance.\n _, init, _, _, _, _ = 
windowed_sampling._setup_mcmc(\n model=tough_dist,\n n_chains=[20],\n seed=test_util.test_seed(),\n dist_3=1.)\n\n self.assertAllGreater(self.evaluate(init), 0.)\n\n def test_extra_pins_not_required(self):\n model = tfd.JointDistributionSequential([\n tfd.Normal(0., 1., name='x'),\n lambda x: tfd.Normal(x, 1., name='y')\n ])\n pinned = model.experimental_pin(y=4.2)\n\n # No explicit pins are passed, since the model is already pinned.\n _, init, _, _, _, _ = windowed_sampling._setup_mcmc(\n model=pinned, n_chains=[20],\n seed=test_util.test_seed())\n self.assertLen(init, 1)\n\n def test_hmc_fitting_gaussian(self):\n # See docstring to _gen_gaussian_updating_example\n x_dim = 3\n y_dim = 12\n\n stream = test_util.test_seed_stream()\n\n # Compute everything in a function so it is consistent in graph mode\n @tf.function\n def do_sample():\n jd_model, true_var = _gen_gaussian_updating_example(\n x_dim, y_dim, stream())\n y_val = jd_model.sample(seed=stream()).y\n _, trace = tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n n_chains=1,\n num_adaptation_steps=10000,\n num_leapfrog_steps=16,\n discard_tuning=False,\n y=y_val,\n seed=stream())\n\n # Get the final scaling used for the mass matrix - this is a measure\n # of how well the windowed adaptation recovered the true variance\n final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]\n return final_scaling, true_var\n final_scaling, true_var = do_sample()\n self.assertAllClose(true_var, final_scaling, rtol=0.15)\n\n def test_nuts_fitting_gaussian(self):\n # See docstring to _gen_gaussian_updating_example\n x_dim = 3\n y_dim = 12\n\n stream = test_util.test_seed_stream()\n\n # Compute everything in a function so it is consistent in graph mode\n @tf.function\n def do_sample():\n jd_model, true_var = _gen_gaussian_updating_example(\n x_dim, y_dim, stream())\n y_val = jd_model.sample(seed=stream()).y\n _, trace = tfp.experimental.mcmc.windowed_adaptive_nuts(\n 1,\n jd_model,\n n_chains=1,\n num_adaptation_steps=10000,\n max_tree_depth=5,\n discard_tuning=False,\n y=y_val,\n seed=stream())\n\n # Get the final scaling used for the mass matrix - this is a measure\n # of how well the windowed adaptation recovered the true variance\n final_scaling = 1. 
/ trace['variance_scaling'][0][-1, 0, :]\n return final_scaling, true_var\n final_scaling, true_var = do_sample()\n self.assertAllClose(true_var, final_scaling, rtol=0.1, atol=1e-3)\n\n def test_f64_step_size(self):\n dist = tfd.JointDistributionSequential([\n tfd.Normal(\n tf.constant(0., dtype=tf.float64),\n tf.constant(1., dtype=tf.float64))\n ])\n (target_log_prob_fn, initial_transformed_position, _, _, _, _\n ) = windowed_sampling._setup_mcmc(\n dist, n_chains=[5], init_position=None, seed=test_util.test_seed())\n init_step_size = windowed_sampling._get_step_size(\n initial_transformed_position, target_log_prob_fn)\n self.assertDTypeEqual(init_step_size, np.float64)\n self.assertAllFinite(init_step_size)\n\n def test_batch_of_problems_autobatched(self):\n\n def model_fn():\n x = yield tfd.MultivariateNormalDiag(\n tf.zeros([10, 3]), tf.ones(3), name='x')\n yield tfd.Multinomial(\n logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')\n\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)\n samp = model.sample(seed=test_util.test_seed())\n self.assertEqual((10, 3), samp.x.shape)\n self.assertEqual((10, 4), samp.y.shape)\n\n states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(\n 2, model.experimental_pin(y=samp.y), num_leapfrog_steps=3,\n num_adaptation_steps=100, init_step_size=tf.ones([10, 1]),\n seed=test_util.test_seed()))\n self.assertEqual((2, 64, 10, 3), states.x.shape)\n self.assertEqual((2, 10, 1), trace['step_size'].shape)\n\n def test_batch_of_problems_named(self):\n\n def mk_y(x):\n return tfd.Multinomial(logits=tfb.Pad([(0, 1)])(x), total_count=10)\n\n model = tfd.JointDistributionNamed(dict(\n x=tfd.MultivariateNormalDiag(tf.zeros([10, 3]), tf.ones(3)),\n y=mk_y))\n\n samp = model.sample(seed=test_util.test_seed())\n self.assertEqual((10, 3), samp['x'].shape)\n self.assertEqual((10, 4), samp['y'].shape)\n\n states, trace = self.evaluate(\n tfp.experimental.mcmc.windowed_adaptive_hmc(\n 2,\n model.experimental_pin(y=samp['y']),\n num_leapfrog_steps=3,\n num_adaptation_steps=100,\n init_step_size=tf.ones([10, 1]),\n seed=test_util.test_seed()))\n self.assertEqual((2, 64, 10, 3), states['x'].shape)\n self.assertEqual((2, 10, 1), trace['step_size'].shape)\n\n def test_bijector(self):\n dist = tfd.JointDistributionSequential([tfd.Dirichlet(tf.ones(2))])\n bij, _ = windowed_sampling._get_flat_unconstraining_bijector(dist)\n draw = dist.sample(seed=test_util.test_seed())\n self.assertAllCloseNested(bij.inverse(bij(draw)), draw)\n\n @parameterized.named_parameters(*(\n (f'{kind}_{n_chains}', kind, n_chains) # pylint: disable=g-complex-comprehension\n for kind in ('hmc', 'nuts') for n_chains in ([], 3, [2, 1], [2, 2, 2])))\n def test_batches_of_chains(self, kind, n_chains):\n\n def model_fn():\n x = yield tfd.MultivariateNormalDiag(\n tf.zeros(3), tf.ones(3), name='x')\n yield tfd.Multinomial(\n logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')\n\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)\n samp = model.sample(seed=test_util.test_seed())\n states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(\n 5, model.experimental_pin(y=samp.y), n_chains=n_chains,\n num_leapfrog_steps=3, num_adaptation_steps=100,\n seed=test_util.test_seed()))\n if isinstance(n_chains, int):\n n_chains = [n_chains]\n self.assertEqual((5, *n_chains, 3), states.x.shape)\n self.assertEqual((5,), trace['step_size'].shape)\n\n def test_dynamic_batch_shape(self):\n \"\"\"Test correct handling of 
`TensorShape(None)`.\"\"\"\n if JAX_MODE:\n self.skipTest('b/203858802')\n\n n_features = 5\n n_timepoints = 100\n features = tfd.Normal(0., 1.).sample([100, n_features],\n test_util.test_seed())\n ar_sigma = 1.\n rho = .25\n\n @tfd.JointDistributionCoroutine\n def jd_model():\n beta = yield Root(tfd.Sample(tfd.Normal(0., 1.), n_features))\n yhat = tf.einsum('ij,...j->...i', features, beta)\n\n def ar_fun(y):\n loc = tf.concat([tf.zeros_like(y[..., :1]), y[..., :-1]], axis=-1)\n return tfd.Independent(\n tfd.Normal(loc=loc * rho, scale=ar_sigma),\n reinterpreted_batch_ndims=1)\n # Autoregressive distribution defined as below introduce a batch shape:\n # TensorShape(None)\n yield tfd.Autoregressive(\n distribution_fn=ar_fun,\n sample0=tf.zeros_like(yhat),\n num_steps=yhat.shape[-1],\n name='y')\n\n states, _ = self.evaluate(\n tfp.experimental.mcmc.windowed_adaptive_nuts(\n 2,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n seed=test_util.test_seed()))\n self.assertEqual((2, 3, n_timepoints), states.y.shape)\n\n @parameterized.named_parameters(\n ('_nuts', tfp.experimental.mcmc.windowed_adaptive_nuts, {}),\n ('_hmc', tfp.experimental.mcmc.windowed_adaptive_hmc, {\n 'num_leapfrog_steps': 1\n }),\n )\n def test_f64_state(self, method, method_kwargs):\n states, _ = callable_util.get_output_spec(lambda: method( # pylint: disable=g-long-lambda\n 5,\n tfd.Normal(tf.constant(0., tf.float64), 1.),\n n_chains=2,\n num_adaptation_steps=100,\n seed=test_util.test_seed(),\n **method_kwargs))\n\n self.assertEqual(tf.float64, states.dtype)\n\n\n@test_util.test_graph_and_eager_modes\nclass WindowedSamplingStepSizeTest(test_util.TestCase):\n\n def test_supply_full_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionNamed({\n 'a': tfd.Normal(0., 1.),\n 'b': tfd.MultivariateNormalDiag(\n loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))\n })\n\n init_step_size = {'a': tf.reshape(tf.linspace(1., 2., 3), (3, 1)),\n 'b': tf.reshape(tf.linspace(1., 2., 9), (3, 3))}\n\n _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n init_step_size=init_step_size,\n num_leapfrog_steps=5,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n seed=stream(),\n )\n\n # Gets a newaxis because step size needs to have an event dimension.\n self.assertAllCloseNested([init_step_size['a'],\n init_step_size['b']],\n [j[0] for j in actual_step_size])\n\n def test_supply_partial_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionNamed({\n 'a': tfd.Normal(0., 1.),\n 'b': tfd.MultivariateNormalDiag(\n loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))\n })\n\n init_step_size = {'a': 1., 'b': 2.}\n _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n init_step_size=init_step_size,\n num_leapfrog_steps=5,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n seed=stream(),\n )\n\n actual_step = [j[0] for j in actual_step_size]\n expected_step = [1., 2.]\n self.assertAllCloseNested(expected_step, actual_step)\n\n def test_supply_single_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionNamed({\n 'a': tfd.Normal(0., 1.),\n 'b': tfd.MultivariateNormalDiag(\n loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))\n })\n\n init_step_size = 1.\n _, traced_step_size = 
self.evaluate(\n tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=20,\n init_step_size=init_step_size,\n num_leapfrog_steps=5,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n seed=stream()))\n\n self.assertEqual((25 + 1,), traced_step_size.shape)\n self.assertAllClose(1., traced_step_size[0])\n\n def test_sequential_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionSequential(\n [tfd.HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])\n init_step_size = [1., 2., 3.]\n _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_nuts(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n init_step_size=init_step_size,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n dist_3=tf.constant(1.),\n seed=stream(),\n )\n\n self.assertAllCloseNested(init_step_size,\n [j[0] for j in actual_step_size])\n\n\ndef _beta_binomial(trials):\n \"\"\"Returns a function that constructs a beta binomial distribution.\"\"\"\n\n def _beta_binomial_distribution(mean, inverse_concentration):\n \"\"\"Returns a beta binomial distribution with the given parameters.\"\"\"\n # Mean and inverse concentration are broadcast across days.\n mean = mean[..., tf.newaxis]\n inverse_concentration = inverse_concentration[..., tf.newaxis]\n\n beta_binomial = tfd.BetaBinomial(\n total_count=trials,\n concentration0=(1 - mean) / inverse_concentration,\n concentration1=mean / inverse_concentration)\n return tfd.Independent(beta_binomial, reinterpreted_batch_ndims=2)\n\n return _beta_binomial_distribution\n\n\ndef get_joint_distribution(\n trials,\n mean_prior=lambda: tfd.Uniform(0., 1.),\n inverse_concentration_prior=lambda: tfd.HalfNormal(5.)):\n \"\"\"Returns a joint distribution over parameters and successes.\"\"\"\n param_shape = ps.shape(trials)[:1]\n mean = tfd.Sample(mean_prior(), param_shape)\n inverse_concentration = tfd.Sample(inverse_concentration_prior(), param_shape)\n return tfd.JointDistributionNamed(\n dict(mean=mean,\n inverse_concentration=inverse_concentration,\n successes=_beta_binomial(trials)),\n name='jd')\n\n\nclass PrecompiledTest(test_util.TestCase):\n\n def setUp(self):\n super().setUp()\n arms = 2\n days = 3\n\n seed = test_util.test_seed()\n trial_seed, value_seed = tfp.random.split_seed(seed)\n self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)\n dist = get_joint_distribution(self.trials)\n self.true_values = dist.sample(seed=value_seed)\n\n def nuts_kwargs(self):\n return {'max_tree_depth': 2}\n\n def hmc_kwargs(self):\n return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}\n\n @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),\n ('nuts_jit_sig', 'nuts'))\n def test_base_kernel(self, kind):\n self.skip_if_no_xla()\n self.skipTest('b/195070752') # Test is broken by cl/393807414.\n\n if JAX_MODE:\n input_signature = None\n else:\n input_signature = (\n tf.TensorSpec(\n shape=[None, None], dtype=tf.float32, name='trials'),\n tf.TensorSpec(\n shape=[None, None], dtype=tf.float32, name='successes'),\n tf.TensorSpec(\n shape=[2], dtype=tf.int32, name='seed'))\n @tf.function(jit_compile=True, input_signature=input_signature)\n def do(trials, successes, seed):\n if kind == 'hmc':\n proposal_kernel_kwargs = self.hmc_kwargs()\n else:\n proposal_kernel_kwargs = self.nuts_kwargs()\n\n return windowed_sampling._windowed_adaptive_impl(\n n_draws=9,\n 
joint_dist=get_joint_distribution(trials),\n kind=kind,\n n_chains=11,\n proposal_kernel_kwargs=proposal_kernel_kwargs,\n num_adaptation_steps=50,\n current_state=None,\n dual_averaging_kwargs={'target_accept_prob': 0.76},\n trace_fn=None,\n return_final_kernel_results=False,\n discard_tuning=True,\n chain_axis_names=None,\n seed=seed,\n successes=successes)\n\n self.evaluate(do(self.trials + 0., self.true_values['successes'],\n test_util.test_seed(sampler_type='stateless')))\n\nif JAX_MODE:\n # TF runs into the `merge_call` error here (b/181800108).\n\n @test_util.disable_test_for_backend(\n disable_numpy=True,\n reason='Sharding not available for NumPy backend.')\n class DistributedTest(distribute_test_lib.DistributedTest):\n\n def setUp(self):\n super().setUp()\n arms = 2\n days = 3\n\n seed = test_util.test_seed()\n trial_seed, value_seed = tfp.random.split_seed(seed)\n self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)\n dist = get_joint_distribution(self.trials)\n self.true_values = dist.sample(seed=value_seed)\n\n def nuts_kwargs(self):\n return {'max_tree_depth': 2}\n\n def hmc_kwargs(self):\n return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}\n\n def test_can_extract_shard_axis_names_from_model(self):\n joint_dist = distribute.JointDistributionNamed(dict(\n x=tfd.Normal(0., 1.),\n y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),\n z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)\n ))\n\n def do():\n _, _, _, _, _, shard_axis_names = windowed_sampling._setup_mcmc(\n model=joint_dist,\n n_chains=[20],\n seed=test_util.test_seed(), z=1.)\n # _setup_mcmc will flatten the distribution\n self.assertListEqual(shard_axis_names, [[], ['i']])\n self.strategy_run(do, args=(), in_axes=None)\n\n @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),\n ('nuts_jit_sig', 'nuts'))\n def test_data_sharding(self, kind):\n self.skip_if_no_xla()\n\n joint_dist = distribute.JointDistributionNamed(dict(\n x=tfd.Normal(0., 1.),\n y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),\n z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)\n ))\n\n def do(seed, z):\n if kind == 'hmc':\n proposal_kernel_kwargs = self.hmc_kwargs()\n else:\n proposal_kernel_kwargs = self.nuts_kwargs()\n\n return windowed_sampling._windowed_adaptive_impl(\n n_draws=10,\n joint_dist=joint_dist,\n kind=kind,\n n_chains=2,\n proposal_kernel_kwargs=proposal_kernel_kwargs,\n num_adaptation_steps=21,\n current_state=None,\n dual_averaging_kwargs={'target_accept_prob': 0.76},\n trace_fn=None,\n return_final_kernel_results=False,\n discard_tuning=True,\n seed=seed,\n chain_axis_names=None,\n z=z)\n\n self.evaluate(self.strategy_run(\n do,\n in_axes=(None, 0),\n args=(samplers.zeros_seed(), self.shard_values(\n tf.ones(distribute_test_lib.NUM_DEVICES)))))\n\n @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),\n ('nuts_jit_sig', 'nuts'))\n def test_chain_sharding(self, kind):\n self.skip_if_no_xla()\n\n joint_dist = tfd.JointDistributionNamed(dict(\n x=tfd.Normal(0., 1.),\n y=lambda x: tfd.Sample(tfd.Normal(x, 1.), 4),\n z=lambda y: tfd.Independent(tfd.Normal(y, 1.), 1)\n ))\n\n def do(seed, z):\n if kind == 'hmc':\n proposal_kernel_kwargs = self.hmc_kwargs()\n else:\n proposal_kernel_kwargs = self.nuts_kwargs()\n\n return windowed_sampling._windowed_adaptive_impl(\n n_draws=10,\n joint_dist=joint_dist,\n kind=kind,\n n_chains=2,\n proposal_kernel_kwargs=proposal_kernel_kwargs,\n num_adaptation_steps=21,\n current_state=None,\n 
dual_averaging_kwargs={'target_accept_prob': 0.76},\n trace_fn=None,\n return_final_kernel_results=False,\n discard_tuning=True,\n seed=seed,\n chain_axis_names=self.axis_name,\n z=z)\n\n self.evaluate(self.strategy_run(\n do,\n in_axes=None,\n args=(samplers.zeros_seed(),\n tf.ones(distribute_test_lib.NUM_DEVICES))))\n\nif __name__ == '__main__':\n test_util.main()\n",
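The tests above exercise `tfp.experimental.mcmc.windowed_adaptive_hmc` / `windowed_adaptive_nuts` against several joint-distribution fixtures. As a rough illustration of the call pattern they rely on, here is a minimal standalone sketch on a toy model of my own (the model, seed, and draw counts are assumptions, not values from the tests); per the shape assertion in the autoregressive test above, each state part comes back with shape [n_draws, n_chains] + event shape.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Toy unpinned model (hypothetical); any TFP JointDistribution* works here.
jd_model = tfd.JointDistributionNamed({
    'loc': tfd.Normal(0., 1.),
    'scale': tfd.HalfNormal(1.),
})

# 200 post-warmup draws from 4 chains, adapting sampler hyperparameters over
# the first 100 (windowed warmup) steps.
states, _ = tfp.experimental.mcmc.windowed_adaptive_hmc(
    200,
    jd_model,
    num_adaptation_steps=100,
    n_chains=4,
    num_leapfrog_steps=5,
    seed=[1, 2])  # stateless two-integer seed

# Each state part has shape [n_draws, n_chains] + event_shape.
print(tf.nest.map_structure(lambda x: x.shape, states))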
"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.math import psd_kernels\n\n\n@test_util.test_all_tf_execution_regimes\nclass StudentTProcessRegressionModelTest(test_util.TestCase):\n\n def testInstantiate(self):\n df = np.float64(1.)\n # 5x5 grid of index points in R^2 and flatten to 25x2\n index_points = np.linspace(-4., 4., 5, dtype=np.float64)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n # ==> shape = [25, 2]\n\n # Kernel with batch_shape [2, 4, 1, 3]\n amplitude = np.array([1., 2.], np.float64).reshape([2, 1, 1, 1])\n length_scale = np.array([.1, .2, .3, .4], np.float64).reshape(\n [1, 4, 1, 1])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 1, 1, 3])\n\n observation_index_points = (\n np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)\n\n def cholesky_fn(x):\n return tf.linalg.cholesky(\n tf.linalg.set_diag(x, tf.linalg.diag_part(x) + 1.))\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n cholesky_fn=cholesky_fn)\n batch_shape = [2, 4, 1, 3]\n event_shape = [25]\n sample_shape = [7, 2]\n\n print(stprm.batch_shape)\n print(stprm.kernel.batch_shape)\n print(stprm.kernel.schur_complement.batch_shape)\n print(stprm.kernel.schur_complement.base_kernel.batch_shape)\n\n self.assertIs(cholesky_fn, stprm.cholesky_fn)\n\n samples = stprm.sample(sample_shape, seed=test_util.test_seed())\n self.assertAllEqual(stprm.batch_shape_tensor(), batch_shape)\n self.assertAllEqual(stprm.event_shape_tensor(), event_shape)\n self.assertAllEqual(self.evaluate(samples).shape,\n sample_shape + batch_shape + event_shape)\n\n def testMeanSameAsGPRM(self):\n df = np.float64(3.)\n index_points = np.linspace(-4., 4., 5, dtype=np.float64)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n\n # Kernel with batch_shape [5, 3]\n amplitude = np.array([1., 2., 3., 4., 5.], np.float64).reshape([5, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape(\n [1, 3])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 3])\n\n observation_index_points = (\n np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (3, 
7)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n gprm = tfd.GaussianProcessRegressionModel(\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n\n self.assertAllClose(self.evaluate(stprm.mean()), self.evaluate(gprm.mean()))\n\n def testLogProbNearGPRM(self):\n # For large df, the log_prob calculations should be the same.\n df = np.float64(1e6)\n index_points = np.linspace(-4., 4., 5, dtype=np.float64)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n\n # Kernel with batch_shape [5, 3]\n amplitude = np.array([1., 2., 3., 4., 5.], np.float64).reshape([5, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape(\n [1, 3])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 3])\n\n observation_index_points = (\n np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n gprm = tfd.GaussianProcessRegressionModel(\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n\n x = np.linspace(-3., 3., 25)\n\n self.assertAllClose(\n self.evaluate(stprm.log_prob(x)),\n self.evaluate(gprm.log_prob(x)), rtol=2e-5)\n\n def testMeanVarianceAndCovariancePrecomputed(self):\n amplitude = np.array([1., 2.], np.float64).reshape([2, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape([1, 3])\n observation_noise_variance = np.array([1e-9], np.float64)\n df = np.float64(3.)\n\n observation_index_points = (\n np.random.uniform(-1., 1., (1, 1, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (1, 1, 7)).astype(np.float64)\n\n index_points = np.random.uniform(-1., 1., (6, 2)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n validate_args=True)\n\n precomputed_stprm = tfd.StudentTProcessRegressionModel.precompute_regression_model(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n validate_args=True)\n\n self.assertAllClose(self.evaluate(precomputed_stprm.covariance()),\n self.evaluate(stprm.covariance()))\n self.assertAllClose(self.evaluate(precomputed_stprm.variance()),\n self.evaluate(stprm.variance()))\n self.assertAllClose(self.evaluate(precomputed_stprm.mean()),\n self.evaluate(stprm.mean()))\n\n 
@test_util.disable_test_for_backend(\n disable_numpy=True, disable_jax=True,\n reason='Numpy and JAX have no notion of CompositeTensor/saved_model')\n def testPrecomputedCompositeTensor(self):\n amplitude = np.array([1., 2.], np.float64).reshape([2, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape([1, 3])\n observation_noise_variance = np.array([1e-9], np.float64)\n\n observation_index_points = (\n np.random.uniform(-1., 1., (1, 1, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (1, 1, 7)).astype(np.float64)\n\n index_points = np.random.uniform(-1., 1., (6, 2)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n\n precomputed_stprm = tfd.StudentTProcessRegressionModel.precompute_regression_model(\n df=3.,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n validate_args=True)\n\n flat = tf.nest.flatten(precomputed_stprm, expand_composites=True)\n unflat = tf.nest.pack_sequence_as(\n precomputed_stprm, flat, expand_composites=True)\n self.assertIsInstance(unflat, tfd.StudentTProcessRegressionModel)\n # Check that we don't recompute the divisor matrix on flattening /\n # unflattening.\n self.assertIs(\n precomputed_stprm.kernel.schur_complement._precomputed_divisor_matrix_cholesky, # pylint:disable=line-too-long\n unflat.kernel.schur_complement._precomputed_divisor_matrix_cholesky)\n\n # TODO(b/196219597): Enable this test once STPRM works across TF function\n # boundaries.\n # index_observations = np.random.uniform(-1., 1., (6,)).astype(np.float64)\n # @tf.function\n # def log_prob(d):\n # return d.log_prob(index_observations)\n\n # lp = self.evaluate(precomputed_stprm.log_prob(index_observations))\n\n # self.assertAllClose(lp, self.evaluate(log_prob(precomputed_stprm)))\n # self.assertAllClose(lp, self.evaluate(log_prob(unflat)))\n\n def testEmptyDataMatchesStPPrior(self):\n df = np.float64(3.5)\n amp = np.float64(.5)\n len_scale = np.float64(.2)\n index_points = np.random.uniform(-1., 1., (10, 1)).astype(np.float64)\n\n # k_xx - k_xn @ (k_nn + sigma^2) @ k_nx + sigma^2\n mean_fn = lambda x: x[:, 0]**2\n\n kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)\n stp = tfd.StudentTProcess(\n df,\n kernel,\n index_points,\n mean_fn=mean_fn,\n validate_args=True)\n\n stprm_nones = tfd.StudentTProcessRegressionModel(\n df,\n kernel=kernel,\n index_points=index_points,\n mean_fn=mean_fn,\n validate_args=True)\n\n stprm_zero_shapes = tfd.StudentTProcessRegressionModel(\n df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=tf.ones([0, 1], tf.float64),\n observations=tf.ones([0], tf.float64),\n mean_fn=mean_fn,\n validate_args=True)\n\n for stprm in [stprm_nones, stprm_zero_shapes]:\n self.assertAllClose(\n self.evaluate(stp.mean()), self.evaluate(stprm.mean()))\n self.assertAllClose(self.evaluate(stp.covariance()),\n self.evaluate(stprm.covariance()))\n self.assertAllClose(self.evaluate(stp.variance()),\n self.evaluate(stprm.variance()))\n\n observations = np.random.uniform(-1., 1., 10).astype(np.float64)\n self.assertAllClose(self.evaluate(stp.log_prob(observations)),\n self.evaluate(stprm.log_prob(observations)))\n\n def testCopy(self):\n # 5 random index points in R^2\n index_points_1 = np.random.uniform(-4., 4., (5, 2)).astype(np.float32)\n # 10 random index points in R^2\n index_points_2 = np.random.uniform(-4., 4., (10, 2)).astype(np.float32)\n\n 
observation_index_points_1 = (\n np.random.uniform(-4., 4., (7, 2)).astype(np.float32))\n observation_index_points_2 = (\n np.random.uniform(-4., 4., (9, 2)).astype(np.float32))\n\n observations_1 = np.random.uniform(-1., 1., 7).astype(np.float32)\n observations_2 = np.random.uniform(-1., 1., 9).astype(np.float32)\n\n # ==> shape = [6, 25, 2]\n mean_fn = lambda x: np.array([0.], np.float32)\n kernel_1 = psd_kernels.ExponentiatedQuadratic()\n kernel_2 = psd_kernels.ExpSinSquared()\n\n stprm1 = tfd.StudentTProcessRegressionModel(\n df=5.,\n kernel=kernel_1,\n index_points=index_points_1,\n observation_index_points=observation_index_points_1,\n observations=observations_1,\n mean_fn=mean_fn,\n validate_args=True)\n stprm2 = stprm1.copy(\n kernel=kernel_2,\n index_points=index_points_2,\n observation_index_points=observation_index_points_2,\n observations=observations_2)\n\n precomputed_stprm1 = (\n tfd.StudentTProcessRegressionModel.precompute_regression_model(\n df=5.,\n kernel=kernel_1,\n index_points=index_points_1,\n observation_index_points=observation_index_points_1,\n observations=observations_1,\n mean_fn=mean_fn,\n validate_args=True))\n precomputed_stprm2 = precomputed_stprm1.copy(index_points=index_points_2)\n self.assertIs(precomputed_stprm1.mean_fn, precomputed_stprm2.mean_fn)\n self.assertIs(precomputed_stprm1.kernel, precomputed_stprm2.kernel)\n\n event_shape_1 = [5]\n event_shape_2 = [10]\n\n self.assertIsInstance(stprm1.kernel.schur_complement.base_kernel,\n psd_kernels.ExponentiatedQuadratic)\n self.assertIsInstance(stprm2.kernel.schur_complement.base_kernel,\n psd_kernels.ExpSinSquared)\n self.assertAllEqual(self.evaluate(stprm1.batch_shape_tensor()),\n self.evaluate(stprm2.batch_shape_tensor()))\n self.assertAllEqual(self.evaluate(stprm1.event_shape_tensor()),\n event_shape_1)\n self.assertAllEqual(self.evaluate(stprm2.event_shape_tensor()),\n event_shape_2)\n self.assertAllEqual(self.evaluate(stprm1.index_points), index_points_1)\n self.assertAllEqual(self.evaluate(stprm2.index_points), index_points_2)\n\n\nif __name__ == '__main__':\n test_util.main()\n",
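For reference, a minimal sketch of the workflow these tests assert properties of: condition a `StudentTProcessRegressionModel` on a handful of observations and query its posterior mean at new index points. The 1-D data, kernel parameters, and the public `tfp.distributions` / `tfp.math.psd_kernels` aliases are my own assumptions, chosen to mirror the test fixtures.

import numpy as np
import tensorflow_probability as tfp

tfd = tfp.distributions
psd_kernels = tfp.math.psd_kernels

# Seven noisy observations of a 1-D function (toy data).
observation_index_points = np.linspace(-1., 1., 7, dtype=np.float64)[:, None]
observations = np.sin(3. * observation_index_points[:, 0])
# 25 query points at which to evaluate the posterior.
index_points = np.linspace(-1., 1., 25, dtype=np.float64)[:, None]

stprm = tfd.StudentTProcessRegressionModel(
    df=np.float64(5.),
    kernel=psd_kernels.ExponentiatedQuadratic(np.float64(1.), np.float64(.5)),
    index_points=index_points,
    observation_index_points=observation_index_points,
    observations=observations,
    observation_noise_variance=np.float64(1e-4))

print(stprm.mean().shape)  # (25,): posterior mean at the query points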
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for gradient_based_trajectory_length_adaptation.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.internal import distribute_lib\nfrom tensorflow_probability.python.internal import distribute_test_lib\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import test_util\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\nJAX_MODE = False\n\n\ndef snaper_criterion_dummy_direction(previous_state, *args, **kwargs):\n # Technically direction should be normalized, but omitting the normalization\n # term only rescales the criterion so we're fine.\n return tfp.experimental.mcmc.snaper_criterion(\n previous_state,\n *args,\n direction=tf.nest.map_structure(tf.ones_like, previous_state),\n **kwargs,\n )\n\n\ndef snaper_criterion_2d_direction(previous_state, *args, **kwargs):\n return tfp.experimental.mcmc.snaper_criterion(\n previous_state,\n *args,\n direction=tf.constant([0., 1.], previous_state.dtype),\n **kwargs,\n )\n\n\n@test_util.test_graph_and_eager_modes\nclass GradientBasedTrajectoryLengthAdaptationTestGeneric(\n test_util.TestCase, parameterized.TestCase):\n\n def testForbiddenTransformedKernel(self):\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=lambda x: -x**2, step_size=0.1, num_leapfrog_steps=1)\n kernel = tfp.mcmc.TransformedTransitionKernel(kernel, tfb.Identity())\n with self.assertRaisesRegex(\n ValueError,\n 'The inner kernel cannot contain a `TransformedTransitionKernel`'):\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel, num_adaptation_steps=100)\n\n def testNestedStepSizeError(self):\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=lambda x: -x**2,\n step_size=[0.1],\n num_leapfrog_steps=1)\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel, num_adaptation_steps=100)\n with self.assertRaisesRegex(ValueError, 'Step size must be a scalar'):\n kernel.bootstrap_results([1.])\n\n @parameterized.named_parameters(('StaticShape', True),\n ('DynamicShape', False))\n def testNonScalarStepSizeError(self, use_static_shape):\n step_size = tf1.placeholder_with_default(\n [0.1, 0.2], shape=[2] if use_static_shape else None)\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=lambda x: -x**2,\n step_size=step_size,\n num_leapfrog_steps=1)\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel, num_adaptation_steps=100, validate_args=True)\n with self.assertRaisesRegex(Exception, 'Step size must be a scalar'):\n self.evaluate(kernel.bootstrap_results(tf.constant(1.)))\n\n @parameterized.named_parameters(\n ('ChEESStaticShape', True, 
tfp.experimental.mcmc.chees_criterion),\n ('ChEESDynamicShape', False, tfp.experimental.mcmc.chees_criterion),\n ('SNAPERStaticShape', True, snaper_criterion_dummy_direction),\n ('SNAPERDynamicShape', False, snaper_criterion_dummy_direction),\n )\n def testTooFewChains(self, use_static_shape, criterion_fn):\n state = tf1.placeholder_with_default(\n [[0.1, 0.2]], shape=[1, 2] if use_static_shape else None)\n accept_prob = tf1.placeholder_with_default(\n [1.], shape=[1] if use_static_shape else None)\n with self.assertRaisesRegex(Exception,\n 'chees_criterion requires at least 2 chains'):\n self.evaluate(\n tfp.experimental.mcmc.chees_criterion(\n state, state, accept_prob, 1., validate_args=True))\n\n @parameterized.named_parameters(\n ('ChEESStaticShape', True, tfp.experimental.mcmc.chees_criterion),\n ('ChEESDynamicShape', False, tfp.experimental.mcmc.chees_criterion),\n ('SNAPERStaticShape', True, snaper_criterion_dummy_direction),\n ('SNAPERDynamicShape', False, snaper_criterion_dummy_direction),\n )\n def testNoBatchDims(self, use_static_shape, criterion_fn):\n state = tf1.placeholder_with_default(\n [[0.1, 0.2]], shape=[1, 2] if use_static_shape else None)\n accept_prob = tf1.placeholder_with_default(\n 1., shape=[] if use_static_shape else None)\n with self.assertRaisesRegex(Exception, 'requires at least 2 chains'):\n self.evaluate(\n criterion_fn(state, state, accept_prob, 1., validate_args=True))\n\n\nclass _GradientBasedTrajectoryLengthAdaptationTest(test_util.TestCase):\n\n def testDocstringExample(self):\n if tf.executing_eagerly() and not JAX_MODE:\n self.skipTest('Too slow for TF Eager.')\n\n target = tfd.JointDistributionSequential([\n tfd.Normal(0., tf.constant(20., dtype=self.dtype)),\n tfd.HalfNormal(tf.constant(10., dtype=self.dtype)),\n ])\n\n def target_log_prob_fn(*x):\n return tf.cast(target.log_prob(x), self.dtype)\n\n num_burnin_steps = 1000\n num_adaptation_steps = int(num_burnin_steps * 0.8)\n num_results = 500\n num_chains = 16\n step_size = 0.1\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=1,\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=num_adaptation_steps,\n validate_args=True)\n kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n kernel, num_adaptation_steps=num_adaptation_steps)\n kernel = tfp.mcmc.TransformedTransitionKernel(\n kernel, [tfb.Identity(), tfb.Exp()])\n\n def trace_fn(_, pkr):\n return (\n pkr.inner_results.inner_results.inner_results.accepted_results\n .step_size,\n pkr.inner_results.inner_results.max_trajectory_length,\n pkr.inner_results.inner_results.inner_results.log_accept_ratio,\n )\n\n # The chain will be stepped for num_results + num_burnin_steps, adapting for\n # the first num_adaptation_steps.\n chain, [step_size, max_trajectory_length, log_accept_ratio] = (\n tfp.mcmc.sample_chain(\n num_results=num_results,\n num_burnin_steps=num_burnin_steps,\n current_state=[\n tf.ones(num_chains, dtype=self.dtype),\n tf.ones(num_chains, dtype=self.dtype)\n ],\n kernel=kernel,\n trace_fn=trace_fn,\n seed=test_util.test_seed(sampler_type='stateless')))\n\n p_accept = tf.math.exp(\n tfp.math.reduce_logmeanexp(tf.minimum(log_accept_ratio, 0.)))\n mean_step_size = tf.reduce_mean(step_size)\n mean_max_trajectory_length = tf.reduce_mean(max_trajectory_length)\n\n self.assertAllClose(0.75, p_accept, atol=0.1)\n self.assertAllClose(0.52, mean_step_size, atol=0.2)\n self.assertAllClose(46., 
mean_max_trajectory_length, atol=15)\n self.assertAllClose(\n target.mean(), [tf.reduce_mean(x, axis=[0, 1]) for x in chain],\n atol=1.5)\n self.assertAllClose(\n target.variance(),\n [tf.math.reduce_variance(x, axis=[0, 1]) for x in chain],\n rtol=0.2)\n\n def testStateMeanSNAPER(self):\n state = np.array([[0.1, 0.2]], self.dtype)\n accept_prob = np.ones([], self.dtype)\n # This doesn't fail because state_mean is provided externally.\n self.evaluate(tfp.experimental.mcmc.snaper_criterion(\n state,\n state,\n accept_prob,\n 2.,\n direction=tf.ones_like(state),\n state_mean=state,\n state_mean_weight=0.1,\n ))\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_criterion),\n ('SNAPER', snaper_criterion_dummy_direction),\n )\n def testScalarState(self, criterion_fn):\n\n def target_log_prob_fn(x):\n return -x**2 / 2\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=0.1,\n num_leapfrog_steps=1,\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=5,\n adaptation_rate=1.,\n criterion_fn=criterion_fn,\n validate_args=True)\n\n state = tf.zeros([64], self.dtype)\n init_kernel_results = kernel.bootstrap_results(state)\n init_kernel_results, (_, final_kernel_results) = self.evaluate([\n init_kernel_results,\n kernel.one_step(\n state,\n init_kernel_results,\n seed=test_util.test_seed(sampler_type='stateless'))\n ])\n\n # We expect it to move it a little bit.\n self.assertGreater(\n np.abs(init_kernel_results.max_trajectory_length -\n final_kernel_results.max_trajectory_length), 0.0005)\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_criterion),\n ('SNAPER', snaper_criterion_dummy_direction),\n )\n def testTensorState(self, criterion_fn):\n\n def target_log_prob_fn(x):\n return -tf.reduce_mean(x**2, [-1, -2]) / 2\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=0.1,\n num_leapfrog_steps=1,\n )\n kernel = (\n tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=5,\n adaptation_rate=1.,\n criterion_fn=criterion_fn,\n validate_args=True))\n\n state = tf.zeros([64, 2, 3], self.dtype)\n init_kernel_results = kernel.bootstrap_results(state)\n init_kernel_results, (_, final_kernel_results) = self.evaluate([\n init_kernel_results,\n kernel.one_step(\n state,\n init_kernel_results,\n seed=test_util.test_seed(sampler_type='stateless'))\n ])\n\n # We expect it to move it a little bit.\n self.assertGreater(\n np.abs(init_kernel_results.max_trajectory_length -\n final_kernel_results.max_trajectory_length), 0.0005)\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_criterion),\n ('SNAPER', snaper_criterion_dummy_direction),\n )\n def testListState(self, criterion_fn):\n\n def target_log_prob_fn(x, y):\n return -x**2 / 2 - y**2 / 2\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=0.1,\n num_leapfrog_steps=1,\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=5,\n adaptation_rate=1.,\n criterion_fn=criterion_fn,\n validate_args=True)\n\n state = [tf.zeros([64], self.dtype), tf.zeros([64], self.dtype)]\n init_kernel_results = kernel.bootstrap_results(state)\n init_kernel_results, (_, final_kernel_results) = self.evaluate([\n init_kernel_results,\n kernel.one_step(\n state,\n init_kernel_results,\n 
seed=test_util.test_seed(sampler_type='stateless'))\n ])\n\n # We expect it to move it a little bit.\n self.assertGreater(\n np.abs(init_kernel_results.max_trajectory_length -\n final_kernel_results.max_trajectory_length), 0.0005)\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_rate_criterion),\n ('SNAPER', snaper_criterion_2d_direction),\n )\n def testAdaptation(self, criterion_fn):\n if tf.executing_eagerly() and not JAX_MODE:\n self.skipTest('Too slow for TF Eager.')\n\n target = tfd.Independent(\n tfd.Normal(0., tf.constant([1., 10.], self.dtype)), 1)\n\n num_burnin_steps = 1000\n num_adaptation_steps = int(num_burnin_steps * 0.8)\n num_results = 500\n num_chains = 16\n step_size = 0.1\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=step_size,\n num_leapfrog_steps=1,\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=num_adaptation_steps,\n criterion_fn=criterion_fn,\n validate_args=True)\n kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n kernel, num_adaptation_steps=num_adaptation_steps)\n\n def trace_fn(_, pkr):\n return (\n pkr.inner_results.inner_results.accepted_results\n .step_size,\n pkr.inner_results.max_trajectory_length,\n pkr.inner_results.inner_results.log_accept_ratio,\n )\n\n # The chain will be stepped for num_results + num_burnin_steps, adapting for\n # the first num_adaptation_steps.\n chain, [step_size, max_trajectory_length, log_accept_ratio] = (\n tfp.mcmc.sample_chain(\n num_results=num_results,\n num_burnin_steps=num_burnin_steps,\n current_state=tf.zeros([num_chains, 2], dtype=self.dtype),\n kernel=kernel,\n trace_fn=trace_fn,\n seed=test_util.test_seed(sampler_type='stateless')))\n\n p_accept = tf.math.exp(\n tfp.math.reduce_logmeanexp(tf.minimum(log_accept_ratio, 0.)))\n mean_step_size = tf.reduce_mean(step_size)\n mean_max_trajectory_length = tf.reduce_mean(max_trajectory_length)\n\n self.assertAllClose(0.75, p_accept, atol=0.1)\n self.assertAllClose(1.5, mean_step_size, atol=0.2)\n # Both SNAPER and ChEES-rate find roughly the same trajectory length for\n # this target.\n self.assertAllClose(15., mean_max_trajectory_length, rtol=0.3)\n self.assertAllClose(\n target.mean(), tf.reduce_mean(chain, axis=[0, 1]),\n atol=1.)\n self.assertAllClose(\n target.variance(),\n tf.math.reduce_variance(chain, axis=[0, 1]),\n rtol=0.1)\n\n def testPreconditionedHMC(self):\n if tf.executing_eagerly() and not JAX_MODE:\n self.skipTest('Too slow for TF Eager.')\n\n target = tfd.Independent(\n tfd.Normal(0., tf.constant([1., 10.], self.dtype)), 1)\n\n num_burnin_steps = 1000\n num_adaptation_steps = int(num_burnin_steps * 0.8)\n num_results = 500\n num_chains = 16\n step_size = 0.1\n\n kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=step_size,\n num_leapfrog_steps=1,\n momentum_distribution=tfd.Independent(\n tfd.Normal(0., tf.constant([1., 1. 
/ 10.], self.dtype)), 1),\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=num_adaptation_steps,\n validate_args=True)\n kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n kernel, num_adaptation_steps=num_adaptation_steps)\n\n def trace_fn(_, pkr):\n return (\n pkr.inner_results.inner_results.accepted_results\n .step_size,\n pkr.inner_results.max_trajectory_length,\n pkr.inner_results.inner_results.log_accept_ratio,\n )\n\n # The chain will be stepped for num_results + num_burnin_steps, adapting for\n # the first num_adaptation_steps.\n chain, [step_size, max_trajectory_length, log_accept_ratio] = (\n tfp.mcmc.sample_chain(\n num_results=num_results,\n num_burnin_steps=num_burnin_steps,\n current_state=tf.zeros([num_chains, 2], dtype=self.dtype),\n kernel=kernel,\n trace_fn=trace_fn,\n seed=test_util.test_seed(sampler_type='stateless')))\n\n p_accept = tf.math.exp(\n tfp.math.reduce_logmeanexp(tf.minimum(log_accept_ratio, 0.)))\n mean_step_size = tf.reduce_mean(step_size)\n mean_max_trajectory_length = tf.reduce_mean(max_trajectory_length)\n\n self.assertAllClose(0.75, p_accept, atol=0.1)\n self.assertAllClose(1.2, mean_step_size, atol=0.2)\n self.assertAllClose(1.5, mean_max_trajectory_length, rtol=0.25)\n self.assertAllClose(\n target.mean(), tf.reduce_mean(chain, axis=[0, 1]),\n atol=0.3)\n self.assertAllClose(\n target.variance(),\n tf.math.reduce_variance(chain, axis=[0, 1]),\n rtol=0.1)\n\n def testNumAdaptationSteps(self):\n\n def target_log_prob_fn(x):\n return -x**2\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=0.1,\n num_leapfrog_steps=1,\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=1,\n adaptation_rate=1.,\n validate_args=True)\n\n state = tf.zeros([64], self.dtype)\n seed = test_util.test_seed(sampler_type='stateless')\n step_0_kernel_results = kernel.bootstrap_results(state)\n state, step_1_kernel_results = kernel.one_step(\n state, step_0_kernel_results, seed=seed)\n _, step_2_kernel_results = kernel.one_step(\n state, step_1_kernel_results, seed=seed)\n\n (step_0_kernel_results, step_1_kernel_results,\n step_2_kernel_results) = self.evaluate([\n step_0_kernel_results,\n step_1_kernel_results,\n step_2_kernel_results,\n ])\n\n # The intention of num_adaptation_steps is that we should adapt for 1 step\n # and then hold the hyperparameters constant.\n self.assertGreater(\n np.abs(step_0_kernel_results.max_trajectory_length -\n step_1_kernel_results.max_trajectory_length), 0.005)\n self.assertAllClose(step_1_kernel_results.max_trajectory_length,\n step_2_kernel_results.max_trajectory_length)\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_criterion),\n ('ChEESR', tfp.experimental.mcmc.chees_rate_criterion),\n ('SNAPER', snaper_criterion_dummy_direction),\n )\n def testCriterionStateEquivalence(self, criterion_fn):\n # Criteria should not care about the exact arrangement of state parts.\n previous_state = np.random.randn(4, 6).astype(self.dtype)\n new_state = np.random.randn(4, 6).astype(self.dtype)\n accept_prob = np.random.uniform(size=(4,)).astype(self.dtype)\n\n matrix_previous_state = previous_state.reshape([4, 3, 2])\n matrix_new_state = new_state.reshape([4, 3, 2])\n\n list_previous_state = [previous_state[:, :2], previous_state[:, 2:]]\n list_new_state = [new_state[:, :2], new_state[:, 2:]]\n\n criterion = criterion_fn(\n previous_state, new_state, accept_prob, 
1.)\n matrix_criterion = criterion_fn(\n matrix_previous_state, matrix_new_state, accept_prob, 1.)\n list_criterion = criterion_fn(\n list_previous_state, list_new_state, accept_prob, 1.)\n\n self.assertAllEqual([4], criterion.shape)\n self.assertAllClose(criterion, matrix_criterion)\n self.assertAllClose(criterion, list_criterion)\n\n\nclass GradientBasedTrajectoryLengthAdaptationTestFloat32(\n _GradientBasedTrajectoryLengthAdaptationTest):\n dtype = np.float32\n\n\nclass GradientBasedTrajectoryLengthAdaptationTestFloat64(\n _GradientBasedTrajectoryLengthAdaptationTest):\n dtype = np.float64\n\n\n@test_util.test_all_tf_execution_regimes\nclass DistributedGBTLATest(distribute_test_lib.DistributedTest):\n\n def test_gbtla_kernel_tracks_axis_names(self):\n inner_kernel = tfp.mcmc.HamiltonianMonteCarlo(tfd.Normal(0, 1).log_prob,\n step_size=1.9,\n num_leapfrog_steps=2)\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n inner_kernel, 1)\n self.assertIsNone(kernel.experimental_shard_axis_names)\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n inner_kernel, 1, experimental_shard_axis_names=['a'])\n self.assertListEqual(kernel.experimental_shard_axis_names, ['a'])\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n inner_kernel, 1).experimental_with_shard_axes(['a'])\n self.assertListEqual(kernel.experimental_shard_axis_names, ['a'])\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_criterion),\n ('ChEESR', tfp.experimental.mcmc.chees_rate_criterion),\n ('SNAPER', snaper_criterion_dummy_direction),\n )\n def test_gbtla_kernel_computes_same_criterion_info_with_sharded_state(\n self,\n criterion_fn,\n ):\n\n if not JAX_MODE:\n self.skipTest('Test in TF runs into `merge_call` error: see b/178944108')\n\n def target_log_prob(a, b):\n return (\n tfd.Normal(0., 1.).log_prob(a)\n + distribute_lib.psum(tfd.Normal(\n distribute_lib.pbroadcast(a, 'foo'), 1.).log_prob(b), 'foo'))\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(target_log_prob,\n step_size=1e-2,\n num_leapfrog_steps=2)\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel, 10, criterion_fn=criterion_fn)\n sharded_kernel = kernel.experimental_with_shard_axes([None, ['foo']])\n\n def run(seed):\n init_seed, sample_seed = samplers.split_seed(seed)\n state_seeds = samplers.split_seed(init_seed)\n state = [\n samplers.normal(seed=state_seeds[0], shape=[5]),\n samplers.normal(seed=state_seeds[1], shape=[5])\n ]\n kr = sharded_kernel.bootstrap_results(state)\n _, kr = sharded_kernel.one_step(state, kr, seed=sample_seed)\n return (\n kr.criterion,\n kr.averaged_sq_grad,\n kr.averaged_max_trajectory_length\n )\n\n criterion, avg_sq_grad, avg_max_tl = self.evaluate(\n self.per_replica_to_tensor(self.strategy_run(\n run, args=(samplers.zeros_seed(),), in_axes=None, axis_name='foo'),\n 0))\n\n for i in range(distribute_test_lib.NUM_DEVICES):\n self.assertAllClose(criterion[0], criterion[i])\n self.assertAllClose(avg_sq_grad[0], avg_sq_grad[i])\n self.assertAllClose(avg_max_tl[0], avg_max_tl[i])\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_criterion),\n ('ChEESR', tfp.experimental.mcmc.chees_rate_criterion),\n ('SNAPER', snaper_criterion_dummy_direction),\n )\n def test_gbtla_kernel_can_shard_chains_across_devices(self, criterion_fn):\n\n def target_log_prob(a, b):\n return (\n tfd.Normal(0., 1.).log_prob(a)\n + tfd.Sample(tfd.Normal(a, 1.), 4).log_prob(b))\n\n kernel = 
tfp.mcmc.HamiltonianMonteCarlo(target_log_prob,\n step_size=1e-2,\n num_leapfrog_steps=2)\n sharded_kernel = (\n tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n 10,\n experimental_reduce_chain_axis_names=self.axis_name,\n criterion_fn=criterion_fn))\n\n def run(seed):\n init_seed, sample_seed = samplers.split_seed(seed)\n state_seeds = samplers.split_seed(init_seed)\n state = [\n samplers.normal(seed=state_seeds[0], shape=[]),\n samplers.normal(seed=state_seeds[1], shape=[4])\n ]\n kr = sharded_kernel.bootstrap_results(state)\n _, kr = sharded_kernel.one_step(state, kr, seed=sample_seed)\n return (\n kr.averaged_sq_grad,\n kr.averaged_max_trajectory_length\n )\n\n seeds = self.shard_values(tf.stack(tfp.random.split_seed(\n samplers.zeros_seed(), distribute_test_lib.NUM_DEVICES)), 0)\n\n avg_sq_grad, avg_max_tl = self.evaluate(\n self.per_replica_to_tensor(self.strategy_run(\n run, args=(seeds,), axis_name=self.axis_name), 0))\n\n for i in range(distribute_test_lib.NUM_DEVICES):\n self.assertAllClose(avg_sq_grad[0], avg_sq_grad[i])\n self.assertAllClose(avg_max_tl[0], avg_max_tl[i])\n\n @parameterized.named_parameters(\n ('ChEES', tfp.experimental.mcmc.chees_rate_criterion),\n ('SNAPER', snaper_criterion_2d_direction),\n )\n def test_adaptation(self, criterion_fn):\n # Compare this to testAdaptation. There we don't use SPMD, but should\n # get the same hyperparameters.\n\n if not JAX_MODE:\n self.skipTest('TF does not have pmax implemented.')\n\n target = tfd.Independent(\n tfd.Normal(0., tf.constant([1., 10.])), 1)\n\n def run(seed):\n num_burnin_steps = 1000\n num_adaptation_steps = int(num_burnin_steps * 0.8)\n num_results = 500\n num_chains = 16 // distribute_test_lib.NUM_DEVICES\n step_size = 0.1\n\n kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=step_size,\n num_leapfrog_steps=1,\n )\n kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(\n kernel,\n num_adaptation_steps=num_adaptation_steps,\n criterion_fn=criterion_fn,\n experimental_reduce_chain_axis_names=self.axis_name,\n validate_args=True)\n kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n kernel, num_adaptation_steps=num_adaptation_steps,\n experimental_reduce_chain_axis_names=self.axis_name)\n\n def trace_fn(_, pkr):\n return (\n pkr.inner_results.inner_results.accepted_results\n .step_size,\n pkr.inner_results.max_trajectory_length,\n pkr.inner_results.inner_results.log_accept_ratio,\n )\n\n # The chain will be stepped for num_results + num_burnin_steps, adapting\n # for the first num_adaptation_steps.\n chain, [step_size, max_trajectory_length, log_accept_ratio] = (\n tfp.mcmc.sample_chain(\n num_results=num_results,\n num_burnin_steps=num_burnin_steps,\n current_state=tf.zeros([num_chains, 2]),\n kernel=kernel,\n trace_fn=trace_fn,\n seed=seed))\n\n p_accept = tf.math.exp(\n tfp.math.reduce_logmeanexp(tf.minimum(log_accept_ratio, 0.)))\n mean_step_size = tf.reduce_mean(step_size)\n mean_max_trajectory_length = tf.reduce_mean(max_trajectory_length)\n mean = tf.reduce_mean(chain, axis=[0, 1])\n var = tf.reduce_variance(chain, axis=[0, 1])\n\n return mean, var, p_accept, mean_step_size, mean_max_trajectory_length\n\n seeds = self.shard_values(tf.stack(tfp.random.split_seed(\n samplers.zeros_seed(), distribute_test_lib.NUM_DEVICES)), 0)\n\n (mean, var, p_accept, mean_step_size, mean_max_trajectory_length) = (\n self.evaluate(\n self.per_replica_to_tensor(\n self.strategy_run(run, args=(seeds,), axis_name=self.axis_name),\n 0,\n )))\n\n 
self.assertAllClose(0.75, p_accept.mean(), atol=0.1)\n # Both ChEES-rate and SNAPER learn roughly the same trajectory length.\n self.assertAllClose(1.5, mean_step_size[0], atol=0.2)\n self.assertAllClose(15., mean_max_trajectory_length[0], rtol=0.3)\n self.assertAllClose(\n target.mean(), mean.mean(0),\n atol=1.)\n self.assertAllClose(\n target.variance(),\n var.mean(0) + mean.var(0),\n rtol=0.1)\n\n\ndel _GradientBasedTrajectoryLengthAdaptationTest\n\nif __name__ == '__main__':\n test_util.main()\n",
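As a compact reminder of the kernel composition these tests exercise (and which `testDocstringExample` checks end to end), the sketch below wraps HMC in `GradientBasedTrajectoryLengthAdaptation` and then in dual-averaging step-size adaptation. The toy target, chain count, and seed are illustrative assumptions, not values taken from the tests.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Anisotropic Gaussian target, similar to the one used in testAdaptation.
target = tfd.Independent(tfd.Normal(0., tf.constant([1., 10.])), 1)
num_adaptation_steps = 500

kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target.log_prob, step_size=0.1, num_leapfrog_steps=1)
# Adapt the maximum trajectory length...
kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(
    kernel, num_adaptation_steps=num_adaptation_steps)
# ...and the step size on the outside.
kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
    kernel, num_adaptation_steps=num_adaptation_steps)

chain, _ = tfp.mcmc.sample_chain(
    num_results=200,
    num_burnin_steps=600,
    current_state=tf.zeros([16, 2]),  # 16 chains in 2 dimensions
    kernel=kernel,
    trace_fn=lambda _, pkr: (),
    seed=[0, 1])
print(chain.shape)  # (200, 16, 2)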
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Exponential distribution class.\"\"\"\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector\nfrom tensorflow_probability.python.distributions import gamma\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensor_util\n\n\n__all__ = [\n 'Exponential',\n]\n\n\nclass Exponential(gamma.Gamma):\n \"\"\"Exponential distribution.\n\n The Exponential distribution is parameterized by an event `rate` parameter.\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; lambda, x > 0) = exp(-lambda x) / Z\n Z = 1 / lambda\n ```\n\n where `rate = lambda` and `Z` is the normalizaing constant.\n\n The Exponential distribution is a special case of the Gamma distribution,\n i.e.,\n\n ```python\n Exponential(rate) = Gamma(concentration=1., rate)\n ```\n\n The Exponential distribution uses a `rate` parameter, or \"inverse scale\",\n which can be intuited as,\n\n ```none\n X ~ Exponential(rate=1)\n Y = X / rate\n ```\n\n \"\"\"\n\n def __init__(self,\n rate,\n force_probs_to_zero_outside_support=False,\n validate_args=False,\n allow_nan_stats=True,\n name='Exponential'):\n \"\"\"Construct Exponential distribution with parameter `rate`.\n\n Args:\n rate: Floating point tensor, equivalent to `1 / mean`. Must contain only\n positive values.\n force_probs_to_zero_outside_support: Python `bool`. When `True`, negative\n and non-integer values are evaluated \"strictly\": `cdf` returns\n `0`, `sf` returns `1`, and `log_cdf` and `log_sf` correspond. When\n `False`, the implementation is free to save computation (and TF graph\n size) by evaluating something that matches the Exponential cdf at\n non-negative values `x` but produces an unrestricted result on\n other inputs. In the case of Exponential distribution, the `cdf`\n formula in this case happens to be the continuous function\n `1 - exp(rate * value)`.\n Note that this function is not itself a cdf function.\n Default value: `False`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n # Even though all statistics of are defined for valid inputs, this is not\n # true in the parent class \"Gamma.\" Therefore, passing\n # allow_nan_stats=True\n # through to the parent class results in unnecessary asserts.\n with tf.name_scope(name) as name:\n self._rate = tensor_util.convert_nonref_to_tensor(\n rate,\n name='rate',\n dtype=dtype_util.common_dtype([rate], dtype_hint=tf.float32))\n super(Exponential, self).__init__(\n concentration=1.,\n rate=self._rate,\n allow_nan_stats=allow_nan_stats,\n validate_args=validate_args,\n force_probs_to_zero_outside_support=(\n force_probs_to_zero_outside_support),\n name=name)\n self._parameters = parameters\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n rate=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))\n # pylint: enable=g-long-lambda\n\n @property\n def rate(self):\n return self._rate\n\n def _cdf(self, value):\n cdf = -tf.math.expm1(-self.rate * value)\n # Set cdf = 0 when value is less than 0.\n return distribution_util.extend_cdf_outside_support(value, cdf, low=0.)\n\n def _log_survival_function(self, value):\n rate = tf.convert_to_tensor(self._rate)\n log_sf = self._log_prob(value, rate=rate) - tf.math.log(rate)\n\n if self.force_probs_to_zero_outside_support:\n # Set log_survival_function = 0 when value is less than 0.\n log_sf = tf.where(value < 0., tf.zeros_like(log_sf), log_sf)\n\n return log_sf\n\n def _sample_n(self, n, seed=None):\n rate = tf.convert_to_tensor(self.rate)\n shape = ps.concat([[n], ps.shape(rate)], 0)\n # Uniform variates must be sampled from the open-interval `(0, 1)` rather\n # than `[0, 1)`. To do so, we use\n # `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny`\n # because it is the smallest, positive, \"normal\" number. A \"normal\" number\n # is such that the mantissa has an implicit leading 1. Normal, positive\n # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In\n # this case, a subnormal number (i.e., np.nextafter) can cause us to sample\n # 0.\n sampled = samplers.uniform(\n shape,\n minval=np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny,\n maxval=1.,\n seed=seed,\n dtype=self.dtype)\n return -tf.math.log(sampled) / rate\n\n def _quantile(self, value):\n return -tf.math.log1p(-value) / self.rate\n\n def _default_event_space_bijector(self):\n return softplus_bijector.Softplus(validate_args=self.validate_args)\n\n @classmethod\n def _maximum_likelihood_parameters(cls, value):\n return {'rate': 1. / tf.reduce_mean(value, axis=0)}\n",
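The `_sample_n` implementation above draws exponential variates by inverting the CDF: with `U ~ Uniform(0, 1)` kept strictly inside the open interval, `-log(U) / rate` is `Exponential(rate)`-distributed. A NumPy-only sketch of that identity (the rate and sample size are arbitrary choices of mine):

import numpy as np

rate = 2.0
rng = np.random.default_rng(0)

# Sample U from the open interval (0, 1), mirroring the `tiny` lower bound
# used in `_sample_n`, then apply the inverse CDF.
u = rng.uniform(low=np.finfo(np.float64).tiny, high=1.0, size=100_000)
samples = -np.log(u) / rate

# The empirical mean should be close to 1 / rate = 0.5.
print(samples.mean())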
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The PlackettLuce distribution class.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import gumbel\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\nclass PlackettLuce(distribution.AutoCompositeTensorDistribution):\n \"\"\"Plackett-Luce distribution over permutations.\n\n The Plackett-Luce distribution is defined over permutations of\n fixed length. It is parameterized by a positive score vector of same length.\n\n This class provides methods to create indexed batches of PlackettLuce\n distributions. If the provided `scores` is rank 2 or higher, for\n every fixed set of leading dimensions, the last dimension represents one\n single PlackettLuce distribution. When calling distribution\n functions (e.g. `dist.log_prob(x)`), `scores` and `x` are broadcast to the\n same shape (if possible). In all cases, the last dimension of `scores, x`\n represents single PlackettLuce distributions.\n\n #### Mathematical Details\n\n The Plackett-Luce is a distribution over permutation vectors `p` of length `k`\n where the permutation `p` is an arbitrary ordering of `k` indices\n `{0, 1, ..., k-1}`.\n\n The probability mass function (pmf) is,\n\n ```none\n pmf(p; s) = prod_i s_{p_i} / (Z - Z_i)\n Z = sum_{j=0}^{k-1} s_j\n Z_i = sum_{j=0}^{i-1} s_{p_j} for i>0 and 0 for i=0\n ```\n\n where:\n\n * `scores = s = [s_0, ..., s_{k-1}]`, `s_i >= 0`.\n\n Samples from Plackett-Luce distribution are generated sequentially as follows.\n\n Initialize normalization `N_0 = Z`\n For `i` in `{0, 1, ..., k-1}`\n\n 1. Sample i-th element of permutation\n `p_i ~ Categorical(probs=[s_0/N_i, ..., s_{k-1}/N_i])`\n 2. Update normalization\n `N_{i+1} = N_i-s_{p_i}`\n 3. 
Mask out sampled index for subsequent rounds\n `s_{p_i} = 0`\n\n Return p\n\n Alternately, an equivalent way to sample from this distribution is to sort\n Gumbel perturbed log-scores [1].\n\n ```none\n p = argsort(log s + g) ~ PlackettLuce(s)\n g = [g_0, ..., g_{k-1}], g_i~ Gumbel(0, 1)\n ```\n\n #### Examples\n\n ```python\n scores = [0.1, 2., 5.]\n dist = PlackettLuce(scores)\n ```\n\n Creates a distribution over permutations of length 3, with the 3rd index\n likely to appear first in the permutation.\n The distribution function can be evaluated on permutations as follows.\n\n ```python\n # permutations same shape as scores.\n permutations = [2, 1, 0]\n dist.prob(permutations) # Shape []\n\n # scores broadcast to [[0.1, 2.3, 5.], [0.1, 2.3, 5.]] to match permutations.\n permutations = [[2, 1, 0], [1, 0, 2]]\n dist.prob(permutations) # Shape [2]\n\n # scores broadcast to shape [5, 7, 3] to match permutations.\n permutations = [[...]] # Shape [5, 7, 3]\n dist.prob(permutaions) # Shape [5, 7]\n ```\n\n Creates a 2-batch of 3-class distributions.\n\n ```python\n scores = [[0.1, 2.3, 5.], [4.2, 0.5, 3.1]] # Shape [2, 3]\n dist = PlackettLuce(scores)\n\n # permutations broadcast to [[2, 1, 0], [2, 1, 0]] to match shape of scores.\n permutations = [2, 1, 0]\n dist.prob(permutations) # Shape [2]\n ```\n\n #### References\n\n [1]: Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon. Stochastic\n Optimization of Sorting Networks via Continuous Relaxations. ICLR 2019.\n \"\"\"\n\n def __init__(self,\n scores,\n dtype=tf.int32,\n validate_args=False,\n allow_nan_stats=True,\n name='PlackettLuce'):\n \"\"\"Initialize a batch of PlackettLuce distributions.\n\n Args:\n scores: An N-D `Tensor`, `N >= 1`, representing the scores of a set of\n elements to be permuted. The first `N - 1` dimensions index into a\n batch of independent distributions and the last dimension represents a\n vector of scores for the elements.\n dtype: The type of the event samples (default: int32).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n self._scores = tensor_util.convert_nonref_to_tensor(\n scores, dtype_hint=tf.float32, name='scores')\n\n super(PlackettLuce, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n scores=parameter_properties.ParameterProperties(\n event_ndims=1,\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))\n # pylint: enable=g-long-lambda\n\n def _event_size(self, scores=None):\n if scores is None:\n scores = self._scores\n if scores.shape is not None:\n event_size = tf.compat.dimension_value(scores.shape[-1])\n if event_size is not None:\n return event_size\n return tf.shape(scores)[-1]\n\n @property\n def scores(self):\n \"\"\"Input argument `scores`.\n\n Each element is a non-negative value for which the sorted permutation is\n an ordering supported by this distribution.\n\n Returns:\n scores: A batch of scores used for initializing the distribution.\n \"\"\"\n return self._scores\n\n def _event_shape_tensor(self, scores=None):\n scores = self._scores if scores is None else scores\n return ps.shape(scores)[-1:]\n\n def _event_shape(self, scores=None):\n scores = self._scores if scores is None else scores\n return tensorshape_util.with_rank_at_least(scores.shape, 1)[-1:]\n\n def _mode(self):\n return tf.cast(\n tf.argsort(self.scores, axis=-1, direction='DESCENDING'),\n self.dtype)\n\n def _log_prob(self, x):\n scores = tf.convert_to_tensor(self.scores)\n event_size = self._event_size(scores)\n\n x = tf.cast(x, self.dtype)\n # Broadcast scores or x if need be.\n if (not tensorshape_util.is_fully_defined(x.shape) or\n not tensorshape_util.is_fully_defined(scores.shape) or\n x.shape != scores.shape):\n broadcast_shape = ps.broadcast_shape(\n ps.shape(scores), ps.shape(x))\n scores = tf.broadcast_to(scores, broadcast_shape)\n x = tf.broadcast_to(x, broadcast_shape)\n scores_shape = ps.shape(scores)[:-1]\n scores_2d = tf.reshape(scores, [-1, event_size])\n x_2d = tf.reshape(x, [-1, event_size])\n\n rearranged_scores = tf.gather(scores_2d, x_2d, batch_dims=1)\n normalization_terms = tf.cumsum(rearranged_scores, axis=-1, reverse=True)\n ret = tf.math.reduce_sum(\n tf.math.log(rearranged_scores / normalization_terms), axis=-1)\n # Reshape back to user-supplied batch and sample dims prior to 2D reshape.\n ret = tf.reshape(ret, scores_shape)\n return ret\n\n def _sample_n(self, n, seed=None):\n scores = tf.convert_to_tensor(self.scores)\n sample_shape = ps.concat([[n], ps.shape(scores)], axis=0)\n gumbel_noise = gumbel.Gumbel(loc=0, scale=1).sample(sample_shape,\n seed=seed)\n noisy_log_scores = gumbel_noise + tf.math.log(scores)\n return tf.cast(\n tf.argsort(noisy_log_scores, axis=-1, direction='DESCENDING'),\n self.dtype)\n\n def scores_parameter(self, name=None):\n \"\"\"Scores vec computed from non-`None` input arg (`scores`).\"\"\"\n with self._name_and_control_scope(name or 'scores_parameter'):\n return tf.identity(self._scores)\n\n def _default_event_space_bijector(self):\n return\n\n def _sample_control_dependencies(self, x):\n assertions 
= []\n if not self.validate_args:\n return assertions\n assertions.append(assert_util.assert_equal(\n tf.range(self._event_size(), dtype=x.dtype),\n tf.sort(x, axis=-1),\n message='Sample must be a permutation of `{0, ..., k-1}`, where `k` is '\n 'the size of the last dimension of `scores`.'))\n return assertions\n\n def _parameter_control_dependencies(self, is_init):\n assertions = []\n\n scores = self._scores\n param, name = (scores, 'scores')\n\n # In init, we can always build shape and dtype checks because\n # we assume shape doesn't change for Variable backed args.\n if is_init:\n if not dtype_util.is_floating(param.dtype):\n raise TypeError('Argument `{}` must having floating type.'.format(name))\n\n msg = 'Argument `{}` must have rank at least 1.'.format(name)\n shape_static = tensorshape_util.dims(param.shape)\n if shape_static is not None:\n if len(shape_static) < 1:\n raise ValueError(msg)\n elif self.validate_args:\n param = tf.convert_to_tensor(param)\n assertions.append(\n assert_util.assert_rank_at_least(param, 1, message=msg))\n with tf.control_dependencies(assertions):\n param = tf.identity(param)\n\n msg1 = 'Argument `{}` must have final dimension >= 1.'.format(name)\n msg2 = 'Argument `{}` must have final dimension <= {}.'.format(\n name, dtype_util.max(tf.int32))\n event_size = shape_static[-1] if shape_static is not None else None\n if event_size is not None:\n if event_size < 1:\n raise ValueError(msg1)\n if event_size > dtype_util.max(tf.int32):\n raise ValueError(msg2)\n elif self.validate_args:\n param = tf.convert_to_tensor(param)\n assertions.append(assert_util.assert_greater_equal(\n tf.shape(param)[-1], 1, message=msg1))\n # NOTE: For now, we leave out a runtime assertion that\n # `tf.shape(param)[-1] <= tf.int32.max`. An earlier `tf.shape` call\n # will fail before we get to this point.\n\n if not self.validate_args:\n assert not assertions # Should never happen.\n return []\n\n if is_init != tensor_util.is_ref(scores):\n scores = tf.convert_to_tensor(scores)\n assertions.extend([\n assert_util.assert_positive(scores),\n ])\n\n return assertions\n",
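The docstring above notes that a Plackett-Luce sample can equivalently be produced by sorting Gumbel-perturbed log-scores, which is exactly what `_sample_n` does. A NumPy-only sketch of that trick for a single score vector (the scores and seed are arbitrary):

import numpy as np

scores = np.array([0.1, 2.0, 5.0])
rng = np.random.default_rng(0)

# Perturb the log-scores with Gumbel(0, 1) noise and sort in descending order.
gumbel_noise = rng.gumbel(loc=0.0, scale=1.0, size=scores.shape)
permutation = np.argsort(-(np.log(scores) + gumbel_noise))

# Indices with larger scores tend to appear earlier in the permutation.
print(permutation)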
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The ProbitBernoulli distribution class.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import special_math\nfrom tensorflow_probability.python.internal import tensor_util\n\n\nclass ProbitBernoulli(distribution.AutoCompositeTensorDistribution):\n \"\"\"ProbitBernoulli distribution.\n\n The ProbitBernoulli distribution with `probs` parameter, i.e., the probability\n of a `1` outcome (vs a `0` outcome). Unlike a regular Bernoulli distribution,\n which uses the logistic (aka 'sigmoid') function to go from the un-constrained\n parameters to probabilities, this distribution uses the CDF of the [standard\n normal distribution](https://en.wikipedia.org/wiki/Normal_distribution):\n\n ```none\n p(x=1; probits) = 0.5 * (1 + erf(probits / sqrt(2)))\n p(x=0; probits) = 1 - p(x=1; probits)\n ```\n\n Where `erf` is the [error\n function](https://en.wikipedia.org/wiki/Error_function). A typical application\n of this distribution is in [probit\n regression](https://en.wikipedia.org/wiki/Probit_model).\n \"\"\"\n\n def __init__(self,\n probits=None,\n probs=None,\n dtype=tf.int32,\n validate_args=False,\n allow_nan_stats=True,\n name='ProbitBernoulli'):\n \"\"\"Construct ProbitBernoulli distributions.\n\n Args:\n probits: An N-D `Tensor` representing the probit-odds of a `1` event. Each\n entry in the `Tensor` parameterizes an independent ProbitBernoulli\n distribution where the probability of an event is normal_cdf(probits).\n Only one of `probits` or `probs` should be passed in.\n probs: An N-D `Tensor` representing the probability of a `1`\n event. Each entry in the `Tensor` parameterizes an independent\n ProbitBernoulli distribution. Only one of `probits` or `probs` should be\n passed in.\n dtype: The type of the event samples. Default: `int32`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. 
When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n ValueError: If probs and probits are passed, or if neither are passed.\n \"\"\"\n parameters = dict(locals())\n if (probs is None) == (probits is None):\n raise ValueError('Must pass probs or probits, but not both.')\n with tf.name_scope(name) as name:\n self._probs = tensor_util.convert_nonref_to_tensor(\n probs, dtype_hint=tf.float32, name='probs')\n self._probits = tensor_util.convert_nonref_to_tensor(\n probits, dtype_hint=tf.float32, name='probits')\n super(ProbitBernoulli, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n return dict(\n probits=parameter_properties.ParameterProperties(),\n probs=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=sigmoid_bijector.Sigmoid,\n is_preferred=False))\n\n @property\n def probits(self):\n \"\"\"Input argument `probits`.\"\"\"\n return self._probits\n\n @property\n def probs(self):\n \"\"\"Input argument `probs`.\"\"\"\n return self._probs\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n probs = self._probs_parameter_no_checks()\n new_shape = ps.concat([[n], ps.shape(probs)], 0)\n uniform = samplers.uniform(new_shape, seed=seed, dtype=probs.dtype)\n sample = tf.less(uniform, probs)\n return tf.cast(sample, self.dtype)\n\n def _log_prob(self, event):\n log_probs0, log_probs1 = self._outcome_log_probs()\n event = tf.cast(event, log_probs0.dtype)\n return (tf.math.multiply_no_nan(log_probs0, 1 - event) +\n tf.math.multiply_no_nan(log_probs1, event))\n\n def _outcome_log_probs(self):\n if self._probits is None:\n p = tf.convert_to_tensor(self._probs)\n return tf.math.log1p(-p), tf.math.log(p)\n s = tf.convert_to_tensor(self._probits)\n return special_math.log_ndtr(-s), special_math.log_ndtr(s)\n\n def _entropy(self):\n log_probs0, log_probs1 = self._outcome_log_probs()\n probs1 = tf.exp(log_probs1)\n return -(1. - probs1) * log_probs0 - probs1 * log_probs1\n\n def _mean(self):\n return self._probs_parameter_no_checks()\n\n def _variance(self):\n mean = self._mean()\n return mean * (1. 
- mean)\n\n def _mode(self):\n \"\"\"Returns `1` if `prob > 0.5` and `0` otherwise.\"\"\"\n return tf.cast(self._probs_parameter_no_checks() > 0.5, self.dtype)\n\n def probits_parameter(self, name=None):\n \"\"\"Probits computed from non-`None` input arg (`probs` or `probits`).\"\"\"\n with self._name_and_control_scope(name or 'probits_parameter'):\n return self._probits_parameter_no_checks()\n\n def _probits_parameter_no_checks(self):\n if self._probits is None:\n probs = tf.convert_to_tensor(self._probs)\n return tf.math.ndtri(probs)\n return tensor_util.identity_as_tensor(self._probits)\n\n def probs_parameter(self, name=None):\n \"\"\"Probs computed from non-`None` input arg (`probs` or `probits`).\"\"\"\n with self._name_and_control_scope(name or 'probs_parameter'):\n return self._probs_parameter_no_checks()\n\n def _probs_parameter_no_checks(self):\n if self._probits is None:\n return tensor_util.identity_as_tensor(self._probs)\n return special_math.ndtr(self._probits)\n\n def _default_event_space_bijector(self):\n return\n\n def _parameter_control_dependencies(self, is_init):\n return maybe_assert_bernoulli_param_correctness(\n is_init, self.validate_args, self._probs, self._probits)\n\n def _sample_control_dependencies(self, x):\n assertions = []\n if not self.validate_args:\n return assertions\n assertions.extend(distribution_util.assert_nonnegative_integer_form(x))\n assertions.append(\n assert_util.assert_less_equal(x, tf.ones([], dtype=x.dtype),\n message='Elements cannot exceed 1.'))\n return assertions\n\n\ndef maybe_assert_bernoulli_param_correctness(\n is_init, validate_args, probs, probits):\n \"\"\"Return assertions for `ProbitBernoulli`-type distributions.\"\"\"\n if is_init:\n x, name = (probs, 'probs') if probits is None else (probits, 'probits')\n if not dtype_util.is_floating(x.dtype):\n raise TypeError(\n 'Argument `{}` must having floating type.'.format(name))\n\n if not validate_args:\n return []\n\n assertions = []\n\n if probs is not None:\n if is_init != tensor_util.is_ref(probs):\n probs = tf.convert_to_tensor(probs)\n one = tf.constant(1., probs.dtype)\n assertions += [\n assert_util.assert_non_negative(\n probs, message='probs has components less than 0.'),\n assert_util.assert_less_equal(\n probs, one, message='probs has components greater than 1.')\n ]\n\n return assertions\n\n\n@kullback_leibler.RegisterKL(ProbitBernoulli, ProbitBernoulli)\ndef _kl_bernoulli_bernoulli(a, b, name=None):\n \"\"\"Calculate the batched KL divergence KL(a || b) with a and b ProbitBernoulli.\n\n Args:\n a: instance of a ProbitBernoulli distribution object.\n b: instance of a ProbitBernoulli distribution object.\n name: Python `str` name to use for created operations.\n Default value: `None` (i.e., `'kl_bernoulli_bernoulli'`).\n\n Returns:\n Batchwise KL(a || b)\n \"\"\"\n with tf.name_scope(name or 'kl_probit_bernoulli_probit_bernoulli'):\n a_log_probs0, a_log_probs1 = a._outcome_log_probs() # pylint: disable=protected-access\n b_log_probs0, b_log_probs1 = b._outcome_log_probs() # pylint: disable=protected-access\n a_prob1 = tf.exp(a_log_probs1)\n\n return (1. - a_prob1) * (a_log_probs0 - b_log_probs0) + a_prob1 * (\n a_log_probs1 - b_log_probs1)\n",
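The ProbitBernoulli file above maps unconstrained `probits` to probabilities through the standard-normal CDF (`special_math.ndtr`). A short illustrative sketch, assuming `tensorflow_probability` is importable and exposes `tfd.ProbitBernoulli` as shown in the source:

```python
import tensorflow_probability as tfp

tfd = tfp.distributions

d = tfd.ProbitBernoulli(probits=[-1.0, 0.0, 2.0])
p = d.probs_parameter()   # normal CDF of the probits: approx. [0.159, 0.5, 0.977]
x = d.sample(10)          # shape [10, 3], entries in {0, 1}
lp = d.log_prob(x)        # shape [10, 3], elementwise log-probabilities
```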
"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MarkovChain distribution.\"\"\"\n\nimport functools\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import math as tfp_math\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import log_prob_ratio\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n__all__ = [\n 'MarkovChain',\n]\n\n\nclass MarkovChain(distribution.Distribution):\n \"\"\"Distribution of a sequence generated by a memoryless process.\n\n A discrete-time [Markov chain](https://en.wikipedia.org/wiki/Markov_chain)\n is a sequence of random variables in which the variable(s) at each step is\n independent of all previous variables, *conditioned on* the variable(s) at the\n immediate predecessor step. That is, there can be no (direct) long-term\n dependencies. This 'Markov property' is a simplifying assumption; for example,\n it enables efficient sampling. Many time-series models can be formulated as\n Markov chains.\n\n Instances of `tfd.MarkovChain` represent fully-observed, discrete-time Markov\n chains, with one or more random variables at each step. These variables may\n take continuous or discrete values. Sampling is done sequentially, requiring\n time that scales with the length of the sequence; `log_prob` evaluation is\n vectorized over timesteps, and so requires only constant time given sufficient\n parallelism.\n\n #### Related distributions\n\n The discrete-valued Markov chains modeled by `tfd.HiddenMarkovModel` (using\n a trivial observation distribution) are a special case of those supported by\n this distribution, which enable exact inference over the values in an\n unobserved chain. Continuous-valued chains with linear Gaussian transitions\n are supported by `tfd.LinearGaussianStateSpaceModel`, which can similarly\n exploit the linear Gaussian structure for exact inference of hidden states.\n These distributions are limited to chains that have the respective (discrete\n or linear Gaussian) structure.\n\n Autoregressive models that do *not* necessarily respect the Markov property\n are supported by `tfd.Autoregressive`, which is, in that sense, more general\n than this distribution. 
These models require a more involved specification,\n and sampling in general requires quadratic (rather than linear) time in the\n length of the sequence.\n\n Exact inference for unobserved Markov chains is not possible in\n general; however, particle filtering exploits the Markov property\n to perform approximate inference, and is often a well-suited method for\n sequential inference tasks. Particle filtering is available in TFP using\n `tfp.experimental.mcmc.particle_filter`, and related methods.\n\n #### Example: Gaussian random walk\n\n One of the simplest continuous-valued Markov chains is a\n [Gaussian random walk](\n https://en.wikipedia.org/wiki/Random_walk#Gaussian_random_walk).\n This may also be viewed as a discretized [Brownian motion](\n https://en.wikipedia.org/wiki/Brownian_motion).\n\n ```python\n tfd = tfp.distributions\n\n gaussian_walk = tfd.MarkovChain(\n initial_state_prior=tfd.Normal(loc=0., scale=1.),\n transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),\n num_steps=100)\n # ==> `gaussian_walk.event_shape == [100]`\n # ==> `gaussian_walk.batch_shape == []`\n\n x = gaussian_walk.sample(5) # Samples a matrix of 5 independent walks.\n lp = gaussian_walk.log_prob(x) # ==> `lp.shape == [5]`.\n ```\n\n #### Example: batch of random walks\n\n To spice things up, we'll now define a *batch* of random walks, each following\n a different distribution (in this case, different starting locations).\n We'll also demonstrate scales that differ across timesteps.\n\n ```python\n scales = tf.convert_to_tensor([0.5, 0.3, 0.2, 0.2, 0.3, 0.2, 0.7])\n batch_gaussian_walk = tfd.MarkovChain(\n # The prior distribution determines the batch shape for the chain.\n # Transitions must respect this batch shape.\n initial_state_prior=tfd.Normal(loc=[-10., 0., 10.],\n scale=[1., 1., 1.]),\n transition_fn=lambda t, x: tfd.Normal(\n loc=x,\n # The `num_steps` dimension will always be leftmost in `x`, so we\n # pad the scale to the same rank as `x` to make their shapes line up.\n tf.reshape(tf.gather(scales, t),\n tf.concat([[-1],\n tf.ones(tf.rank(x) - 1, dtype=tf.int32)], axis=0))),\n # Limit to eight steps since we only specified scales for seven transitions.\n num_steps=8)\n # ==> `batch_gaussian_walk.event_shape == [8]`\n # ==> `batch_gaussian_walk.batch_shape == [3]`\n\n x = batch_gaussian_walk.sample(5) # ==> `x.shape == [5, 3, 8]`.\n lp = batch_gaussian_walk.log_prob(x) # ==> `lp.shape == [5, 3]`.\n ```\n\n #### Example: multivariate chain with longer-term dependence\n\n We can also define multivariate Markov chains. In addition to the obvious\n use of modeling the joint evolution of multiple variables, multivariate\n chains can also help us work around the Markov limitation by\n the trick of folding state history into the current state as an auxiliary\n variable(s). 
The next example, a second-order [autoregressive process](\n https://en.wikipedia.org/wiki/Autoregressive_model) with dynamic coefficients\n and scale, contains multiple time-dependent variables and also uses an\n auxiliary `previous_level` variable to enable the transition function\n to access the previous *two* steps of history:\n\n ```python\n\n def transition_fn(_, previous_state):\n return tfd.JointDistributionNamedAutoBatched(\n # The transition distribution must match the batch shape of the chain.\n # Since `log_scale` is a scalar quantity, its shape is the batch shape.\n batch_ndims=tf.rank(previous_state['log_scale']),\n model={\n # The autoregressive coefficients and the `log_scale` each follow\n # an independent slow-moving random walk.\n 'coefs': tfd.Normal(loc=previous_state['coefs'], scale=0.01),\n 'log_scale': tfd.Normal(loc=previous_state['log_scale'],\n scale=0.01),\n # The level is a linear combination of the previous *two* levels,\n # with additional noise of scale `exp(log_scale)`.\n 'level': lambda coefs, log_scale: tfd.Normal( # pylint: disable=g-long-lambda\n loc=(coefs[..., 0] * previous_state['level'] +\n coefs[..., 1] * previous_state['previous_level']),\n scale=tf.exp(log_scale)),\n # Store the previous level to access at the next step.\n 'previous_level': tfd.Deterministic(previous_state['level'])})\n ```\n\n Note: when using an autobatched joint distribution as a transition model,\n as we did here, it is necessary to explicitly set its `batch_ndims` to the\n batch rank of the passed-in state. This will be at least the batch rank of the\n initial state prior, but may be greater, e.g., when evaluating multiple iid\n samples. In general, the correct batch rank is that of the previous state\n `Tensor`s.\n\n ```python\n process = tfd.MarkovChain(\n # For simplicity, define the prior as a 'transition' from fixed values.\n initial_state_prior=transition_fn(\n 0, previous_state={\n 'coefs': [0.7, -0.2],\n 'log_scale': -1.,\n 'level': 0.,\n 'previous_level': 0.}),\n transition_fn=transition_fn,\n num_steps=100)\n # ==> `process.event_shape == {'coefs': [100, 2], 'log_scale': [100],\n # 'level': [100], 'previous_level': [100]}`\n # ==> `process.batch_shape == []`\n\n x = process.sample(5)\n # ==> `x['coefs'].shape == [5, 100, 2]`\n # ==> `x['log_scale'].shape == [5, 100]`\n # ==> `x['level'].shape == [5, 100]`\n # ==> `x['previous_level'].shape == [5, 100]`\n lp = process.log_prob(x) # ==> `lp.shape == [5]`.\n ```\n\n \"\"\"\n\n def __init__(self,\n initial_state_prior,\n transition_fn,\n num_steps,\n experimental_use_kahan_sum=False,\n validate_args=False,\n name='MarkovChain'):\n \"\"\"Initializes the Markov chain.\n\n Note that the `initial_state_prior` and `transition_fn` used to specify a\n Markov chain are the same parameters required for particle filtering\n inference with `tfp.experimental.mcmc.particle_filter`.\n\n Args:\n initial_state_prior: `tfd.Distribution` instance describing a prior\n distribution on the state at step 0. This may be a joint distribution.\n transition_fn: Python `callable` with signature\n `current_state_dist = transition_fn(previous_step, previous_state)`.\n The arguments are an integer `previous_step`, and `previous_state`,\n a (structure of) Tensor(s) like a sample from the\n `initial_state_prior`. 
The returned `current_state_dist` must have the\n same `dtype`, `batch_shape`, and `event_shape` as `initial_state_prior`.\n num_steps: Integer `Tensor` scalar number of steps in the chain.\n experimental_use_kahan_sum: If `True`, use\n [Kahan summation](\n https://en.wikipedia.org/wiki/Kahan_summation_algorithm) to mitigate\n accumulation of floating-point error in log_prob calculation.\n validate_args: Python `bool`, default `False`. Whether to validate input\n with asserts. If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n name: The name to give ops created by this distribution.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n self._initial_state_prior = initial_state_prior\n self._transition_fn = transition_fn\n self._num_steps = tensor_util.convert_nonref_to_tensor(\n num_steps, dtype_hint=tf.int32, name='num_steps',\n as_shape_tensor=True)\n self._experimental_use_kahan_sum = experimental_use_kahan_sum\n super(MarkovChain, self).__init__(\n parameters=parameters,\n validate_args=validate_args,\n reparameterization_type=initial_state_prior.reparameterization_type,\n dtype=initial_state_prior.dtype,\n allow_nan_stats=initial_state_prior.allow_nan_stats,\n name=name)\n\n @property\n def initial_state_prior(self):\n return self._initial_state_prior\n\n @property\n def num_steps(self):\n return self._num_steps\n\n @property\n def transition_fn(self):\n return self._transition_fn\n\n @property\n def _sum_fn(self):\n if self._experimental_use_kahan_sum:\n return lambda x, axis: tfp_math.reduce_kahan_sum(x, axis=axis).value\n return tf.reduce_sum\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n return dict(\n initial_state_prior=parameter_properties.BatchedComponentProperties(),\n num_steps=parameter_properties.ShapeParameterProperties())\n\n def _event_shape(self):\n def _prefix_with_num_steps(event_shape):\n if tensorshape_util.rank(event_shape) is None:\n return tf.TensorShape(None)\n return tensorshape_util.concatenate([tf.get_static_value(self.num_steps)],\n event_shape)\n return tf.nest.map_structure(_prefix_with_num_steps,\n self.initial_state_prior.event_shape)\n\n def _event_shape_tensor(self):\n return tf.nest.map_structure(\n lambda event_shape: ps.concat([[self.num_steps], event_shape], axis=0),\n self.initial_state_prior.event_shape_tensor())\n\n def _batch_shape(self):\n # This matches the automatically-inferred batch shape, but we implement it\n # anyway in order to support the structured batch shapes of\n # non-autobatched JDs.\n return self.initial_state_prior.batch_shape\n\n def _batch_shape_tensor(self):\n # This matches the automatically-inferred batch shape, but we implement it\n # anyway in order to support the structured batch shapes of\n # non-autobatched JDs.\n return self.initial_state_prior.batch_shape_tensor()\n\n def _step_axes(self):\n \"\"\"Index of the `num_steps` axis in each event part, as negative int(s).\"\"\"\n return tf.nest.map_structure(\n lambda nd: -(1 + nd),\n tf.nest.map_structure(ps.rank_from_shape,\n self.initial_state_prior.event_shape_tensor()))\n\n def _sample_and_log_prob_helper(self,\n sample_shape,\n seed=None,\n compute_log_prob=False):\n \"\"\"Draws samples from the chain and optionally accumulates the log_prob.\"\"\"\n prior_seed, loop_seed = samplers.split_seed(\n n=2, seed=seed, salt='markov_chain_sample')\n\n if compute_log_prob:\n sample_attr = 'experimental_sample_and_log_prob'\n extract_sample_fn = lambda x_and_lp: 
x_and_lp[0]\n extract_lp_fn = lambda x_and_lp: self._sum_fn(x_and_lp[1], axis=0)\n else:\n sample_attr = 'sample'\n extract_sample_fn = lambda x: x\n extract_lp_fn = lambda x: 0.\n\n prior_result = getattr(self.initial_state_prior, sample_attr)(\n sample_shape, seed=prior_seed)\n\n loop_body = _make_sample_loop_body(\n self.transition_fn,\n sample_attr=sample_attr,\n extract_sample_fn=extract_sample_fn)\n _, results = tf.scan(loop_body,\n elems=tf.range(1, self.num_steps),\n initializer=(loop_seed, prior_result))\n\n # Concatenate prior sample (and lp) with remaining samples (and lps).\n results = tf.nest.map_structure(concat_initial, prior_result, results)\n samples, lp = extract_sample_fn(results), extract_lp_fn(results)\n\n # Move leftmost `num_steps` dimension into the event shape.\n samples = move_dimensions(samples, 0, self._step_axes())\n return samples, lp\n\n def _sample_n(self, sample_shape, seed=None):\n samples, _ = self._sample_and_log_prob_helper(\n sample_shape, seed=seed, compute_log_prob=False)\n return samples\n\n def _sample_and_log_prob(self, sample_shape, seed=None):\n return self._sample_and_log_prob_helper(\n sample_shape, seed=seed, compute_log_prob=True)\n\n def _log_prob_parts(self, x):\n \"\"\"Returns the prior log-prob and elementwise transition log-probs.\"\"\"\n # Move step dimension to the leftmost location, so that it appears to the\n # transition model as the leftmost sample dimension rather than as the\n # rightmost batch dimension (which could otherwise conflict with existing\n # batch dimensions).\n x = move_dimensions(x, self._step_axes(), 0)\n prior_lp = self.initial_state_prior.log_prob(\n tf.nest.map_structure(lambda state_part: state_part[0], x))\n num_steps = ps.shape(tf.nest.flatten(x)[0])[0]\n\n return prior_lp, self.transition_fn(\n tf.range(num_steps - 1),\n tf.nest.map_structure(\n lambda state_part: state_part[:num_steps - 1], x)\n ).log_prob(tf.nest.map_structure(\n lambda state_part: state_part[1 : num_steps], x))\n\n def _log_prob(self, x):\n prior_lp, transition_lps = self._log_prob_parts(x)\n transition_lp = self._sum_fn(transition_lps, axis=0)\n\n with tf.control_dependencies(\n _assert_same_shape(\n prior_lp, transition_lp, validate_args=self.validate_args,\n message='The shape of the `log_prob` returned by the transition '\n 'distribution does not match the `log_prob` from the '\n 'initial state prior. This indicates that the transition '\n 'distribution\\'s batch shape is incorrect. Please ensure that '\n '`initial_state_prior.batch_shape == transition_fn(0, '\n 'initial_state_prior.sample()).batch_shape`.')):\n return prior_lp + transition_lp\n\n def _default_event_space_bijector(self):\n transition_dist = self.transition_fn(\n 0, self.initial_state_prior.sample(seed=samplers.zeros_seed()))\n transition_bijector = (\n transition_dist.experimental_default_event_space_bijector())\n # We can share a single bijector across the whole chain if:\n # 1. The prior and transition distributions use the same bijector, and\n # 2. 
This bijector has no batch shape (which could conflict with\n # the `num_steps` axis of the chain).\n if (transition_bijector ==\n self.initial_state_prior.experimental_default_event_space_bijector()\n and tensorshape_util.rank(\n transition_bijector.experimental_batch_shape()) == 0):\n return transition_bijector\n\n return _MarkovChainBijector(\n self,\n transition_bijector=transition_bijector,\n bijector_fn=lambda d: d.experimental_default_event_space_bijector())\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n if is_init != tensor_util.is_ref(self._num_steps):\n assertions.append(assert_util.assert_greater_equal(\n self._num_steps, 1,\n message='Argument `num_steps` must be at least 1.'))\n return assertions\n\n def _sample_control_dependencies(self, x):\n if not self.validate_args:\n return []\n parts_num_steps = tf.nest.flatten(tf.nest.map_structure(\n lambda x, k: ps.shape(x)[k], x, self._step_axes()))\n return [\n assert_util.assert_equal( # pylint: disable=g-complex-comprehension\n num_steps, self.num_steps,\n message='Input shape does not match the expected num_steps.')\n for num_steps in parts_num_steps]\n\n\ndef _make_sample_loop_body(transition_fn,\n sample_attr='sample',\n extract_sample_fn=lambda x: x):\n \"\"\"Builds the scan loop body to sample from a Markov chain.\"\"\"\n\n def loop_body(seed_and_state, step):\n seed, previous_result = seed_and_state\n state = extract_sample_fn(previous_result) # Maybe strip log_prob.\n current_step_seed, seed = samplers.split_seed(seed, n=2)\n new_result = getattr(transition_fn(step - 1, state), sample_attr)(\n seed=current_step_seed)\n return seed, new_result\n\n return loop_body\n\n\ndef _assert_same_shape(x, y,\n message='Shapes do not match.',\n validate_args=False):\n \"\"\"Asserts (statically if possible) that two Tensor have the same shape.\"\"\"\n if not tensorshape_util.is_compatible_with(x.shape, y.shape):\n raise ValueError(message +\n ' Saw shapes: {} vs {}.'.format(x.shape, y.shape))\n\n assertions = []\n if validate_args and not (tensorshape_util.is_fully_defined(x.shape) and\n tensorshape_util.is_fully_defined(y.shape)):\n assertions.append(\n assert_util.assert_equal(\n tf.shape(x), tf.shape(y), message=message))\n return assertions\n\n\n# pylint: disable=protected-access\n@log_prob_ratio.RegisterLogProbRatio(MarkovChain)\ndef _markov_chain_log_prob_ratio(p, x, q, y, name=None):\n \"\"\"Implements `log_prob_ratio` for tfd.MarkovChain.\"\"\"\n with tf.name_scope(name or 'markov_chain_log_prob_ratio'):\n # TODO(davmre): In the case where `p` and `q` have components of the same\n # families (in addition to just both being MarkovChains), we might prefer to\n # recursively call `log_prob_ratio` instead of just subtracting log probs.\n p_prior_lp, p_transition_lps = p._log_prob_parts(x)\n q_prior_lp, q_transition_lps = q._log_prob_parts(y)\n prior_lp_ratio = p_prior_lp - q_prior_lp\n transition_lp_ratios = p_transition_lps - q_transition_lps\n if (p._experimental_use_kahan_sum or\n q._experimental_use_kahan_sum):\n transition_lp_ratio = tfp_math.reduce_kahan_sum(\n transition_lp_ratios, axis=0).value\n else:\n transition_lp_ratio = tf.reduce_sum(transition_lp_ratios, axis=0)\n return prior_lp_ratio + transition_lp_ratio\n# pylint: enable=protected-access\n\n\nclass _MarkovChainBijector(bijector.Bijector):\n \"\"\"Applies distinct bijectors to initial + transition states of a chain.\"\"\"\n\n def __init__(self,\n chain,\n bijector_fn,\n transition_bijector,\n 
name='markov_chain_bijector'):\n \"\"\"Initializes the MarkovChain bijector.\n\n This bijector maps into the support of the corresponding MarkovChain\n distribution, using separate bijectors for the head (initial state)\n and tail (transition states) of the chain. Its input is a pair of\n unconstrained structures each matching `chain.dtype`, and the output is a\n constrained structure matching `chain.dtype`. Note that the inputs\n to the two bijectors may have different shapes, corresponding to the\n inverse images of the two bijectors, but the outputs must have the same\n shape in order to support concatenation along the `num_steps` axis.\n\n Conceptually, the Markov chain bijector performs the same transformation as\n the `joint_distribution._DefaultJointBijector` for a hypothetical joint\n distribution that samples from the chain one step at a time:\n\n ```python\n @tfd.JointDistributionCoroutineAutoBatched\n def markov_chain_equivalent():\n x = yield initial_state_prior\n for i in range(1, num_steps):\n x = yield transition_fn(i, x)\n markov_chain_equivalent_joint_bijector = (\n markov_chain_equivalent.experimental_default_event_space_bijector())\n ```\n\n However, just as `MarkovChain` uses low-level looping and batch operations\n for better performance than the corresponding joint\n distribution, the `MarkovChainBijector` is more efficient than the\n corresponding joint bijector.\n\n Args:\n chain: Instance of `tfd.MarkovChain`.\n bijector_fn: Callable with signature `bij = bijector_fn(dist)`, where\n `dist` is a `tfd.Distribution` instance. This is applied to the\n `chain.initial_state_prior` and to distributions returned by\n `chain.transition_fn(...)`.\n transition_bijector: Bijector instance for a single step of the\n transition model. This is typically equal to\n `bijector_fn(markov_chain.transition_fn(0,\n markov_chain.initial_state_prior.sample()))`; passing it explicitly\n avoids the need to recreate it whenever the chain bijector is\n copied or otherwise re-initialized.\n name: The name to give ops created by this bijector.\n\n #### Example\n\n For example, consider the following chain, which has dtype\n `{'probs': tf.float32}`, and describes a process in which a 2D vector\n is sampled from the probability simplex and then gradually corrupted by\n Gaussian noise (which will in general push it out of the simplex):\n\n ```python\n chain = tfd.MarkovChain(\n initial_state_prior=tfd.JointDistributionNamedAutoBatched(\n {'probs': tfd.Dirichlet([1., 1.])}),\n transition_fn=lambda _, x: tfd.JointDistributionNamedAutoBatched(\n {'probs': tfd.MultivariateNormalDiag(loc=x['probs'],\n scale_diag=[0.1, 0.1])},\n batch_ndims=ps.rank(x['probs'])),\n num_steps=10)\n ```\n\n Transformations of this distribution apply separate bijectors\n for the `Dirichlet` initial state and `MultivariateNormalDiag` transitions:\n\n ```python\n bij = chain.experimental_default_event_space_bijector()\n y = chain.sample(5) # Shape: {'probs': [5, 10, 2]}\n x = chain.inverse(y) # Shape: [{'probs': [5, 1]}, {'probs': [5, 9, 2]}]\n ```\n\n Note that the pulled-back `x` is a pair of structures, of shapes\n `{'probs': [5, 1]}` and `{'probs': [5, 9, 2]}` respectively. The first is\n the result of pulling the initial state vectors back through the Dirichlet\n event space bijector, which inverts shape-`[2]` vectors on the simplex to\n shape-`[1]` unconstrained vectors. 
The second comes from pulling the\n shape-`[2]` chain state at each of the remaining `9` timesteps back through\n the `MultivariateNormalDiag` event space bijector, which is just\n the identity bijector, resulting in vectors of shape `[2]`.\n\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name):\n self._chain = chain\n self._bijector_fn = bijector_fn\n self._initial_bijector = bijector_fn(chain.initial_state_prior)\n self._transition_bijector = transition_bijector\n\n inverse_min_event_ndims = tf.nest.map_structure(\n ps.rank_from_shape, chain.event_shape_tensor())\n super(_MarkovChainBijector, self).__init__(\n forward_min_event_ndims=(\n self._initial_bijector.inverse_event_ndims(\n tf.nest.map_structure(lambda nd: nd - 1,\n inverse_min_event_ndims)),\n self._transition_bijector.inverse_event_ndims(\n inverse_min_event_ndims)),\n inverse_min_event_ndims=inverse_min_event_ndims,\n is_constant_jacobian=(\n self.initial_bijector.is_constant_jacobian and\n self.transition_bijector.is_constant_jacobian),\n validate_args=chain.validate_args,\n parameters=parameters,\n name=name)\n\n @property\n def bijector_fn(self):\n return self._bijector_fn\n\n @property\n def chain(self):\n return self._chain\n\n @property\n def initial_bijector(self):\n return self._initial_bijector\n\n @property\n def transition_bijector(self):\n return self._transition_bijector\n\n @classmethod\n def _parameter_properties(cls, dtype):\n return dict(\n chain=parameter_properties.BatchedComponentProperties(),\n transition_bijector=parameter_properties.BatchedComponentProperties(\n # The transition bijector contributes no batch shape\n # beyond that from the chain itself.\n event_ndims=None))\n\n def _apply_forward_scan(self, fn, x0, xs):\n \"\"\"Runs the chain forward, accumulating `fn(b, x, y)` vals at every step.\n\n Args:\n fn: Callable with signature `result = fn(b, x, y)`.\n x0: Structure of initial state `Tensors`, each of shape\n `concat([[batch_shape], unconstrained_prior_event_shape])`.\n xs: Structure of `Tensors`, each of shape\n `concat([[batch_shape], [num_steps - 1],\n unconstrained_transition_event_shape])`.\n Returns:\n fs: Result `Tensor` of shape\n `concat([[num_steps], batch_shape, result_shape])`, where `result_shape`\n is the shape of the result from an unbatched call to `fn`.\n \"\"\"\n xs_step_axes = tf.nest.map_structure(\n lambda nd: -nd,\n self.transition_bijector.inverse_event_ndims(\n # Outputs `y` have the num_steps axis at `-inverse_min_event_ndims`.\n self.inverse_min_event_ndims))\n xs = move_dimensions(xs, source=xs_step_axes, dest=0)\n\n # Evaluate the initial state.\n y0 = self.initial_bijector.forward(x0)\n f0 = fn(self.initial_bijector, x0, y0)\n\n # Evaluate the rest of the chain.\n def loop_body(previous_y_and_result, idx):\n previous_y, _ = previous_y_and_result\n bij = self.bijector_fn(self.chain.transition_fn(idx, previous_y))\n x_i = tf.nest.map_structure(lambda x: x[idx - 1], xs)\n y_i = bij.forward(x_i)\n f_i = fn(bij, x_i, y_i)\n return (y_i,\n tf.nest.map_structure(lambda a, b: tf.cast(a, b.dtype), f_i, f0))\n _, fs = tf.scan(loop_body,\n elems=tf.range(1, self.chain.num_steps),\n initializer=(y0, f0))\n return concat_initial(f0, fs)\n\n def _apply_batch(self, fn, y):\n \"\"\"Applies `fn(b, y)` at each step of the chain.\n\n Args:\n fn: Callable with signature `result = fn(b, y)`.\n y: Structure of `Tensor`s, of shape\n `batch_shape + self.chain.event_shape`.\n Returns:\n f0: `Tensor` of shape `batch_shape + result_shape`.\n fs: `Tensor` of shape `[num_steps - 1] 
+ batch_shape + result_shape`.\n \"\"\"\n y = move_dimensions(y, source=self.chain._step_axes(), dest=0) # pylint: disable=protected-access\n f0 = fn(self.initial_bijector, tf.nest.map_structure(lambda y: y[0], y))\n transition_dist = self.chain.transition_fn(\n tf.range(self.chain.num_steps - 1),\n tf.nest.map_structure(lambda y: y[:-1], y))\n return (f0,\n fn(self.bijector_fn(transition_dist),\n tf.nest.map_structure(lambda y: y[1:], y)))\n\n def _forward(self, x):\n x0, xs = x\n y = self._apply_forward_scan(fn=lambda b, x, y: y, x0=x0, xs=xs)\n return move_dimensions(y, source=0, dest=self.chain._step_axes()) # pylint: disable=protected-access\n\n def _inverse(self, y):\n xs_step_axes = tf.nest.map_structure(\n lambda nd: -nd,\n self.transition_bijector.inverse_event_ndims(\n # Outputs `y` have the num_steps axis at `-inverse_min_event_ndims`.\n self.inverse_min_event_ndims))\n x0, xs = self._apply_batch(fn=lambda b, y: b.inverse(y), y=y)\n return (x0, move_dimensions(xs, source=0, dest=xs_step_axes))\n\n def _forward_log_det_jacobian(self, x):\n inverse_ndims_for_one_step = tf.nest.map_structure(\n lambda nd: nd - 1, self.inverse_min_event_ndims)\n x0, xs = x\n fldjs = self._apply_forward_scan(\n fn=lambda b, x, y: compute_and_maybe_broadcast_ldj( # pylint: disable=g-long-lambda\n b, x,\n event_ndims=b.inverse_event_ndims(inverse_ndims_for_one_step),\n ldj_fn=lambda b: b.forward_log_det_jacobian),\n x0=x0, xs=xs)\n return tf.reduce_sum(fldjs, axis=0)\n\n def _inverse_log_det_jacobian(self, y):\n inverse_ndims_for_one_step = tf.nest.map_structure(\n lambda nd: nd - 1, self.inverse_min_event_ndims)\n initial_ildj, ildjs = self._apply_batch(\n fn=lambda b, x: compute_and_maybe_broadcast_ldj( # pylint: disable=g-long-lambda\n b, x,\n event_ndims=inverse_ndims_for_one_step,\n ldj_fn=lambda b: b.inverse_log_det_jacobian),\n y=y)\n return initial_ildj + tf.reduce_sum(ildjs, axis=0)\n\n def _forward_event_shape(self, event_shape):\n _, tail_shape = event_shape\n return tf.nest.map_structure(\n lambda s: tensorshape_util.concatenate([1 + s[0]], s[1:]),\n self.transition_bijector.forward_event_shape(tail_shape))\n\n def _forward_event_shape_tensor(self, event_shape):\n _, tail_shape = event_shape\n return tf.nest.map_structure(\n lambda s: ps.concat([[1 + s[0]], s[1:]], axis=0),\n self.transition_bijector.forward_event_shape_tensor(tail_shape))\n\n def _inverse_event_shape(self, event_shape):\n num_steps = tf.nest.flatten(event_shape)[0][0]\n head_shape = tf.nest.map_structure(lambda s: s[1:], event_shape)\n tail_shape = tf.nest.map_structure(\n lambda s: tensorshape_util.concatenate([num_steps - 1], s[1:]),\n event_shape)\n return (self.initial_bijector.inverse_event_shape(head_shape),\n self.transition_bijector.inverse_event_shape(tail_shape))\n\n def _inverse_event_shape_tensor(self, event_shape):\n num_steps = tf.nest.flatten(event_shape)[0][0]\n head_shape = tf.nest.map_structure(lambda s: s[1:], event_shape)\n tail_shape = tf.nest.map_structure(\n lambda s: ps.concat([[num_steps - 1], s[1:]], axis=0),\n event_shape)\n return (self.initial_bijector.inverse_event_shape_tensor(head_shape),\n self.transition_bijector.inverse_event_shape_tensor(tail_shape))\n\n def _inverse_dtype(self, dtype):\n return (self.initial_bijector.inverse_dtype(dtype),\n self.transition_bijector.inverse_dtype(dtype))\n\n def _forward_dtype(self, dtype):\n head_dtype, _ = dtype\n return self.initial_bijector.forward_dtype(head_dtype)\n\n\ndef move_dimensions(xs, source, dest):\n if tf.nest.is_nested(xs):\n if not 
tf.nest.is_nested(source):\n source = tf.nest.map_structure(lambda _: source, xs)\n if not tf.nest.is_nested(dest):\n dest = tf.nest.map_structure(lambda _: dest, xs)\n return tf.nest.map_structure(\n distribution_util.move_dimension, xs, source, dest)\n\n\ndef compute_and_maybe_broadcast_ldj(\n b, x, event_ndims, ldj_fn=lambda b: b.forward_log_det_jacobian):\n \"\"\"Broadcasts the forward/inverse log det jacobian to full batch shape.\"\"\"\n ldj = ldj_fn(b)(x, event_ndims=event_ndims)\n x_batch_shape_parts = [\n ps.shape(t)[:ps.rank(t) - nd]\n for (t, nd) in zip(tf.nest.flatten(x), tf.nest.flatten(event_ndims))]\n return tf.broadcast_to(ldj, functools.reduce(ps.broadcast_shape,\n x_batch_shape_parts,\n ps.shape(ldj)))\n\n\ndef concat_initial(x0, xs):\n return tf.nest.map_structure(\n lambda x0, xs: tf.concat([x0[tf.newaxis, ...], xs], axis=0),\n x0, xs)\n",
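The MarkovChain docstring above already walks through random-walk and multivariate examples, but the `experimental_use_kahan_sum` option in `__init__` is only described in prose. The sketch below (an assumption-laden illustration, not part of the source file; it presumes `tfd.MarkovChain` is exported as in the code above) shows how that flag would be switched on for a long chain, where compensated summation of the per-step log-probabilities reduces accumulated floating-point error:

```python
import tensorflow_probability as tfp

tfd = tfp.distributions

chain = tfd.MarkovChain(
    initial_state_prior=tfd.Normal(loc=0., scale=1.),
    transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
    num_steps=10000,
    experimental_use_kahan_sum=True)  # Kahan-sum the per-step log-probs.

x = chain.sample()       # shape [10000]
lp = chain.log_prob(x)   # scalar; summed with Kahan compensation
```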
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"SoftClip bijector.\"\"\"\n\n# Dependency imports\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import util as tfp_util\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.bijectors import chain\nfrom tensorflow_probability.python.bijectors import scale\nfrom tensorflow_probability.python.bijectors import shift\nfrom tensorflow_probability.python.bijectors import softplus\n\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import tensor_util\n\n\n__all__ = [\n 'SoftClip',\n]\n\n\nclass SoftClip(bijector.AutoCompositeTensorBijector):\n \"\"\"Bijector that approximates clipping as a continuous, differentiable map.\n\n The `forward` method takes unconstrained scalar `x` to a value `y` in\n `[low, high]`. For values within the interval and far from the bounds\n (`low << x << high`), this mapping is approximately the identity mapping.\n\n ```python\n b = tfb.SoftClip(low=-10., high=10.)\n b.forward([-15., -7., 1., 9., 20.])\n # => [-9.993284, -6.951412, 0.9998932, 8.686738, 9.999954 ]\n ```\n\n The softness of the clipping can be adjusted via the `hinge_softness`\n parameter. A sharp constraint (`hinge_softness < 1.0`) will approximate\n the identity mapping very well across almost all of its range, but may\n be numerically ill-conditioned at the boundaries. A soft constraint\n (`hinge_softness > 1.0`) corresponds to a smoother, better-conditioned\n mapping, but creates a larger distortion of its inputs.\n\n ```python\n b_hard = SoftClip(low=-5, high=5., hinge_softness=0.1)\n b_soft.forward([-15., -7., 1., 9., 20.])\n # => [-10., -7., 1., 8.999995, 10.]\n\n b_soft = SoftClip(low=-5, high=5., hinge_softness=10.0)\n b_soft.forward([-15., -7., 1., 9., 20.])\n # => [-6.1985435, -3.369276, 0.16719627, 3.6655345, 7.1750355]\n ```\n\n Note that the outputs are always in the interval `[low, high]`, regardless\n of the `hinge_softness`.\n\n #### Example use\n\n A trivial application of this bijector is to constrain the values sampled\n from a distribution:\n\n ```python\n dist = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=tfb.SoftClip(low=-5., high=5.))\n samples = dist.sample(100) # => samples guaranteed in [-10., 10.]\n ```\n\n A more useful application is to constrain the values considered\n during inference, preventing an inference algorithm from proposing values\n that cause numerical issues. 
For example, this model will return a `log_prob`\n of `NaN` when `z` is outside of the range `[-5., 5.]`:\n\n ```python\n dist = tfd.JointDistributionNamed({\n 'z': tfd.Normal(0., 1.0)\n 'x': lambda z: tfd.Normal(\n loc=tf.log(25 - z**2), # Breaks if z >= 5 or z <= -5.\n scale=1.)})\n ```\n\n Using SoftClip allows us to keep an inference algorithm in the feasible\n region without distorting the inference geometry by very much:\n\n ```python\n target_log_prob_fn = lambda z: dist.log_prob(z=z, x=3.) # Condition on x==3.\n\n # Use SoftClip to ensure sampler stays within the numerically valid region.\n mcmc_samples = tfp.mcmc.sample_chain(\n kernel=tfp.mcmc.TransformedTransitionKernel(\n tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n num_leapfrog_steps=2,\n step_size=0.1),\n bijector=tfb.SoftClip(-5., 5.)),\n trace_fn=None,\n current_state=0.,\n num_results=100)\n ```\n\n #### Mathematical Details\n\n The constraint is built by using `softplus(x) = log(1 + exp(x))` as a smooth\n approximation to `max(x, 0)`. In combination with affine transformations, this\n can implement a constraint to any scalar interval.\n\n In particular, translating `softplus` gives a generic lower bound constraint:\n\n ```\n max(x, low) = max(x - low, 0) + low\n ~= softplus(x - low) + low\n := softlower(x)\n ```\n\n Note that this quantity is always greater than `low` because `softplus` is\n positive-valued. We can also implement a soft upper bound:\n\n ```\n min(x, high) = min(x - high, 0) + high\n = -max(high - x, 0) + high\n ~= -softplus(high - x) + high\n := softupper(x)\n ```\n\n which, similarly, is always less than `high`.\n\n Composing these bounds as `softupper(softlower(x))` gives a quantity bounded\n above by `high`, and bounded below by `softupper(low)` (because `softupper`\n is monotonic and its input is bounded below by `low`). In general, we will\n have `softupper(low) < low`, so we need to shrink the interval slightly\n (by `(high - low) / (high - softupper(low))`) to preserve the lower bound.\n The two-sided constraint is therefore:\n\n ```python\n softclip(x) := (softupper(softlower(x)) - high) *\n (high - low) / (high - softupper(low)) + high\n = -softplus(high - low - softplus(x - low)) *\n (high - low) / (softplus(high-low)) + high\n ```\n\n Due to this rescaling, the bijector can be mildly asymmetric. Values\n of equal distance from the endpoints are mapped to values with slightly\n unequal distance from the endpoints; for example,\n\n ```python\n b = SoftConstrain(-1., 1.)\n b.forward([-0.5., 0.5.])\n # => [-0.2527727 , 0.19739306]\n ```\n\n The degree of the asymmetry is proportional to the size of the rescaling\n correction, i.e., the extent to which `softupper` fails to be the identity\n map at the lower end of the interval. This is maximized when the upper and\n lower bounds are very close together relative to the hinge softness, as in\n the example above. Conversely, when the interval is wide, the required\n correction and asymmetry are very small.\n\n \"\"\"\n\n def __init__(self,\n low=None,\n high=None,\n hinge_softness=None,\n validate_args=False,\n name='soft_clip'):\n \"\"\"Instantiates the SoftClip bijector.\n\n Args:\n low: Optional float `Tensor` lower bound. If `None`, the lower-bound\n constraint is omitted.\n Default value: `None`.\n high: Optional float `Tensor` upper bound. If `None`, the upper-bound\n constraint is omitted.\n Default value: `None`.\n hinge_softness: Optional nonzero float `Tensor`. 
Controls the softness\n of the constraint at the boundaries; values outside of the constraint\n set are mapped into intervals of width approximately\n `log(2) * hinge_softness` on the interior of each boundary. High\n softness reserves more space for values outside of the constraint set,\n leading to greater distortion of inputs *within* the constraint set,\n but improved numerical stability near the boundaries.\n Default value: `None` (`1.0`).\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name):\n dtype = dtype_util.common_dtype(\n [low, high, hinge_softness], dtype_hint=tf.float32)\n low = tensor_util.convert_nonref_to_tensor(\n low, name='low', dtype=dtype)\n high = tensor_util.convert_nonref_to_tensor(\n high, name='high', dtype=dtype)\n hinge_softness = tensor_util.convert_nonref_to_tensor(\n hinge_softness, name='hinge_softness', dtype=dtype)\n\n softplus_bijector = softplus.Softplus(hinge_softness=hinge_softness)\n negate = tf.convert_to_tensor(-1., dtype=dtype)\n\n components = []\n if low is not None and high is not None:\n # Support reference tensors (eg Variables) for `high` and `low` by\n # deferring all computation on them until needed.\n width = tfp_util.DeferredTensor(\n pretransformed_input=high, transform_fn=lambda high: high - low)\n negated_shrinkage_factor = tfp_util.DeferredTensor(\n pretransformed_input=width,\n transform_fn=lambda w: tf.cast( # pylint: disable=g-long-lambda\n negate * w / softplus_bijector.forward(w), dtype=dtype))\n\n # Implement the soft constraint from 'Mathematical Details' above:\n # softclip(x) := -softplus(width - softplus(x - low)) *\n # (width) / (softplus(width)) + high\n components = [\n shift.Shift(high),\n scale.Scale(negated_shrinkage_factor),\n softplus_bijector,\n shift.Shift(width),\n scale.Scale(negate),\n softplus_bijector,\n shift.Shift(tfp_util.DeferredTensor(low, lambda x: -x))]\n elif low is not None:\n # Implement a soft lower bound:\n # softlower(x) := softplus(x - low) + low\n components = [\n shift.Shift(low),\n softplus_bijector,\n shift.Shift(tfp_util.DeferredTensor(low, lambda x: -x))]\n elif high is not None:\n # Implement a soft upper bound:\n # softupper(x) := -softplus(high - x) + high\n components = [shift.Shift(high),\n scale.Scale(negate),\n softplus_bijector,\n scale.Scale(negate),\n shift.Shift(high)]\n\n self._low = low\n self._high = high\n self._hinge_softness = hinge_softness\n self._chain = chain.Chain(components, validate_args=validate_args)\n\n super(SoftClip, self).__init__(\n forward_min_event_ndims=0,\n dtype=dtype,\n validate_args=validate_args,\n parameters=parameters,\n is_constant_jacobian=not components,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype):\n return dict(\n low=parameter_properties.ParameterProperties(),\n high=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=parameter_properties\n .BIJECTOR_NOT_IMPLEMENTED),\n hinge_softness=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus.Softplus(low=dtype_util.eps(dtype)))))\n\n @property\n def low(self):\n return self._low\n\n @property\n def high(self):\n return self._high\n\n @property\n def hinge_softness(self):\n return self._hinge_softness\n\n @classmethod\n def _is_increasing(cls):\n return True\n\n def _forward(self, x):\n return self._chain.forward(x)\n\n def 
_forward_log_det_jacobian(self, x):\n return self._chain.forward_log_det_jacobian(x, self.forward_min_event_ndims)\n\n def _inverse(self, y):\n with tf.control_dependencies(self._assert_valid_inverse_input(y)):\n return self._chain.inverse(y) # pylint: disable=protected-access\n\n def _inverse_log_det_jacobian(self, y):\n with tf.control_dependencies(self._assert_valid_inverse_input(y)):\n return self._chain.inverse_log_det_jacobian(\n y, self.inverse_min_event_ndims)\n\n def _assert_valid_inverse_input(self, y):\n assertions = []\n if self.validate_args and self.low is not None:\n assertions += [assert_util.assert_greater(\n y, self.low,\n message='Input must be greater than `low`.')]\n if self.validate_args and self.high is not None:\n assertions += [assert_util.assert_less(\n y, self.high,\n message='Input must be less than `high`.')]\n return assertions\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args or self.low is None or self.high is None:\n return []\n assertions = []\n if is_init != (tensor_util.is_ref(self.low) or\n tensor_util.is_ref(self.high)):\n assertions.append(assert_util.assert_greater(\n self.high, self.low,\n message='Argument `high` must be greater than `low`.'))\n return assertions\n",
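The SoftClip source above also supports one-sided constraints (`low` only or `high` only), which the 'Mathematical Details' section derives but does not demonstrate. A minimal sketch of the soft lower bound `softlower(x) = softplus(x - low) + low`, assuming the bijector is available as `tfp.bijectors.SoftClip`:

```python
import tensorflow_probability as tfp

tfb = tfp.bijectors

b = tfb.SoftClip(low=0.)        # high=None, so only the lower bound applies
y = b.forward([-3., 0., 3.])    # approx. [0.049, 0.693, 3.049]; always > 0
x = b.inverse(y)                # recovers approx. [-3., 0., 3.]
```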
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Internal utility functions for implementing TransitionKernels.\"\"\"\n\nimport warnings\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import broadcast_util as bu\nfrom tensorflow_probability.python.internal import distribution_util as dist_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.math.gradient import value_and_gradient as tfp_math_value_and_gradients\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n\n__all__ = [\n 'choose',\n 'choose_from',\n 'enable_store_parameters_in_results',\n 'index_remapping_gather',\n 'is_list_like',\n 'is_namedtuple_like',\n 'make_name',\n 'maybe_call_fn_and_grads',\n 'prepare_state_parts',\n 'PrettyNamedTupleMixin',\n 'safe_sum',\n 'SEED_CTOR_ARG_DEPRECATION_MSG',\n 'set_doc',\n 'strip_seeds',\n 'warn_if_parameters_are_not_simple_tensors',\n]\n\n\nJAX_MODE = False\n\nSEED_CTOR_ARG_DEPRECATION_MSG = (\n 'Seeding `tfp.mcmc.TransitionKernel` instances by constructor argument is '\n 'deprecated. Use the `seed` argument to `tfp.mcmc.sample_chain` or '\n 'directly on `one_step`. 
The legacy behavior is still supported and should '\n 'be through 2020-09-20.')\n\n\nclass PrettyNamedTupleMixin(object):\n \"\"\"Mixin adding a nicer `__repr__` for `namedtuple`s.\"\"\"\n __slots__ = ()\n\n def __repr__(self):\n return '{}(\\n{}\\n)'.format(\n type(self).__name__,\n ',\\n'.join(' {}={}'.format(k, repr(v).replace('\\n', '\\n '))\n for (k, v) in self._asdict().items()))\n\n\ndef prepare_state_parts(state_or_state_part, dtype=None, name=None):\n \"\"\"Calls c2t on each element or the entirety if not iterable; returns list.\"\"\"\n # Don't use tf.name_scope since this function has ct2-like semantics.\n is_multipart = is_list_like(state_or_state_part)\n state_parts = state_or_state_part if is_multipart else [state_or_state_part]\n state_parts = [tf.convert_to_tensor(x, dtype=dtype, name=name)\n for x in state_parts]\n return state_parts, is_multipart\n\n\ndef is_list_like(x):\n \"\"\"Helper which returns `True` if input is `list`-like.\"\"\"\n return isinstance(x, (tuple, list))\n\n\ndef is_namedtuple_like(x):\n \"\"\"Helper which returns `True` if input is `collections.namedtuple`-like.\"\"\"\n try:\n for fn in x._fields:\n _ = getattr(x, fn)\n return True\n except AttributeError:\n return False\n\n\ndef make_name(super_name, default_super_name, sub_name):\n \"\"\"Helper which makes a `str` name; useful for tf.name_scope.\"\"\"\n name = super_name if super_name is not None else default_super_name\n if sub_name is not None:\n name += '_' + sub_name\n return name\n\n\ndef _choose_base_case(is_accepted,\n proposed,\n current,\n name=None,\n addr=None,):\n \"\"\"Helper to `choose` which expand_dims `is_accepted` and applies tf.where.\"\"\"\n def _where(proposed, current):\n \"\"\"Wraps `tf.where`.\"\"\"\n if proposed is current:\n return proposed\n\n # Handle CompositeTensor types at the leafmost `addr`.\n flat_p = tf.nest.flatten(proposed, expand_composites=True)\n flat_c = tf.nest.flatten(current, expand_composites=True)\n\n res = []\n for p, c in zip(flat_p, flat_c):\n # Preserve the name from `current` so names can propagate from\n # `bootstrap_results`.\n name = getattr(c, 'name', None)\n if name is not None:\n name = name.rpartition('/')[2].rsplit(':', 1)[0]\n # Since this is an internal utility it is ok to assume\n # tf.shape(proposed) == tf.shape(current).\n res.append(\n tf.where(bu.left_justified_expand_dims_like(is_accepted, p), p, c,\n name=name))\n return tf.nest.pack_sequence_as(current, res, expand_composites=True)\n\n with tf.name_scope(name or 'choose'):\n if not is_list_like(proposed):\n return _where(proposed, current)\n return tf.nest.pack_sequence_as(\n current,\n [(_choose_recursive(is_accepted, p, c, name=name, addr=f'{addr}[i]')\n if is_namedtuple_like(p) else\n _where(p, c)) for i, (p, c) in enumerate(zip(proposed, current))])\n\n\ndef _choose_recursive(is_accepted, proposed, current, name=None, addr='<root>'):\n \"\"\"Recursion helper which also reports the address of any failures.\"\"\"\n with tf.name_scope(name or 'choose'):\n if not is_namedtuple_like(proposed):\n return _choose_base_case(is_accepted, proposed, current, name=name,\n addr=addr)\n if not isinstance(proposed, type(current)):\n raise TypeError(\n f'Type of `proposed` ({type(proposed).__name__}) must be identical '\n f'to type of `current` ({type(current).__name__}). 
(At \"{addr}\".)')\n items = {}\n for fn in proposed._fields:\n items[fn] = _choose_recursive(is_accepted,\n getattr(proposed, fn),\n getattr(current, fn),\n name=name,\n addr=f'{addr}/{fn}')\n return type(proposed)(**items)\n\n\ndef choose(is_accepted, proposed, current, name=None):\n \"\"\"Helper which expand_dims `is_accepted` then applies tf.where.\"\"\"\n return _choose_recursive(is_accepted, proposed, current, name=name)\n\n\ndef _nest_choose(is_accepted, proposed, current):\n \"\"\"Like `choose` but not limited to list, tuple, namedtuple.\"\"\"\n result_parts = choose(is_accepted,\n tf.nest.flatten(proposed, expand_composites=True),\n tf.nest.flatten(current, expand_composites=True))\n return tf.nest.pack_sequence_as(\n proposed, result_parts, expand_composites=True)\n\n\ndef choose_from(n, options):\n \"\"\"Helper to select the n-th option from a list of options.\n\n This is useful when `n` is not a concrete value. Also note that\n the value of `n` will be clipped to the edges of the interval\n `[0, len(options) - 1]`.\n\n Args:\n n: Scalar `int` `Tensor` option.\n options: List of options to choose from. All the options should have the\n same nested structure.\n\n Returns:\n The n-th option among `options`.\n \"\"\"\n if len(options) == 1:\n return options[0]\n m = len(options) // 2\n return _nest_choose(n < m, choose_from(n, options[:m]),\n choose_from(n - m, options[m:]))\n\n\ndef strip_seeds(obj):\n if not is_namedtuple_like(obj):\n return obj\n return type(obj)(**{fn: strip_seeds(fv) if fn != 'seed' else []\n for fn, fv in obj._asdict().items()})\n\n\ndef safe_sum(x, alt_value=-np.inf, name=None):\n \"\"\"Elementwise adds list members, replacing non-finite results with alt_value.\n\n Typically the `alt_value` is chosen so the `MetropolisHastings`\n `TransitionKernel` always rejects the proposal.\n\n Args:\n x: Python `list` of `Tensors` to elementwise add.\n alt_value: Python scalar used to replace any elementwise sums which would\n otherwise be non-finite.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"safe_sum\").\n\n Returns:\n safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s\n `x` or `alt_value` where sums are non-finite.\n\n Raises:\n TypeError: if `x` is not list-like.\n ValueError: if `x` is empty.\n \"\"\"\n with tf.name_scope(name or 'safe_sum'):\n if not is_list_like(x):\n raise TypeError('Expected list input.')\n if not x:\n raise ValueError('Input should not be empty.')\n in_shape = x[0].shape\n x = tf.add_n(x)\n x = tf.where(tf.math.is_finite(x), x, tf.constant(alt_value, dtype=x.dtype))\n tensorshape_util.set_shape(x, in_shape)\n return x\n\n\ndef set_doc(value):\n \"\"\"Decorator to programmatically set a function docstring.\"\"\"\n def _doc(func):\n func.__doc__ = value\n return func\n return _doc\n\n\ndef _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):\n \"\"\"Helper to `maybe_call_fn_and_grads`.\"\"\"\n with tf.name_scope(name or 'value_and_gradients'):\n\n def _convert_to_tensor(x, name):\n ctt = lambda x_: None if x_ is None else tf.convert_to_tensor( # pylint: disable=g-long-lambda\n x_, name=name)\n return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)\n\n fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)\n else [fn_arg_list])\n fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')\n\n if result is None and grads is None and (JAX_MODE or\n not tf.executing_eagerly()):\n # Currently, computing gradient is not working well with 
caching in\n # tensorflow eager mode (see below), so we will handle that case\n # separately.\n return tfp_math_value_and_gradients(fn, fn_arg_list)\n\n if result is None:\n result = fn(*fn_arg_list)\n if grads is None:\n assert tf.executing_eagerly()\n # Ensure we disable bijector cacheing in eager mode.\n # TODO(b/72831017): Remove this once bijector cacheing is fixed for\n # eager mode.\n fn_arg_list = [0 + x for x in fn_arg_list]\n\n result = _convert_to_tensor(result, 'fn_result')\n\n if grads is not None:\n grads = _convert_to_tensor(grads, 'fn_grad')\n return result, grads\n\n _, grads = tfp_math_value_and_gradients(fn, fn_arg_list)\n\n return result, grads\n\n\ndef maybe_call_fn_and_grads(fn,\n fn_arg_list,\n result=None,\n grads=None,\n check_non_none_grads=True,\n name=None):\n \"\"\"Calls `fn` and computes the gradient of the result wrt `args_list`.\"\"\"\n with tf.name_scope(name or 'maybe_call_fn_and_grads'):\n fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)\n else [fn_arg_list])\n result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)\n if not all(dtype_util.is_floating(r.dtype)\n for r in (result if is_list_like(result) else [result])): # pylint: disable=superfluous-parens\n raise TypeError('Function result must be a `Tensor` with `float` '\n '`dtype`.')\n if len(fn_arg_list) != len(grads):\n raise ValueError('Function args must be in one-to-one correspondence '\n 'with grads.')\n if check_non_none_grads and any(g is None for g in grads):\n raise ValueError('Encountered `None` gradient.\\n'\n ' fn_arg_list: {}\\n'\n ' grads: {}'.format(fn_arg_list, grads))\n return result, grads\n\n\ndef enable_store_parameters_in_results(kernel):\n \"\"\"Enables the `store_parameters_in_results` parameter in a chain of kernels.\n\n This is a temporary utility for use during the transition period of the\n parameter storage methods.\n\n Args:\n kernel: A TransitionKernel.\n\n Returns:\n kernel: The same kernel, but recreated with `store_parameters_in_results`\n recursively set to `True` in its parameters and its inner kernels (as\n appropriate).\n \"\"\"\n kernel_stack = []\n while hasattr(kernel, 'parameters') and 'inner_kernel' in kernel.parameters:\n kernel_stack.append(kernel)\n kernel = kernel.parameters['inner_kernel']\n\n def _recreate_kernel(kernel, parameters):\n new_parameters = kernel.parameters.copy()\n new_parameters.update(parameters)\n if 'store_parameters_in_results' in new_parameters:\n new_parameters['store_parameters_in_results'] = True\n with deprecation.silence():\n return type(kernel)(**new_parameters)\n\n if hasattr(kernel, 'parameters'):\n kernel = _recreate_kernel(kernel, {})\n\n for outer_kernel in reversed(kernel_stack):\n outer_kernel = _recreate_kernel(outer_kernel, {'inner_kernel': kernel})\n kernel = outer_kernel\n\n return kernel\n\n\ndef _is_tensor_like(param):\n if is_list_like(param):\n return all([_is_tensor_like(p) for p in param])\n if isinstance(param, tf.Tensor):\n return True\n elif isinstance(param, tf.Variable):\n return False\n else:\n return np.array(param).dtype != np.object_\n\n\ndef warn_if_parameters_are_not_simple_tensors(params_dict):\n for param_name, param in params_dict.items():\n if not _is_tensor_like(param):\n warnings.warn(\n '`{}` is not a `tf.Tensor`, Python number, or Numpy array. If this '\n 'parameter is mutable (e.g., a `tf.Variable`), then the '\n 'behavior implied by `store_parameters_in_results` will silently '\n 'change on 2019-08-01. 
Please consult the docstring for '\n '`store_parameters_in_results` details and use '\n '`store_parameters_in_results=True` to silence this warning.'.format(\n param_name))\n\n\ndef index_remapping_gather(params,\n indices,\n axis=0,\n indices_axis=0,\n name='index_remapping_gather'):\n \"\"\"Gather values from `axis` of `params` using `indices_axis` of `indices`.\n\n The shape of `indices` must broadcast to that of `params` when\n their `indices_axis` and `axis` (respectively) are aligned:\n\n ```python\n # params.shape:\n [p[0], ..., ..., p[axis], ..., ..., p[rank(params)] - 1])\n # indices.shape:\n [i[0], ..., i[indices_axis], ..., i[rank(indices)] - 1])\n ```\n\n In particular, `params` must have at least as many\n leading dimensions as `indices` (`axis >= indices_axis`), and at least as many\n trailing dimensions (`rank(params) - axis >= rank(indices) - indices_axis`).\n\n The `result` has the same shape as `params`, except that the dimension\n of size `p[axis]` is replaced by one of size `i[indices_axis]`:\n\n ```python\n # result.shape:\n [p[0], ..., ..., i[indices_axis], ..., ..., p[rank(params) - 1]]\n ```\n\n In the case where `rank(params) == 5`, `rank(indices) == 3`, `axis = 2`, and\n `indices_axis = 1`, the result is given by\n\n ```python\n # alignment is: v axis\n # params.shape == [p[0], p[1], p[2], p[3], p[4]]\n # indices.shape == [i[0], i[1], i[2]]\n # ^ indices_axis\n result[i, j, k, l, m] = params[i, j, indices[j, k, l], l, m]\n ```\n\n Args:\n params: `N-D` `Tensor` (`N > 0`) from which to gather values.\n Number of dimensions must be known statically.\n indices: `Tensor` with values in `{0, ..., params.shape[axis] - 1}`, whose\n shape broadcasts to that of `params` as described above.\n axis: Python `int` axis of `params` from which to gather.\n indices_axis: Python `int` axis of `indices` to align with the `axis`\n over which `params` is gathered.\n name: String name for scoping created ops.\n\n Returns:\n `Tensor` composed of elements of `params`.\n\n Raises:\n ValueError: If shape/rank requirements are not met.\n \"\"\"\n with tf.name_scope(name):\n params = tf.convert_to_tensor(params, name='params')\n indices = tf.convert_to_tensor(indices, name='indices')\n\n params_ndims = tensorshape_util.rank(params.shape)\n indices_ndims = tensorshape_util.rank(indices.shape)\n # `axis` dtype must match ndims, which are 64-bit Python ints.\n axis = tf.get_static_value(ps.convert_to_shape_tensor(axis, dtype=tf.int64))\n indices_axis = tf.get_static_value(\n ps.convert_to_shape_tensor(indices_axis, dtype=tf.int64))\n\n if params_ndims is None:\n raise ValueError(\n 'Rank of `params`, must be known statically. This is due to '\n 'tf.gather not accepting a `Tensor` for `batch_dims`.')\n\n if axis is None:\n raise ValueError(\n '`axis` must be known statically. This is due to '\n 'tf.gather not accepting a `Tensor` for `batch_dims`.')\n\n if indices_axis is None:\n raise ValueError(\n '`indices_axis` must be known statically. 
This is due to '\n 'tf.gather not accepting a `Tensor` for `batch_dims`.')\n\n if indices_axis > axis:\n raise ValueError(\n '`indices_axis` should be <= `axis`, but was {} > {}'.format(\n indices_axis, axis))\n\n if params_ndims < 1:\n raise ValueError(\n 'Rank of params should be `> 0`, but was {}'.format(params_ndims))\n\n if indices_ndims is not None and indices_ndims < 1:\n raise ValueError(\n 'Rank of indices should be `> 0`, but was {}'.format(indices_ndims))\n\n if (indices_ndims is not None and\n (indices_ndims - indices_axis > params_ndims - axis)):\n raise ValueError(\n '`rank(params) - axis` ({} - {}) must be >= `rank(indices) - '\n 'indices_axis` ({} - {}), but was not.'.format(\n params_ndims, axis, indices_ndims, indices_axis))\n\n # `tf.gather` requires the axis to be the rightmost batch ndim. So, we\n # transpose `indices_axis` to be the rightmost dimension of `indices`...\n transposed_indices = dist_util.move_dimension(indices,\n source_idx=indices_axis,\n dest_idx=-1)\n\n # ... and `axis` to be the corresponding (aligned as in the docstring)\n # dimension of `params`.\n broadcast_indices_ndims = indices_ndims + (axis - indices_axis)\n transposed_params = dist_util.move_dimension(\n params,\n source_idx=axis,\n dest_idx=broadcast_indices_ndims - 1)\n\n # Next we broadcast `indices` so that its shape has the same prefix as\n # `params.shape`.\n transposed_params_shape = ps.shape(transposed_params)\n result_shape = ps.concat([\n transposed_params_shape[:broadcast_indices_ndims - 1],\n ps.shape(indices)[indices_axis:indices_axis + 1],\n transposed_params_shape[broadcast_indices_ndims:]], axis=0)\n broadcast_indices = ps.broadcast_to(\n transposed_indices,\n result_shape[:broadcast_indices_ndims])\n\n result_t = tf.gather(transposed_params,\n broadcast_indices,\n batch_dims=broadcast_indices_ndims - 1,\n axis=broadcast_indices_ndims - 1)\n return dist_util.move_dimension(result_t,\n source_idx=broadcast_indices_ndims - 1,\n dest_idx=axis)\n",
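The MCMC kernel utilities above are easiest to see in a tiny example. Below is a minimal, hedged sketch of `choose` and `safe_sum`; it assumes the file is importable as `tensorflow_probability.python.mcmc.internal.util` (the usual location for these helpers), which is not stated in this dump, and the toy tensors are illustrative only.

```python
import tensorflow as tf
# Assumed import path for the utilities defined above; adjust if the module
# lives elsewhere in your checkout.
from tensorflow_probability.python.mcmc.internal import util as mcmc_util

# `choose` left-justified-broadcasts the accept mask against each state part,
# so a per-chain boolean selects whole rows of the proposed/current states.
is_accepted = tf.constant([True, False])          # one decision per chain
proposed = tf.constant([[1., 1.], [2., 2.]])      # shape [chains, state_dim]
current = tf.constant([[0., 0.], [3., 3.]])
selected = mcmc_util.choose(is_accepted, proposed, current)
# selected == [[1., 1.], [3., 3.]]

# `safe_sum` adds list members elementwise and replaces non-finite sums with
# `alt_value` (default -inf), which makes MetropolisHastings reject them.
total = mcmc_util.safe_sum([tf.constant([0.5, float('inf')]),
                            tf.constant([0.5, 1.0])])
# total == [1.0, -inf]
```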
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Relaxed OneHotCategorical distribution classes.\"\"\"\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import chain as chain_bijector\nfrom tensorflow_probability.python.bijectors import exp as exp_bijector\nfrom tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n\nclass ExpRelaxedOneHotCategorical(distribution.AutoCompositeTensorDistribution):\n \"\"\"ExpRelaxedOneHotCategorical distribution with temperature and logits.\n\n An ExpRelaxedOneHotCategorical distribution is a log-transformed\n RelaxedOneHotCategorical distribution. The RelaxedOneHotCategorical is a\n distribution over random probability vectors, vectors of positive real\n values that sum to one, which continuously approximates a OneHotCategorical.\n The degree of approximation is controlled by a temperature: as the temperature\n goes to 0 the RelaxedOneHotCategorical becomes discrete with a distribution\n described by the logits, as the temperature goes to infinity the\n RelaxedOneHotCategorical becomes the constant distribution that is identically\n the constant vector of (1/event_size, ..., 1/event_size).\n\n Because computing log-probabilities of the RelaxedOneHotCategorical can\n suffer from underflow issues, this class is one solution for loss\n functions that depend on log-probabilities, such as the KL Divergence found\n in the variational autoencoder loss. The KL divergence between two\n distributions is invariant under invertible transformations, so evaluating\n KL divergences of ExpRelaxedOneHotCategorical samples, which are always\n followed by a `tf.exp` op, is equivalent to evaluating KL divergences of\n RelaxedOneHotCategorical samples. 
See the appendix of Maddison et al., 2016\n for more mathematical details, where this distribution is called the\n ExpConcrete.\n\n #### Examples\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution. If those samples\n are followed by a `tf.exp` op, then they are distributed as a relaxed onehot\n categorical.\n\n ```python\n temperature = 0.5\n p = [0.1, 0.5, 0.4]\n dist = ExpRelaxedOneHotCategorical(temperature, probs=p)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution.\n\n ```python\n temperature = 0.5\n logits = [-2, 2, 0]\n dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. Because the temperature is very low, samples from\n this distribution are almost discrete, with one component almost 0 and the\n others very negative. The 2nd class is the most likely to be the largest\n component in samples drawn from this distribution.\n\n ```python\n temperature = 1e-5\n logits = [-2, 2, 0]\n dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. Because the temperature is very high, samples from\n this distribution are usually close to the (-log(3), -log(3), -log(3)) vector.\n The 2nd class is still the most likely to be the largest component\n in samples drawn from this distribution.\n\n ```python\n temperature = 10\n logits = [-2, 2, 0]\n dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:\n A Continuous Relaxation of Discrete Random Variables. 2016.\n \"\"\"\n\n def __init__(\n self,\n temperature,\n logits=None,\n probs=None,\n validate_args=False,\n allow_nan_stats=True,\n name='ExpRelaxedOneHotCategorical'):\n \"\"\"Initialize ExpRelaxedOneHotCategorical using class log-probabilities.\n\n Args:\n temperature: A `Tensor`, representing the temperature of one or more\n distributions. The temperature values must be positive, and the shape\n must broadcast against `(logits or probs)[..., 0]`.\n logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities\n of one or many distributions. The first `N - 1` dimensions index into a\n batch of independent distributions and the last dimension represents a\n vector of logits for each class. 
Only one of `logits` or `probs` should\n be passed in.\n probs: An N-D `Tensor`, `N >= 1`, representing the probabilities\n of one or many distributions. The first `N - 1` dimensions index into a\n batch of independent distributions and the last dimension represents a\n vector of probabilities for each class. Only one of `logits` or `probs`\n should be passed in.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([logits, probs, temperature], tf.float32)\n self._temperature = tensor_util.convert_nonref_to_tensor(\n temperature, dtype_hint=dtype, name='temperature')\n self._logits = tensor_util.convert_nonref_to_tensor(\n logits, dtype_hint=dtype, name='logits')\n self._probs = tensor_util.convert_nonref_to_tensor(\n probs, dtype_hint=dtype, name='probs')\n if (self._probs is None) == (self._logits is None):\n raise ValueError('Must pass `probs` or `logits`, but not both.')\n\n super(ExpRelaxedOneHotCategorical, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n temperature=parameter_properties.ParameterProperties(\n shape_fn=lambda sample_shape: sample_shape[:-1],\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),\n logits=parameter_properties.ParameterProperties(event_ndims=1),\n probs=parameter_properties.ParameterProperties(\n event_ndims=1,\n default_constraining_bijector_fn=softmax_centered_bijector\n .SoftmaxCentered,\n is_preferred=False))\n # pylint: enable=g-long-lambda\n\n @property\n @deprecation.deprecated(\n '2019-10-01', 'The `event_size` property is deprecated. 
Use '\n '`tf.shape(self.probs if self.logits is None else self.logits)[-1]` '\n 'instead.')\n def event_size(self):\n \"\"\"Scalar `int32` tensor: the number of classes.\"\"\"\n return self._event_size()\n\n def _event_size(self, logits=None):\n param = logits\n if param is None:\n param = self._logits if self._logits is not None else self._probs\n if param.shape is not None:\n event_size = tf.compat.dimension_value(param.shape[-1])\n if event_size is not None:\n return event_size\n return tf.shape(param)[-1]\n\n @property\n def temperature(self):\n \"\"\"Batchwise temperature tensor of a RelaxedCategorical.\"\"\"\n return self._temperature\n\n @property\n def logits(self):\n \"\"\"Input argument `logits`.\"\"\"\n return self._logits\n\n @property\n def probs(self):\n \"\"\"Input argument `probs`.\"\"\"\n return self._probs\n\n def _event_shape_tensor(self, logits=None):\n param = logits\n if param is None:\n param = self._logits if self._logits is not None else self._probs\n return ps.shape(param)[-1:]\n\n def _event_shape(self):\n param = self._logits if self._logits is not None else self._probs\n return tensorshape_util.with_rank(param.shape[-1:], rank=1)\n\n def _sample_n(self, n, seed=None):\n temperature = tf.convert_to_tensor(self.temperature)\n logits = self._logits_parameter_no_checks()\n\n # Uniform variates must be sampled from the open-interval `(0, 1)` rather\n # than `[0, 1)`. To do so, we use\n # `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny` because it is the\n # smallest, positive, 'normal' number. A 'normal' number is such that the\n # mantissa has an implicit leading 1. Normal, positive numbers x, y have the\n # reasonable property that, `x + y >= max(x, y)`. In this case, a subnormal\n # number (i.e., np.nextafter) can cause us to sample 0.\n uniform_shape = ps.concat(\n [[n],\n self._batch_shape_tensor(temperature=temperature, logits=logits),\n self._event_shape_tensor(logits=logits)], 0)\n uniform = samplers.uniform(\n shape=uniform_shape,\n minval=np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny,\n maxval=1.,\n dtype=self.dtype,\n seed=seed)\n gumbel = -tf.math.log(-tf.math.log(uniform))\n noisy_logits = (gumbel + logits) / temperature[..., tf.newaxis]\n return tf.math.log_softmax(noisy_logits)\n\n def _log_prob(self, x):\n temperature = tf.convert_to_tensor(self.temperature)\n logits = self._logits_parameter_no_checks()\n\n # broadcast logits or x if need be.\n if (not tensorshape_util.is_fully_defined(x.shape) or\n not tensorshape_util.is_fully_defined(logits.shape) or\n x.shape != logits.shape):\n logits = tf.ones_like(x, dtype=logits.dtype) * logits\n x = tf.ones_like(logits, dtype=x.dtype) * x\n # compute the normalization constant\n k = tf.cast(self._event_size(logits), x.dtype)\n log_norm_const = (\n tf.math.lgamma(k) + (k - 1.) 
* tf.math.log(temperature))\n # compute the unnormalized density\n log_softmax = tf.math.log_softmax(logits - x * temperature[..., tf.newaxis])\n log_unnorm_prob = tf.reduce_sum(log_softmax, axis=[-1], keepdims=False)\n # combine unnormalized density with normalization constant\n return log_norm_const + log_unnorm_prob\n\n def logits_parameter(self, name=None):\n \"\"\"Logits vec computed from non-`None` input arg (`probs` or `logits`).\"\"\"\n with self._name_and_control_scope(name or 'logits_parameter'):\n return self._logits_parameter_no_checks()\n\n def _logits_parameter_no_checks(self):\n if self._logits is None:\n return tf.math.log(self._probs)\n return tensor_util.identity_as_tensor(self._logits)\n\n def probs_parameter(self, name=None):\n \"\"\"Probs vec computed from non-`None` input arg (`probs` or `logits`).\"\"\"\n with self._name_and_control_scope(name or 'probs_parameter'):\n return self._probs_parameter_no_checks()\n\n def _probs_parameter_no_checks(self):\n if self._logits is None:\n return tensor_util.identity_as_tensor(self._probs)\n return tf.math.softmax(self._logits)\n\n def _sample_control_dependencies(self, x):\n assertions = []\n if not self.validate_args:\n return assertions\n assertions.append(assert_util.assert_non_positive(\n x,\n message=('Samples must be less than or equal to `0` for '\n '`ExpRelaxedOneHotCategorical` or `1` for '\n '`RelaxedOneHotCategorical`.')))\n assertions.append(assert_util.assert_near(\n tf.zeros([], dtype=self.dtype), tf.reduce_logsumexp(x, axis=[-1]),\n message=('Final dimension of samples must sum to `0` for ''.'\n '`ExpRelaxedOneHotCategorical` or `1` '\n 'for `RelaxedOneHotCategorical`.')))\n return assertions\n\n def _parameter_control_dependencies(self, is_init):\n assertions = []\n\n logits = self._logits\n probs = self._probs\n param, name = (probs, 'probs') if logits is None else (logits, 'logits')\n\n # In init, we can always build shape and dtype checks because\n # we assume shape doesn't change for Variable backed args.\n if is_init:\n if not dtype_util.is_floating(param.dtype):\n raise TypeError('Argument `{}` must having floating type.'.format(name))\n\n msg = 'Argument `{}` must have rank at least 1.'.format(name)\n shape_static = tensorshape_util.dims(param.shape)\n if shape_static is not None:\n if len(shape_static) < 1:\n raise ValueError(msg)\n elif self.validate_args:\n param = tf.convert_to_tensor(param)\n assertions.append(\n assert_util.assert_rank_at_least(param, 1, message=msg))\n\n msg1 = 'Argument `{}` must have final dimension >= 1.'.format(name)\n msg2 = 'Argument `{}` must have final dimension <= {}.'.format(\n name, dtype_util.max(tf.int32))\n event_size = shape_static[-1] if shape_static is not None else None\n if event_size is not None:\n if event_size < 1:\n raise ValueError(msg1)\n if event_size > dtype_util.max(tf.int32):\n raise ValueError(msg2)\n elif self.validate_args:\n param = tf.convert_to_tensor(param)\n assertions.append(assert_util.assert_greater_equal(\n tf.shape(param)[-1:], 1, message=msg1))\n # NOTE: For now, we leave out a runtime assertion that\n # `tf.shape(param)[-1] <= tf.int32.max`. 
An earlier `tf.shape` call\n # will fail before we get to this point.\n\n if not self.validate_args:\n assert not assertions # Should never happen.\n return []\n\n if is_init != tensor_util.is_ref(self.temperature):\n assertions.append(assert_util.assert_positive(self.temperature))\n\n if probs is not None:\n probs = param # reuse tensor conversion from above\n if is_init != tensor_util.is_ref(probs):\n probs = tf.convert_to_tensor(probs)\n one = tf.ones([], dtype=probs.dtype)\n assertions.extend([\n assert_util.assert_non_negative(probs),\n assert_util.assert_less_equal(probs, one),\n assert_util.assert_near(\n tf.reduce_sum(probs, axis=-1), one,\n message='Argument `probs` must sum to 1.'),\n ])\n\n return assertions\n\n def _default_event_space_bijector(self):\n # TODO(b/145620027) Finalize choice of bijector.\n return chain_bijector.Chain([\n exp_bijector.Log(validate_args=self.validate_args),\n softmax_centered_bijector.SoftmaxCentered(\n validate_args=self.validate_args),\n ], validate_args=self.validate_args)\n\n\nclass RelaxedOneHotCategorical(\n transformed_distribution.TransformedDistribution,\n distribution.AutoCompositeTensorDistribution):\n \"\"\"RelaxedOneHotCategorical distribution with temperature and logits.\n\n The RelaxedOneHotCategorical is a distribution over random probability\n vectors, vectors of positive real values that sum to one, which continuously\n approximates a OneHotCategorical. The degree of approximation is controlled by\n a temperature: as the temperature goes to 0 the RelaxedOneHotCategorical\n becomes discrete with a distribution described by the `logits` or `probs`\n parameters, as the temperature goes to infinity the RelaxedOneHotCategorical\n becomes the constant distribution that is identically the constant vector of\n (1/event_size, ..., 1/event_size).\n\n The RelaxedOneHotCategorical distribution was concurrently introduced as the\n Gumbel-Softmax (Jang et al., 2016) and Concrete (Maddison et al., 2016)\n distributions for use as a reparameterized continuous approximation to the\n `Categorical` one-hot distribution. If you use this distribution, please cite\n both papers.\n\n #### Examples\n\n Creates a continuous distribution, which approximates a 3-class one-hot\n categorical distribution. The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution.\n\n ```python\n temperature = 0.5\n p = [0.1, 0.5, 0.4]\n dist = RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, which approximates a 3-class one-hot\n categorical distribution. The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution.\n\n ```python\n temperature = 0.5\n logits = [-2, 2, 0]\n dist = RelaxedOneHotCategorical(temperature, logits=logits)\n ```\n\n Creates a continuous distribution, which approximates a 3-class one-hot\n categorical distribution. Because the temperature is very low, samples from\n this distribution are almost discrete, with one component almost 1 and the\n others nearly 0. The 2nd class is the most likely to be the largest component\n in samples drawn from this distribution.\n\n ```python\n temperature = 1e-5\n logits = [-2, 2, 0]\n dist = RelaxedOneHotCategorical(temperature, logits=logits)\n ```\n\n Creates a continuous distribution, which approximates a 3-class one-hot\n categorical distribution. Because the temperature is very high, samples from\n this distribution are usually close to the (1/3, 1/3, 1/3) vector. 
The 2nd\n class is still the most likely to be the largest component\n in samples drawn from this distribution.\n\n ```python\n temperature = 10\n logits = [-2, 2, 0]\n dist = RelaxedOneHotCategorical(temperature, logits=logits)\n ```\n\n Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with\n Gumbel-Softmax. 2016.\n\n Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:\n A Continuous Relaxation of Discrete Random Variables. 2016.\n \"\"\"\n\n def __init__(\n self,\n temperature,\n logits=None,\n probs=None,\n validate_args=False,\n allow_nan_stats=True,\n name='RelaxedOneHotCategorical'):\n \"\"\"Initialize RelaxedOneHotCategorical using class log-probabilities.\n\n Args:\n temperature: An 0-D `Tensor`, representing the temperature\n of a set of RelaxedOneHotCategorical distributions. The temperature\n should be positive.\n logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities\n of a set of RelaxedOneHotCategorical distributions. The first\n `N - 1` dimensions index into a batch of independent distributions and\n the last dimension represents a vector of logits for each class. Only\n one of `logits` or `probs` should be passed in.\n probs: An N-D `Tensor`, `N >= 1`, representing the probabilities\n of a set of RelaxedOneHotCategorical distributions. The first `N - 1`\n dimensions index into a batch of independent distributions and the last\n dimension represents a vector of probabilities for each class. Only one\n of `logits` or `probs` should be passed in.\n validate_args: Unused in this distribution.\n allow_nan_stats: Python `bool`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution (optional).\n \"\"\"\n parameters = dict(locals())\n dist = ExpRelaxedOneHotCategorical(temperature,\n logits=logits,\n probs=probs,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n\n super(RelaxedOneHotCategorical, self).__init__(dist,\n exp_bijector.Exp(),\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n temperature=parameter_properties.ParameterProperties(\n shape_fn=lambda sample_shape: sample_shape[:-1],\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),\n logits=parameter_properties.ParameterProperties(event_ndims=1),\n probs=parameter_properties.ParameterProperties(\n event_ndims=1,\n default_constraining_bijector_fn=softmax_centered_bijector\n .SoftmaxCentered,\n is_preferred=False))\n # pylint: enable=g-long-lambda\n\n @property\n def temperature(self):\n \"\"\"Batchwise temperature tensor of a RelaxedCategorical.\"\"\"\n return self.distribution.temperature\n\n @property\n @deprecation.deprecated(\n '2019-10-01', 'The `event_size` property is deprecated. 
Use '\n '`tf.shape(self.probs if self.logits is None else self.logits)[-1]` '\n 'instead.')\n def event_size(self):\n \"\"\"Scalar `int32` tensor: the number of classes.\"\"\"\n return self.distribution.event_size\n\n @property\n def probs(self):\n \"\"\"Input argument `probs`.\"\"\"\n return self.distribution.probs\n\n @property\n def logits(self):\n \"\"\"Input argument `logits`.\"\"\"\n return self.distribution.logits\n\n experimental_is_sharded = False\n\n def logits_parameter(self, name=None):\n \"\"\"Logits vec computed from non-`None` input arg (`probs` or `logits`).\"\"\"\n return self.distribution.logits_parameter(name)\n\n def probs_parameter(self, name=None):\n \"\"\"Probs vec computed from non-`None` input arg (`probs` or `logits`).\"\"\"\n return self.distribution.probs_parameter(name)\n\n def _default_event_space_bijector(self):\n return softmax_centered_bijector.SoftmaxCentered(\n validate_args=self.validate_args)\n",
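As a quick illustration of the relationship described in the docstrings above, here is a minimal sketch using the public `tfp.distributions` aliases; the seed, sample counts, and temperatures are arbitrary choices for the example, not values taken from this file.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
logits = [-2., 2., 0.]

# ExpRelaxedOneHotCategorical samples live in log-space; exponentiating them
# yields samples distributed as RelaxedOneHotCategorical with the same
# temperature and logits.
exp_dist = tfd.ExpRelaxedOneHotCategorical(temperature=0.5, logits=logits)
x = exp_dist.sample(seed=42)
y = tf.exp(x)  # a point on the probability simplex

# Sampling RelaxedOneHotCategorical directly: at moderate temperature the
# draws are "soft" probability vectors; at very low temperature they approach
# one-hot vectors while remaining reparameterizable.
soft = tfd.RelaxedOneHotCategorical(temperature=0.5, logits=logits).sample(5, seed=42)
hard_ish = tfd.RelaxedOneHotCategorical(temperature=1e-3, logits=logits).sample(5, seed=42)
# tf.reduce_sum(soft, axis=-1) is approximately 1 for every row.
```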
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for PotentialScaleReductionReducer.\"\"\"\n\n# Dependency imports\n\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.experimental.mcmc.internal import test_fixtures\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass PotentialScaleReductionReducerTest(test_util.TestCase):\n\n def test_int_samples(self):\n rhat_reducer = tfp.experimental.mcmc.PotentialScaleReductionReducer(\n independent_chain_ndims=1)\n state = rhat_reducer.initialize(tf.zeros((5, 3), dtype=tf.int64))\n chain_state = np.arange(60).reshape((4, 5, 3))\n for sample in chain_state:\n state = rhat_reducer.one_step(sample, state)\n rhat = rhat_reducer.finalize(state)\n true_rhat = tfp.mcmc.potential_scale_reduction(\n chains_states=chain_state,\n independent_chain_ndims=1)\n self.assertEqual(tf.float64, rhat.dtype)\n rhat, true_rhat = self.evaluate([rhat, true_rhat])\n self.assertAllClose(true_rhat, rhat, rtol=1e-6)\n\n def test_iid_normal_passes(self):\n n_samples = 500\n # five scalar chains taken from iid Normal(0, 1)\n rng = test_util.test_np_rng()\n iid_normal_samples = rng.randn(n_samples, 5)\n rhat_reducer = tfp.experimental.mcmc.PotentialScaleReductionReducer(\n independent_chain_ndims=1)\n rhat = self.evaluate(test_fixtures.reduce(rhat_reducer, iid_normal_samples))\n self.assertAllEqual((), rhat.shape)\n self.assertAllClose(1., rhat, rtol=0.02)\n\n def test_offset_normal_fails(self):\n n_samples = 500\n # three 4-variate chains taken from Normal(0, 1) that have been\n # shifted. 
Since every chain is shifted, they are not the same, and the\n # test should fail.\n offset = np.array([1., -1., 2.]).reshape(3, 1)\n rng = test_util.test_np_rng()\n offset_samples = rng.randn(n_samples, 3, 4) + offset\n rhat_reducer = tfp.experimental.mcmc.PotentialScaleReductionReducer(\n independent_chain_ndims=1)\n rhat = self.evaluate(test_fixtures.reduce(rhat_reducer, offset_samples))\n self.assertAllEqual((4,), rhat.shape)\n self.assertAllGreater(rhat, 1.2)\n\n def test_with_hmc(self):\n target_dist = tfp.distributions.Normal(loc=0., scale=1.)\n hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_dist.log_prob,\n num_leapfrog_steps=27,\n step_size=0.33)\n reduced_stats, _, _ = tfp.experimental.mcmc.sample_fold(\n num_steps=50,\n current_state=tf.zeros((2,)),\n kernel=hmc_kernel,\n reducer=[\n tfp.experimental.mcmc.TracingReducer(),\n tfp.experimental.mcmc.PotentialScaleReductionReducer()\n ])\n rhat = reduced_stats[1]\n true_rhat = tfp.mcmc.potential_scale_reduction(\n chains_states=reduced_stats[0][0],\n independent_chain_ndims=1)\n true_rhat, rhat = self.evaluate([true_rhat, rhat])\n self.assertAllClose(true_rhat, rhat, rtol=1e-6)\n\n def test_multiple_latent_states_and_independent_chain_ndims(self):\n rng = test_util.test_np_rng()\n rhat_reducer = tfp.experimental.mcmc.PotentialScaleReductionReducer(\n independent_chain_ndims=2)\n state = rhat_reducer.initialize([tf.zeros((2, 5, 3)), tf.zeros((7, 2, 8))])\n chain_state = rng.randn(4, 2, 5, 3)\n second_chain_state = rng.randn(4, 7, 2, 8)\n for latent in zip(chain_state, second_chain_state):\n state = rhat_reducer.one_step(latent, state)\n rhat = rhat_reducer.finalize(state)\n true_rhat = tfp.mcmc.potential_scale_reduction(\n chains_states=[chain_state, second_chain_state],\n independent_chain_ndims=2)\n rhat, true_rhat = self.evaluate([rhat, true_rhat])\n self.assertAllClose(true_rhat, rhat, rtol=1e-6)\n\n\nif __name__ == '__main__':\n test_util.main()\n",
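For reference, here is a minimal sketch of the one-shot diagnostic that these streaming-reducer tests compare against; the chain shapes and the ~1.1-1.2 threshold mentioned in the comment are illustrative conventions, not values taken from the tests above.

```python
import numpy as np
import tensorflow_probability as tfp

# Four independent scalar chains, 1000 samples each: shape [n_samples, n_chains].
rng = np.random.default_rng(0)
chains = rng.normal(size=(1000, 4)).astype(np.float32)

rhat = tfp.mcmc.potential_scale_reduction(
    chains_states=chains, independent_chain_ndims=1)
# Well-mixed iid chains give rhat close to 1; values noticeably above ~1.1-1.2
# are commonly read as a sign the chains have not converged to the same
# distribution, which is what the "offset normal" test above exercises.
```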
"# Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.\n# DO NOT MODIFY DIRECTLY.\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# pylint: disable=g-import-not-at-top\n# pylint: disable=g-direct-tensorflow-import\n# pylint: disable=g-bad-import-order\n# pylint: disable=unused-import\n# pylint: disable=line-too-long\n# pylint: disable=reimported\n# pylint: disable=g-bool-id-comparison\n# pylint: disable=g-statement-before-imports\n# pylint: disable=bad-continuation\n# pylint: disable=useless-import-alias\n# pylint: disable=property-with-parameters\n# pylint: disable=trailing-whitespace\n\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base class for linear operators.\"\"\"\n\nimport abc\nimport contextlib\n\nimport numpy as np\nimport six\n\nfrom tensorflow_probability.python.internal.backend.numpy import composite_tensor\nfrom tensorflow_probability.python.internal.backend.numpy import dtype as dtypes\nfrom tensorflow_probability.python.internal.backend.numpy import ops\nfrom tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape\nfrom tensorflow_probability.python.internal.backend.numpy import tensor_spec\n# from tensorflow.python.framework import tensor_util\nfrom tensorflow_probability.python.internal.backend.numpy import type_spec\nfrom tensorflow_probability.python.internal.backend.numpy import ops as module\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops\nfrom tensorflow_probability.python.internal.backend.numpy import debugging as check_ops\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg_ops\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops\nfrom tensorflow_probability.python.internal.backend.numpy import resource_variable_ops\nfrom tensorflow_probability.python.internal.backend.numpy import variables\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg\nfrom tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_algebra\nfrom tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util\nfrom absl import logging as logging\nfrom tensorflow_probability.python.internal.backend.numpy import data_structures\nfrom tensorflow_probability.python.internal.backend.numpy import deprecation\n# from tensorflow_probability.python.internal.backend.numpy import dispatch\nfrom tensorflow_probability.python.internal.backend.numpy import nest\n# from tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\"LinearOperator\"]\n\n\n# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.\n# 
@tf_export(\"linalg.LinearOperator\")\[email protected]_metaclass(abc.ABCMeta)\nclass LinearOperator(module.Module, composite_tensor.CompositeTensor):\n \"\"\"Base class defining a [batch of] linear operator[s].\n\n Subclasses of `LinearOperator` provide access to common methods on a\n (batch) matrix, without the need to materialize the matrix. This allows:\n\n * Matrix free computations\n * Operators that take advantage of special structure, while providing a\n consistent API to users.\n\n #### Subclassing\n\n To enable a public method, subclasses should implement the leading-underscore\n version of the method. The argument signature should be identical except for\n the omission of `name=\"...\"`. For example, to enable\n `matmul(x, adjoint=False, name=\"matmul\")` a subclass should implement\n `_matmul(x, adjoint=False)`.\n\n #### Performance contract\n\n Subclasses should only implement the assert methods\n (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`\n time.\n\n Class docstrings should contain an explanation of computational complexity.\n Since this is a high-performance library, attention should be paid to detail,\n and explanations can include constants as well as Big-O notation.\n\n #### Shape compatibility\n\n `LinearOperator` subclasses should operate on a [batch] matrix with\n compatible shape. Class docstrings should define what is meant by compatible\n shape. Some subclasses may not support batching.\n\n Examples:\n\n `x` is a batch matrix with compatible shape for `matmul` if\n\n ```\n tensor_shape.TensorShape(operator.shape) = [B1,...,Bb] + [M, N], b >= 0,\n tensor_shape.TensorShape(x.shape) = [B1,...,Bb] + [N, R]\n ```\n\n `rhs` is a batch matrix with compatible shape for `solve` if\n\n ```\n tensor_shape.TensorShape(operator.shape) = [B1,...,Bb] + [M, N], b >= 0,\n tensor_shape.TensorShape(rhs.shape) = [B1,...,Bb] + [M, R]\n ```\n\n #### Example docstring for subclasses.\n\n This operator acts like a (batch) matrix `A` with shape\n `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `m x n` matrix. Again, this matrix `A` may not be materialized, but for\n purposes of identifying and working with compatible arguments the shape is\n relevant.\n\n Examples:\n\n ```python\n some_tensor = ... shape = ????\n operator = MyLinOp(some_tensor)\n\n operator.shape()\n ==> [2, 4, 4]\n\n operator.log_abs_determinant()\n ==> Shape [2] Tensor\n\n x = ... Shape [2, 4, 5] Tensor\n\n operator.matmul(x)\n ==> Shape [2, 4, 5] Tensor\n ```\n\n #### Shape compatibility\n\n This operator acts on batch matrices with compatible shape.\n FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE\n\n #### Performance\n\n FILL THIS IN\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. 
For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n\n #### Initialization parameters\n\n All subclasses of `LinearOperator` are expected to pass a `parameters`\n argument to `super().__init__()`. This should be a `dict` containing\n the unadulterated arguments passed to the subclass `__init__`. For example,\n `MyLinearOperator` with an initializer should look like:\n\n ```python\n def __init__(self, operator, is_square=False, name=None):\n parameters = dict(\n operator=operator,\n is_square=is_square,\n name=name\n )\n ...\n super().__init__(..., parameters=parameters)\n ```\n\n Users can then access `my_linear_operator.parameters` to see all arguments\n passed to its initializer.\n \"\"\"\n\n # TODO(b/143910018) Remove graph_parents in V3.\n @deprecation.deprecated_args(None, \"Do not pass `graph_parents`. They will \"\n \" no longer be used.\", \"graph_parents\")\n def __init__(self,\n dtype,\n graph_parents=None,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=None,\n parameters=None):\n r\"\"\"Initialize the `LinearOperator`.\n\n **This is a private method for subclass use.**\n **Subclasses should copy-paste this `__init__` documentation.**\n\n Args:\n dtype: The type of the this `LinearOperator`. Arguments to `matmul` and\n `solve` will have to be this type.\n graph_parents: (Deprecated) Python list of graph prerequisites of this\n `LinearOperator` Typically tensors that are passed during initialization\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. If `dtype` is real, this is equivalent to being symmetric.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n parameters: Python `dict` of parameters used to instantiate this\n `LinearOperator`.\n\n Raises:\n ValueError: If any member of graph_parents is `None` or not a `Tensor`.\n ValueError: If hints are set incorrectly.\n \"\"\"\n # Check and auto-set flags.\n if is_positive_definite:\n if is_non_singular is False:\n raise ValueError(\"A positive definite matrix is always non-singular.\")\n is_non_singular = True\n\n if is_non_singular:\n if is_square is False:\n raise ValueError(\"A non-singular matrix is always square.\")\n is_square = True\n\n if is_self_adjoint:\n if is_square is False:\n raise ValueError(\"A self-adjoint matrix is always square.\")\n is_square = True\n\n self._is_square_set_or_implied_by_hints = is_square\n\n if graph_parents is not None:\n self._set_graph_parents(graph_parents)\n else:\n self._graph_parents = []\n self._dtype = dtypes.as_dtype(dtype) if dtype else dtype\n self._is_non_singular = is_non_singular\n self._is_self_adjoint = is_self_adjoint\n self._is_positive_definite = is_positive_definite\n self._parameters = self._no_dependency(parameters)\n self._parameters_sanitized = False\n self._name = name or type(self).__name__\n\n @contextlib.contextmanager\n def _name_scope(self, name=None): # pylint: disable=method-hidden\n \"\"\"Helper function to standardize op scope.\"\"\"\n full_name = self.name\n if name is not None:\n full_name = full_name + \"/\" + name\n with ops.name_scope(full_name) as scope:\n yield scope\n\n @property\n def parameters(self):\n \"\"\"Dictionary of parameters used to instantiate this `LinearOperator`.\"\"\"\n return dict(self._parameters)\n\n @property\n def dtype(self):\n \"\"\"The `DType` of `Tensor`s handled by this `LinearOperator`.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Name prepended to all ops created by this `LinearOperator`.\"\"\"\n return self._name\n\n @property\n @deprecation.deprecated(None, \"Do not call `graph_parents`.\")\n def graph_parents(self):\n \"\"\"List of graph dependencies of this `LinearOperator`.\"\"\"\n return self._graph_parents\n\n @property\n def is_non_singular(self):\n return self._is_non_singular\n\n @property\n def is_self_adjoint(self):\n return self._is_self_adjoint\n\n @property\n def is_positive_definite(self):\n return self._is_positive_definite\n\n @property\n def is_square(self):\n \"\"\"Return `True/False` depending on if this operator is square.\"\"\"\n # Static checks done after __init__. Why? 
Because domain/range dimension\n # sometimes requires lots of work done in the derived class after init.\n auto_square_check = self.domain_dimension == self.range_dimension\n if self._is_square_set_or_implied_by_hints is False and auto_square_check:\n raise ValueError(\n \"User set is_square hint to False, but the operator was square.\")\n if self._is_square_set_or_implied_by_hints is None:\n return auto_square_check\n\n return self._is_square_set_or_implied_by_hints\n\n @abc.abstractmethod\n def _shape(self):\n # Write this in derived class to enable all static shape methods.\n raise NotImplementedError(\"_shape is not implemented.\")\n\n @property\n def shape(self):\n \"\"\"`TensorShape` of this `LinearOperator`.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns\n `TensorShape([B1,...,Bb, M, N])`, equivalent to `tensor_shape.TensorShape(A.shape)`.\n\n Returns:\n `TensorShape`, statically determined, may be undefined.\n \"\"\"\n return self._shape()\n\n def _shape_tensor(self):\n # This is not an abstractmethod, since we want derived classes to be able to\n # override this with optional kwargs, which can reduce the number of\n # `convert_to_tensor` calls. See derived classes for examples.\n raise NotImplementedError(\"_shape_tensor is not implemented.\")\n\n def shape_tensor(self, name=\"shape_tensor\"):\n \"\"\"Shape of this `LinearOperator`, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n # Prefer to use statically defined shape if available.\n if tensor_shape.TensorShape(self.shape).is_fully_defined():\n return linear_operator_util.shape_tensor(tensor_shape.TensorShape(self.shape).as_list())\n else:\n return self._shape_tensor()\n\n @property\n def batch_shape(self):\n \"\"\"`TensorShape` of batch dimensions of this `LinearOperator`.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns\n `TensorShape([B1,...,Bb])`, equivalent to `tensor_shape.TensorShape(A.shape)[:-2]`\n\n Returns:\n `TensorShape`, statically determined, may be undefined.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n return tensor_shape.TensorShape(self.shape)[:-2]\n\n def batch_shape_tensor(self, name=\"batch_shape_tensor\"):\n \"\"\"Shape of batch dimensions of this operator, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb]`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name): # pylint: disable=not-callable\n return self._batch_shape_tensor()\n\n def _batch_shape_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. 
without excessive Tensor conversions.\n if self.batch_shape.is_fully_defined():\n return linear_operator_util.shape_tensor(\n self.batch_shape.as_list(), name=\"batch_shape\")\n else:\n shape = self.shape_tensor() if shape is None else shape\n return shape[:-2]\n\n @property\n def tensor_rank(self, name=\"tensor_rank\"):\n \"\"\"Rank (in the sense of tensors) of matrix corresponding to this operator.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Python integer, or None if the tensor rank is undefined.\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name): # pylint: disable=not-callable\n return tensor_shape.TensorShape(self.shape).ndims\n\n def tensor_rank_tensor(self, name=\"tensor_rank_tensor\"):\n \"\"\"Rank (in the sense of tensors) of matrix corresponding to this operator.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`, determined at runtime.\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name): # pylint: disable=not-callable\n return self._tensor_rank_tensor()\n\n def _tensor_rank_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. without excessive Tensor conversions.\n if self.tensor_rank is not None:\n return ops.convert_to_tensor(self.tensor_rank)\n else:\n shape = self.shape_tensor() if shape is None else shape\n return array_ops.size(shape)\n\n @property\n def domain_dimension(self):\n \"\"\"Dimension (in the sense of vector spaces) of the domain of this operator.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns `N`.\n\n Returns:\n `Dimension` object.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n if tensor_shape.TensorShape(self.shape).rank is None:\n return tensor_shape.Dimension(None)\n else:\n return tensor_shape.TensorShape(self.shape).dims[-1]\n\n def domain_dimension_tensor(self, name=\"domain_dimension_tensor\"):\n \"\"\"Dimension (in the sense of vector spaces) of the domain of this operator.\n\n Determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns `N`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name): # pylint: disable=not-callable\n return self._domain_dimension_tensor()\n\n def _domain_dimension_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. 
without excessive Tensor conversions.\n dim_value = tensor_shape.dimension_value(self.domain_dimension)\n if dim_value is not None:\n return np.array(dim_value, np.int32)\n else:\n shape = self.shape_tensor() if shape is None else shape\n return shape[-1]\n\n @property\n def range_dimension(self):\n \"\"\"Dimension (in the sense of vector spaces) of the range of this operator.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns `M`.\n\n Returns:\n `Dimension` object.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n if tensor_shape.TensorShape(self.shape).dims:\n return tensor_shape.TensorShape(self.shape).dims[-2]\n else:\n return tensor_shape.Dimension(None)\n\n def range_dimension_tensor(self, name=\"range_dimension_tensor\"):\n \"\"\"Dimension (in the sense of vector spaces) of the range of this operator.\n\n Determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `tensor_shape.TensorShape(A.shape) = [B1,...,Bb, M, N]`, then this returns `M`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name): # pylint: disable=not-callable\n return self._range_dimension_tensor()\n\n def _range_dimension_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. without excessive Tensor conversions.\n dim_value = tensor_shape.dimension_value(self.range_dimension)\n if dim_value is not None:\n return np.array(dim_value, np.int32)\n else:\n shape = self.shape_tensor() if shape is None else shape\n return shape[-2]\n\n def _assert_non_singular(self):\n \"\"\"Private default implementation of _assert_non_singular.\"\"\"\n logging.warn(\n \"Using (possibly slow) default implementation of assert_non_singular.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n return self.assert_positive_definite()\n else:\n singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)\n # TODO(langmore) Add .eig and .cond as methods.\n cond = (math_ops.reduce_max(singular_values, axis=-1) /\n math_ops.reduce_min(singular_values, axis=-1))\n return check_ops.assert_less(\n cond,\n self._max_condition_number_to_be_non_singular(),\n message=\"Singular matrix up to precision epsilon.\")\n\n def _max_condition_number_to_be_non_singular(self):\n \"\"\"Return the maximum condition number that we consider nonsingular.\"\"\"\n with ops.name_scope(\"max_nonsingular_condition_number\"):\n dtype_eps = np.finfo(self.dtype).eps\n eps = _ops.cast(\n math_ops.reduce_max([\n 100.,\n _ops.cast(self.range_dimension_tensor(), self.dtype),\n _ops.cast(self.domain_dimension_tensor(), self.dtype)\n ]), self.dtype) * dtype_eps\n return 1. 
/ eps\n\n def assert_non_singular(self, name=\"assert_non_singular\"):\n \"\"\"Returns an `Op` that asserts this operator is non singular.\n\n This operator is considered non-singular if\n\n ```\n ConditionNumber < max{100, range_dimension, domain_dimension} * eps,\n eps := np.finfo(self.dtype).eps\n ```\n\n Args:\n name: A string name to prepend to created ops.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is singular.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._assert_non_singular()\n\n def _assert_positive_definite(self):\n \"\"\"Default implementation of _assert_positive_definite.\"\"\"\n logging.warn(\n \"Using (possibly slow) default implementation of \"\n \"assert_positive_definite.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n # If the operator is self-adjoint, then checking that\n # Cholesky decomposition succeeds + results in positive diag is necessary\n # and sufficient.\n if self.is_self_adjoint:\n return check_ops.assert_positive(\n _linalg.diag_part(linalg_ops.cholesky(self.to_dense())),\n message=\"Matrix was not positive definite.\")\n # We have no generic check for positive definite.\n raise NotImplementedError(\"assert_positive_definite is not implemented.\")\n\n def assert_positive_definite(self, name=\"assert_positive_definite\"):\n \"\"\"Returns an `Op` that asserts this operator is positive definite.\n\n Here, positive definite means that the quadratic form `x^H A x` has positive\n real part for all nonzero `x`. Note that we do not require the operator to\n be self-adjoint to be positive definite.\n\n Args:\n name: A name to give this `Op`.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not positive definite.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._assert_positive_definite()\n\n def _assert_self_adjoint(self):\n dense = self.to_dense()\n logging.warn(\n \"Using (possibly slow) default implementation of assert_self_adjoint.\"\n \" Requires conversion to a dense matrix.\")\n return check_ops.assert_equal(\n dense,\n linalg.adjoint(dense),\n message=\"Matrix was not equal to its adjoint.\")\n\n def assert_self_adjoint(self, name=\"assert_self_adjoint\"):\n \"\"\"Returns an `Op` that asserts this operator is self-adjoint.\n\n Here we check that this operator is *exactly* equal to its hermitian\n transpose.\n\n Args:\n name: A string name to prepend to created ops.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not self-adjoint.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._assert_self_adjoint()\n\n def _check_input_dtype(self, arg):\n \"\"\"Check that arg.dtype == self.dtype.\"\"\"\n if arg.dtype != self.dtype:\n raise TypeError(\n \"Expected argument to have dtype %s. Found: %s in tensor %s\" %\n (self.dtype, arg.dtype, arg))\n\n @abc.abstractmethod\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n raise NotImplementedError(\"_matmul is not implemented.\")\n\n def matmul(self, x, adjoint=False, adjoint_arg=False, name=\"matmul\"):\n \"\"\"Transform [batch] matrix `x` with left multiplication: `x --> Ax`.\n\n ```python\n # Make an operator acting like batch matrix A. Assume tensor_shape.TensorShape(A.shape) = [..., M, N]\n operator = LinearOperator(...)\n tensor_shape.TensorShape(operator.shape) = [..., M, N]\n\n X = ... 
# shape [..., N, R], batch matrix, R > 0.\n\n Y = operator.matmul(X)\n tensor_shape.TensorShape(Y.shape)\n ==> [..., M, R]\n\n Y[..., :, r] = sum_j A[..., :, j] X[j, r]\n ```\n\n Args:\n x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as\n `self`. See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.\n adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is\n the hermitian transpose (transposition and complex conjugation).\n name: A name for this `Op`.\n\n Returns:\n A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`\n as `self`.\n \"\"\"\n if isinstance(x, LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = x.adjoint() if adjoint_arg else x\n\n if (right_operator.range_dimension is not None and\n left_operator.domain_dimension is not None and\n right_operator.range_dimension != left_operator.domain_dimension):\n raise ValueError(\n \"Operators are incompatible. Expected `x` to have dimension\"\n \" {} but got {}.\".format(\n left_operator.domain_dimension, right_operator.range_dimension))\n with self._name_scope(name): # pylint: disable=not-callable\n return linear_operator_algebra.matmul(left_operator, right_operator)\n\n with self._name_scope(name): # pylint: disable=not-callable\n x = ops.convert_to_tensor(x, name=\"x\")\n # self._check_input_dtype(x)\n\n self_dim = -2 if adjoint else -1\n arg_dim = -1 if adjoint_arg else -2\n tensor_shape.dimension_at_index(\n tensor_shape.TensorShape(self.shape), self_dim).assert_is_compatible_with(\n tensor_shape.TensorShape(x.shape)[arg_dim])\n\n return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def __matmul__(self, other):\n return self.matmul(other)\n\n def _matvec(self, x, adjoint=False):\n x_mat = array_ops.expand_dims(x, axis=-1)\n y_mat = self.matmul(x_mat, adjoint=adjoint)\n return array_ops.squeeze(y_mat, axis=-1)\n\n def matvec(self, x, adjoint=False, name=\"matvec\"):\n \"\"\"Transform [batch] vector `x` with left multiplication: `x --> Ax`.\n\n ```python\n # Make an operator acting like batch matrix A. Assume tensor_shape.TensorShape(A.shape) = [..., M, N]\n operator = LinearOperator(...)\n\n X = ... # shape [..., N], batch vector\n\n Y = operator.matvec(X)\n tensor_shape.TensorShape(Y.shape)\n ==> [..., M]\n\n Y[..., :] = sum_j A[..., :, j] X[..., j]\n ```\n\n Args:\n x: `Tensor` with compatible shape and same `dtype` as `self`.\n `x` is treated as a [batch] vector meaning for every set of leading\n dimensions, the last dimension defines a vector.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. 
If `True`, left multiply by the adjoint: `A^H x`.\n name: A name for this `Op`.\n\n Returns:\n A `Tensor` with shape `[..., M]` and same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n x = ops.convert_to_tensor(x, name=\"x\")\n # self._check_input_dtype(x)\n self_dim = -2 if adjoint else -1\n tensor_shape.dimension_at_index(\n tensor_shape.TensorShape(self.shape), self_dim).assert_is_compatible_with(tensor_shape.TensorShape(x.shape)[-1])\n return self._matvec(x, adjoint=adjoint)\n\n def _determinant(self):\n logging.warn(\n \"Using (possibly slow) default implementation of determinant.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n return math_ops.exp(self.log_abs_determinant())\n return _linalg.det(self.to_dense())\n\n def determinant(self, name=\"det\"):\n \"\"\"Determinant for every batch member.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\n Raises:\n NotImplementedError: If `self.is_square` is `False`.\n \"\"\"\n if self.is_square is False:\n raise NotImplementedError(\n \"Determinant not implemented for an operator that is expected to \"\n \"not be square.\")\n with self._name_scope(name): # pylint: disable=not-callable\n return self._determinant()\n\n def _log_abs_determinant(self):\n logging.warn(\n \"Using (possibly slow) default implementation of determinant.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n diag = _linalg.diag_part(linalg_ops.cholesky(self.to_dense()))\n return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])\n _, log_abs_det = linalg.slogdet(self.to_dense())\n return log_abs_det\n\n def log_abs_determinant(self, name=\"log_abs_det\"):\n \"\"\"Log absolute value of determinant for every batch member.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\n Raises:\n NotImplementedError: If `self.is_square` is `False`.\n \"\"\"\n if self.is_square is False:\n raise NotImplementedError(\n \"Determinant not implemented for an operator that is expected to \"\n \"not be square.\")\n with self._name_scope(name): # pylint: disable=not-callable\n return self._log_abs_determinant()\n\n def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):\n \"\"\"Solve by conversion to a dense matrix.\"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Solve is not yet implemented for non-square operators.\")\n rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n if self._can_use_cholesky():\n return linalg_ops.cholesky_solve(\n linalg_ops.cholesky(self.to_dense()), rhs)\n return linear_operator_util.matrix_solve_with_broadcast(\n self.to_dense(), rhs, adjoint=adjoint)\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n \"\"\"Default implementation of _solve.\"\"\"\n logging.warn(\n \"Using (possibly slow) default implementation of solve.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def solve(self, rhs, adjoint=False, adjoint_arg=False, name=\"solve\"):\n \"\"\"Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. 
See class docstring for details.\n\n Examples:\n\n ```python\n # Make an operator acting like batch matrix A. Assume tensor_shape.TensorShape(A.shape) = [..., M, N]\n operator = LinearOperator(...)\n tensor_shape.TensorShape(operator.shape) = [..., M, N]\n\n # Solve R > 0 linear systems for every member of the batch.\n RHS = ... # shape [..., M, R]\n\n X = operator.solve(RHS)\n # X[..., :, r] is the solution to the r'th linear system\n # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]\n\n operator.matmul(X)\n ==> RHS\n ```\n\n Args:\n rhs: `Tensor` with same `dtype` as this operator and compatible shape.\n `rhs` is treated like a [batch] matrix meaning for every set of leading\n dimensions, the last two dimensions defines a matrix.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, solve the system involving the adjoint\n of this `LinearOperator`: `A^H X = rhs`.\n adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`\n is the hermitian transpose (transposition and complex conjugation).\n name: A name scope to use for ops added by this method.\n\n Returns:\n `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.\n\n Raises:\n NotImplementedError: If `self.is_non_singular` or `is_square` is False.\n \"\"\"\n if self.is_non_singular is False:\n raise NotImplementedError(\n \"Exact solve not implemented for an operator that is expected to \"\n \"be singular.\")\n if self.is_square is False:\n raise NotImplementedError(\n \"Exact solve not implemented for an operator that is expected to \"\n \"not be square.\")\n if isinstance(rhs, LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = rhs.adjoint() if adjoint_arg else rhs\n\n if (right_operator.range_dimension is not None and\n left_operator.domain_dimension is not None and\n right_operator.range_dimension != left_operator.domain_dimension):\n raise ValueError(\n \"Operators are incompatible. Expected `rhs` to have dimension\"\n \" {} but got {}.\".format(\n left_operator.domain_dimension, right_operator.range_dimension))\n with self._name_scope(name): # pylint: disable=not-callable\n return linear_operator_algebra.solve(left_operator, right_operator)\n\n with self._name_scope(name): # pylint: disable=not-callable\n rhs = ops.convert_to_tensor(rhs, name=\"rhs\")\n # self._check_input_dtype(rhs)\n\n self_dim = -1 if adjoint else -2\n arg_dim = -1 if adjoint_arg else -2\n tensor_shape.dimension_at_index(\n tensor_shape.TensorShape(self.shape), self_dim).assert_is_compatible_with(\n tensor_shape.TensorShape(rhs.shape)[arg_dim])\n\n return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def _solvevec(self, rhs, adjoint=False):\n \"\"\"Default implementation of _solvevec.\"\"\"\n rhs_mat = array_ops.expand_dims(rhs, axis=-1)\n solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n return array_ops.squeeze(solution_mat, axis=-1)\n\n def solvevec(self, rhs, adjoint=False, name=\"solve\"):\n \"\"\"Solve single equation with best effort: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. See class docstring for details.\n\n Examples:\n\n ```python\n # Make an operator acting like batch matrix A. Assume tensor_shape.TensorShape(A.shape) = [..., M, N]\n operator = LinearOperator(...)\n tensor_shape.TensorShape(operator.shape) = [..., M, N]\n\n # Solve one linear system for every member of the batch.\n RHS = ... 
# shape [..., M]\n\n X = operator.solvevec(RHS)\n # X is the solution to the linear system\n # sum_j A[..., :, j] X[..., j] = RHS[..., :]\n\n operator.matvec(X)\n ==> RHS\n ```\n\n Args:\n rhs: `Tensor` with same `dtype` as this operator.\n `rhs` is treated like a [batch] vector meaning for every set of leading\n dimensions, the last dimension defines a vector. See class docstring\n for definition of compatibility regarding batch dimensions.\n adjoint: Python `bool`. If `True`, solve the system involving the adjoint\n of this `LinearOperator`: `A^H X = rhs`.\n name: A name scope to use for ops added by this method.\n\n Returns:\n `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.\n\n Raises:\n NotImplementedError: If `self.is_non_singular` or `is_square` is False.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n rhs = ops.convert_to_tensor(rhs, name=\"rhs\")\n # self._check_input_dtype(rhs)\n self_dim = -1 if adjoint else -2\n tensor_shape.dimension_at_index(\n tensor_shape.TensorShape(self.shape), self_dim).assert_is_compatible_with(tensor_shape.TensorShape(rhs.shape)[-1])\n\n return self._solvevec(rhs, adjoint=adjoint)\n\n def adjoint(self, name=\"adjoint\"):\n \"\"\"Returns the adjoint of the current `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, return `A*`.\n Note that calling `self.adjoint()` and `self.H` are equivalent.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `LinearOperator` which represents the adjoint of this `LinearOperator`.\n \"\"\"\n if self.is_self_adjoint is True: # pylint: disable=g-bool-id-comparison\n return self\n with self._name_scope(name): # pylint: disable=not-callable\n return linear_operator_algebra.adjoint(self)\n\n # self.H is equivalent to self.adjoint().\n H = property(adjoint, None)\n\n def inverse(self, name=\"inverse\"):\n \"\"\"Returns the Inverse of this `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, return a `LinearOperator`\n representing `A^-1`.\n\n Args:\n name: A name scope to use for ops added by this method.\n\n Returns:\n `LinearOperator` representing inverse of this matrix.\n\n Raises:\n ValueError: When the `LinearOperator` is not hinted to be `non_singular`.\n \"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"Cannot take the Inverse: This operator represents \"\n \"a non square matrix.\")\n if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"Cannot take the Inverse: This operator represents \"\n \"a singular matrix.\")\n\n with self._name_scope(name): # pylint: disable=not-callable\n return linear_operator_algebra.inverse(self)\n\n def cholesky(self, name=\"cholesky\"):\n \"\"\"Returns a Cholesky factor as a `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, if `A` is positive definite\n self-adjoint, return `L`, where `A = L L^T`, i.e. 
the cholesky\n decomposition.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `LinearOperator` which represents the lower triangular matrix\n in the Cholesky decomposition.\n\n Raises:\n ValueError: When the `LinearOperator` is not hinted to be positive\n definite and self adjoint.\n \"\"\"\n\n if not self._can_use_cholesky():\n raise ValueError(\"Cannot take the Cholesky decomposition: \"\n \"Not a positive definite self adjoint matrix.\")\n with self._name_scope(name): # pylint: disable=not-callable\n return linear_operator_algebra.cholesky(self)\n\n def _to_dense(self):\n \"\"\"Generic and often inefficient implementation. Override often.\"\"\"\n if self.batch_shape.is_fully_defined():\n batch_shape = self.batch_shape\n else:\n batch_shape = self.batch_shape_tensor()\n\n dim_value = tensor_shape.dimension_value(self.domain_dimension)\n if dim_value is not None:\n n = dim_value\n else:\n n = self.domain_dimension_tensor()\n\n eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)\n return self.matmul(eye)\n\n def to_dense(self, name=\"to_dense\"):\n \"\"\"Return a dense (batch) matrix representing this operator.\"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._to_dense()\n\n def _diag_part(self):\n \"\"\"Generic and often inefficient implementation. Override often.\"\"\"\n return _linalg.diag_part(self.to_dense())\n\n def diag_part(self, name=\"diag_part\"):\n \"\"\"Efficiently get the [batch] diagonal part of this operator.\n\n If this operator has shape `[B1,...,Bb, M, N]`, this returns a\n `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where\n `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.\n\n ```\n my_operator = LinearOperatorDiag([1., 2.])\n\n # Efficiently get the diagonal\n my_operator.diag_part()\n ==> [1., 2.]\n\n # Equivalent, but inefficient method\n tf.linalg.diag_part(my_operator.to_dense())\n ==> [1., 2.]\n ```\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n diag_part: A `Tensor` of same `dtype` as self.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._diag_part()\n\n def _trace(self):\n return math_ops.reduce_sum(self.diag_part(), axis=-1)\n\n def trace(self, name=\"trace\"):\n \"\"\"Trace of the linear operator, equal to sum of `self.diag_part()`.\n\n If the operator is square, this is also the sum of the eigenvalues.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._trace()\n\n def _add_to_tensor(self, x):\n # Override if a more efficient implementation is available.\n return self.to_dense() + x\n\n def add_to_tensor(self, x, name=\"add_to_tensor\"):\n \"\"\"Add matrix represented by this operator to `x`. 
Equivalent to `A + x`.\n\n Args:\n x: `Tensor` with same `dtype` and shape broadcastable to `tensor_shape.TensorShape(self.shape)`.\n name: A name to give this `Op`.\n\n Returns:\n A `Tensor` with broadcast shape and same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n x = ops.convert_to_tensor(x, name=\"x\")\n # self._check_input_dtype(x)\n return self._add_to_tensor(x)\n\n def _eigvals(self):\n return linalg_ops.self_adjoint_eigvals(self.to_dense())\n\n def eigvals(self, name=\"eigvals\"):\n \"\"\"Returns the eigenvalues of this linear operator.\n\n If the operator is marked as self-adjoint (via `is_self_adjoint`)\n this computation can be more efficient.\n\n Note: This currently only supports self-adjoint operators.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n if not self.is_self_adjoint:\n raise NotImplementedError(\"Only self-adjoint matrices are supported.\")\n with self._name_scope(name): # pylint: disable=not-callable\n return self._eigvals()\n\n def _cond(self):\n if not self.is_self_adjoint:\n # In general the condition number is the ratio of the\n # absolute value of the largest and smallest singular values.\n vals = linalg_ops.svd(self.to_dense(), compute_uv=False)\n else:\n # For self-adjoint matrices, and in general normal matrices,\n # we can use eigenvalues.\n vals = math_ops.abs(self._eigvals())\n\n return (math_ops.reduce_max(vals, axis=-1) /\n math_ops.reduce_min(vals, axis=-1))\n\n def cond(self, name=\"cond\"):\n \"\"\"Returns the condition number of this linear operator.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name): # pylint: disable=not-callable\n return self._cond()\n\n def _can_use_cholesky(self):\n return self.is_self_adjoint and self.is_positive_definite\n\n def _set_graph_parents(self, graph_parents):\n \"\"\"Set self._graph_parents. Called during derived class init.\n\n This method allows derived classes to set graph_parents, without triggering\n a deprecation warning (which is invoked if `graph_parents` is passed during\n `__init__`.\n\n Args:\n graph_parents: Iterable over Tensors.\n \"\"\"\n # TODO(b/143910018) Remove this function in V3.\n graph_parents = [] if graph_parents is None else graph_parents\n for i, t in enumerate(graph_parents):\n if t is None or not (linear_operator_util.is_ref(t) or\n ops.is_tensor(t)):\n raise ValueError(\"Graph parent item %d is not a Tensor; %s.\" % (i, t))\n self._graph_parents = graph_parents\n\n @property\n def _composite_tensor_fields(self):\n \"\"\"A tuple of parameter names to rebuild the `LinearOperator`.\n\n The tuple contains the names of kwargs to the `LinearOperator`'s constructor\n that the `TypeSpec` needs to rebuild the `LinearOperator` instance.\n\n \"is_non_singular\", \"is_self_adjoint\", \"is_positive_definite\", and\n \"is_square\" are common to all `LinearOperator` subclasses and may be\n omitted.\n \"\"\"\n return ()\n\n @property\n def _composite_tensor_prefer_static_fields(self):\n \"\"\"A tuple of names referring to parameters that may be treated statically.\n\n This is a subset of `_composite_tensor_fields`, and contains the names of\n of `Tensor`-like args to the `LinearOperator`s constructor that may be\n stored as static values, if they are statically known. 
These are typically\n shapes or axis values.\n \"\"\"\n return ()\n\n @property\n def _type_spec(self):\n # This property will be overwritten by the `@make_composite_tensor`\n # decorator. However, we need it so that a valid subclass of the `ABCMeta`\n # class `CompositeTensor` can be constructed and passed to the\n # `@make_composite_tensor` decorator.\n pass\n\n\nclass _LinearOperatorSpec(type_spec.TypeSpec):\n \"\"\"A tf.TypeSpec for `LinearOperator` objects.\"\"\"\n\n __slots__ = (\"_param_specs\", \"_non_tensor_params\", \"_prefer_static_fields\")\n\n def __init__(self, param_specs, non_tensor_params, prefer_static_fields):\n \"\"\"Initializes a new `_LinearOperatorSpec`.\n\n Args:\n param_specs: Python `dict` of `tf.TypeSpec` instances that describe\n kwargs to the `LinearOperator`'s constructor that are `Tensor`-like or\n `CompositeTensor` subclasses.\n non_tensor_params: Python `dict` containing non-`Tensor` and non-\n `CompositeTensor` kwargs to the `LinearOperator`'s constructor.\n prefer_static_fields: Python `tuple` of strings corresponding to the names\n of `Tensor`-like args to the `LinearOperator`s constructor that may be\n stored as static values, if known. These are typically shapes, indices,\n or axis values.\n \"\"\"\n self._param_specs = param_specs\n self._non_tensor_params = non_tensor_params\n self._prefer_static_fields = prefer_static_fields\n\n @classmethod\n def from_operator(cls, operator):\n \"\"\"Builds a `_LinearOperatorSpec` from a `LinearOperator` instance.\n\n Args:\n operator: An instance of `LinearOperator`.\n\n Returns:\n linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as\n the `TypeSpec` of `operator`.\n \"\"\"\n validation_fields = (\"is_non_singular\", \"is_self_adjoint\",\n \"is_positive_definite\", \"is_square\")\n kwargs = _extract_attrs(\n operator,\n keys=set(operator._composite_tensor_fields + validation_fields)) # pylint: disable=protected-access\n\n non_tensor_params = {}\n param_specs = {}\n for k, v in list(kwargs.items()):\n type_spec_or_v = _extract_type_spec_recursively(v)\n is_tensor = [isinstance(x, type_spec.TypeSpec)\n for x in nest.flatten(type_spec_or_v)]\n if all(is_tensor):\n param_specs[k] = type_spec_or_v\n elif not any(is_tensor):\n non_tensor_params[k] = v\n else:\n raise NotImplementedError(f\"Field {k} contains a mix of `Tensor` and \"\n f\" non-`Tensor` values.\")\n\n return cls(\n param_specs=param_specs,\n non_tensor_params=non_tensor_params,\n prefer_static_fields=operator._composite_tensor_prefer_static_fields) # pylint: disable=protected-access\n\n def _to_components(self, obj):\n return _extract_attrs(obj, keys=list(self._param_specs))\n\n def _from_components(self, components):\n kwargs = dict(self._non_tensor_params, **components)\n return self.value_type(**kwargs)\n\n @property\n def _component_specs(self):\n return self._param_specs\n\n def _serialize(self):\n return (self._param_specs,\n self._non_tensor_params,\n self._prefer_static_fields)\n\n\ndef make_composite_tensor(cls, module_name=\"tf.linalg\"):\n \"\"\"Class decorator to convert `LinearOperator`s to `CompositeTensor`.\"\"\"\n\n spec_name = \"{}Spec\".format(cls.__name__)\n spec_type = type(spec_name, (_LinearOperatorSpec,), {\"value_type\": cls})\n type_spec.register(\"{}.{}\".format(module_name, spec_name))(spec_type)\n cls._type_spec = property(spec_type.from_operator) # pylint: disable=protected-access\n return cls\n\n\ndef _extract_attrs(op, keys):\n \"\"\"Extract constructor kwargs to reconstruct `op`.\n\n Args:\n op: A 
`LinearOperator` instance.\n keys: A Python `tuple` of strings indicating the names of the constructor\n kwargs to extract from `op`.\n\n Returns:\n kwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`.\n \"\"\"\n\n kwargs = {}\n not_found = object()\n for k in keys:\n srcs = [\n getattr(op, k, not_found), getattr(op, \"_\" + k, not_found),\n getattr(op, \"parameters\", {}).get(k, not_found),\n ]\n if any(v is not not_found for v in srcs):\n kwargs[k] = [v for v in srcs if v is not not_found][0]\n else:\n raise ValueError(\n f\"Could not determine an appropriate value for field `{k}` in object \"\n f\" `{op}`. Looked for \\n\"\n f\" 1. an attr called `{k}`,\\n\"\n f\" 2. an attr called `_{k}`,\\n\"\n f\" 3. an entry in `op.parameters` with key '{k}'.\")\n if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None: # pylint: disable=protected-access\n if ops.is_tensor(kwargs[k]):\n static_val = ops.get_static_value(kwargs[k])\n if static_val is not None:\n kwargs[k] = static_val\n if isinstance(kwargs[k], (np.ndarray, np.generic)):\n kwargs[k] = kwargs[k].tolist()\n return kwargs\n\n\ndef _extract_type_spec_recursively(value):\n \"\"\"Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.\n\n If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If\n `value` is a collection containing `Tensor` values, recursively supplant them\n with their respective `TypeSpec`s in a collection of parallel stucture.\n\n If `value` is none of the above, return it unchanged.\n\n Args:\n value: a Python `object` to (possibly) turn into a (collection of)\n `tf.TypeSpec`(s).\n\n Returns:\n spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`\n or `value`, if no `Tensor`s are found.\n \"\"\"\n if isinstance(value, composite_tensor.CompositeTensor):\n return value._type_spec # pylint: disable=protected-access\n if isinstance(value, variables.Variable):\n return resource_variable_ops.VariableSpec(\n tensor_shape.TensorShape(value.shape), dtype=value.dtype, trainable=value.trainable)\n if ops.is_tensor(value):\n return tensor_spec.TensorSpec(tensor_shape.TensorShape(value.shape), value.dtype)\n # Unwrap trackable data structures to comply with `Type_Spec._serialize`\n # requirements. `ListWrapper`s are converted to `list`s, and for other\n # trackable data structures, the `__wrapped__` attribute is used.\n if isinstance(value, list):\n return list(_extract_type_spec_recursively(v) for v in value)\n if isinstance(value, data_structures.TrackableDataStructure):\n return _extract_type_spec_recursively(value.__wrapped__)\n if isinstance(value, tuple):\n return type(value)(_extract_type_spec_recursively(x) for x in value)\n if isinstance(value, dict):\n return type(value)((k, _extract_type_spec_recursively(v))\n for k, v in value.items())\n return value\n\n\n# Overrides for tf.linalg functions. 
This allows a LinearOperator to be used in\n# place of a Tensor.\n# For instance tf.trace(linop) and linop.trace() both work.\n\n\n# @dispatch.dispatch_for_types(linalg.adjoint, LinearOperator)\ndef _adjoint(matrix, name=None):\n return matrix.adjoint(name)\n\n\n# @dispatch.dispatch_for_types(linalg.cholesky, LinearOperator)\ndef _cholesky(input, name=None): # pylint:disable=redefined-builtin\n return input.cholesky(name)\n\n\n# The signature has to match with the one in python/op/array_ops.py,\n# so we have k, padding_value, and align even though we don't use them here.\n# pylint:disable=unused-argument\n# @dispatch.dispatch_for_types(linalg.diag_part, LinearOperator)\ndef _diag_part(\n input, # pylint:disable=redefined-builtin\n name=\"diag_part\",\n k=0,\n padding_value=0,\n align=\"RIGHT_LEFT\"):\n return input.diag_part(name)\n# pylint:enable=unused-argument\n\n\n# @dispatch.dispatch_for_types(linalg.det, LinearOperator)\ndef _det(input, name=None): # pylint:disable=redefined-builtin\n return input.determinant(name)\n\n\n# @dispatch.dispatch_for_types(linalg.inv, LinearOperator)\ndef _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin\n inv = input.inverse(name)\n if adjoint:\n inv = inv.adjoint()\n return inv\n\n\n# @dispatch.dispatch_for_types(linalg.logdet, LinearOperator)\ndef _logdet(matrix, name=None):\n if matrix.is_positive_definite and matrix.is_self_adjoint:\n return matrix.log_abs_determinant(name)\n raise ValueError(\"Expected matrix to be self-adjoint positive definite.\")\n\n\n# @dispatch.dispatch_for_types(_linalg.matmul, LinearOperator)\ndef _matmul( # pylint:disable=missing-docstring\n a,\n b,\n transpose_a=False,\n transpose_b=False,\n adjoint_a=False,\n adjoint_b=False,\n a_is_sparse=False,\n b_is_sparse=False,\n output_type=None, # pylint: disable=unused-argument\n name=None):\n if transpose_a or transpose_b:\n raise ValueError(\"Transposing not supported at this time.\")\n if a_is_sparse or b_is_sparse:\n raise ValueError(\"Sparse methods not supported at this time.\")\n if not isinstance(a, LinearOperator):\n # We use the identity (B^HA^H)^H = AB\n adjoint_matmul = b.matmul(\n a,\n adjoint=(not adjoint_b),\n adjoint_arg=(not adjoint_a),\n name=name)\n return linalg.adjoint(adjoint_matmul)\n return a.matmul(\n b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)\n\n\n# @dispatch.dispatch_for_types(linalg.solve, LinearOperator)\ndef _solve(\n matrix,\n rhs,\n adjoint=False,\n name=None):\n if not isinstance(matrix, LinearOperator):\n raise ValueError(\"Passing in `matrix` as a Tensor and `rhs` as a \"\n \"LinearOperator is not supported.\")\n return matrix.solve(rhs, adjoint=adjoint, name=name)\n\n\n# @dispatch.dispatch_for_types(linalg.trace, LinearOperator)\ndef _trace(x, name=None):\n return x.trace(name)\n\nimport numpy as np\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg\nfrom tensorflow_probability.python.internal.backend.numpy import ops as _ops\nfrom tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape\n\nfrom tensorflow_probability.python.internal.backend.numpy import private\ndistribution_util = private.LazyLoader(\n \"distribution_util\", globals(),\n \"tensorflow_probability.substrates.numpy.internal.distribution_util\")\ntensorshape_util = private.LazyLoader(\n \"tensorshape_util\", globals(),\n \"tensorflow_probability.substrates.numpy.internal.tensorshape_util\")\n\n",
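The default implementations above hinge on two small identities: `_to_dense` materializes the operator by multiplying it against an identity matrix, and `_assert_non_singular` falls back to comparing the condition number against `1 / (max{100, M, N} * eps)`. Below is a minimal NumPy sketch of both ideas; the `DiagOperator` class and its method names are hypothetical stand-ins for illustration, not part of this module.

```python
import numpy as np

class DiagOperator:
  """Hypothetical toy operator mirroring the LinearOperator defaults above."""

  def __init__(self, diag):
    self.diag = np.asarray(diag, dtype=np.float64)
    self.domain_dimension = self.range_dimension = self.diag.shape[-1]
    self.dtype = self.diag.dtype

  def matmul(self, x):
    # Specialized matmul: diag(d) @ x scales the rows of x.
    return self.diag[:, np.newaxis] * x

  def to_dense(self):
    # Same trick as the generic `_to_dense`: multiply against the identity.
    eye = np.eye(self.domain_dimension, dtype=self.dtype)
    return self.matmul(eye)

  def max_condition_number_to_be_non_singular(self):
    # Mirrors `_max_condition_number_to_be_non_singular`:
    # 1 / (max{100, M, N} * eps).
    eps = np.finfo(self.dtype).eps
    return 1. / (max(100., self.range_dimension, self.domain_dimension) * eps)

  def assert_non_singular(self):
    # Mirrors the default `_assert_non_singular`: compare the condition
    # number (ratio of extreme singular values) against the bound above.
    s = np.linalg.svd(self.to_dense(), compute_uv=False)
    cond = s.max() / s.min()
    if cond >= self.max_condition_number_to_be_non_singular():
      raise ValueError("Singular matrix up to precision epsilon.")


op = DiagOperator([1., 2., 3.])
print(op.to_dense())      # dense [[1,0,0],[0,2,0],[0,0,3]]
op.assert_non_singular()  # passes: condition number is 3.
```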
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utilities for TensorFlow Probability ODE solvers.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.math import gradient as tfp_gradient\n\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n\nclass Bunch(dict):\n \"\"\"Dict-subclass which exposes keys as attributes.\"\"\"\n\n def __getattr__(self, name):\n return self[name]\n\n def __setattr__(self, name, value):\n self[name] = value\n\n def __delattr__(self, name):\n del self[name]\n\n\ndef assert_increasing(tensor, identifier):\n \"\"\"Assert if a `Tensor` is strictly increasing.\"\"\"\n return tf.Assert(\n tf.reduce_all(tensor[1:] > tensor[:-1]),\n ['`{}` must be strictly increasing.'.format(identifier)])\n\n\ndef assert_nonnegative(tensor, identifier):\n \"\"\"Assert if a `Tensor` is nonnegative.\"\"\"\n return tf.Assert(\n tf.reduce_all(tensor >= tf.zeros([], dtype=tensor.dtype)),\n ['`{}` must be nonnegative'.format(identifier)])\n\n\ndef assert_positive(tensor, identifier):\n \"\"\"Assert if a `Tensor` is positive.\"\"\"\n return tf.Assert(\n tf.reduce_all(tensor > tf.zeros([], dtype=tensor.dtype)),\n ['`{}` must be positive.'.format(identifier)])\n\n\ndef error_if_not_real_or_complex(tensor, identifier):\n \"\"\"Raise a `TypeError` if the `Tensor` is neither real nor complex.\"\"\"\n if not (dtype_util.is_floating(tensor.dtype) or\n dtype_util.is_complex(tensor.dtype)):\n raise TypeError(\n '`{}` must have a floating point or complex floating point dtype.'\n .format(identifier))\n\n\ndef error_if_not_vector(tensor, identifier):\n \"\"\"Raise a `ValueError` if the `Tensor` is not 1-D.\"\"\"\n if len(list(tensor.shape)) != 1:\n raise ValueError('`{}` must be a 1-D tensor.'.format(identifier))\n\n\ndef _flatten_nested_jacobian(jacobian, state_shape):\n \"\"\"Flattens a nested Jacobian into a matrix.\n\n The flattening and concatenation follows the interpretation of the structure\n as being a leading 'axis', meaning that if the input has 'shape':\n [input_structure, A, B], and the output has 'shape':\n [output_structure, C, D], the input Jacobian should have the 'shape':\n [input_structure, output_structure, A, B, C, D]. 
As with the regular axes, the\n encoding is input major.\n\n Args:\n jacobian: A nested Jacobian.\n state_shape: A nested collection of state shapes.\n\n Returns:\n jacobian_mat: The Jacobian matrix.\n\n #### Examples\n\n Non-structured state:\n\n ```python\n input = tf.zeros([1, 2])\n output = tf.zeros([3])\n jacobian = tf.zeros([1, 2, 3])\n ```\n\n Structured state:\n\n ```python\n input = {'x': tf.zeros([1, 2])}\n output = {'y': tf.zeros([3])}\n jacobian = {'x': {'y': tf.zeros([1, 2, 3])}}\n ```\n\n A more complicated structure:\n\n ```python\n input = [tf.zeros([1, 2]), tf.zeros([])]\n output = {'y': tf.zeros([3])}\n jacobian = [{'y': tf.zeros([1, 2, 3])}, {'y': tf.zeros([3]}]\n ```\n\n \"\"\"\n\n def _flatten_row(jacobian_row, state_shape_part):\n state_size = ps.reduce_prod(state_shape_part)\n jacobian_row_mats = tf.nest.map_structure(\n lambda j: tf.reshape(j, ps.stack([state_size, -1], axis=0)),\n jacobian_row)\n return tf.concat(tf.nest.flatten(jacobian_row_mats), axis=-1)\n\n flat_rows = nest.map_structure_up_to(state_shape, _flatten_row, jacobian,\n state_shape)\n return tf.concat(tf.nest.flatten(flat_rows), axis=0)\n\n\ndef get_jacobian_fn_mat(jacobian_fn, ode_fn_vec, state_shape, dtype):\n \"\"\"Returns a wrapper around the user-specified `jacobian_fn` argument.\n\n `jacobian_fn` is an optional argument that can either be a constant `Tensor`\n or a function of the form `jacobian_fn(time, state)`. This function returns a\n wrapper `jacobian_fn_mat(time, state_vec)` whose second argument and output\n are 1 and 2-D `Tensor`s, respectively, corresponding reshaped versions of\n `state` and `jacobian_fn(time, state)`.\n\n Args:\n jacobian_fn: User-specified `jacobian_fn` passed to `solve`.\n ode_fn_vec: Result of `get_ode_fn_vec`.\n state_shape: The shape of the second argument and output of `ode_fn`.\n dtype: If `jacobian_fn` is constant, what dtype to convert it to.\n\n Returns:\n The wrapper described above.\n \"\"\"\n if jacobian_fn is None:\n return _AutomaticJacobian(ode_fn_vec)\n\n if not callable(jacobian_fn):\n jacobian_fn = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(x, dtype=dtype), jacobian_fn)\n constant_jacobian_mat = _flatten_nested_jacobian(jacobian_fn, state_shape)\n\n def constant_jacobian_fn_mat(*_):\n return constant_jacobian_mat\n\n return constant_jacobian_fn_mat\n\n def jacobian_fn_mat(time, state_vec):\n return _flatten_nested_jacobian(\n jacobian_fn(time, get_state_from_vec(state_vec, state_shape)),\n state_shape)\n\n return jacobian_fn_mat\n\n\ndef get_state_vec(state):\n \"\"\"Converts a possibly nested state into a vector.\"\"\"\n return tf.concat(\n tf.nest.flatten(\n tf.nest.map_structure(lambda s: tf.reshape(s, [-1]), state)),\n axis=-1)\n\n\ndef get_state_from_vec(state_vec, state_shape):\n \"\"\"Inverse of `get_state_vec`.\"\"\"\n state_sizes = tf.nest.map_structure(ps.reduce_prod, state_shape)\n state_vec_parts = tf.nest.pack_sequence_as(\n state_shape, tf.split(state_vec, tf.nest.flatten(state_sizes), axis=-1))\n batch_shape = ps.shape(state_vec)[:-1]\n\n return tf.nest.map_structure(\n lambda sv, s: tf.reshape(sv, ps.concat([batch_shape, s], axis=0)),\n state_vec_parts, state_shape)\n\n\ndef get_ode_fn_vec(ode_fn, state_shape):\n \"\"\"Returns a wrapper around the user-specified `ode_fn` argument.\n\n The second argument and output of `ode_fn(time, state)` are N-D `Tensor`s.\n This function returns a wrapper `ode_fn_vec(time, state_vec)` whose\n second argument and output are 1-D `Tensor`s corresponding to reshaped\n versions of 
`state` and `ode_fn(time, state)`.\n\n Args:\n ode_fn: User-specified `ode_fn` passed to `solve`.\n state_shape: The shape of the second argument and output of `ode_fn`.\n\n Returns:\n The wrapper described above.\n \"\"\"\n\n def ode_fn_vec(time, state_vec):\n return get_state_vec(\n ode_fn(time, get_state_from_vec(state_vec, state_shape)))\n\n return ode_fn_vec\n\n\ndef next_step_size(step_size, order, error_ratio, safety_factor,\n min_step_size_factor, max_step_size_factor):\n \"\"\"Computes the next step size to use.\n\n Computes the next step size by applying a multiplicative factor to the current\n step size. This factor is\n ```none\n factor_unclamped = error_ratio**(-1. / (order + 1)) * safety_factor\n factor = clamp(factor_unclamped, min_step_size_factor, max_step_size_factor)\n ```\n\n Args:\n step_size: Scalar float `Tensor` specifying the current step size.\n order: Scalar integer `Tensor` specifying the order of the method.\n error_ratio: Scalar float `Tensor` specifying the ratio of the error in the\n computed state and the tolerance.\n safety_factor: Scalar float `Tensor`.\n min_step_size_factor: Scalar float `Tensor` specifying a lower bound on the\n multiplicative factor.\n max_step_size_factor: Scalar float `Tensor` specifying an upper bound on the\n multiplicative factor.\n\n Returns:\n Scalar float `Tensor` specifying the next step size.\n \"\"\"\n order_cast = tf.cast(order, error_ratio.dtype)\n factor = error_ratio**(-1. / (order_cast + 1.))\n return step_size * tf.clip_by_value(\n safety_factor * factor, min_step_size_factor, max_step_size_factor)\n\n\ndef stop_gradient_of_real_or_complex_entries(nested):\n \"\"\"Calls `tf.stop_gradient` on real or complex elements of a nested structure.\n\n Args:\n nested: The nested structure. May contain `Tensor`s with different `dtype`s.\n\n Returns:\n The resulting nested structure.\n \"\"\"\n def _one_part(tensor):\n tensor = tf.convert_to_tensor(tensor)\n if dtype_util.is_floating(tensor.dtype) or dtype_util.is_complex(\n tensor.dtype):\n return tf.stop_gradient(tensor)\n else:\n return tensor\n\n return tf.nest.map_structure(_one_part, nested)\n\n\ndef right_mult_by_jacobian_mat(jacobian_fn_mat, ode_fn_vec, time, state_vec,\n vec):\n \"\"\"Right multiplies a vector by the Jacobian.\n\n The Jacobian is constructed by calling `jacobian_fn_mat(time, state_vec)` if\n doing so does not require automatic differentiation. 
Otherwise, chain rule\n automatic differentiation is applied to `ode_fn_vec` to obtain the Jacobian.\n\n Args:\n jacobian_fn_mat: Result of `get_jacobian_fn_mat`.\n ode_fn_vec: Result of `get_ode_fn_vec`.\n time: Scalar float `Tensor` time at which to evalute the Jacobian.\n state_vec: `Tensor` state at which to evaluate the Jacobian.\n vec: `Tensor` with shape is compatible with the Jacobian.\n\n Returns:\n `Tensor` representing the dot product.\n \"\"\"\n if isinstance(jacobian_fn_mat, _AutomaticJacobian):\n # Compute the dot product by using chain rule automatic differentiation.\n _, dot_product = tfp_gradient.value_and_gradient(\n lambda x: ode_fn_vec(time, x), state_vec, output_gradients=vec)\n else:\n # Compute the dot product by explicitly constructing the Jacobian matrix.\n jacobian_mat = jacobian_fn_mat(time, state_vec)\n dot_product = tf.reshape(tf.matmul(vec[tf.newaxis, :], jacobian_mat), [-1])\n return dot_product\n\n\nclass _AutomaticJacobian(object):\n \"\"\"Callable that returns a Jacobian computed by automatic differentiation.\"\"\"\n\n def __init__(self, ode_fn_vec):\n self._ode_fn_vec = ode_fn_vec\n\n def __call__(self, time, state_vec):\n jacobian_mat = tfp_gradient.batch_jacobian(\n lambda state_vec: self._ode_fn_vec(time, state_vec[0])[tf.newaxis],\n state_vec[tf.newaxis])\n\n if jacobian_mat is None:\n return tf.zeros([tf.size(state_vec)] * 2, dtype=state_vec.dtype)\n return jacobian_mat[0]\n",
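`next_step_size` above is a plain multiplicative controller: scale the step by `error_ratio**(-1/(order+1))`, apply the safety factor, and clamp the result. A minimal NumPy restatement of that rule follows (illustrative only; the default factor values below are assumptions, not taken from the solvers in this package).

```python
import numpy as np

def next_step_size(step_size, order, error_ratio,
                   safety_factor=0.9,
                   min_step_size_factor=0.1,
                   max_step_size_factor=10.):
  """NumPy restatement of the clamped multiplicative step-size rule above."""
  factor_unclamped = safety_factor * error_ratio ** (-1. / (order + 1.))
  factor = np.clip(factor_unclamped, min_step_size_factor, max_step_size_factor)
  return step_size * factor

# Error larger than the tolerance (ratio > 1) shrinks the step...
print(next_step_size(0.1, order=4, error_ratio=8.))    # ~0.059
# ...while a tiny error ratio grows it, up to the clamp.
print(next_step_size(0.1, order=4, error_ratio=1e-8))  # 0.1 * 10 = 1.0
```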
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"ScaleMatvecLinearOperator and ScaleMatvecLinearOperatorBlock bijectors.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\n__all__ = [\n 'ScaleMatvecLinearOperator',\n 'ScaleMatvecLinearOperatorBlock'\n]\n\n\nclass _ScaleMatvecLinearOperatorBase(bijector.AutoCompositeTensorBijector):\n \"\"\"Common base class for `ScaleMatvecLinearOperator{Block}`.\"\"\"\n\n @property\n def scale(self):\n \"\"\"The `scale` `LinearOperator` in `Y = scale @ X`.\"\"\"\n return self._scale\n\n @property\n def adjoint(self):\n \"\"\"`bool` indicating whether this class uses `self.scale` or its adjoint.\"\"\"\n return self._adjoint\n\n @classmethod\n def _parameter_properties(cls, dtype):\n return dict(scale=parameter_properties.BatchedComponentProperties())\n\n def _forward(self, x):\n return self.scale.matvec(x, adjoint=self.adjoint)\n\n def _inverse(self, y):\n return self.scale.solvevec(y, adjoint=self.adjoint)\n\n def _forward_log_det_jacobian(self, x):\n # is_constant_jacobian = True for this bijector, hence the\n # `log_det_jacobian` need only be specified for a single input, as this will\n # be tiled to match `event_ndims`.\n return self.scale.log_abs_determinant()\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n if is_init != any(tensor_util.is_ref(v) for v in self.scale.variables):\n return [self.scale.assert_non_singular()]\n return []\n\n\nclass ScaleMatvecLinearOperator(_ScaleMatvecLinearOperatorBase):\n \"\"\"Compute `Y = g(X; scale) = scale @ X`.\n\n `scale` is a `LinearOperator` and the forward transformation is: `scale @ X`\n where `@` denotes matrix-vector multiplication.\n\n If `X` is a scalar (represented as a vector of length `1`) then the forward\n transformation is: `scale * X` where `*` denotes broadcasted elementwise\n product.\n\n Example Use:\n\n ```python\n x = [1., 2, 3]\n\n diag = [1., 2, 3]\n scale = tf.linalg.LinearOperatorDiag(diag)\n bijector = ScaleMatvecLinearOperator(scale)\n # In this case, `forward` is equivalent to:\n # y = scale @ x\n y = bijector.forward(x) # Tensor([1., 4, 9])\n\n tril = [[1., 0, 0],\n [2, 1, 0],\n [3, 2, 1]]\n scale = tf.linalg.LinearOperatorLowerTriangular(tril)\n bijector = ScaleMatvecLinearOperator(scale)\n # In this case, `forward` is equivalent to:\n # np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1)\n y = bijector.forward(x) # Tensor([1., 4, 10])\n ```\n\n \"\"\"\n\n def __init__(self,\n scale,\n 
adjoint=False,\n validate_args=False,\n parameters=None,\n name='scale_matvec_linear_operator'):\n \"\"\"Instantiates the `ScaleMatvecLinearOperator` bijector.\n\n Args:\n scale: Subclass of `LinearOperator`. Represents the (batch, non-singular)\n linear transformation by which the `Bijector` transforms inputs.\n adjoint: Python `bool` indicating whether to use the `scale` matrix as\n specified or its adjoint.\n Default value: `False`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n parameters: Locals dict captured by subclass constructor, to be used for\n copy/slice re-instantiation operators.\n name: Python `str` name given to ops managed by this object.\n\n Raises:\n TypeError: if `scale` is not a `LinearOperator`.\n ValueError: if not `scale.is_non_singular`.\n \"\"\"\n parameters = dict(locals()) if parameters is None else parameters\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([scale], dtype_hint=tf.float32)\n if not isinstance(scale, tf.linalg.LinearOperator):\n raise TypeError('scale is not an instance of tf.LinearOperator')\n if validate_args and not scale.is_non_singular:\n raise ValueError('Scale matrix must be non-singular.')\n self._scale = scale\n self._adjoint = adjoint\n super(ScaleMatvecLinearOperator, self).__init__(\n forward_min_event_ndims=1,\n is_constant_jacobian=True,\n dtype=dtype,\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n\n\nclass ScaleMatvecLinearOperatorBlock(_ScaleMatvecLinearOperatorBase):\n \"\"\"Compute `Y = g(X; scale) = scale @ X` for blockwise `X` and `scale`.\n\n `scale` is a `LinearOperator` that supports blockwise semantics, e.g.\n `LinearOperatorBlockDiag` or `LinearOperatorBlockLowerTriangular`. The forward\n transformation is: `scale @ X` where `X` is a list or tuple of `Tensor`s, the\n rightmost dimensions of which match the `domain_dimension`s of the\n corresponding operators in `scale`'s block structure.\n\n Example use:\n\n ```python\n op_1 = tf.linalg.LinearOperatorDiag(diag=[1., -1., 3.])\n op_2 = tf.linalg.LinearOperatorFullMatrix([[12., 5.], [-1., 3.]])\n scale = tf.linalg.LinearOperatorBlockDiag([op_1, op_2], is_non_singular=True)\n bijector = ScaleMatvecLinearOperatorBlock(scale)\n\n x = [[2., 0., 1.], [3., 1.]] # Input consisting of two blocks\n y = bijector.forward(x) # [Tensor([2., 0., 3.]), Tensor([41., 0.])]\n ```\n\n \"\"\"\n\n def __init__(self,\n scale,\n adjoint=False,\n validate_args=False,\n parameters=None,\n name='scale_matvec_linear_operator_block'):\n \"\"\"Instantiates the `ScaleMatvecLinearOperatorBlock` bijector.\n\n Args:\n scale: Subclass of `LinearOperator` that supports blockwise semantics\n (e.g. `LinearOperatorBlockDiag` or\n `LinearOperatorBlockLowerTriangular`). 
Represents the (blockwise, batch,\n non-singular) linear transformation by which the `Bijector` transforms\n inputs.\n adjoint: Python `bool` indicating whether to use the `scale` matrix as\n specified or its adjoint.\n Default value: `False`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n parameters: Locals dict captured by subclass constructor, to be used for\n copy/slice re-instantiation operators.\n name: Python `str` name given to ops managed by this object.\n\n Raises:\n TypeError: if `scale` is not a `LinearOperator`.\n ValueError: if not `scale.is_non_singular`.\n \"\"\"\n parameters = dict(locals()) if parameters is None else parameters\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([scale], dtype_hint=tf.float32)\n if not isinstance(scale, tf.linalg.LinearOperator):\n raise TypeError('scale is not an instance of tf.LinearOperator')\n if validate_args and not scale.is_non_singular:\n raise ValueError('Scale matrix must be non-singular.')\n\n forward_min_event_ndims = [1] * len(scale.operators)\n\n self._scale = scale\n self._adjoint = adjoint\n super(ScaleMatvecLinearOperatorBlock, self).__init__(\n forward_min_event_ndims=forward_min_event_ndims,\n is_constant_jacobian=True,\n dtype=dtype,\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n\n if tensorshape_util.is_fully_defined(self._scale.batch_shape):\n self._parameter_batch_shape = self._scale.batch_shape\n else:\n self._parameter_batch_shape = self._scale.batch_shape_tensor()\n\n def _forward_event_shape(self, input_shape):\n if isinstance(self.scale, tf.linalg.LinearOperatorBlockLowerTriangular):\n return _cumulative_broadcast_static(input_shape)\n return input_shape\n\n def _forward_event_shape_tensor(self, input_shape):\n if isinstance(self.scale, tf.linalg.LinearOperatorBlockLowerTriangular):\n return _cumulative_broadcast_dynamic(input_shape)\n return input_shape\n\n def _inverse_event_shape(self, output_shape):\n if isinstance(self.scale, tf.linalg.LinearOperatorBlockLowerTriangular):\n return _cumulative_broadcast_static(output_shape)\n return output_shape\n\n def _inverse_event_shape_tensor(self, output_shape):\n if isinstance(self.scale, tf.linalg.LinearOperatorBlockLowerTriangular):\n return _cumulative_broadcast_dynamic(output_shape)\n return output_shape\n\n\ndef _cumulative_broadcast_static(event_shape):\n broadcast_shapes = [s[:-1] for s in event_shape]\n cumulative_shapes = [broadcast_shapes[0]]\n for shape in broadcast_shapes[1:]:\n out_shape = tf.broadcast_static_shape(shape, cumulative_shapes[-1])\n cumulative_shapes.append(out_shape)\n return [b.concatenate(s[-1]) for b, s in zip(cumulative_shapes, event_shape)]\n\n\ndef _cumulative_broadcast_dynamic(event_shape):\n broadcast_shapes = [\n ps.slice(s, begin=[0], size=[ps.size(s)-1]) for s in event_shape]\n cumulative_shapes = [broadcast_shapes[0]]\n for shape in broadcast_shapes[1:]:\n out_shape = ps.broadcast_shape(shape, cumulative_shapes[-1])\n cumulative_shapes.append(out_shape)\n return [\n ps.concat([b, ps.slice(s, begin=[ps.size(s)-1], size=[1])], axis=0)\n for b, s in zip(cumulative_shapes, event_shape)]\n",
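For the `ScaleMatvecLinearOperator` bijector above, `_forward` is `scale.matvec`, `_inverse` is `scale.solvevec`, and the constant `_forward_log_det_jacobian` is `scale.log_abs_determinant()`. A dense NumPy sketch of that contract for the diagonal `scale` from the docstring example (illustrative; it bypasses `LinearOperator` entirely):

```python
import numpy as np

# Illustrative only: a dense diagonal `scale`, mirroring the
# ScaleMatvecLinearOperator forward/inverse/log-det contract above.
diag = np.array([1., 2., 3.])
scale = np.diag(diag)
x = np.array([1., 2., 3.])

y = scale @ x                                 # _forward: scale.matvec(x)
x_back = np.linalg.solve(scale, y)            # _inverse: scale.solvevec(y)
fldj = np.log(np.abs(np.linalg.det(scale)))   # _forward_log_det_jacobian

print(y)       # [1. 4. 9.]  (matches the docstring example)
print(x_back)  # [1. 2. 3.]
print(fldj)    # log(1*2*3) = log 6; constant in x, hence is_constant_jacobian
```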
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for `leapfrog_integrator.py`.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl\n\n\n@test_util.test_all_tf_execution_regimes\nclass LeapfrogIntegratorTest(test_util.TestCase):\n\n def setUp(self):\n self._shape_param = 5.\n self._rate_param = 10.\n\n def assertAllFinite(self, x):\n self.assertAllEqual(np.ones_like(x).astype(bool), np.isfinite(x))\n\n def _log_gamma_log_prob(self, x, event_dims=()):\n \"\"\"Computes log-pdf of a log-gamma random variable.\n\n Args:\n x: Value of the random variable.\n event_dims: Dimensions not to treat as independent.\n\n Returns:\n log_prob: The log-pdf up to a normalizing constant.\n \"\"\"\n return tf.reduce_sum(\n self._shape_param * x - self._rate_param * tf.exp(x),\n axis=event_dims)\n\n def _integrator_conserves_energy(self, x, independent_chain_ndims, seed):\n event_dims = ps.range(independent_chain_ndims, tf.rank(x))\n\n target_fn = lambda x: self._log_gamma_log_prob(x, event_dims)\n\n m = tf.random.normal(tf.shape(x), seed=seed)\n log_prob_0 = target_fn(x)\n old_energy = -log_prob_0 + 0.5 * tf.reduce_sum(m**2., axis=event_dims)\n\n event_size = np.prod(\n self.evaluate(x).shape[independent_chain_ndims:])\n\n integrator = leapfrog_impl.SimpleLeapfrogIntegrator(\n target_fn,\n step_sizes=[0.09 / event_size],\n num_steps=1000)\n\n [[new_m], [_], log_prob_1, [_]] = integrator([m], [x])\n\n new_energy = -log_prob_1 + 0.5 * tf.reduce_sum(new_m**2., axis=event_dims)\n\n old_energy_, new_energy_ = self.evaluate([old_energy, new_energy])\n tf1.logging.vlog(\n 1, 'average energy relative change: {}'.format(\n (1. 
- new_energy_ / old_energy_).mean()))\n self.assertAllClose(old_energy_, new_energy_, atol=0., rtol=0.02)\n\n def _integrator_conserves_energy_wrapper(self, independent_chain_ndims):\n \"\"\"Tests the long-term energy conservation of the leapfrog integrator.\n\n The leapfrog integrator is symplectic, so for sufficiently small step\n sizes it should be possible to run it more or less indefinitely without\n the energy of the system blowing up or collapsing.\n\n Args:\n independent_chain_ndims: Python `int` scalar representing the number of\n dims associated with independent chains.\n \"\"\"\n seed_stream = test_util.test_seed_stream()\n x = self.evaluate(0.1 * tf.random.normal(\n shape=(50, 10, 2), seed=seed_stream()))\n x = tf.constant(x)\n self._integrator_conserves_energy(\n x, independent_chain_ndims, seed=seed_stream())\n\n def testIntegratorEnergyConservationNullShape(self):\n self._integrator_conserves_energy_wrapper(0)\n\n def testIntegratorEnergyConservation1(self):\n self._integrator_conserves_energy_wrapper(1)\n\n def testIntegratorEnergyConservation2(self):\n self._integrator_conserves_energy_wrapper(2)\n\n def testIntegratorEnergyConservation3(self):\n self._integrator_conserves_energy_wrapper(3)\n\n\nif __name__ == '__main__':\n test_util.main()\n",
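# A small sketch of the pattern exercised by the test file above: run the
# leapfrog integrator for a fixed number of steps and check that the
# Hamiltonian (potential plus kinetic energy) is approximately conserved.
# The module lives under `tensorflow_probability.python.mcmc.internal`, i.e.
# it is an internal API whose location may change between releases.
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.mcmc.internal import (
    leapfrog_integrator as leapfrog_impl)


def target_log_prob(x):
  # Standard-normal log-density, up to an additive constant.
  return -0.5 * tf.reduce_sum(x**2., axis=-1)


x = tf.constant([[0.1, -0.2], [0.3, 0.4]])
m = tf.random.normal(tf.shape(x), seed=42)

integrator = leapfrog_impl.SimpleLeapfrogIntegrator(
    target_log_prob, step_sizes=[0.05], num_steps=20)

# Returns (momentum_parts, state_parts, target_log_prob, target_grad_parts),
# matching the unpacking used in the test above.
[new_m], [new_x], new_target, _ = integrator([m], [x])

old_energy = -target_log_prob(x) + 0.5 * tf.reduce_sum(m**2., axis=-1)
new_energy = -new_target + 0.5 * tf.reduce_sum(new_m**2., axis=-1)
# For a symplectic integrator with a small step size, the two energies
# should agree to within a few percent.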
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utility functions for dtypes.\"\"\"\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\n\n__all__ = [\n 'as_numpy_dtype',\n 'assert_same_float_dtype',\n 'base_dtype',\n 'base_equal',\n 'common_dtype',\n 'eps',\n 'is_bool',\n 'is_complex',\n 'is_floating',\n 'is_integer',\n 'is_numpy_compatible',\n 'max',\n 'min',\n 'name',\n 'real_dtype',\n 'size',\n]\n\n\nJAX_MODE = False\nNUMPY_MODE = False\nSKIP_DTYPE_CHECKS = False\n\n\ndef is_numpy_compatible(dtype):\n \"\"\"Returns if dtype has a corresponding NumPy dtype.\"\"\"\n if JAX_MODE or NUMPY_MODE:\n return True\n else:\n return tf.as_dtype(dtype).is_numpy_compatible\n\n\ndef as_numpy_dtype(dtype):\n \"\"\"Returns a `np.dtype` based on this `dtype`.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'as_numpy_dtype'):\n return dtype.as_numpy_dtype\n return dtype\n\n\ndef base_dtype(dtype):\n \"\"\"Returns a non-reference `dtype` based on this `dtype`.\"\"\"\n dtype = None if dtype is None else tf.as_dtype(dtype)\n if hasattr(dtype, 'base_dtype'):\n return dtype.base_dtype\n return dtype\n\n\ndef base_equal(a, b):\n \"\"\"Returns `True` if base dtypes are identical.\"\"\"\n return base_dtype(a) == base_dtype(b)\n\n\ndef common_dtype(args_list, dtype_hint=None):\n \"\"\"Returns explict dtype from `args_list` if there is one.\"\"\"\n dtype = None\n seen = []\n for a in tf.nest.flatten(args_list):\n if hasattr(a, 'dtype') and a.dtype:\n dt = as_numpy_dtype(a.dtype)\n seen.append(dt)\n else:\n seen.append(None)\n continue\n if dtype is None:\n dtype = dt\n elif dtype != dt:\n if SKIP_DTYPE_CHECKS:\n dtype = (np.ones([2], dtype) + np.ones([2], dt)).dtype\n else:\n raise TypeError(\n 'Found incompatible dtypes, {} and {}. 
Seen so far: {}'.format(\n dtype, dt, seen))\n return base_dtype(dtype_hint) if dtype is None else base_dtype(dtype)\n\n\ndef convert_to_dtype(tensor_or_dtype, dtype=None, dtype_hint=None):\n \"\"\"Get a dtype from a list/tensor/dtype using convert_to_tensor semantics.\"\"\"\n if tensor_or_dtype is None:\n return dtype or dtype_hint\n\n # Tensorflow dtypes need to be typechecked\n if tf.is_tensor(tensor_or_dtype):\n dt = base_dtype(tensor_or_dtype.dtype)\n elif isinstance(tensor_or_dtype, tf.DType):\n dt = base_dtype(tensor_or_dtype)\n # Numpy dtypes defer to dtype/dtype_hint\n elif isinstance(tensor_or_dtype, np.ndarray):\n dt = base_dtype(dtype or dtype_hint or tensor_or_dtype.dtype)\n elif np.issctype(tensor_or_dtype):\n dt = base_dtype(dtype or dtype_hint or tensor_or_dtype)\n else:\n # If this is a Python object, call `convert_to_tensor` and grab the dtype.\n # Note that this will add ops in graph-mode; we may want to consider\n # other ways to handle this case.\n dt = tf.convert_to_tensor(tensor_or_dtype, dtype, dtype_hint).dtype\n\n if not SKIP_DTYPE_CHECKS and dtype and not base_equal(dtype, dt):\n raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt))\n return dt\n\n\ndef eps(dtype):\n \"\"\"Returns the distance between 1 and the next largest representable value.\"\"\"\n return np.finfo(as_numpy_dtype(dtype)).eps\n\n\ndef is_bool(dtype):\n \"\"\"Returns whether this is a boolean data type.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'is_bool'):\n return dtype.is_bool\n # We use `kind` because:\n # np.issubdtype(np.uint8, np.bool_) == True.\n return np.dtype(dtype).kind == 'b'\n\n\ndef is_complex(dtype):\n \"\"\"Returns whether this is a complex floating point type.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'is_complex'):\n return dtype.is_complex\n return np.issubdtype(np.dtype(dtype), np.complexfloating)\n\n\ndef is_floating(dtype):\n \"\"\"Returns whether this is a (non-quantized, real) floating point type.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'is_floating'):\n return dtype.is_floating\n return np.issubdtype(np.dtype(dtype), np.floating)\n\n\ndef is_integer(dtype):\n \"\"\"Returns whether this is a (non-quantized) integer type.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'is_integer') and not callable(dtype.is_integer):\n return dtype.is_integer\n return np.issubdtype(np.dtype(dtype), np.integer)\n\n\ndef max(dtype): # pylint: disable=redefined-builtin\n \"\"\"Returns the maximum representable value in this data type.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'max') and not callable(dtype.max):\n return dtype.max\n use_finfo = is_floating(dtype) or is_complex(dtype)\n return np.finfo(dtype).max if use_finfo else np.iinfo(dtype).max\n\n\ndef min(dtype): # pylint: disable=redefined-builtin\n \"\"\"Returns the minimum representable value in this data type.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'min') and not callable(dtype.min):\n return dtype.min\n use_finfo = is_floating(dtype) or is_complex(dtype)\n return np.finfo(dtype).min if use_finfo else np.iinfo(dtype).min\n\n\ndef name(dtype):\n \"\"\"Returns the string name for this `dtype`.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'name'):\n return dtype.name\n if hasattr(dtype, '__name__'):\n return dtype.__name__\n return str(dtype)\n\n\ndef size(dtype):\n \"\"\"Returns the number of bytes to represent this `dtype`.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'size') and hasattr(dtype, 'as_numpy_dtype'):\n 
return dtype.size\n return np.dtype(dtype).itemsize\n\n\ndef real_dtype(dtype):\n \"\"\"Returns the dtype of the real part.\"\"\"\n dtype = tf.as_dtype(dtype)\n if hasattr(dtype, 'real_dtype'):\n return dtype.real_dtype\n # TODO(jvdillon): Find a better way.\n return np.array(0, as_numpy_dtype(dtype)).real.dtype\n\n\ndef _assert_same_base_type(items, expected_type=None):\n r\"\"\"Asserts all items are of the same base type.\n\n Args:\n items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,\n `Operation`, or `IndexedSlices`). Can include `None` elements, which\n will be ignored.\n expected_type: Expected type. If not specified, assert all items are\n of the same base type.\n\n Returns:\n Validated type, or none if neither expected_type nor items provided.\n\n Raises:\n ValueError: If any types do not match.\n \"\"\"\n original_expected_type = expected_type\n mismatch = False\n for item in items:\n if item is not None:\n item_type = base_dtype(item.dtype)\n if expected_type is None:\n expected_type = item_type\n elif expected_type != item_type:\n mismatch = True\n break\n if mismatch:\n # Loop back through and build up an informative error message (this is very\n # slow, so we don't do it unless we found an error above).\n expected_type = original_expected_type\n original_item_str = None\n get_name = lambda x: x.name if hasattr(x, 'name') else str(x)\n for item in items:\n if item is not None:\n item_type = base_dtype(item.dtype)\n if not expected_type:\n expected_type = item_type\n original_item_str = get_name(item)\n elif expected_type != item_type:\n raise ValueError(\n '{}, type={}, must be of the same type ({}){}.'.format(\n get_name(item),\n item_type,\n expected_type,\n ((' as {}'.format(original_item_str))\n if original_item_str else '')))\n return expected_type # Should be unreachable\n else:\n return expected_type\n\n\ndef assert_same_float_dtype(tensors=None, dtype=None):\n \"\"\"Validate and return float type based on `tensors` and `dtype`.\n\n For ops such as matrix multiplication, inputs and weights must be of the\n same float type. This function validates that all `tensors` are the same type,\n validates that type is `dtype` (if supplied), and returns the type. Type must\n be a floating point type. If neither `tensors` nor `dtype` is supplied,\n the function will return `dtypes.float32`.\n\n Args:\n tensors: Tensors of input values. Can include `None` elements, which will\n be ignored.\n dtype: Expected type.\n\n Returns:\n Validated type.\n\n Raises:\n ValueError: if neither `tensors` nor `dtype` is supplied, or result is not\n float, or the common type of the inputs is not a floating point type.\n \"\"\"\n if tensors:\n dtype = _assert_same_base_type(tensors, dtype)\n if not dtype:\n dtype = tf.float32\n elif not is_floating(dtype):\n raise ValueError('Expected floating point type, got {}.'.format(dtype))\n return dtype\n",
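# A short sketch of how the dtype utilities in the file above behave. The
# module is internal (`tensorflow_probability.python.internal.dtype_util`), so
# the import path below is an implementation detail rather than public API.
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util

x = tf.constant([1., 2.], dtype=tf.float64)
y = np.array([3., 4.], dtype=np.float64)

# The explicit dtype shared by the arguments; `dtype_hint` is only used when
# none of the arguments carries a dtype of its own.
dt = dtype_util.common_dtype([x, y], dtype_hint=tf.float32)

# Representable-value helpers dispatch to np.finfo / np.iinfo as appropriate.
machine_eps = dtype_util.eps(dt)
largest = dtype_util.max(dt)

# Mixing incompatible dtypes raises TypeError unless SKIP_DTYPE_CHECKS is set:
# dtype_util.common_dtype([x, tf.constant(1, tf.int32)])  # TypeError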
"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for layers.\"\"\"\n\nfrom absl.testing import parameterized\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import custom_gradient\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import test_util\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\njdlayers = tfp.experimental.joint_distribution_layers\nRoot = tfd.JointDistributionCoroutine.Root\n\n\n@test_util.test_all_tf_execution_regimes\nclass _JDLayersTestBase(test_util.TestCase):\n\n @test_util.numpy_disable_gradient_test\n def testBijectorIntegration(self):\n\n @tfd.JointDistributionCoroutine\n def bijector_model():\n\n bijectors = []\n for i in range(3):\n bijector_net = yield Root(\n jdlayers.Sequential(\n jdlayers.Affine(5, 5),\n tf.tanh,\n jdlayers.Affine(10, 5),\n jdlayers.Lambda(lambda x: tfb.Chain( # pylint: disable=g-long-lambda\n [tfb.Shift(x[..., :5]),\n tfb.Scale(log_scale=x[..., 5:])])),\n name=f'net{i}',\n ))\n\n bijectors.append(\n tfb.RealNVP(\n fraction_masked=0.5 * (-1)**i,\n bijector_fn=lambda x, _, bn=bijector_net: bn(x)))\n\n yield jdlayers.Lambda(lambda: tfb.Chain(bijectors))\n\n *params, _ = bijector_model.sample(\n 2, seed=test_util.test_seed(sampler_type='stateless'))\n\n def ldj_fn(params):\n bijector_fn = bijector_model.sample(\n value=params, seed=test_util.test_seed(sampler_type='stateless'))[-1]\n return bijector_fn().forward_log_det_jacobian(tf.ones([2, 10]), 1)\n\n ldj, (params_grad,) = tfp.math.value_and_gradient(ldj_fn, (params,))\n self.assertEqual([2], ldj.shape)\n self.assertAllAssertsNested(\n lambda x, g: self.assertTrue(x.shape[-1] == 0 or custom_gradient. 
# pylint: disable=g-long-lambda\n is_valid_gradient(g)),\n params,\n params_grad)\n\n @parameterized.named_parameters(\n ('Affine', lambda dtype: jdlayers.Affine(4, 3, dtype=dtype)),\n (\n 'Conv2D',\n lambda dtype: jdlayers.Conv2D(4, (3, 4), 3, dtype=dtype),\n True,\n ),\n ('Lambda', lambda dtype: jdlayers.Lambda(tf.nn.softplus, dtype=dtype)),\n (\n 'SequentialAffine',\n lambda dtype: jdlayers.Sequential( # pylint: disable=g-long-lambda\n jdlayers.Affine(2, 4, dtype=dtype),\n jdlayers.Lambda(tf.nn.softplus, dtype=dtype),\n ),\n ),\n (\n 'SequentialConv',\n lambda dtype: jdlayers.Sequential( # pylint: disable=g-long-lambda\n jdlayers.Conv2D(2, 3, 4, dtype=dtype),\n jdlayers.Lambda(tf.nn.softplus, dtype=dtype),\n ),\n True),\n )\n def testIsDistribution(self, dist_fn, has_conv=False):\n \"\"\"Instantiates a layer distribution and exercises its methods.\"\"\"\n dist = dist_fn(self.dtype)\n if has_conv and test_util.is_numpy_not_jax_mode():\n self.skipTest('tf.nn.conv not implemented in NumPy.')\n self.assertIsInstance(dist, tfd.Distribution)\n dtype = dist.dtype\n tf.nest.assert_same_structure(dtype, dist.batch_shape)\n tf.nest.assert_same_structure(dtype, dist.event_shape)\n tf.nest.assert_same_structure(dtype, dist.batch_shape_tensor())\n tf.nest.assert_same_structure(dtype, dist.event_shape_tensor())\n sample = dist.sample([3],\n seed=test_util.test_seed(sampler_type='stateless'))\n tf.nest.assert_same_structure(dtype, sample)\n\n # Make sure we can use bijectors too.\n bijector = dist.experimental_default_event_space_bijector()\n unconstrained_sample = bijector.inverse(sample)\n unconstrained_sample = tf.nest.map_structure(lambda x: x + 0.,\n unconstrained_sample)\n sample = bijector.forward(unconstrained_sample)\n\n lp = dist.log_prob(sample)\n expected_lp_shape = [3]\n self.assertEqual(expected_lp_shape, lp.shape)\n\n @parameterized.named_parameters(\n ('LayerBatch', [7], []),\n ('InputBatch', [], [7]),\n ('BothBatch', [7], [7]),\n )\n def testAffineBatching(self, layer_batch, input_batch):\n dist = jdlayers.Affine(4, 3, dtype=self.dtype)\n layer = dist.sample(\n layer_batch, seed=test_util.test_seed(sampler_type='stateless'))\n # Validate that we can map the layer.\n layer = tf.nest.map_structure(lambda x: x + 0., layer)\n x = tf.ones(input_batch + [3], dtype=self.dtype)\n y = layer(x)\n self.assertAllEqual(\n list(ps.broadcast_shape(layer_batch, input_batch)) + [4], y.shape)\n self.assertEqual(self.dtype, y.dtype)\n\n def testAffineCustomParamsDist(self):\n\n def params_model_fn(out_units, in_units, dtype):\n yield Root(\n tfd.LogNormal(\n tf.zeros([out_units, in_units], dtype), 1., name='weights'))\n yield Root(tfd.LogNormal(tf.zeros([out_units], dtype), 1., name='bias'))\n\n dist = jdlayers.Affine(\n 5, 4, dtype=self.dtype, params_model_fn=params_model_fn)\n layer = dist.sample(seed=test_util.test_seed(sampler_type='stateless'))\n x = tf.ones([4], self.dtype)\n y = layer(x)\n self.assertAllEqual([5], y.shape)\n self.assertEqual(self.dtype, y.dtype)\n # Since the parameters are log-normal, the outputs will be positive for\n # positive inputs.\n self.assertAllTrue(y > 0.)\n\n @test_util.disable_test_for_backend(\n disable_numpy=True, reason='tf.nn.conv not implemented in NumPy.')\n @parameterized.named_parameters(\n ('LayerBatch', [7], []),\n ('InputBatch', [], [7]),\n ('BothBatch', [7], [7]),\n )\n def testConv2DBatching(self, layer_batch, input_batch):\n dist = jdlayers.Conv2D(4, (3, 3), 3, dtype=self.dtype)\n layer = dist.sample(\n layer_batch, 
seed=test_util.test_seed(sampler_type='stateless'))\n # Validate that we can map the layer.\n layer = tf.nest.map_structure(lambda x: x + 0., layer)\n x = tf.ones(input_batch + [9, 5, 5, 3], dtype=self.dtype)\n y = layer(x)\n self.assertAllEqual(\n list(ps.broadcast_shape(layer_batch, input_batch)) + [9, 5, 5, 4],\n y.shape)\n self.assertEqual(self.dtype, y.dtype)\n\n @test_util.disable_test_for_backend(\n disable_numpy=True, reason='tf.nn.conv not implemented in NumPy.')\n @parameterized.named_parameters(\n ('SameNoStrides', 'SAME', [1, 1], [7, 11]),\n ('ValidNoStrides', 'VALID', [1, 1], [5, 9]),\n ('SameWithStrides', 'SAME', [2, 2], [4, 6]),\n ('ValidWithStrides', 'VALID', [2, 2], [3, 5]),\n )\n def testConv2DParams(self, padding, strides, expected_output_size):\n dist = jdlayers.Conv2D(\n 4, (3, 3), 3, strides=strides, padding=padding, dtype=self.dtype)\n layer = dist.sample(seed=test_util.test_seed(sampler_type='stateless'))\n # Validate that we can map the layer.\n layer = tf.nest.map_structure(lambda x: x + 0., layer)\n x = tf.ones([5, 7, 11, 3], dtype=self.dtype)\n y = layer(x)\n self.assertAllEqual([5] + expected_output_size + [4], y.shape)\n self.assertEqual(self.dtype, y.dtype)\n\n @test_util.disable_test_for_backend(\n disable_numpy=True, reason='tf.nn.conv not implemented in NumPy.')\n def testConvCustomParamsDist(self):\n\n def params_model_fn(out_channels, size, in_channels, dtype):\n yield Root(\n tfd.LogNormal(\n tf.zeros(list(size) + [in_channels, out_channels], dtype),\n 1.,\n name='kernel'))\n\n dist = jdlayers.Conv2D(\n 5, (3, 3), 4, dtype=self.dtype, params_model_fn=params_model_fn)\n layer = dist.sample(seed=test_util.test_seed(sampler_type='stateless'))\n x = tf.ones([3, 5, 7, 4], self.dtype)\n y = layer(x)\n self.assertAllEqual([3, 5, 7, 5], y.shape)\n self.assertEqual(self.dtype, y.dtype)\n # Since the parameters are log-normal, the outputs will be positive for\n # positive inputs.\n self.assertAllTrue(y > 0.)\n\n def testLambda(self):\n dist = jdlayers.Lambda(tf.square)\n layer = dist.sample(seed=test_util.test_seed(sampler_type='stateless'))\n self.assertAllClose(4, layer(2))\n\n def testSequential(self):\n dist = jdlayers.Sequential(\n jdlayers.Affine(5, 3),\n tf.nn.softplus,\n )\n layer = dist.sample(seed=test_util.test_seed(sampler_type='stateless'))\n x = tf.ones([4, 3])\n y = layer(x)\n self.assertAllEqual([4, 5], y.shape)\n self.assertAllTrue(y > 0.)\n self.assertAllEqual([], dist.log_prob(layer).shape)\n\n\nclass JDLayersTest32(_JDLayersTestBase):\n dtype = tf.float32\n\n\nclass JDLayersTest64(_JDLayersTestBase):\n dtype = tf.float64\n\n\ndel _JDLayersTestBase\n\nif __name__ == '__main__':\n test_util.main()\n",
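# Sketch of the layer-distribution pattern the tests above exercise.
# `tfp.experimental.joint_distribution_layers` is experimental; the module may
# be absent or renamed in a given TFP release, so treat this purely as an
# illustration of the usage shown in the test code (see testSequential).
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

jdlayers = tfp.experimental.joint_distribution_layers

# `Sequential` is itself a distribution over layers: sampling it draws
# concrete weights and returns a callable layer.
dist = jdlayers.Sequential(
    jdlayers.Affine(5, 3),  # 3 input units -> 5 output units
    tf.nn.softplus,
)
layer = dist.sample()
y = layer(tf.ones([4, 3]))   # shape [4, 5]; positive because of the softplus
lp = dist.log_prob(layer)    # scalar log-density of the sampled weights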
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utility functions for `TensorShape`.\"\"\"\n\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.framework import ops # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.framework import tensor_shape # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.framework import tensor_util # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n 'as_list',\n 'assert_has_rank',\n 'assert_is_compatible_with',\n 'concatenate',\n 'constant_value_as_shape',\n 'dims',\n 'is_compatible_with',\n 'is_fully_defined',\n 'merge_with',\n 'num_elements',\n 'rank',\n 'set_shape',\n 'with_rank',\n 'with_rank_at_least',\n]\n\nJAX_MODE = False\n\n\ndef as_list(x):\n \"\"\"Returns a `list` of integers or `None` for each dimension.\n\n For more details, see `help(tf.TensorShape.as_list)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n shape_as_list: list of `int` or `None` values representing each dimensions\n size if known.\n\n Raises:\n ValueError: If `x` has unknown rank.\n \"\"\"\n return tf.TensorShape(x).as_list()\n\n\ndef _cast_tensorshape(x, x_type):\n if issubclass(x_type, tf.TensorShape):\n return x\n if issubclass(x_type, np.ndarray):\n # np.ndarray default constructor will place x\n # as the shape, which we don't want.\n return np.array(as_list(x), dtype=np.int32)\n return x_type(as_list(x))\n\n\ndef assert_has_rank(x, rank): # pylint: disable=redefined-outer-name\n \"\"\"Raises an exception if `x` is not compatible with the given `rank`.\n\n For more details, see `help(tf.TensorShape.assert_has_rank)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n rank: an `int` representing the minimum required rank of `x`.\n\n Returns:\n None\n\n Raises:\n ValueError: If `x` does not represent a shape with the given `rank`.\n \"\"\"\n tf.TensorShape(x).assert_has_rank(rank)\n\n\ndef assert_is_compatible_with(x, other):\n \"\"\"Raises exception if `x` and `other` do not represent the same shape.\n\n This method can be used to assert that there exists a shape that both\n `x` and `other` represent.\n\n For more details, see `help(tf.TensorShape.assert_is_compatible_with)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n other: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n None\n\n Raises:\n ValueError: If `x` and `other` do not represent the same shape.\n \"\"\"\n tf.TensorShape(x).assert_is_compatible_with(other)\n\n\ndef concatenate(x, other):\n \"\"\"Returns the concatenation of the dimension in `x` and `other`.\n\n *Note:* If either `x` or `other` is completely unknown, concatenation will\n discard information about the other shape. 
In future, we might support\n concatenation that preserves this information for use with slicing.\n\n For more details, see `help(tf.TensorShape.concatenate)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n other: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n new_shape: an object like `x` whose elements are the concatenation of the\n dimensions in `x` and `other`.\n \"\"\"\n return _cast_tensorshape(tf.TensorShape(x).concatenate(other), type(x))\n\n\ndef constant_value_as_shape(tensor): # pylint: disable=invalid-name\n \"\"\"A version of `constant_value()` that returns a `TensorShape`.\n\n This version should be used when a constant tensor value is\n interpreted as a (possibly partial) shape, e.g. in the shape\n function for `tf.reshape()`. By explicitly requesting a\n `TensorShape` as the return value, it is possible to represent\n unknown dimensions; by contrast, `constant_value()` is\n all-or-nothing.\n\n Args:\n tensor: The rank-0 or rank-1 Tensor to be evaluated.\n\n Returns:\n A `TensorShape` based on the constant value of the given `tensor`.\n\n Raises:\n ValueError: If the shape is rank-0 and is not statically known to be -1.\n \"\"\"\n shape = tf.get_static_value(tensor)\n if shape is not None:\n return tensor_shape.as_shape(\n [None if dim == -1 else dim for dim in shape])\n try:\n # Importing here, conditionally, to avoid a hard dependency on\n # DeferredTensor, because that creates a BUILD dependency cycle.\n # Why is it necessary to mention DeferredTensor at all?\n # Because TF's `constant_value_as_shape` barfs on it: b/142254634.\n # NOTE: In the JAX/NumPy backends, DeferredTensor is not a class/type.\n # pylint: disable=g-import-not-at-top\n from tensorflow_probability.python.util.deferred_tensor import DeferredTensor\n if isinstance(DeferredTensor, type) and isinstance(tensor, DeferredTensor):\n # Presumably not constant if deferred\n return tf.TensorShape(None)\n except ImportError:\n # If DeferredTensor doesn't even exist, couldn't have been an instance of\n # it.\n pass\n if tf.executing_eagerly():\n # Working around b/142251799\n if hasattr(ops, 'EagerTensor') and isinstance(tensor, ops.EagerTensor):\n return tensor_shape.as_shape(\n [dim if dim != -1 else None for dim in tensor.numpy()])\n else:\n return tf.TensorShape(None)\n return tensor_util.constant_value_as_shape(tensor)\n\n\ndef dims(x):\n \"\"\"Returns a list of dimension sizes, or `None` if `rank` is unknown.\n\n For more details, see `help(tf.TensorShape.dims)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n shape_as_list: list of sizes or `None` values representing each\n dimensions size if known. 
A size is `tf.Dimension` if input is a\n `tf.TensorShape` and an `int` otherwise.\n \"\"\"\n if isinstance(x, tf.TensorShape):\n return x.dims\n r = tf.TensorShape(x).dims\n return None if r is None else list(map(tf.compat.dimension_value, r))\n\n\ndef is_compatible_with(x, other):\n \"\"\"Returns `True` iff `x` is compatible with `other`.\n\n For more details, see `help(tf.TensorShape.is_compatible_with)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n other: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n is_compatible: `bool` indicating of the shapes are compatible.\n \"\"\"\n return tf.TensorShape(x).is_compatible_with(other)\n\n\ndef is_fully_defined(x):\n \"\"\"Returns True iff `x` is fully defined in every dimension.\n\n For more details, see `help(tf.TensorShape.is_fully_defined)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n is_fully_defined: `bool` indicating that the shape is fully known.\n \"\"\"\n return tf.TensorShape(x).is_fully_defined()\n\n\ndef merge_with(x, other):\n \"\"\"Returns a shape combining the information in `x` and `other`.\n\n The dimensions in `x` and `other` are merged elementwise, according to the\n rules defined for `tf.Dimension.merge_with()`.\n\n For more details, see `help(tf.TensorShape.merge_with)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n other: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n merged_shape: shape having `type(x)` containing the combined information of\n `x` and `other`.\n\n Raises:\n ValueError: If `x` and `other` are not compatible.\n \"\"\"\n return _cast_tensorshape(tf.TensorShape(x).merge_with(other), type(x))\n\n\ndef num_elements(x):\n \"\"\"Returns the total number of elements, or `None` for incomplete shapes.\n\n For more details, see `help(tf.TensorShape.num_elements)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n\n Returns:\n num_elements: `int` representing the total number of elements implied by\n shape `x`.\n \"\"\"\n return tf.TensorShape(x).num_elements()\n\n\ndef rank(x):\n \"\"\"Returns the rank implied by this shape, or `None` if it is unspecified.\n\n For more details, see `help(tf.TensorShape.rank)`.\n\n Note: This is not the rank of the shape itself, viewed as a Tensor, which\n would always be 1; rather, it's the rank of every Tensor of the shape given by\n `x`.\n\n Args:\n x: object representing a shape; anything convertible to `tf.TensorShape`,\n or a `Tensor` (interpreted as an in-graph computed shape).\n\n Returns:\n rank: `int` representing the number of shape dimensions, or `None` if\n not statically known.\n \"\"\"\n return tf.TensorShape(x).rank\n\n\ndef set_shape(tensor, shape):\n \"\"\"Updates the shape of this tensor.\n\n This method can be called multiple times, and will merge the given\n `shape` with the current shape of this tensor. It can be used to\n provide additional information about the shape of this tensor that\n cannot be inferred from the graph alone. 
For example, this can be used\n to provide additional information about the shapes of images:\n\n ```python\n _, image_data = tf.TFRecordReader(...).read(...)\n image = tf.image.decode_png(image_data, channels=3)\n\n # The height and width dimensions of `image` are data dependent, and\n # cannot be computed without executing the op.\n print(image.shape)\n ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])\n\n # We know that each image in this dataset is 28 x 28 pixels.\n image.set_shape([28, 28, 3])\n print(image.shape)\n ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])\n ```\n\n NOTE: This shape is not enforced at runtime. Setting incorrect shapes can\n result in inconsistencies between the statically-known graph and the runtime\n value of tensors. For runtime validation of the shape, use `tf.ensure_shape`\n instead.\n\n Args:\n tensor: `Tensor` which will have its static shape set.\n shape: A `TensorShape` representing the shape of this tensor, a\n `TensorShapeProto`, a list, a tuple, or None.\n\n Raises:\n ValueError: If `shape` is not compatible with the current shape of\n this tensor.\n \"\"\"\n if hasattr(tensor, 'set_shape'):\n tensor.set_shape(shape)\n\n\ndef with_rank(x, rank): # pylint: disable=redefined-outer-name\n \"\"\"Returns a shape based on `x` with the given `rank`.\n\n This method promotes a completely unknown shape to one with a known rank.\n\n For more details, see `help(tf.TensorShape.with_rank)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n rank: An `int` representing the rank of `x`, or else an assertion is raised.\n\n Returns:\n shape: a shape having `type(x)` but guaranteed to have given rank (or else\n an assertion was raised).\n\n Raises:\n ValueError: If `x` does not represent a shape with the given `rank`.\n \"\"\"\n return _cast_tensorshape(tf.TensorShape(x).with_rank(rank), type(x))\n\n\ndef with_rank_at_least(x, rank): # pylint: disable=redefined-outer-name\n \"\"\"Returns a shape based on `x` with at least the given `rank`.\n\n For more details, see `help(tf.TensorShape.with_rank_at_least)`.\n\n Args:\n x: object representing a shape; convertible to `tf.TensorShape`.\n rank: An `int` representing the minimum rank of `x` or else an assertion is\n raised.\n\n Returns:\n shape: a shape having `type(x)` but guaranteed to have at least the given\n rank (or else an assertion was raised).\n\n Raises:\n ValueError: If `x` does not represent a shape with at least the given\n `rank`.\n \"\"\"\n return _cast_tensorshape(tf.TensorShape(x).with_rank_at_least(rank), type(x))\n",
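# A quick sketch of the TensorShape helpers in the file above. They live in
# the internal module `tensorflow_probability.python.internal.tensorshape_util`
# and accept anything convertible to `tf.TensorShape`, including plain lists.
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import tensorshape_util

shape = [None, 28, 28, 3]
tensorshape_util.rank(shape)                # 4
tensorshape_util.is_fully_defined(shape)    # False: the batch dim is unknown
tensorshape_util.num_elements([28, 28, 3])  # 2352

# `concatenate` returns an object of the same type as its first argument.
tensorshape_util.concatenate([None], [28, 28, 3])  # [None, 28, 28, 3]

# `constant_value_as_shape` converts a shape-valued tensor to a TensorShape,
# mapping -1 entries to unknown dimensions.
tensorshape_util.constant_value_as_shape(tf.constant([28, -1, 3]))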
"# Copyright 2019 The TensorFlow Probability Authors.\n# Copyright 2019 OpenAI (http://openai.com).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Pixel CNN++ distribution class.\"\"\"\n\nimport functools\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import shift\nfrom tensorflow_probability.python.distributions import categorical\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import independent\nfrom tensorflow_probability.python.distributions import logistic\nfrom tensorflow_probability.python.distributions import mixture_same_family\nfrom tensorflow_probability.python.distributions import quantized_distribution\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.layers import weight_norm\n\n\nclass PixelCNN(distribution.Distribution):\n \"\"\"The Pixel CNN++ distribution.\n\n Pixel CNN++ [(Salimans et al., 2017)][1] models a distribution over image\n data, parameterized by a neural network. It builds on Pixel CNN and\n Conditional Pixel CNN, as originally proposed by [(van den Oord et al.,\n 2016)][2, 3]. The model expresses the joint distribution over pixels as\n the product of conditional distributions:\n `p(x|h) = prod{ p(x[i] | x[0:i], h) : i=0, ..., d }`,\n in which `p(x[i] | x[0:i], h) : i=0, ..., d` is the\n probability of the `i`-th pixel conditional on the pixels that preceded it in\n raster order (color channels in RGB order, then left to right, then top to\n bottom). `h` is optional additional data on which to condition the image\n distribution, such as class labels or VAE embeddings. The Pixel CNN++\n network enforces the dependency structure among pixels by applying a mask to\n the kernels of the convolutional layers that ensures that the values for each\n pixel depend only on other pixels up and to the left (see\n `tfd.PixelCnnNetwork`).\n\n Pixel values are modeled with a mixture of quantized logistic distributions,\n which can take on a set of distinct integer values (e.g. between 0 and 255\n for an 8-bit image).\n\n Color intensity `v` of each pixel is modeled as:\n\n `v ~ sum{q[i] * quantized_logistic(loc[i], scale[i]) : i = 0, ..., k }`,\n\n in which `k` is the number of mixture components and the `q[i]` are the\n Categorical probabilities over the components.\n\n #### Sampling\n\n Pixels are sampled one at a time, in raster order. This enforces the\n autoregressive dependency structure, in which the sample of pixel `i` is\n conditioned on the samples of pixels `1, ..., i-1`. 
A single color image is\n sampled as follows:\n\n ```python\n samples = random_uniform([image_height, image_width, image_channels])\n for i in image_height:\n for j in image_width:\n component_logits, locs, scales, coeffs = pixel_cnn_network(samples)\n components = Categorical(component_logits).sample()\n locs = gather(locs, components)\n scales = gather(scales, components)\n\n coef_count = 0\n channel_samples = []\n for k in image_channels:\n loc = locs[k]\n for m in range(k):\n loc += channel_samples[m] * coeffs[coef_count]\n coef_count += 1\n channel_samp = Logistic(loc, scales[k]).sample()\n channel_samples.append(channel_samp)\n samples[i, j, :] = tf.stack(channel_samples, axis=-1)\n samples = round(samples)\n ```\n\n #### Examples\n\n ```python\n\n # Build a small Pixel CNN++ model to train on MNIST.\n\n import tensorflow as tf\n import tensorflow_datasets as tfds\n import tensorflow_probability as tfp\n\n tfd = tfp.distributions\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Load MNIST from tensorflow_datasets\n data = tfds.load('mnist')\n train_data, test_data = data['train'], data['test']\n\n def image_preprocess(x):\n x['image'] = tf.cast(x['image'], tf.float32)\n return (x['image'],) # (input, output) of the model\n\n batch_size = 16\n train_it = train_data.map(image_preprocess).batch(batch_size).shuffle(1000)\n\n image_shape = (28, 28, 1)\n # Define a Pixel CNN network\n dist = tfd.PixelCNN(\n image_shape=image_shape,\n num_resnet=1,\n num_hierarchies=2,\n num_filters=32,\n num_logistic_mix=5,\n dropout_p=.3,\n )\n\n # Define the model input\n image_input = tfkl.Input(shape=image_shape)\n\n # Define the log likelihood for the loss fn\n log_prob = dist.log_prob(image_input)\n\n # Define the model\n model = tfk.Model(inputs=image_input, outputs=log_prob)\n model.add_loss(-tf.reduce_mean(log_prob))\n\n # Compile and train the model\n model.compile(\n optimizer=tfk.optimizers.Adam(.001),\n metrics=[])\n\n model.fit(train_it, epochs=10, verbose=True)\n\n # sample five images from the trained model\n samples = dist.sample(5)\n\n ```\n\n To train a class-conditional model:\n\n ```python\n\n data = tfds.load('mnist')\n train_data, test_data = data['train'], data['test']\n\n def image_preprocess(x):\n x['image'] = tf.cast(x['image'], tf.float32)\n # return model (inputs, outputs): inputs are (image, label) and there are no\n # outputs\n return ((x['image'], x['label']),)\n\n batch_size = 16\n train_ds = train_data.map(image_preprocess).batch(batch_size).shuffle(1000)\n optimizer = tfk.optimizers.Adam()\n\n image_shape = (28, 28, 1)\n label_shape = ()\n dist = tfd.PixelCNN(\n image_shape=image_shape,\n conditional_shape=label_shape,\n num_resnet=1,\n num_hierarchies=2,\n num_filters=32,\n num_logistic_mix=5,\n dropout_p=.3,\n )\n\n image_input = tfkl.Input(shape=image_shape)\n label_input = tfkl.Input(shape=label_shape)\n\n log_prob = dist.log_prob(image_input, conditional_input=label_input)\n\n class_cond_model = tfk.Model(\n inputs=[image_input, label_input], outputs=log_prob)\n class_cond_model.add_loss(-tf.reduce_mean(log_prob))\n class_cond_model.compile(\n optimizer=tfk.optimizers.Adam(),\n metrics=[])\n class_cond_model.fit(train_ds, epochs=10)\n\n # Take 10 samples of the digit '5'\n samples = dist.sample(10, conditional_input=5.)\n\n # Take 4 samples each of the digits '1', '2', '3'.\n # Note that when a batch of conditional input is passed, the sample shape\n # (the first argument of `dist.sample`) must have its last dimension(s) equal\n # the batch shape of the conditional 
input (here, (3,)).\n samples = dist.sample((4, 3), conditional_input=[1., 2., 3.])\n\n ```\n\n Note: PixelCNN may also be trained using tfp.layers.DistributionLambda;\n however, as of this writing, that method is much slower and has the\n disadvantage of calling `sample()` upon construction, which causes the\n `PixelCnnNetwork` to be initialized with random data (if data-dependent\n initialization is used).\n\n #### References\n\n [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.\n PixelCNN++: Improving the PixelCNN with Discretized Logistic Mixture\n Likelihood and Other Modifications. In _International Conference on\n Learning Representations_, 2017.\n https://pdfs.semanticscholar.org/9e90/6792f67cbdda7b7777b69284a81044857656.pdf\n Additional details at https://github.com/openai/pixel-cnn\n\n [2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,\n Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with\n PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.\n https://arxiv.org/abs/1606.05328\n\n [3]: Aaron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel\n Recurrent Neural Networks. In _International Conference on Machine\n Learning_, 2016. https://arxiv.org/pdf/1601.06759.pdf\n \"\"\"\n\n def __init__(self,\n image_shape,\n conditional_shape=None,\n num_resnet=5,\n num_hierarchies=3,\n num_filters=160,\n num_logistic_mix=10,\n receptive_field_dims=(3, 3),\n dropout_p=0.5,\n resnet_activation='concat_elu',\n use_weight_norm=True,\n use_data_init=True,\n high=255,\n low=0,\n dtype=tf.float32,\n name='PixelCNN'):\n \"\"\"Construct Pixel CNN++ distribution.\n\n Args:\n image_shape: 3D `TensorShape` or tuple for the `[height, width, channels]`\n dimensions of the image.\n conditional_shape: `TensorShape` or tuple for the shape of the\n conditional input, or `None` if there is no conditional input.\n num_resnet: `int`, the number of layers (shown in Figure 2 of [2]) within\n each highest-level block of Figure 2 of [1].\n num_hierarchies: `int`, the number of hightest-level blocks (separated by\n expansions/contractions of dimensions in Figure 2 of [1].)\n num_filters: `int`, the number of convolutional filters.\n num_logistic_mix: `int`, number of components in the logistic mixture\n distribution.\n receptive_field_dims: `tuple`, height and width in pixels of the receptive\n field of the convolutional layers above and to the left of a given\n pixel. The width (second element of the tuple) should be odd. Figure 1\n (middle) of [2] shows a receptive field of (3, 5) (the row containing\n the current pixel is included in the height). The default of (3, 3) was\n used to produce the results in [1].\n dropout_p: `float`, the dropout probability. Should be between 0 and 1.\n resnet_activation: `string`, the type of activation to use in the resnet\n blocks. 
May be 'concat_elu', 'elu', or 'relu'.\n use_weight_norm: `bool`, if `True` then use weight normalization (works\n only in Eager mode).\n use_data_init: `bool`, if `True` then use data-dependent initialization\n (has no effect if `use_weight_norm` is `False`).\n high: `int`, the maximum value of the input data (255 for an 8-bit image).\n low: `int`, the minimum value of the input data.\n dtype: Data type of the `Distribution`.\n name: `string`, the name of the `Distribution`.\n \"\"\"\n\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n super(PixelCNN, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=False,\n allow_nan_stats=True,\n parameters=parameters,\n name=name)\n\n if not tensorshape_util.is_fully_defined(image_shape):\n raise ValueError('`image_shape` must be fully defined.')\n\n if (conditional_shape is not None and\n not tensorshape_util.is_fully_defined(conditional_shape)):\n raise ValueError('`conditional_shape` must be fully defined`')\n\n if tensorshape_util.rank(image_shape) != 3:\n raise ValueError('`image_shape` must have length 3, representing '\n '[height, width, channels] dimensions.')\n\n self._high = tf.cast(high, self.dtype)\n self._low = tf.cast(low, self.dtype)\n self._num_logistic_mix = num_logistic_mix\n self.network = _PixelCNNNetwork(\n dropout_p=dropout_p,\n num_resnet=num_resnet,\n num_hierarchies=num_hierarchies,\n num_filters=num_filters,\n num_logistic_mix=num_logistic_mix,\n receptive_field_dims=receptive_field_dims,\n resnet_activation=resnet_activation,\n use_weight_norm=use_weight_norm,\n use_data_init=use_data_init,\n dtype=dtype)\n\n image_shape = tensorshape_util.constant_value_as_shape(image_shape)\n conditional_shape = (None if conditional_shape is None\n else tensorshape_util.constant_value_as_shape(\n conditional_shape))\n\n image_input_shape = tensorshape_util.concatenate([None], image_shape)\n if conditional_shape is None:\n input_shape = image_input_shape\n else:\n conditional_input_shape = tensorshape_util.concatenate(\n [None], conditional_shape)\n input_shape = [image_input_shape, conditional_input_shape]\n\n self.image_shape = image_shape\n self.conditional_shape = conditional_shape\n self.network.build(input_shape)\n\n def _make_mixture_dist(self, component_logits, locs, scales):\n \"\"\"Builds a mixture of quantized logistic distributions.\n\n Args:\n component_logits: 4D `Tensor` of logits for the Categorical distribution\n over Quantized Logistic mixture components. Dimensions are `[batch_size,\n height, width, num_logistic_mix]`.\n locs: 4D `Tensor` of location parameters for the Quantized Logistic\n mixture components. Dimensions are `[batch_size, height, width,\n num_logistic_mix, num_channels]`.\n scales: 4D `Tensor` of location parameters for the Quantized Logistic\n mixture components. 
Dimensions are `[batch_size, height, width,\n num_logistic_mix, num_channels]`.\n\n Returns:\n dist: A quantized logistic mixture `tfp.distribution` over the input data.\n \"\"\"\n mixture_distribution = categorical.Categorical(logits=component_logits)\n\n # Convert distribution parameters for pixel values in\n # `[self._low, self._high]` for use with `QuantizedDistribution`\n locs = self._low + 0.5 * (self._high - self._low) * (locs + 1.)\n scales *= 0.5 * (self._high - self._low)\n logistic_dist = quantized_distribution.QuantizedDistribution(\n distribution=transformed_distribution.TransformedDistribution(\n distribution=logistic.Logistic(loc=locs, scale=scales),\n bijector=shift.Shift(shift=tf.cast(-0.5, self.dtype))),\n low=self._low, high=self._high)\n\n dist = mixture_same_family.MixtureSameFamily(\n mixture_distribution=mixture_distribution,\n components_distribution=independent.Independent(\n logistic_dist, reinterpreted_batch_ndims=1))\n return independent.Independent(dist, reinterpreted_batch_ndims=2)\n\n def _log_prob(self, value, conditional_input=None, training=None):\n \"\"\"Log probability function with optional conditional input.\n\n Calculates the log probability of a batch of data under the modeled\n distribution (or conditional distribution, if conditional input is\n provided).\n\n Args:\n value: `Tensor` or Numpy array of image data. May have leading batch\n dimension(s), which must broadcast to the leading batch dimensions of\n `conditional_input`.\n conditional_input: `Tensor` on which to condition the distribution (e.g.\n class labels), or `None`. May have leading batch dimension(s), which\n must broadcast to the leading batch dimensions of `value`.\n training: `bool` or `None`. If `bool`, it controls the dropout layer,\n where `True` implies dropout is active. If `None`, it defaults to\n `tf.keras.backend.learning_phase()`.\n Returns:\n log_prob_values: `Tensor`.\n \"\"\"\n # Determine the batch shape of the input images\n image_batch_shape = prefer_static.shape(value)[:-3]\n\n # Broadcast `value` and `conditional_input` to the same batch_shape\n if conditional_input is None:\n image_batch_and_conditional_shape = image_batch_shape\n else:\n conditional_input = tf.convert_to_tensor(conditional_input)\n conditional_input_shape = prefer_static.shape(conditional_input)\n conditional_batch_rank = (prefer_static.rank(conditional_input) -\n tensorshape_util.rank(self.conditional_shape))\n conditional_batch_shape = conditional_input_shape[:conditional_batch_rank]\n\n image_batch_and_conditional_shape = prefer_static.broadcast_shape(\n image_batch_shape, conditional_batch_shape)\n conditional_input = tf.broadcast_to(\n conditional_input,\n prefer_static.concat(\n [image_batch_and_conditional_shape, self.conditional_shape],\n axis=0))\n value = tf.broadcast_to(\n value,\n prefer_static.concat(\n [image_batch_and_conditional_shape, self.event_shape],\n axis=0))\n\n # Flatten batch dimension for input to Keras model\n conditional_input = tf.reshape(\n conditional_input,\n prefer_static.concat([(-1,), self.conditional_shape], axis=0))\n\n value = tf.reshape(\n value, prefer_static.concat([(-1,), self.event_shape], axis=0))\n\n transformed_value = (2. 
* (value - self._low) /\n (self._high - self._low)) - 1.\n inputs = (transformed_value if conditional_input is None\n else [transformed_value, conditional_input])\n\n params = self.network(inputs, training=training)\n\n num_channels = self.event_shape[-1]\n if num_channels == 1:\n component_logits, locs, scales = params\n else:\n # If there is more than one channel, we create a linear autoregressive\n # dependency among the location parameters of the channels of a single\n # pixel (the scale parameters within a pixel are independent). For a pixel\n # with R/G/B channels, the `r`, `g`, and `b` saturation values are\n # distributed as:\n #\n # r ~ Logistic(loc_r, scale_r)\n # g ~ Logistic(coef_rg * r + loc_g, scale_g)\n # b ~ Logistic(coef_rb * r + coef_gb * g + loc_b, scale_b)\n # TODO(emilyaf) Investigate using fill_triangular/matrix multiplication\n # on the coefficients instead of split/multiply/concat\n component_logits, locs, scales, coeffs = params\n num_coeffs = num_channels * (num_channels - 1) // 2\n loc_tensors = tf.split(locs, num_channels, axis=-1)\n coef_tensors = tf.split(coeffs, num_coeffs, axis=-1)\n channel_tensors = tf.split(transformed_value, num_channels, axis=-1)\n\n coef_count = 0\n for i in range(num_channels):\n channel_tensors[i] = channel_tensors[i][..., tf.newaxis, :]\n for j in range(i):\n loc_tensors[i] += channel_tensors[j] * coef_tensors[coef_count]\n coef_count += 1\n locs = tf.concat(loc_tensors, axis=-1)\n\n dist = self._make_mixture_dist(component_logits, locs, scales)\n return tf.reshape(dist.log_prob(value), image_batch_and_conditional_shape)\n\n def _sample_n(self, n, seed=None, conditional_input=None, training=False):\n \"\"\"Samples from the distribution, with optional conditional input.\n\n Args:\n n: `int`, number of samples desired.\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n conditional_input: `Tensor` on which to condition the distribution (e.g.\n class labels), or `None`.\n training: `bool` or `None`. If `bool`, it controls the dropout layer,\n where `True` implies dropout is active. 
If `None`, it defers to Keras'\n handling of train/eval status.\n Returns:\n samples: a `Tensor` of shape `[n, height, width, num_channels]`.\n \"\"\"\n if conditional_input is not None:\n conditional_input = tf.convert_to_tensor(\n conditional_input, dtype=self.dtype)\n conditional_event_rank = tensorshape_util.rank(self.conditional_shape)\n conditional_input_shape = prefer_static.shape(conditional_input)\n conditional_sample_rank = prefer_static.rank(\n conditional_input) - conditional_event_rank\n\n # If `conditional_input` has no sample dimensions, prepend a sample\n # dimension\n if conditional_sample_rank == 0:\n conditional_input = conditional_input[tf.newaxis, ...]\n conditional_sample_rank = 1\n\n # Assert that the conditional event shape in the `PixelCnnNetwork` is the\n # same as that implied by `conditional_input`.\n conditional_event_shape = conditional_input_shape[\n conditional_sample_rank:]\n with tf.control_dependencies([\n tf.assert_equal(self.conditional_shape, conditional_event_shape)]):\n\n conditional_sample_shape = conditional_input_shape[\n :conditional_sample_rank]\n repeat = n // prefer_static.reduce_prod(conditional_sample_shape)\n h = tf.reshape(\n conditional_input,\n prefer_static.concat([(-1,), self.conditional_shape], axis=0))\n h = tf.tile(h,\n prefer_static.pad(\n [repeat], paddings=[[0, conditional_event_rank]],\n constant_values=1))\n\n samples_0 = tf.random.uniform(\n prefer_static.concat([(n,), self.event_shape], axis=0),\n minval=-1., maxval=1., dtype=self.dtype, seed=seed)\n inputs = samples_0 if conditional_input is None else [samples_0, h]\n params_0 = self.network(inputs, training=training)\n samples_0 = self._sample_channels(*params_0, seed=seed)\n\n image_height, image_width, _ = tensorshape_util.as_list(self.event_shape)\n def loop_body(index, samples):\n \"\"\"Loop for iterative pixel sampling.\n\n Args:\n index: 0D `Tensor` of type `int32`. Index of the current pixel.\n samples: 4D `Tensor`. Images with pixels sampled in raster order, up to\n pixel `[index]`, with dimensions `[batch_size, height, width,\n num_channels]`.\n\n Returns:\n samples: 4D `Tensor`. 
Images with pixels sampled in raster order, up to\n and including pixel `[index]`, with dimensions `[batch_size, height,\n width, num_channels]`.\n \"\"\"\n inputs = samples if conditional_input is None else [samples, h]\n params = self.network(inputs, training=training)\n samples_new = self._sample_channels(*params, seed=seed)\n\n # Update the current pixel\n samples = tf.transpose(samples, [1, 2, 3, 0])\n samples_new = tf.transpose(samples_new, [1, 2, 3, 0])\n row, col = index // image_width, index % image_width\n updates = samples_new[row, col, ...][tf.newaxis, ...]\n samples = tf.tensor_scatter_nd_update(samples, [[row, col]], updates)\n samples = tf.transpose(samples, [3, 0, 1, 2])\n\n return index + 1, samples\n\n index0 = tf.zeros([], dtype=tf.int32)\n\n # Construct the while loop for sampling\n total_pixels = image_height * image_width\n loop_cond = lambda ind, _: tf.less(ind, total_pixels)\n init_vars = (index0, samples_0)\n _, samples = tf.while_loop(loop_cond, loop_body, init_vars,\n parallel_iterations=1)\n\n transformed_samples = (self._low +\n 0.5 * (self._high - self._low) * (samples + 1.))\n return tf.round(transformed_samples)\n\n def _sample_channels(\n self, component_logits, locs, scales, coeffs=None, seed=None):\n \"\"\"Sample a single pixel-iteration and apply channel conditioning.\n\n Args:\n component_logits: 4D `Tensor` of logits for the Categorical distribution\n over Quantized Logistic mixture components. Dimensions are `[batch_size,\n height, width, num_logistic_mix]`.\n locs: 4D `Tensor` of location parameters for the Quantized Logistic\n mixture components. Dimensions are `[batch_size, height, width,\n num_logistic_mix, num_channels]`.\n scales: 4D `Tensor` of location parameters for the Quantized Logistic\n mixture components. Dimensions are `[batch_size, height, width,\n num_logistic_mix, num_channels]`.\n coeffs: 4D `Tensor` of coefficients for the linear dependence among color\n channels, or `None` if there is only one channel. Dimensions are\n `[batch_size, height, width, num_logistic_mix, num_coeffs]`, where\n `num_coeffs = num_channels * (num_channels - 1) // 2`.\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n\n Returns:\n samples: 4D `Tensor` of sampled image data with autoregression among\n channels. 
Dimensions are `[batch_size, height, width, num_channels]`.\n \"\"\"\n num_channels = self.event_shape[-1]\n\n # sample mixture components once for the entire pixel\n component_dist = categorical.Categorical(logits=component_logits)\n mask = tf.one_hot(indices=component_dist.sample(seed=seed),\n depth=self._num_logistic_mix)\n mask = tf.cast(mask[..., tf.newaxis], self.dtype)\n\n # apply mixture component mask and separate out RGB parameters\n masked_locs = tf.reduce_sum(locs * mask, axis=-2)\n loc_tensors = tf.split(masked_locs, num_channels, axis=-1)\n masked_scales = tf.reduce_sum(scales * mask, axis=-2)\n scale_tensors = tf.split(masked_scales, num_channels, axis=-1)\n\n if coeffs is not None:\n num_coeffs = num_channels * (num_channels - 1) // 2\n masked_coeffs = tf.reduce_sum(coeffs * mask, axis=-2)\n coef_tensors = tf.split(masked_coeffs, num_coeffs, axis=-1)\n\n channel_samples = []\n coef_count = 0\n for i in range(num_channels):\n loc = loc_tensors[i]\n for c in channel_samples:\n loc += c * coef_tensors[coef_count]\n coef_count += 1\n\n logistic_samp = logistic.Logistic(\n loc=loc, scale=scale_tensors[i]).sample(seed=seed)\n logistic_samp = tf.clip_by_value(logistic_samp, -1., 1.)\n channel_samples.append(logistic_samp)\n\n return tf.concat(channel_samples, axis=-1)\n\n def _batch_shape(self):\n return tf.TensorShape([])\n\n def _event_shape(self):\n return tf.TensorShape(self.image_shape)\n\n\nclass _PixelCNNNetwork(tf.keras.layers.Layer):\n \"\"\"Keras `Layer` to parameterize a Pixel CNN++ distribution.\n\n This is a Keras implementation of the Pixel CNN++ network, as described in\n Salimans et al. (2017)[1] and van den Oord et al. (2016)[2].\n (https://github.com/openai/pixel-cnn).\n\n #### References\n\n [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.\n PixelCNN++: Improving the PixelCNN with Discretized Logistic Mixture\n Likelihood and Other Modifications. In _International Conference on\n Learning Representations_, 2017.\n https://pdfs.semanticscholar.org/9e90/6792f67cbdda7b7777b69284a81044857656.pdf\n Additional details at https://github.com/openai/pixel-cnn\n\n [2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,\n Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with\n PixelCNN Decoders. In _30th Conference on Neural Information Processing\n Systems_, 2016.\n https://papers.nips.cc/paper/6527-conditional-image-generation-with-pixelcnn-decoders.pdf\n\n \"\"\"\n\n def __init__(\n self,\n dropout_p=0.5,\n num_resnet=5,\n num_hierarchies=3,\n num_filters=160,\n num_logistic_mix=10,\n receptive_field_dims=(3, 3),\n resnet_activation='concat_elu',\n use_weight_norm=True,\n use_data_init=True,\n dtype=tf.float32):\n \"\"\"Initialize the neural network for the Pixel CNN++ distribution.\n\n Args:\n dropout_p: `float`, the dropout probability. Should be between 0 and 1.\n num_resnet: `int`, the number of layers (shown in Figure 2 of [2]) within\n each highest-level block of Figure 2 of [1].\n num_hierarchies: `int`, the number of hightest-level blocks (separated by\n expansions/contractions of dimensions in Figure 2 of [1].)\n num_filters: `int`, the number of convolutional filters.\n num_logistic_mix: `int`, number of components in the logistic mixture\n distribution.\n receptive_field_dims: `tuple`, height and width in pixels of the receptive\n field of the convolutional layers above and to the left of a given\n pixel. The width (second element of the tuple) should be odd. 
Figure 1\n (middle) of [2] shows a receptive field of (3, 5) (the row containing\n the current pixel is included in the height). The default of (3, 3) was\n used to produce the results in [1].\n resnet_activation: `string`, the type of activation to use in the resnet\n blocks. May be 'concat_elu', 'elu', or 'relu'.\n use_weight_norm: `bool`, if `True` then use weight normalization.\n use_data_init: `bool`, if `True` then use data-dependent initialization\n (has no effect if `use_weight_norm` is `False`).\n dtype: Data type of the layer.\n \"\"\"\n super(_PixelCNNNetwork, self).__init__(dtype=dtype)\n self._dropout_p = dropout_p\n self._num_resnet = num_resnet\n self._num_hierarchies = num_hierarchies\n self._num_filters = num_filters\n self._num_logistic_mix = num_logistic_mix\n self._receptive_field_dims = receptive_field_dims\n self._resnet_activation = resnet_activation\n\n if use_weight_norm:\n def layer_wrapper(layer):\n def wrapped_layer(*args, **kwargs):\n return weight_norm.WeightNorm(\n layer(*args, **kwargs), data_init=use_data_init)\n return wrapped_layer\n self._layer_wrapper = layer_wrapper\n else:\n self._layer_wrapper = lambda layer: layer\n\n def build(self, input_shape):\n dtype = self.dtype\n if len(input_shape) == 2:\n batch_image_shape, batch_conditional_shape = input_shape\n conditional_input = tf.keras.layers.Input(\n shape=batch_conditional_shape[1:], dtype=dtype)\n else:\n batch_image_shape = input_shape\n conditional_input = None\n\n image_shape = batch_image_shape[1:]\n image_input = tf.keras.layers.Input(shape=image_shape, dtype=dtype)\n\n if self._resnet_activation == 'concat_elu':\n activation = tf.keras.layers.Lambda(\n lambda x: tf.nn.elu(tf.concat([x, -x], axis=-1)), dtype=dtype)\n else:\n activation = tf.keras.activations.get(self._resnet_activation)\n\n # Define layers with default inputs and layer wrapper applied\n Conv2D = functools.partial( # pylint:disable=invalid-name\n self._layer_wrapper(tf.keras.layers.Convolution2D),\n filters=self._num_filters,\n padding='same',\n dtype=dtype)\n\n Dense = functools.partial( # pylint:disable=invalid-name\n self._layer_wrapper(tf.keras.layers.Dense), dtype=dtype)\n\n Conv2DTranspose = functools.partial( # pylint:disable=invalid-name\n self._layer_wrapper(tf.keras.layers.Conv2DTranspose),\n filters=self._num_filters,\n padding='same',\n strides=(2, 2),\n dtype=dtype)\n\n rows, cols = self._receptive_field_dims\n\n # Define the dimensions of the valid (unmasked) areas of the layer kernels\n # for stride 1 convolutions in the internal layers.\n kernel_valid_dims = {'vertical': (rows - 1, cols),\n 'horizontal': (2, cols // 2 + 1)}\n\n # Define the size of the kernel necessary to center the current pixel\n # correctly for stride 1 convolutions in the internal layers.\n kernel_sizes = {'vertical': (2 * rows - 3, cols), 'horizontal': (3, cols)}\n\n # Make the kernel constraint functions for stride 1 convolutions in internal\n # layers.\n kernel_constraints = {\n k: _make_kernel_constraint(kernel_sizes[k], (0, v[0]), (0, v[1]))\n for k, v in kernel_valid_dims.items()}\n\n # Build the initial vertical stack/horizontal stack convolutional layers,\n # as shown in Figure 1 of [2]. 
The receptive field of the initial vertical\n # stack layer is a rectangular area centered above the current pixel.\n vertical_stack_init = Conv2D(\n kernel_size=(2 * rows - 1, cols),\n kernel_constraint=_make_kernel_constraint(\n (2 * rows - 1, cols), (0, rows - 1), (0, cols)))(image_input)\n\n # In Figure 1 [2], the receptive field of the horizontal stack is\n # illustrated as the pixels in the same row and to the left of the current\n # pixel. [1] increases the height of this receptive field from one pixel to\n # two (`horizontal_stack_left`) and additionally includes a subset of the\n # row of pixels centered above the current pixel (`horizontal_stack_up`).\n horizontal_stack_up = Conv2D(\n kernel_size=(3, cols),\n kernel_constraint=_make_kernel_constraint(\n (3, cols), (0, 1), (0, cols)))(image_input)\n\n horizontal_stack_left = Conv2D(\n kernel_size=(3, cols),\n kernel_constraint=_make_kernel_constraint(\n (3, cols), (0, 2), (0, cols // 2)))(image_input)\n\n horizontal_stack_init = tf.keras.layers.add(\n [horizontal_stack_up, horizontal_stack_left], dtype=dtype)\n\n layer_stacks = {\n 'vertical': [vertical_stack_init],\n 'horizontal': [horizontal_stack_init]}\n\n # Build the downward pass of the U-net (left-hand half of Figure 2 of [1]).\n # Each `i` iteration builds one of the highest-level blocks (identified as\n # 'Sequence of 6 layers' in the figure, consisting of `num_resnet=5` stride-\n # 1 layers, and one stride-2 layer that contracts the height/width\n # dimensions). The `_` iterations build the stride 1 layers. The layers of\n # the downward pass are stored in lists, since we'll later need them to make\n # skip-connections to layers in the upward pass of the U-net (the skip-\n # connections are represented by curved lines in Figure 2 [1]).\n for i in range(self._num_hierarchies):\n for _ in range(self._num_resnet):\n # Build a layer shown in Figure 2 of [2]. The 'vertical' iteration\n # builds the layers in the left half of the figure, and the 'horizontal'\n # iteration builds the layers in the right half.\n for stack in ['vertical', 'horizontal']:\n input_x = layer_stacks[stack][-1]\n x = activation(input_x)\n x = Conv2D(kernel_size=kernel_sizes[stack],\n kernel_constraint=kernel_constraints[stack])(x)\n\n # Add the vertical-stack layer to the horizontal-stack layer\n if stack == 'horizontal':\n h = activation(layer_stacks['vertical'][-1])\n h = Dense(self._num_filters)(h)\n x = tf.keras.layers.add([h, x], dtype=dtype)\n\n x = activation(x)\n x = tf.keras.layers.Dropout(self._dropout_p, dtype=dtype)(x)\n x = Conv2D(filters=2*self._num_filters,\n kernel_size=kernel_sizes[stack],\n kernel_constraint=kernel_constraints[stack])(x)\n\n if conditional_input is not None:\n h_projection = _build_and_apply_h_projection(\n conditional_input, self._num_filters, dtype=dtype)\n x = tf.keras.layers.add([x, h_projection], dtype=dtype)\n\n x = _apply_sigmoid_gating(x)\n\n # Add a residual connection from the layer's input.\n out = tf.keras.layers.add([input_x, x], dtype=dtype)\n layer_stacks[stack].append(out)\n\n if i < self._num_hierarchies - 1:\n # Build convolutional layers that contract the height/width dimensions\n # on the downward pass between each set of layers (e.g. 
contracting from\n # 32x32 to 16x16 in Figure 2 of [1]).\n for stack in ['vertical', 'horizontal']:\n # Define kernel dimensions/masking to maintain the autoregressive\n # property.\n x = layer_stacks[stack][-1]\n h, w = kernel_valid_dims[stack]\n kernel_height = 2 * h\n if stack == 'vertical':\n kernel_width = w + 1\n else:\n kernel_width = 2 * w\n\n kernel_size = (kernel_height, kernel_width)\n kernel_constraint = _make_kernel_constraint(\n kernel_size, (0, h), (0, w))\n x = Conv2D(strides=(2, 2), kernel_size=kernel_size,\n kernel_constraint=kernel_constraint)(x)\n layer_stacks[stack].append(x)\n\n # Upward pass of the U-net (right-hand half of Figure 2 of [1]). We stored\n # the layers of the downward pass in a list, in order to access them to make\n # skip-connections to the upward pass. For the upward pass, we need to keep\n # track of only the current layer, so we maintain a reference to the\n # current layer of the horizontal/vertical stack in the `upward_pass` dict.\n # The upward pass begins with the last layer of the downward pass.\n upward_pass = {key: stack.pop() for key, stack in layer_stacks.items()}\n\n # As with the downward pass, each `i` iteration builds a highest level block\n # in Figure 2 [1], and the `_` iterations build individual layers within the\n # block.\n for i in range(self._num_hierarchies):\n num_resnet = self._num_resnet if i == 0 else self._num_resnet + 1\n\n for _ in range(num_resnet):\n # Build a layer as shown in Figure 2 of [2], with a skip-connection\n # from the symmetric layer in the downward pass.\n for stack in ['vertical', 'horizontal']:\n input_x = upward_pass[stack]\n x_symmetric = layer_stacks[stack].pop()\n\n x = activation(input_x)\n x = Conv2D(kernel_size=kernel_sizes[stack],\n kernel_constraint=kernel_constraints[stack])(x)\n\n # Include the vertical-stack layer of the upward pass in the layers\n # to be added to the horizontal layer.\n if stack == 'horizontal':\n x_symmetric = tf.keras.layers.Concatenate(axis=-1, dtype=dtype)(\n [upward_pass['vertical'], x_symmetric])\n\n # Add a skip-connection from the symmetric layer in the downward\n # pass to the layer `x` in the upward pass.\n h = activation(x_symmetric)\n h = Dense(self._num_filters)(h)\n x = tf.keras.layers.add([h, x], dtype=dtype)\n\n x = activation(x)\n x = tf.keras.layers.Dropout(self._dropout_p, dtype=dtype)(x)\n x = Conv2D(filters=2*self._num_filters,\n kernel_size=kernel_sizes[stack],\n kernel_constraint=kernel_constraints[stack])(x)\n\n if conditional_input is not None:\n h_projection = _build_and_apply_h_projection(\n conditional_input, self._num_filters, dtype=dtype)\n x = tf.keras.layers.add([x, h_projection], dtype=dtype)\n\n x = _apply_sigmoid_gating(x)\n upward_pass[stack] = tf.keras.layers.add([input_x, x], dtype=dtype)\n\n # Define deconvolutional layers that expand height/width dimensions on the\n # upward pass (e.g. 
expanding from 8x8 to 16x16 in Figure 2 of [1]), with\n # the correct kernel dimensions/masking to maintain the autoregressive\n # property.\n if i < self._num_hierarchies - 1:\n for stack in ['vertical', 'horizontal']:\n h, w = kernel_valid_dims[stack]\n kernel_height = 2 * h - 2\n if stack == 'vertical':\n kernel_width = w + 1\n kernel_constraint = _make_kernel_constraint(\n (kernel_height, kernel_width), (h - 2, kernel_height), (0, w))\n else:\n kernel_width = 2 * w - 2\n kernel_constraint = _make_kernel_constraint(\n (kernel_height, kernel_width), (h - 2, kernel_height),\n (w - 2, kernel_width))\n\n x = upward_pass[stack]\n x = Conv2DTranspose(kernel_size=(kernel_height, kernel_width),\n kernel_constraint=kernel_constraint)(x)\n upward_pass[stack] = x\n\n x_out = tf.keras.layers.ELU(dtype=dtype)(upward_pass['horizontal'])\n\n # Build final Dense/Reshape layers to output the correct number of\n # parameters per pixel.\n num_channels = tensorshape_util.as_list(image_shape)[-1]\n num_coeffs = num_channels * (num_channels - 1) // 2\n num_out = num_channels * 2 + num_coeffs + 1\n num_out_total = num_out * self._num_logistic_mix\n params = Dense(num_out_total)(x_out)\n params = tf.reshape(params, prefer_static.concat(\n [[-1], image_shape[:-1], [self._num_logistic_mix, num_out]], axis=0))\n\n # If there is one color channel, split the parameters into a list of three\n # output `Tensor`s: (1) component logits for the Quantized Logistic mixture\n # distribution, (2) location parameters for each component, and (3) scale\n # parameters for each component. If there is more than one color channel,\n # return a fourth `Tensor` for the coefficients for the linear dependence\n # among color channels.\n splits = (3 if num_channels == 1\n else [1, num_channels, num_channels, num_coeffs])\n outputs = tf.split(params, splits, axis=-1)\n\n # Squeeze singleton dimension from component logits\n outputs[0] = tf.squeeze(outputs[0], axis=-1)\n\n # Ensure scales are positive and do not collapse to near-zero\n outputs[2] = tf.nn.softplus(outputs[2]) + tf.cast(tf.exp(-7.), self.dtype)\n\n inputs = (image_input if conditional_input is None\n else [image_input, conditional_input])\n self._network = tf.keras.Model(inputs=inputs, outputs=outputs)\n super(_PixelCNNNetwork, self).build(input_shape)\n\n def call(self, inputs, training=None):\n \"\"\"Call the Pixel CNN network model.\n\n Args:\n inputs: 4D `Tensor` of image data with dimensions [batch size, height,\n width, channels] or a 2-element `list`. If `list`, the first element is\n the 4D image `Tensor` and the second element is a `Tensor` with\n conditional input data (e.g. VAE encodings or class labels) with the\n same leading batch dimension as the image `Tensor`.\n training: `bool` or `None`. If `bool`, it controls the dropout layer,\n where `True` implies dropout is active. If `None`, it it defaults to\n `tf.keras.backend.learning_phase()`\n\n Returns:\n outputs: a 3- or 4-element `list` of `Tensor`s in the following order:\n component_logits: 4D `Tensor` of logits for the Categorical distribution\n over Quantized Logistic mixture components. Dimensions are\n `[batch_size, height, width, num_logistic_mix]`.\n locs: 4D `Tensor` of location parameters for the Quantized Logistic\n mixture components. Dimensions are `[batch_size, height, width,\n num_logistic_mix, num_channels]`.\n scales: 4D `Tensor` of location parameters for the Quantized Logistic\n mixture components. 
Dimensions are `[batch_size, height, width,\n num_logistic_mix, num_channels]`.\n coeffs: 4D `Tensor` of coefficients for the linear dependence among\n color channels, included only if the image has more than one channel.\n Dimensions are `[batch_size, height, width, num_logistic_mix,\n num_coeffs]`, where\n `num_coeffs = num_channels * (num_channels - 1) // 2`.\n \"\"\"\n return self._network(inputs, training=training)\n\n\ndef _make_kernel_constraint(kernel_size, valid_rows, valid_columns):\n \"\"\"Make the masking function for layer kernels.\"\"\"\n mask = np.zeros(kernel_size)\n lower, upper = valid_rows\n left, right = valid_columns\n mask[lower:upper, left:right] = 1.\n mask = mask[:, :, np.newaxis, np.newaxis]\n return lambda x: x * mask\n\n\ndef _build_and_apply_h_projection(h, num_filters, dtype):\n \"\"\"Project the conditional input.\"\"\"\n h = tf.keras.layers.Flatten(dtype=dtype)(h)\n h_projection = tf.keras.layers.Dense(\n 2*num_filters, kernel_initializer='random_normal', dtype=dtype)(h)\n return h_projection[..., tf.newaxis, tf.newaxis, :]\n\n\ndef _apply_sigmoid_gating(x):\n \"\"\"Apply the sigmoid gating in Figure 2 of [2].\"\"\"\n activation_tensor, gate_tensor = tf.split(x, 2, axis=-1)\n sigmoid_gate = tf.sigmoid(gate_tensor)\n return tf.keras.layers.multiply(\n [sigmoid_gate, activation_tensor], dtype=x.dtype)\n\n",
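# A minimal, self-contained NumPy sketch of the kernel-masking idea behind
# `_make_kernel_constraint` above (the helper is reproduced here so the snippet
# runs on its own; only the example shapes and the demo at the bottom are
# additions, not library API). With the default `receptive_field_dims=(3, 3)`,
# the initial vertical-stack convolution uses a (2*rows - 1, cols) kernel whose
# valid region is the rows strictly above the center pixel, which is what
# preserves the autoregressive property.
import numpy as np


def make_kernel_constraint(kernel_size, valid_rows, valid_columns):
  """Returns a constraint that zeros kernel entries outside a rectangle."""
  mask = np.zeros(kernel_size)
  lower, upper = valid_rows
  left, right = valid_columns
  mask[lower:upper, left:right] = 1.
  # Broadcast over the [in_channels, out_channels] dims of a conv kernel of
  # shape [height, width, in_channels, out_channels].
  mask = mask[:, :, np.newaxis, np.newaxis]
  return lambda kernel: kernel * mask


rows, cols = 3, 3  # receptive_field_dims
constraint = make_kernel_constraint(
    (2 * rows - 1, cols), (0, rows - 1), (0, cols))
kernel = np.ones((2 * rows - 1, cols, 1, 1))
print(constraint(kernel)[..., 0, 0])
# [[1. 1. 1.]
#  [1. 1. 1.]
#  [0. 0. 0.]   <- center row (the current pixel's row) and below are masked
#  [0. 0. 0.]
#  [0. 0. 0.]]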
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Parameter estimation by iterated filtering.\"\"\"\n\nimport collections\nimport contextlib\nimport functools\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import invert\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import independent\nfrom tensorflow_probability.python.distributions import joint_distribution_named\nfrom tensorflow_probability.python.distributions import joint_distribution_sequential\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.distributions import transformed_distribution\n\nfrom tensorflow_probability.python.experimental.mcmc import infer_trajectories\n\nfrom tensorflow_probability.python.internal import distribution_util as dist_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\n__all__ = [\n 'geometric_cooling_schedule',\n 'IteratedFilter'\n]\n\nJAX_MODE = False # Overwritten by rewrite script.\nNUMPY_MODE = False\n\n\n# Utility to avoid breakage when passed-in structures are mutated externally.\n_copy_structure = lambda struct: tf.nest.map_structure(lambda x: x, struct)\n\n\nParametersAndState = collections.namedtuple('ParametersAndState',\n ['unconstrained_parameters',\n 'state'])\n\n\ndef geometric_cooling_schedule(cooling_fraction_per_k_iterations, k=1.):\n \"\"\"Defines a cooling schedule following a geometric sequence.\n\n This returns a function `f` such that\n\n ```python\n f(iteration) = cooling_fraction_per_k_iterations**(iteration / k)\n ```\n\n Args:\n cooling_fraction_per_k_iterations: float `Tensor` ratio by which the\n original value should be scaled once `k` iterations have been completed.\n k: int `Tensor` number of iterations used to define the schedule.\n Returns:\n f: Python `callable` representing the cooling schedule.\n \"\"\"\n cooling_fraction_per_k_iterations = tf.convert_to_tensor(\n cooling_fraction_per_k_iterations,\n dtype_hint=tf.float32,\n name='cooling_fraction_per_k_iterations')\n dtype = cooling_fraction_per_k_iterations.dtype\n k = tf.cast(k, dtype=dtype, name='k')\n\n def f(iteration):\n iteration = tf.cast(iteration, dtype=dtype, name='iteration')\n return cooling_fraction_per_k_iterations ** (iteration / k)\n return f\n\n\nclass DeterministicEmpirical(distribution.Distribution):\n \"\"\"Dummy 'proposal' distribution that just returns samples we pass in.\"\"\"\n\n def __init__(self, values_with_sample_dim, batch_ndims=0, validate_args=False,\n name=None):\n \"\"\"Initializes an empirical distribution with a list of samples.\n\n Args:\n 
values_with_sample_dim: nested structure of `Tensor`s, each of shape\n prefixed by `[num_samples, B1, ..., Bn]`, where `num_samples` as well as\n `B1, ..., Bn` are batch dimensions shared across all `Tensor`s.\n batch_ndims: optional scalar int `Tensor`, or structure matching\n `values_with_sample_dim` of scalar int `Tensor`s, specifying the number\n of batch dimensions. Used to determine the batch and event shapes of the\n distribution.\n Default value: `0`.\n validate_args: Python `bool` indicating whether to perform runtime checks\n that may have performance cost.\n Default value: `False`.\n name: Python `str` name for ops created by this distribution.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name or 'DeterministicEmpirical') as name:\n\n # Ensure we don't break if the passed-in structures are externally\n # mutated.\n values_with_sample_dim = _copy_structure(values_with_sample_dim)\n batch_ndims = _copy_structure(batch_ndims)\n\n # Prevent tf.Module from wrapping passed-in values, because the\n # wrapper breaks JointDistributionNamed (and maybe other JDs). Instead, we\n # save a separate ref to the input that is used only by tf.Module\n # tracking.\n self._values_for_tracking = values_with_sample_dim\n self._values_with_sample_dim = self._no_dependency(values_with_sample_dim)\n\n if not tf.nest.is_nested(batch_ndims):\n batch_ndims = tf.nest.map_structure(\n lambda _: batch_ndims, values_with_sample_dim)\n self._batch_ndims = batch_ndims\n\n self._max_num_samples = ps.reduce_min(\n [ps.size0(x) for x in tf.nest.flatten(values_with_sample_dim)])\n\n super(DeterministicEmpirical, self).__init__(\n dtype=tf.nest.map_structure(\n lambda x: x.dtype, self.values_with_sample_dim),\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=True,\n name=name)\n self._parameters = self._no_dependency(parameters)\n\n @property\n def batch_ndims(self):\n return _copy_structure(self._batch_ndims)\n\n @property\n def max_num_samples(self):\n return self._max_num_samples\n\n @property\n def values_with_sample_dim(self):\n return _copy_structure(self._values_with_sample_dim)\n\n def _event_shape(self):\n return tf.nest.map_structure(\n lambda x, nd: tf.TensorShape(x.shape[1 + nd:]),\n self.values_with_sample_dim,\n self.batch_ndims)\n\n def _event_shape_tensor(self):\n return tf.nest.map_structure(\n lambda x, nd: tf.shape(x)[1 + nd:],\n self.values_with_sample_dim,\n self.batch_ndims)\n\n def _batch_shape(self):\n return tf.nest.map_structure(\n lambda x, nd: tf.TensorShape(x.shape[1 : 1 + nd]),\n self.values_with_sample_dim,\n self.batch_ndims)\n\n def _batch_shape_tensor(self):\n return tf.nest.map_structure(\n lambda x, nd: tf.shape(x)[1 : 1 + nd],\n self.values_with_sample_dim,\n self.batch_ndims)\n\n # TODO(b/152797117): Override _sample_n, once it supports joint distributions.\n def sample(self, sample_shape=(), seed=None, name=None):\n with tf.name_scope(name or 'sample'):\n # Grab the required number of values from the provided tensors.\n sample_shape = dist_util.expand_to_vector(sample_shape)\n n = ps.cast(ps.reduce_prod(sample_shape), dtype=tf.int32)\n\n # Check that we're not trying to draw too many samples.\n assertions = []\n will_overflow_ = tf.get_static_value(n > self.max_num_samples)\n if will_overflow_:\n raise ValueError('Trying to draw {} samples from a '\n '`DeterministicEmpirical` instance for which only {} '\n 'samples were provided.'.format(\n tf.get_static_value(n),\n 
tf.get_static_value(self.max_num_samples)))\n elif (will_overflow_ is None # Couldn't determine statically.\n and self.validate_args):\n assertions.append(\n tf.debugging.assert_less_equal(\n n, self.max_num_samples, message='Number of samples to draw '\n 'from a `DeterministicEmpirical` instance must not exceed the '\n 'number provided at construction.'))\n\n # Extract the appropriate number of sampled values.\n with tf.control_dependencies(assertions):\n sampled = tf.nest.map_structure(\n lambda x: x[:n, ...], self.values_with_sample_dim)\n\n # Reshape the values to the appropriate sample shape.\n return tf.nest.map_structure(\n lambda x: tf.reshape(x, # pylint: disable=g-long-lambda\n ps.concat([ps.cast(sample_shape, tf.int32),\n ps.cast(ps.shape(x)[1:], tf.int32)],\n axis=0)),\n sampled)\n\n def _prob(self, x):\n flat_values = tf.nest.flatten(self.values_with_sample_dim)\n return tf.cast(\n tf.reduce_all([\n tf.equal(a, b[:ps.size0(a)])\n for (a, b) in zip(tf.nest.flatten(x), flat_values)]),\n dtype=flat_values[0].dtype)\n\n\ndef _maybe_build_joint_distribution(structure_of_distributions):\n \"\"\"Turns a (potentially nested) structure of dists into a single dist.\"\"\"\n # Base case: if we already have a Distribution, return it.\n if dist_util.is_distribution_instance(structure_of_distributions):\n return structure_of_distributions\n\n # Otherwise, recursively convert all interior nested structures into JDs.\n outer_structure = tf.nest.map_structure(\n _maybe_build_joint_distribution,\n structure_of_distributions)\n if (hasattr(outer_structure, '_asdict') or\n isinstance(outer_structure, collections.abc.Mapping)):\n return joint_distribution_named.JointDistributionNamed(outer_structure)\n else:\n return joint_distribution_sequential.JointDistributionSequential(\n outer_structure)\n\n\ndef augment_transition_fn_with_parameters(parameter_prior,\n parameterized_transition_fn,\n parameter_constraining_bijector):\n \"\"\"Wraps a transition fn on states to act on `ParametersAndState` tuples.\"\"\"\n\n def params_and_state_transition_fn(step,\n params_and_state,\n perturbation_scale,\n **kwargs):\n \"\"\"Transition function operating on a `ParamsAndState` namedtuple.\"\"\"\n # Extract the state, to pass through to the observation fn.\n unconstrained_params, state = params_and_state\n if 'state_history' in kwargs:\n kwargs['state_history'] = kwargs['state_history'].state\n\n # Perturb each (unconstrained) parameter with normally-distributed noise.\n if not tf.nest.is_nested(perturbation_scale):\n perturbation_scale = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(perturbation_scale, # pylint: disable=g-long-lambda\n name='perturbation_scale',\n dtype=x.dtype),\n unconstrained_params)\n perturbed_unconstrained_parameter_dists = tf.nest.map_structure(\n lambda x, p, s: independent.Independent( # pylint: disable=g-long-lambda\n normal.Normal(loc=x, scale=p),\n reinterpreted_batch_ndims=ps.rank_from_shape(s)),\n unconstrained_params,\n perturbation_scale,\n parameter_prior.event_shape_tensor())\n\n # For the joint transition, pass the perturbed parameters\n # into the original transition fn (after pushing them into constrained\n # space).\n return joint_distribution_named.JointDistributionNamed(\n ParametersAndState(\n unconstrained_parameters=_maybe_build_joint_distribution(\n perturbed_unconstrained_parameter_dists),\n state=lambda unconstrained_parameters: ( # pylint: disable=g-long-lambda\n parameterized_transition_fn(\n step,\n state,\n 
parameters=parameter_constraining_bijector.forward(\n unconstrained_parameters),\n **kwargs))))\n\n return params_and_state_transition_fn\n\n\ndef augment_observation_fn_with_parameters(parameterized_observation_fn,\n parameter_constraining_bijector):\n \"\"\"Augments an observation fn to take `ParametersAndState` namedtuples.\"\"\"\n\n def observation_from_params_and_state_fn(step,\n params_and_state,\n **kwargs):\n # Extract the state, to pass through to the observation fn.\n unconstrained_parameters, state = params_and_state\n if 'state_history' in kwargs:\n _, kwargs['state_history'] = kwargs['state_history']\n\n return parameterized_observation_fn(\n step,\n state,\n parameters=parameter_constraining_bijector.forward(\n unconstrained_parameters),\n **kwargs)\n\n return observation_from_params_and_state_fn\n\n\ndef joint_prior_on_parameters_and_state(parameter_prior,\n parameterized_initial_state_prior_fn,\n parameter_constraining_bijector,\n prior_is_constrained=True):\n \"\"\"Constructs a joint dist. from p(parameters) and p(state | parameters).\"\"\"\n if prior_is_constrained:\n parameter_prior = transformed_distribution.TransformedDistribution(\n parameter_prior,\n invert.Invert(parameter_constraining_bijector),\n name='unconstrained_parameter_prior')\n\n return joint_distribution_named.JointDistributionNamed(\n ParametersAndState(\n unconstrained_parameters=parameter_prior,\n state=lambda unconstrained_parameters: ( # pylint: disable=g-long-lambda\n parameterized_initial_state_prior_fn(\n parameter_constraining_bijector.forward(\n unconstrained_parameters)))))\n\n\nclass IteratedFilter(object):\n \"\"\"A model augmented with parameter perturbations for iterated filtering.\"\"\"\n\n def __init__(self,\n parameter_prior,\n parameterized_initial_state_prior_fn,\n parameterized_transition_fn,\n parameterized_observation_fn,\n parameterized_initial_state_proposal_fn=None,\n parameterized_proposal_fn=None,\n parameter_constraining_bijector=None,\n name=None):\n \"\"\"Builds an iterated filter for parameter estimation in sequential models.\n\n Iterated filtering is a parameter estimation method in which parameters\n are included in an augmented state space, with dynamics that introduce\n parameter perturbations, and a filtering\n algorithm such as particle filtering is run several times with perturbations\n of decreasing size. This class implements the IF2 algorithm of\n [Ionides et al., 2015][1], for which, under appropriate conditions\n (including a uniform prior) the final parameter distribution approaches a\n point mass at the maximum likelihood estimate. If a non-uniform prior is\n provided, the final parameter distribution will (under appropriate\n conditions) approach a point mass at the maximum a posteriori (MAP) value.\n\n This class augments the state space of a sequential model to include\n parameter perturbations, and provides utilities to run particle filtering\n on that augmented model. 
Alternately, the augmented components may be passed\n directly into a filtering algorithm of the user's choice.\n\n Args:\n parameter_prior: prior `tfd.Distribution` over parameters (may be a joint\n distribution).\n parameterized_initial_state_prior_fn: `callable` with signature\n `initial_state_prior = parameterized_initial_state_prior_fn(parameters)`\n where `parameters` has the form of a sample from `parameter_prior`,\n and `initial_state_prior` is a distribution over the initial state.\n parameterized_transition_fn: `callable` with signature\n `next_state_dist = parameterized_transition_fn(\n step, state, parameters, **kwargs)`.\n parameterized_observation_fn: `callable` with signature\n `observation_dist = parameterized_observation_fn(\n step, state, parameters, **kwargs)`.\n parameterized_initial_state_proposal_fn: optional `callable` with\n signature `initial_state_proposal =\n parameterized_initial_state_proposal_fn(parameters)` where `parameters`\n has the form of a sample from `parameter_prior`, and\n `initial_state_proposal` is a distribution over the initial state.\n parameterized_proposal_fn: optional `callable` with signature\n `next_state_dist = parameterized_transition_fn(\n step, state, parameters, **kwargs)`.\n Default value: `None`.\n parameter_constraining_bijector: optional `tfb.Bijector` instance\n such that `parameter_constraining_bijector.forward(x)` returns valid\n parameters for any real-valued `x` of the same structure and shape\n as `parameters`. If `None`, the default bijector of the provided\n `parameter_prior` will be used.\n Default value: `None`.\n name: `str` name for ops constructed by this object.\n Default value: `iterated_filter`.\n\n #### Example\n\n We'll walk through applying iterated filtering to a toy\n Susceptible-Infected-Recovered (SIR) model, a [compartmental model](\n https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SIR_model)\n of infectious disease. Note that the model we use here is extremely\n simplified and is intended as a pedagogical example; it should not be\n interpreted to describe disease spread in the real world.\n\n We begin by specifying a prior distribution over the parameters to be\n inferred, thus defining the structure of the parameter space and the support\n of the parameters (which will imply a default constraining bijector). Here\n we'll use uniform priors over ranges that we expect to contain the\n parameters:\n\n ```python\n parameter_prior = tfd.JointDistributionNamed({\n 'infection_rate': tfd.Uniform(low=0., high=3.),\n 'recovery_rate': tfd.Uniform(low=0., high=3.),\n })\n ```\n\n The model specification itself is identical to that used by\n `tfp.experimental.mcmc.infer_trajectories`, except that each component\n accepts an additional `parameters` keyword argument. We start by specifying\n a parameterized prior on initial states. In this case, our state\n includes the current number of susceptible and infected individuals\n (the third compartment, recovered individuals, is implicitly defined\n to include the remaining population). 
We'll also include, as auxiliary\n variables, the daily counts of new infections and new recoveries; these\n will help ensure that people shift consistently across compartments.\n\n ```python\n population_size = 1000\n initial_state_prior_fn = lambda parameters: tfd.JointDistributionNamed({\n 'new_infections': tfd.Poisson(parameters['infection_rate']),\n 'new_recoveries': tfd.Deterministic(\n tf.broadcast_to(0., tf.shape(parameters['recovery_rate']))),\n 'susceptible': (lambda new_infections:\n tfd.Deterministic(population_size - new_infections)),\n 'infected': (lambda new_infections:\n tfd.Deterministic(new_infections))})\n ```\n\n **Note**: the state prior must have the same batch shape as the\n passed-in parameters; equivalently, it must sample a full state for each\n parameter particle. If any part of the state prior does not depend\n on the parameters, you must manually ensure that it has the appropriate\n batch shape. For example, in the definition of `new_recoveries` above,\n applying `broadcast_to` with the shape of a parameter ensures that\n the batch shape is maintained.\n\n Next, we specify a transition model. This takes the state at the\n previous day, along with parameters, and returns a distribution\n over the state for the current day.\n\n ```python\n def parameterized_infection_dynamics(_, previous_state, parameters):\n new_infections = tfd.Poisson(\n parameters['infection_rate'] * previous_state['infected'] *\n previous_state['susceptible'] / population_size)\n new_recoveries = tfd.Poisson(\n previous_state['infected'] * parameters['recovery_rate'])\n return tfd.JointDistributionNamed({\n 'new_infections': new_infections,\n 'new_recoveries': new_recoveries,\n 'susceptible': lambda new_infections: tfd.Deterministic(\n tf.maximum(0., previous_state['susceptible'] - new_infections)),\n 'infected': lambda new_infections, new_recoveries: tfd.Deterministic(\n tf.maximum(0.,\n (previous_state['infected'] +\n new_infections - new_recoveries)))})\n ```\n\n Finally, assume that every day we get to observe noisy counts of new\n infections and recoveries.\n\n ```python\n def parameterized_infection_observations(_, state, parameters):\n del parameters # Not used.\n return tfd.JointDistributionNamed({\n 'new_infections': tfd.Poisson(state['new_infections'] + 0.1),\n 'new_recoveries': tfd.Poisson(state['new_recoveries'] + 0.1)})\n ```\n\n Combining these components, an `IteratedFilter` augments\n the state space to include parameters that may change over time.\n\n ```python\n iterated_filter = tfp.experimental.sequential.IteratedFilter(\n parameter_prior=parameter_prior,\n parameterized_initial_state_prior_fn=initial_state_prior_fn,\n parameterized_transition_fn=parameterized_infection_dynamics,\n parameterized_observation_fn=parameterized_infection_observations)\n ```\n\n We may then run the filter to estimate parameters from a series\n of observations:\n\n ```python\n # Simulated with `infection_rate=1.2` and `recovery_rate=0.1`.\n observed_values = {\n 'new_infections': tf.convert_to_tensor([\n 2., 7., 14., 24., 45., 93., 160., 228., 252., 158., 17.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),\n 'new_recoveries': tf.convert_to_tensor([\n 0., 0., 3., 4., 3., 8., 12., 31., 49., 73., 85., 65., 71.,\n 58., 42., 65., 36., 31., 32., 27., 31., 20., 19., 19., 14., 27.])\n }\n parameter_particles = iterated_filter.estimate_parameters(\n observations=observed_values,\n num_iterations=20,\n num_particles=4096,\n initial_perturbation_scale=1.0,\n cooling_schedule=(\n 
tfp.experimental.sequential.geometric_cooling_schedule(\n 0.001, k=20)),\n seed=test_util.test_seed())\n print('Mean of parameter particles from final iteration: {}'.format(\n tf.nest.map_structure(lambda x: tf.reduce_mean(x[-1], axis=0),\n parameter_particles)))\n print('Standard deviation of parameter particles from '\n 'final iteration: {}'.format(\n tf.nest.map_structure(lambda x: tf.math.reduce_std(x[-1], axis=0),\n parameter_particles)))\n ```\n\n For more control, we could alternately choose to run filtering iterations\n on the augmented model manually, using the filter of our choice.\n For example, manually invoking `infer_trajectories` would allow us\n to inspect the parameter and state values at all timesteps, and their\n corresponding log-probabilities:\n\n ```python\n trajectories, lps = tfp.experimental.mcmc.infer_trajectories(\n observations=observations,\n initial_state_prior=iterated_filter.joint_initial_state_prior,\n transition_fn=functools.partial(\n iterated_filter.joint_transition_fn,\n perturbation_scale=perturbation_scale),\n observation_fn=iterated_filter.joint_observation_fn,\n proposal_fn=iterated_filter.joint_proposal_fn,\n initial_state_proposal=iterated_filter.joint_initial_state_proposal(\n initial_unconstrained_parameters),\n num_particles=4096)\n ```\n\n #### References:\n\n [1] Edward L. Ionides, Dao Nguyen, Yves Atchade, Stilian Stoev, and Aaron A.\n King. Inference for dynamic and latent variable models via iterated,\n perturbed Bayes maps. _Proceedings of the National Academy of Sciences_\n 112, no. 3: 719-724, 2015.\n https://www.pnas.org/content/pnas/112/3/719.full.pdf\n \"\"\"\n name = name or 'IteratedFilter'\n with tf.name_scope(name):\n self._parameter_prior = parameter_prior\n self._parameterized_initial_state_prior_fn = (\n parameterized_initial_state_prior_fn)\n\n if parameter_constraining_bijector is None:\n parameter_constraining_bijector = (\n parameter_prior.experimental_default_event_space_bijector())\n self._parameter_constraining_bijector = parameter_constraining_bijector\n\n # Augment the prior to include both parameters and states.\n self._joint_initial_state_prior = joint_prior_on_parameters_and_state(\n parameter_prior,\n parameterized_initial_state_prior_fn,\n parameter_constraining_bijector,\n prior_is_constrained=True)\n\n # Check that prior samples have a consistent number of particles.\n # TODO(davmre): remove the need for dummy shape dependencies,\n # and this check, by using `JointDistributionNamedAutoBatched` with\n # auto-vectorization enabled in `joint_prior_on_parameters_and_state`.\n\n num_particles_canary = 13\n canary_seed = samplers.sanitize_seed([0, 1])\n def _get_shape_1(x):\n if hasattr(x, 'state'):\n x = x.state\n return tf.TensorShape(x.shape[1:2])\n prior_static_sample_shapes = tf.nest.map_structure(\n # Sample shape [0, num_particles_canary] particles (size will be zero)\n # then trim off the leading 0 and (possibly) any event shape.\n # We expect shape [num_particles_canary] to remain.\n _get_shape_1,\n self._joint_initial_state_prior.sample([0, num_particles_canary],\n seed=canary_seed))\n if not all([\n tensorshape_util.is_compatible_with(s[:1], [num_particles_canary])\n for s in tf.nest.flatten(prior_static_sample_shapes)\n ]):\n raise ValueError(\n 'The specified prior does not generate consistent '\n 'shapes when sampled. Please verify that all parts of '\n '`initial_state_prior_fn` have batch shape matching '\n 'that of the parameters. 
This may require creating '\n '\"dummy\" dependencies on parameters; for example: '\n '`tf.broadcast_to(value, tf.shape(parameter))`. (in a '\n f'test sample with {num_particles_canary} particles, we expected '\n 'all) values to have shape compatible with '\n f'[{num_particles_canary}, ...]; '\n f'saw shapes {prior_static_sample_shapes})')\n\n # Augment the transition and observation fns to cover both\n # parameters and states.\n self._joint_transition_fn = augment_transition_fn_with_parameters(\n parameter_prior,\n parameterized_transition_fn,\n parameter_constraining_bijector)\n self._joint_observation_fn = augment_observation_fn_with_parameters(\n parameterized_observation_fn,\n parameter_constraining_bijector)\n\n # If given a proposal for the initial state, augment it into a joint\n # proposal over parameters and states.\n joint_initial_state_proposal = None\n if parameterized_initial_state_proposal_fn:\n joint_initial_state_proposal = joint_prior_on_parameters_and_state(\n parameter_prior,\n parameterized_initial_state_proposal_fn,\n parameter_constraining_bijector)\n else:\n parameterized_initial_state_proposal_fn = (\n parameterized_initial_state_prior_fn)\n self._joint_initial_state_proposal = joint_initial_state_proposal\n self._parameterized_initial_state_proposal_fn = (\n parameterized_initial_state_proposal_fn)\n\n # If given a conditional proposal fn (for non-initial states), augment\n # it to be joint over states and parameters.\n self._joint_proposal_fn = None\n if parameterized_proposal_fn:\n self._joint_proposal_fn = augment_transition_fn_with_parameters(\n parameter_prior,\n parameterized_proposal_fn,\n parameter_constraining_bijector)\n\n self._batch_ndims = tf.nest.map_structure(\n ps.rank_from_shape,\n parameter_prior.batch_shape_tensor())\n self._name = name\n\n @property\n def batch_ndims(self):\n return _copy_structure(self._batch_ndims)\n\n @property\n def joint_initial_state_prior(self):\n \"\"\"Initial state prior for the joint (augmented) model.\"\"\"\n return self._joint_initial_state_prior\n\n def joint_initial_state_proposal(self, initial_unconstrained_parameters=None):\n \"\"\"Proposal to initialize the model with given parameter particles.\"\"\"\n if initial_unconstrained_parameters is None:\n joint_initial_state_proposal = self._joint_initial_state_proposal\n else:\n # Hack: DeterministicEmpirical is a fake distribution whose `sample`\n # just proposes *exactly* the parameters we pass in.\n unconstrained_parameter_proposal = DeterministicEmpirical(\n initial_unconstrained_parameters,\n batch_ndims=self.batch_ndims)\n\n # Propose initial state conditioned on the parameters.\n joint_initial_state_proposal = joint_prior_on_parameters_and_state(\n unconstrained_parameter_proposal,\n self.parameterized_initial_state_proposal_fn,\n parameter_constraining_bijector=(\n self.parameter_constraining_bijector),\n prior_is_constrained=False)\n\n # May return `None` if no initial proposal or params were specified.\n return joint_initial_state_proposal\n\n @property\n def joint_transition_fn(self):\n \"\"\"Transition function for the joint (augmented) model.\"\"\"\n return self._joint_transition_fn\n\n @property\n def joint_observation_fn(self):\n \"\"\"Observation function for the joint (augmented) model.\"\"\"\n return self._joint_observation_fn\n\n @property\n def joint_proposal_fn(self):\n \"\"\"Proposal function for the joint (augmented) model.\"\"\"\n return self._joint_proposal_fn\n\n @property\n def name(self):\n return self._name\n\n @property\n def 
parameter_constraining_bijector(self):\n \"\"\"Bijector mapping unconstrained real values into the parameter space.\"\"\"\n return self._parameter_constraining_bijector\n\n @property\n def parameterized_initial_state_prior_fn(self):\n \"\"\"Prior function that was passed in at construction.\"\"\"\n return self._parameterized_initial_state_prior_fn\n\n @property\n def parameterized_initial_state_proposal_fn(self):\n \"\"\"Initial proposal function passed in at construction.\"\"\"\n return self._parameterized_initial_state_proposal_fn\n\n @property\n def parameter_prior(self):\n \"\"\"Prior distribution on parameters passed in at construction.\"\"\"\n return self._parameter_prior\n\n def one_step(self,\n observations,\n perturbation_scale,\n num_particles,\n initial_unconstrained_parameters=None,\n seed=None,\n name=None,\n **kwargs):\n \"\"\"Runs one step of filtering to sharpen parameter estimates.\n\n Args:\n observations: observed `Tensor` value(s) on which to condition the\n parameter estimate.\n perturbation_scale: scalar float `Tensor`, or any structure of float\n `Tensor`s broadcasting to the same shape as the unconstrained\n parameters, specifying the scale (standard deviation) of Gaussian\n perturbations to each parameter at each timestep.\n num_particles: scalar int `Tensor` number of particles to use. Must match\n the batch dimension of `initial_unconstrained_parameters`, if specified.\n initial_unconstrained_parameters: optional structure of `Tensor`s, of\n shape matching\n `self.joint_initial_state_prior.sample([\n num_particles]).unconstrained_parameters`,\n used to initialize the filter.\n Default value: `None`.\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n name: `str` name for ops constructed by this method.\n **kwargs: additional keyword arguments passed to\n `tfp.experimental.mcmc.infer_trajectories`.\n Returns:\n final_unconstrained_parameters: structure of `Tensor`s matching\n `initial_unconstrained_parameters`, containing samples of\n unconstrained parameters at the final timestep, as computed by\n `self.filter_fn`.\n \"\"\"\n with self._name_scope(name or 'one_step'):\n # Run the particle filter.\n (unconstrained_parameter_trajectories, _), _ = (\n infer_trajectories(\n observations=observations,\n initial_state_prior=self.joint_initial_state_prior,\n transition_fn=functools.partial(\n self.joint_transition_fn,\n perturbation_scale=perturbation_scale),\n observation_fn=self.joint_observation_fn,\n proposal_fn=self.joint_proposal_fn,\n initial_state_proposal=self.joint_initial_state_proposal(\n initial_unconstrained_parameters),\n num_particles=num_particles,\n seed=seed,\n **kwargs))\n # Return the parameter estimates from the final step of the trajectory.\n return tf.nest.map_structure(\n lambda part: part[-1],\n unconstrained_parameter_trajectories)\n\n def estimate_parameters(self,\n observations,\n num_iterations,\n num_particles,\n initial_perturbation_scale,\n cooling_schedule,\n seed=None,\n name=None,\n **kwargs):\n \"\"\"Runs multiple iterations of filtering following a cooling schedule.\n\n Args:\n observations: observed `Tensor` value(s) on which to condition the\n parameter estimate.\n num_iterations: `int `Tensor` number of filtering iterations to run.\n num_particles: scalar int `Tensor` number of particles to use.\n initial_perturbation_scale: scalar float `Tensor`, or any structure of\n float `Tensor`s broadcasting to the same shape as the (unconstrained)\n parameters, specifying the scale (standard deviation) of Gaussian\n 
perturbations to each parameter at the first timestep.\n cooling_schedule: callable with signature\n `cooling_factor = cooling_schedule(iteration)` for `iteration` in\n `[0, ..., num_iterations - 1]`. The filter is\n invoked with perturbations of scale\n `initial_perturbation_scale * cooling_schedule(iteration)`.\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n name: `str` name for ops constructed by this method.\n **kwargs: additional keyword arguments passed to\n `tfp.experimental.mcmc.infer_trajectories`.\n Returns:\n final_parameter_particles: structure of `Tensor`s matching\n `self.parameter_prior`, each with batch shape\n `[num_iterations, num_particles]`. These are the populations\n of particles representing the parameter estimate after each iteration\n of filtering.\n \"\"\"\n with self._name_scope(name or 'estimate_parameters'):\n\n step_seed, initial_seed = samplers.split_seed(seed)\n initial_perturbation_scale = tf.convert_to_tensor(\n initial_perturbation_scale, name='initial_perturbation_scale')\n\n # Get initial parameter particles from the first filtering iteration.\n initial_unconstrained_parameters = self.one_step(\n observations=observations,\n num_particles=num_particles,\n perturbation_scale=initial_perturbation_scale,\n seed=step_seed,\n **kwargs)\n\n # Run the remaining iterations and accumulate the results.\n @tf.function(autograph=False)\n def loop_body(unconstrained_parameters_seed, cooling_fraction):\n unconstrained_parameters, seed = unconstrained_parameters_seed\n step_seed, seed = samplers.split_seed(seed)\n return (self.one_step(\n observations=observations,\n num_particles=num_particles,\n perturbation_scale=tf.nest.map_structure(\n lambda s: cooling_fraction * s, initial_perturbation_scale),\n initial_unconstrained_parameters=unconstrained_parameters,\n seed=step_seed,\n **kwargs), seed)\n\n estimated_unconstrained_parameters, _ = tf.scan(\n fn=loop_body,\n elems=cooling_schedule(ps.range(1, num_iterations)),\n initializer=(initial_unconstrained_parameters, initial_seed))\n\n return self.parameter_constraining_bijector.forward(\n estimated_unconstrained_parameters)\n\n @contextlib.contextmanager\n def _name_scope(self, name):\n with tf.name_scope(self.name):\n with tf.name_scope(name) as name_scope:\n yield name_scope\n",
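# A hedged, NumPy-only sketch of the geometric cooling schedule defined above:
# f(iteration) = cooling_fraction_per_k_iterations ** (iteration / k). The IF2
# loop in `estimate_parameters` scales `initial_perturbation_scale` by this
# factor, so parameter perturbations shrink geometrically across filtering
# iterations. The numbers below mirror the docstring example (cooling fraction
# 0.001 over k=20 iterations) and are purely illustrative.
import numpy as np


def geometric_cooling_schedule(cooling_fraction_per_k_iterations, k=1.):
  return lambda iteration: cooling_fraction_per_k_iterations ** (iteration / k)


schedule = geometric_cooling_schedule(0.001, k=20.)
initial_perturbation_scale = 1.0
for iteration in np.arange(0., 21., 5.):
  scale = initial_perturbation_scale * schedule(iteration)
  print(int(iteration), round(scale, 6))
# 0 1.0
# 5 0.177828
# 10 0.031623
# 15 0.005623
# 20 0.001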
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for utilities for testing distributions and/or bijectors.\"\"\"\n\nimport warnings\nfrom absl.testing import parameterized\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.internal import vectorization_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass VectorizationTest(test_util.TestCase):\n\n def test_iid_sample_stateful(self):\n\n # Random fn using stateful samplers.\n def fn(key1, key2, seed=None):\n return [tfd.Normal(0., 1.).sample([3, 2], seed=seed),\n {key1: tfd.Poisson([1., 2., 3., 4.]).sample(seed=seed + 1),\n key2: tfd.LogNormal(0., 1.).sample(seed=seed + 2)}]\n sample = self.evaluate(\n fn('a', key2='b', seed=test_util.test_seed(sampler_type='stateful')))\n\n sample_shape = [6, 1]\n iid_fn = vectorization_util.iid_sample(fn, sample_shape=sample_shape)\n iid_sample = self.evaluate(iid_fn('a', key2='b', seed=42))\n\n # Check that we did not get repeated samples.\n first_sampled_vector = iid_sample[0].flatten()\n self.assertAllGreater(\n (first_sampled_vector[1:] - first_sampled_vector[0])**2, 1e-6)\n\n expected_iid_shapes = tf.nest.map_structure(\n lambda x: np.concatenate([sample_shape, x.shape], axis=0), sample)\n iid_shapes = tf.nest.map_structure(lambda x: x.shape, iid_sample)\n self.assertAllEqualNested(expected_iid_shapes, iid_shapes)\n\n def test_iid_sample_stateless(self):\n\n sample_shape = [6]\n iid_fn = vectorization_util.iid_sample(\n tf.random.stateless_normal, sample_shape=sample_shape)\n\n warnings.simplefilter('always')\n with warnings.catch_warnings(record=True) as triggered:\n samples = iid_fn([], seed=test_util.test_seed(sampler_type='stateless'))\n self.assertTrue(\n any('may be quite slow' in str(warning.message)\n for warning in triggered))\n\n # Check that we did not get repeated samples.\n samples_ = self.evaluate(samples)\n self.assertAllGreater((samples_[1:] - samples_[0])**2, 1e-6)\n\n def test_docstring_example(self):\n add = lambda a, b: a + b\n add_vector_to_scalar = vectorization_util.make_rank_polymorphic(\n add, core_ndims=(1, 0))\n self.assertAllEqual(\n [[4., 5.], [5., 6.], [6., 7.]],\n self.evaluate(add_vector_to_scalar(\n tf.constant([1., 2.]), tf.constant([3., 4., 5.]))))\n\n def test_can_take_structured_input_and_output(self):\n # Dummy function that takes a (tuple, dict) pair\n # and returns a (dict, scalar) pair.\n def fn(x, y):\n a, b, c = x\n d, e = y['d'], y['e']\n return {'r': a * b + c}, d + e\n\n vectorized_fn = vectorization_util.make_rank_polymorphic(\n fn, core_ndims=0)\n\n x = np.array([[2.], [3.]]), np.array(2.), np.array([5., 6., 7.])\n y = {'d': np.array([[1.]]), 'e': np.array([2., 3., 4.])}\n vectorized_result = self.evaluate(vectorized_fn(x, y))\n result = 
tf.nest.map_structure(lambda a, b: a * np.ones(b.shape),\n fn(x, y), vectorized_result)\n self.assertAllClose(result, vectorized_result)\n\n @parameterized.named_parameters(\n ('static_shapes', True),\n ('dynamic_shapes', False))\n def tests_aligns_broadcast_dims_using_core_ndims(self, is_static):\n np.random.seed(test_util.test_seed() % 2**32)\n\n def matvec(a, b):\n # Throws an error if either arg has extra dimensions.\n return tf.linalg.matvec(tf.reshape(a, tf.shape(a)[-2:]),\n tf.reshape(b, tf.shape(b)[-1:]))\n\n vectorized_matvec = vectorization_util.make_rank_polymorphic(\n matvec, core_ndims=(\n self.maybe_static(2, is_static=is_static),\n self.maybe_static(1, is_static=is_static)))\n\n for (a_shape, b_shape) in (([3, 2], [2]),\n ([4, 3, 2], [2]),\n ([4, 3, 2], [5, 1, 2])):\n a = self.maybe_static(np.random.randn(*a_shape), is_static=is_static)\n b = self.maybe_static(np.random.randn(*b_shape), is_static=is_static)\n\n c = tf.linalg.matvec(a, b)\n c_vectorized = vectorized_matvec(a, b)\n if is_static:\n self.assertAllEqual(c.shape, c_vectorized.shape)\n self.assertAllEqual(*self.evaluate((c, c_vectorized)))\n\n def test_can_call_with_variable_number_of_args(self):\n\n def scalar_sum(*args):\n return sum([tf.reshape(x, []) for x in args])\n vectorized_sum = vectorization_util.make_rank_polymorphic(\n scalar_sum, core_ndims=0)\n\n xs = [1.,\n np.array([3., 2.]).astype(np.float32),\n np.array([[1., 2.], [-4., 3.]]).astype(np.float32)]\n self.assertAllEqual(self.evaluate(vectorized_sum(*xs)), sum(xs))\n\n def test_passes_insufficient_rank_input_through_to_function(self):\n\n vectorized_vector_sum = vectorization_util.make_rank_polymorphic(\n lambda a, b: a + b, core_ndims=(1, 1))\n c = vectorized_vector_sum(tf.convert_to_tensor(3.),\n tf.convert_to_tensor([1., 2., 3.]))\n self.assertAllClose(c, [4., 5., 6.])\n\n vectorized_matvec = vectorization_util.make_rank_polymorphic(\n tf.linalg.matvec, core_ndims=(2, 1))\n with self.assertRaisesRegexp(\n ValueError, 'Shape must be rank 2 but is rank 1'):\n vectorized_matvec(tf.zeros([5]), tf.zeros([2, 1, 5]))\n\n def test_can_escape_vectorization_with_none_ndims(self):\n\n # Suppose the original fn supports `None` as an input.\n fn = lambda x, y: (tf.reduce_sum(x, axis=0), y[0] if y is not None else y)\n\n polymorphic_fn = vectorization_util.make_rank_polymorphic(\n fn, core_ndims=[1, None])\n rx, ry = polymorphic_fn([[1., 2., 4.], [3., 5., 7.]], None)\n self.assertAllEqual(rx.shape, [2])\n self.assertIsNone(ry)\n\n single_arg_polymorphic_fn = vectorization_util.make_rank_polymorphic(\n lambda y: fn(tf.convert_to_tensor([1., 2., 3.]), y), core_ndims=None)\n rx, ry = self.evaluate(single_arg_polymorphic_fn(\n tf.convert_to_tensor([[1., 3.], [2., 4.]])))\n self.assertAllEqual(ry, [1., 3.])\n\n def test_unit_batch_dims_are_flattened(self):\n # Define `fn` to expect a vector input.\n fn = lambda x: tf.einsum('n->', x)\n # Verify that it won't accept a batch dimension.\n with self.assertRaisesRegexp(Exception, 'rank'):\n fn(tf.zeros([1, 5]))\n\n polymorphic_fn = vectorization_util.make_rank_polymorphic(fn,\n core_ndims=[1])\n for batch_shape in ([], [1], [1, 1]):\n self.assertEqual(batch_shape,\n polymorphic_fn(tf.zeros(batch_shape + [5])).shape)\n\n def test_unit_batch_dims_are_not_vectorized(self):\n if not tf.executing_eagerly():\n self.skipTest('Test relies on eager execution.')\n\n # Define `fn` to expect a vector input.\n def must_run_eagerly(x):\n if not tf.executing_eagerly():\n raise ValueError('Code is running inside tf.function. 
This may '\n 'indicate that auto-vectorization is being '\n 'triggered unnecessarily.')\n return x\n\n polymorphic_fn = vectorization_util.make_rank_polymorphic(\n must_run_eagerly, core_ndims=[0])\n for batch_shape in ([], [1], [1, 1]):\n polymorphic_fn(tf.zeros(batch_shape))\n\n def test_docstring_example_passing_fn_arg(self):\n def apply_binop(fn, a, b):\n return fn(a, b)\n apply_binop_to_vector_and_scalar = vectorization_util.make_rank_polymorphic(\n apply_binop, core_ndims=(None, 1, 0))\n r = self.evaluate(apply_binop_to_vector_and_scalar(\n lambda a, b: a * b, tf.constant([1., 2.]), tf.constant([3., 4., 5.])))\n self.assertAllEqual(r, np.array(\n [[3., 6.], [4., 8.], [5., 10.]], dtype=np.float32))\n\n def test_rectifies_distribution_batch_shapes(self):\n def fn(scale):\n d = tfd.Normal(loc=0, scale=[scale])\n x = d.sample()\n return d, x, d.log_prob(x)\n\n polymorphic_fn = vectorization_util.make_rank_polymorphic(\n fn, core_ndims=(0))\n batch_scale = tf.constant([[4., 2., 5.], [1., 2., 1.]], dtype=tf.float32)\n d, x, lp = polymorphic_fn(batch_scale)\n self.assertAllEqual(d.batch_shape.as_list(), x.shape.as_list())\n lp2 = d.log_prob(x)\n self.assertAllClose(*self.evaluate((lp, lp2)))\n\n\nif __name__ == '__main__':\n test_util.main()\n",
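# A self-contained analogy for the "rank polymorphism" exercised by the tests
# above, written with NumPy's gufunc-style `np.vectorize` rather than the
# internal `vectorization_util.make_rank_polymorphic` (so this illustrates the
# concept, not the TFP implementation). A function written for fixed core
# ranks -- one [m, n] matrix and one [n] vector -- is lifted to accept extra
# batch dimensions, which are broadcast against each other. The shapes mirror
# `tests_aligns_broadcast_dims_using_core_ndims`.
import numpy as np


def matvec(a, b):
  # Core computation: expects exactly one [m, n] matrix and one [n] vector.
  return a @ b


batched_matvec = np.vectorize(matvec, signature='(m,n),(n)->(m)')

a = np.random.randn(4, 3, 2)  # batch shape [4] of [3, 2] matrices
b = np.random.randn(5, 1, 2)  # batch shape [5, 1] of [2] vectors
c = batched_matvec(a, b)      # batch shapes broadcast to [5, 4]
print(c.shape)  # (5, 4, 3)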
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for MatrixInverseTriL bijector.\"\"\"\n\n# Dependency imports\n\nimport numpy as np\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass MatrixInverseTriLBijectorTest(test_util.TestCase):\n \"\"\"Tests the correctness of the Y = inv(tril) transformation.\"\"\"\n\n # The inverse of 0 is undefined, as the numbers above the main\n # diagonal must be zero, we zero out these numbers after running inverse.\n # See: https://github.com/numpy/numpy/issues/11445\n def _inv(self, x):\n y = np.linalg.inv(x)\n # Since triu_indices only works on 2d arrays we need to iterate over all the\n # 2d arrays in a x-dimensional array.\n for idx in np.ndindex(y.shape[0:-2]):\n y[idx][np.triu_indices(y[idx].shape[-1], 1)] = 0\n return y\n\n def testComputesCorrectValues(self):\n inv = tfb.MatrixInverseTriL(validate_args=True)\n self.assertStartsWith(inv.name, 'matrix_inverse_tril')\n x_ = np.array([[0.7, 0., 0.],\n [0.1, -1., 0.],\n [0.3, 0.25, 0.5]], dtype=np.float32)\n x_inv_ = np.linalg.inv(x_)\n\n y = inv.forward(x_)\n x_back = inv.inverse(x_inv_)\n\n y_, x_back_ = self.evaluate([y, x_back])\n\n self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)\n self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)\n\n def testOneByOneMatrix(self):\n inv = tfb.MatrixInverseTriL(validate_args=True)\n x_ = np.array([[5.]], dtype=np.float32)\n x_inv_ = np.array([[0.2]], dtype=np.float32)\n\n y = inv.forward(x_)\n x_back = inv.inverse(x_inv_)\n\n y_, x_back_ = self.evaluate([y, x_back])\n\n self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)\n self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)\n\n def testZeroByZeroMatrix(self):\n inv = tfb.MatrixInverseTriL(validate_args=True)\n x_ = np.eye(0, dtype=np.float32)\n x_inv_ = np.eye(0, dtype=np.float32)\n\n y = inv.forward(x_)\n x_back = inv.inverse(x_inv_)\n\n y_, x_back_ = self.evaluate([y, x_back])\n\n self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)\n self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)\n\n def testBatch(self):\n # Test batch computation with input shape (2, 1, 2, 2), i.e. 
batch shape\n # (2, 1).\n inv = tfb.MatrixInverseTriL(validate_args=True)\n x_ = np.array([[[[1., 0.],\n [2., 3.]]],\n [[[4., 0.],\n [5., -6.]]]], dtype=np.float32)\n x_inv_ = self._inv(x_)\n\n y = inv.forward(x_)\n x_back = inv.inverse(x_inv_)\n\n y_, x_back_ = self.evaluate([y, x_back])\n\n self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)\n self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)\n\n def testErrorOnInputRankTooLow(self):\n inv = tfb.MatrixInverseTriL(validate_args=True)\n x_ = np.array([0.1], dtype=np.float32)\n rank_error_msg = 'must have rank at least 2'\n with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):\n self.evaluate(inv.forward(x_))\n with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):\n self.evaluate(inv.inverse(x_))\n with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):\n self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))\n with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):\n self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))\n\n # TODO(b/80481923): Figure out why these assertions fail, and fix them.\n ## def testErrorOnInputNonSquare(self):\n ## inv = tfb.MatrixInverseTriL(validate_args=True)\n ## x_ = np.array([[1., 2., 3.],\n ## [4., 5., 6.]], dtype=np.float32)\n ## square_error_msg = 'must be a square matrix'\n ## with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,\n ## square_error_msg):\n ## self.evaluate(inv.forward(x_))\n ## with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,\n ## square_error_msg):\n ## self.evaluate(inv.inverse(x_))\n ## with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,\n ## square_error_msg):\n ## self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))\n ## with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,\n ## square_error_msg):\n ## self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))\n\n def testErrorOnInputNotLowerTriangular(self):\n inv = tfb.MatrixInverseTriL(validate_args=True)\n x_ = np.array([[1., 2.],\n [3., 4.]], dtype=np.float32)\n triangular_error_msg = 'must be lower triangular'\n with self.assertRaisesOpError(triangular_error_msg):\n self.evaluate(inv.forward(x_))\n with self.assertRaisesOpError(triangular_error_msg):\n self.evaluate(inv.inverse(x_))\n with self.assertRaisesOpError(triangular_error_msg):\n self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))\n with self.assertRaisesOpError(triangular_error_msg):\n self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))\n\n def testErrorOnInputSingular(self):\n inv = tfb.MatrixInverseTriL(validate_args=True)\n x_ = np.array([[1., 0.],\n [0., 0.]], dtype=np.float32)\n nonsingular_error_msg = 'must have all diagonal entries nonzero'\n with self.assertRaisesOpError(nonsingular_error_msg):\n self.evaluate(inv.forward(x_))\n with self.assertRaisesOpError(nonsingular_error_msg):\n self.evaluate(inv.inverse(x_))\n with self.assertRaisesOpError(nonsingular_error_msg):\n self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))\n with self.assertRaisesOpError(nonsingular_error_msg):\n self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))\n\n @test_util.numpy_disable_gradient_test\n def testJacobian(self):\n bijector = tfb.MatrixInverseTriL()\n batch_size = 5\n for ndims in range(2, 5):\n x_ = np.tril(\n np.random.uniform(\n -1., 1., size=[batch_size, ndims, ndims]).astype(np.float64))\n fldj = bijector.forward_log_det_jacobian(x_, event_ndims=2)\n 
fldj_theoretical = bijector_test_util.get_fldj_theoretical(\n bijector, x_, event_ndims=2,\n input_to_unconstrained=tfb.Invert(tfb.FillTriangular()),\n output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))\n fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])\n self.assertAllClose(fldj_, fldj_theoretical_)\n\n\nif __name__ == '__main__':\n test_util.main()\n",
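The test file above exercises `tfb.MatrixInverseTriL`, which maps a lower-triangular matrix with nonzero diagonal to its (also lower-triangular) inverse. Below is a minimal standalone sketch of that forward/inverse round trip, assuming a recent `tensorflow_probability` running eagerly; the example matrix and variable names are illustrative and not taken from the dataset record.

```python
# Hedged sketch: round trip through the MatrixInverseTriL bijector (eager TF2 assumed).
import numpy as np
import tensorflow_probability as tfp

tfb = tfp.bijectors
bij = tfb.MatrixInverseTriL(validate_args=True)

x = np.array([[2., 0.],
              [1., 4.]], dtype=np.float32)  # lower triangular, nonzero diagonal
y = bij.forward(x)                          # the lower-triangular inverse of x
x_back = bij.inverse(y)                     # recovers x up to floating-point error
```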
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Functions for computing statistics of samples.\"\"\"\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.math.generic import reduce_logmeanexp\n\n\n__all__ = [\n 'auto_correlation',\n 'cholesky_covariance',\n 'correlation',\n 'covariance',\n 'cumulative_variance',\n 'log_average_probs',\n 'stddev',\n 'variance',\n 'windowed_mean',\n 'windowed_variance',\n]\n\n\n# TODO(langmore) Write separate versions of this for real/complex dtype, taking\n# advantage of optimized real-fft ops.\ndef auto_correlation(x,\n axis=-1,\n max_lags=None,\n center=True,\n normalize=True,\n name='auto_correlation'):\n \"\"\"Auto correlation along one axis.\n\n Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation\n `RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)\n\n ```\n RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },\n W[n] := (X[n] - MU) / S,\n MU := E{ X[0] },\n S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.\n ```\n\n This function takes the viewpoint that `x` is (along one axis) a finite\n sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an\n estimate of `RXX[m]` as follows:\n\n After extending `x` from length `L` to `inf` by zero padding, the auto\n correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as\n\n ```\n rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),\n w[n] := (x[n] - mu) / s,\n mu := L**-1 sum_n x[n],\n s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)\n ```\n\n The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users\n often set `max_lags` small enough so that the entire output is meaningful.\n\n Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by\n `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation\n contains a slight bias, which goes to zero as `len(x) - m --> infinity`.\n\n Args:\n x: `float32` or `complex64` `Tensor`.\n axis: Python `int`. The axis number along which to compute correlation.\n Other dimensions index different batch members.\n max_lags: Positive `int` tensor. The maximum value of `m` to consider (in\n equation above). If `max_lags >= x.shape[axis]`, we effectively re-set\n `max_lags` to `x.shape[axis] - 1`.\n center: Python `bool`. If `False`, do not subtract the mean estimate `mu`\n from `x[n]` when forming `w[n]`.\n normalize: Python `bool`. 
If `False`, do not divide by the variance\n estimate `s**2` when forming `w[n]`.\n name: `String` name to prepend to created ops.\n\n Returns:\n `rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for\n `i != axis`, and `rxx.shape[axis] = max_lags + 1`.\n\n Raises:\n TypeError: If `x` is not a supported type.\n \"\"\"\n # Implementation details:\n # Extend length N / 2 1-D array x to length N by zero padding onto the end.\n # Then, set\n # F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.\n # It is not hard to see that\n # F[x]_k Conj(F[x]_k) = F[R]_k, where\n # R_m := sum_n x_n Conj(x_{(n - m) mod N}).\n # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].\n\n # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT\n # based version of estimating RXX.\n # Note that this is a special case of the Wiener-Khinchin Theorem.\n with tf.name_scope(name):\n x = tf.convert_to_tensor(x, name='x')\n\n # Rotate dimensions of x in order to put axis at the rightmost dim.\n # FFT op requires this.\n rank = ps.rank(x)\n if axis < 0:\n axis = rank + axis\n shift = rank - 1 - axis\n # Suppose x.shape[axis] = T, so there are T 'time' steps.\n # ==> x_rotated.shape = B + [T],\n # where B is x_rotated's batch shape.\n x_rotated = distribution_util.rotate_transpose(x, shift)\n\n if center:\n x_rotated = x_rotated - tf.reduce_mean(x_rotated, axis=-1, keepdims=True)\n\n # x_len = N / 2 from above explanation. The length of x along axis.\n # Get a value for x_len that works in all cases.\n x_len = ps.shape(x_rotated)[-1]\n\n # TODO(langmore) Investigate whether this zero padding helps or hurts. At\n # the moment is necessary so that all FFT implementations work.\n # Zero pad to the next power of 2 greater than 2 * x_len, which equals\n # 2**(ceil(Log_2(2 * x_len))). 
Note: Log_2(X) = Log_e(X) / Log_e(2).\n x_len_float64 = ps.cast(x_len, np.float64)\n target_length = ps.pow(\n np.float64(2.), ps.ceil(\n ps.log(x_len_float64 * 2) / np.log(2.)))\n pad_length = ps.cast(target_length - x_len_float64, np.int32)\n\n # We should have:\n # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]\n # = B + [T + pad_length]\n x_rotated_pad = distribution_util.pad(\n x_rotated, axis=-1, back=True, count=pad_length)\n\n dtype = x.dtype\n if not dtype_util.is_complex(dtype):\n if not dtype_util.is_floating(dtype):\n raise TypeError('Argument x must have either float or complex dtype'\n ' found: {}'.format(dtype))\n x_rotated_pad = tf.complex(\n x_rotated_pad,\n dtype_util.as_numpy_dtype(dtype_util.real_dtype(dtype))(0.))\n\n # Autocorrelation is IFFT of power-spectral density (up to some scaling).\n fft_x_rotated_pad = tf.signal.fft(x_rotated_pad)\n spectral_density = fft_x_rotated_pad * tf.math.conj(fft_x_rotated_pad)\n # shifted_product is R[m] from above detailed explanation.\n # It is the inner product sum_n X[n] * Conj(X[n - m]).\n shifted_product = tf.signal.ifft(spectral_density)\n\n # Cast back to real-valued if x was real to begin with.\n shifted_product = tf.cast(shifted_product, dtype)\n\n # Figure out if we can deduce the final static shape, and set max_lags.\n # Use x_rotated as a reference, because it has the time dimension in the far\n # right, and was created before we performed all sorts of crazy shape\n # manipulations.\n know_static_shape = True\n if not tensorshape_util.is_fully_defined(x_rotated.shape):\n know_static_shape = False\n if max_lags is None:\n max_lags = x_len - 1\n else:\n max_lags = ps.convert_to_shape_tensor(max_lags, name='max_lags')\n max_lags_ = tf.get_static_value(max_lags)\n if max_lags_ is None or not know_static_shape:\n know_static_shape = False\n max_lags = tf.minimum(x_len - 1, max_lags)\n else:\n max_lags = min(x_len - 1, max_lags_)\n\n # Chop off the padding.\n # We allow users to provide a huge max_lags, but cut it off here.\n # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]\n shifted_product_chopped = shifted_product[..., :max_lags + 1]\n\n # If possible, set shape.\n if know_static_shape:\n chopped_shape = tensorshape_util.as_list(x_rotated.shape)\n chopped_shape[-1] = min(x_len, max_lags + 1)\n tensorshape_util.set_shape(shifted_product_chopped, chopped_shape)\n\n # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). 
The\n # other terms were zeros arising only due to zero padding.\n # `denominator = (N / 2 - m)` (defined below) is the proper term to\n # divide by to make this an unbiased estimate of the expectation\n # E[X[n] Conj(X[n - m])].\n x_len = ps.cast(x_len, dtype_util.real_dtype(dtype))\n max_lags = ps.cast(max_lags, dtype_util.real_dtype(dtype))\n denominator = x_len - ps.range(0., max_lags + 1.)\n denominator = ps.cast(denominator, dtype)\n shifted_product_rotated = shifted_product_chopped / denominator\n\n if normalize:\n shifted_product_rotated /= shifted_product_rotated[..., :1]\n\n # Transpose dimensions back to those of x.\n return distribution_util.rotate_transpose(shifted_product_rotated, -shift)\n\n\ndef cholesky_covariance(x, sample_axis=0, keepdims=False, name=None):\n \"\"\"Cholesky factor of the covariance matrix of vector-variate random samples.\n\n This function can be use to fit a multivariate normal to data.\n\n ```python\n tf.enable_eager_execution()\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n # Assume data.shape = (1000, 2). 1000 samples of a random variable in R^2.\n observed_data = read_data_samples(...)\n\n # The mean is easy\n mu = tf.reduce_mean(observed_data, axis=0)\n\n # Get the scale matrix\n L = tfp.stats.cholesky_covariance(observed_data)\n\n # Make the best fit multivariate normal (under maximum likelihood condition).\n mvn = tfd.MultivariateNormalTriL(loc=mu, scale_tril=L)\n\n # Plot contours of the pdf.\n xs, ys = tf.meshgrid(\n tf.linspace(-5., 5., 50), tf.linspace(-5., 5., 50), indexing='ij')\n xy = tf.stack((tf.reshape(xs, [-1]), tf.reshape(ys, [-1])), axis=-1)\n pdf = tf.reshape(mvn.prob(xy), (50, 50))\n CS = plt.contour(xs, ys, pdf, 10)\n plt.clabel(CS, inline=1, fontsize=10)\n ```\n\n Why does this work?\n Given vector-variate random variables `X = (X1, ..., Xd)`, one may obtain the\n sample covariance matrix in `R^{d x d}` (see `tfp.stats.covariance`).\n\n The [Cholesky factor](https://en.wikipedia.org/wiki/Cholesky_decomposition)\n of this matrix is analogous to standard deviation for scalar random variables:\n Suppose `X` has covariance matrix `C`, with Cholesky factorization `C = L L^T`\n Then multiplying a vector of iid random variables which have unit variance by\n `L` produces a vector with covariance `L L^T`, which is the same as `X`.\n\n ```python\n observed_data = read_data_samples(...)\n L = tfp.stats.cholesky_covariance(observed_data, sample_axis=0)\n\n # Make fake_data with the same covariance as observed_data.\n uncorrelated_normal = tf.random.normal(shape=(500, 10))\n fake_data = tf.linalg.matvec(L, uncorrelated_normal)\n ```\n\n Args:\n x: Numeric `Tensor`. The rightmost dimension of `x` indexes events. E.g.\n dimensions of a random vector.\n sample_axis: Scalar or vector `Tensor` designating axis holding samples.\n Default value: `0` (leftmost dimension). Cannot be the rightmost dimension\n (since this indexes events).\n keepdims: Boolean. Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'covariance'`).\n\n Returns:\n chol: `Tensor` of same `dtype` as `x`. 
The last two dimensions hold\n lower triangular matrices (the Cholesky factors).\n \"\"\"\n with tf.name_scope(name or 'cholesky_covariance'):\n sample_axis = ps.convert_to_shape_tensor(sample_axis, dtype=tf.int32)\n cov = covariance(\n x, sample_axis=sample_axis, event_axis=-1, keepdims=keepdims)\n return tf.linalg.cholesky(cov)\n\n\ndef covariance(x,\n y=None,\n sample_axis=0,\n event_axis=-1,\n keepdims=False,\n name=None):\n \"\"\"Sample covariance between observations indexed by `event_axis`.\n\n Given `N` samples of scalar random variables `X` and `Y`, covariance may be\n estimated as\n\n ```none\n Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}\n Xbar := N^{-1} sum_{n=1}^N X_n\n Ybar := N^{-1} sum_{n=1}^N Y_n\n ```\n\n For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,\n one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.\n\n ```python\n x = tf.random.normal(shape=(100, 2, 3))\n y = tf.random.normal(shape=(100, 2, 3))\n\n # cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].\n cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)\n\n # cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]\n cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1)\n ```\n\n Notice we divide by `N`, which does not create `NaN` when `N = 1`, but is\n slightly biased.\n\n Args:\n x: A numeric `Tensor` holding samples.\n y: Optional `Tensor` with same `dtype` and `shape` as `x`.\n Default value: `None` (`y` is effectively set to `x`).\n sample_axis: Scalar or vector `Tensor` designating axis holding samples, or\n `None` (meaning all axis hold samples).\n Default value: `0` (leftmost dimension).\n event_axis: Scalar or vector `Tensor`, or `None` (scalar events).\n Axis indexing random events, whose covariance we are interested in.\n If a vector, entries must form a contiguous block of dims. `sample_axis`\n and `event_axis` should not intersect.\n Default value: `-1` (rightmost axis holds events).\n keepdims: Boolean. 
Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'covariance'`).\n\n Returns:\n cov: A `Tensor` of same `dtype` as the `x`, and rank equal to\n `rank(x) - len(sample_axis) + 2 * len(event_axis)`.\n\n Raises:\n AssertionError: If `x` and `y` are found to have different shape.\n ValueError: If `sample_axis` and `event_axis` are found to overlap.\n ValueError: If `event_axis` is found to not be contiguous.\n \"\"\"\n\n with tf.name_scope(name or 'covariance'):\n x = tf.convert_to_tensor(x, name='x')\n # Covariance *only* uses the centered versions of x (and y).\n x = x - tf.reduce_mean(x, axis=sample_axis, keepdims=True)\n\n if y is None:\n y = x\n else:\n y = tf.convert_to_tensor(y, name='y', dtype=x.dtype)\n # If x and y have different shape, sample_axis and event_axis will likely\n # be wrong for one of them!\n tensorshape_util.assert_is_compatible_with(x.shape, y.shape)\n y = y - tf.reduce_mean(y, axis=sample_axis, keepdims=True)\n\n if event_axis is None:\n return tf.reduce_mean(\n x * tf.math.conj(y), axis=sample_axis, keepdims=keepdims)\n\n if sample_axis is None:\n raise ValueError(\n 'sample_axis was None, which means all axis hold events, and this '\n 'overlaps with event_axis ({})'.format(event_axis))\n\n event_axis = _make_positive_axis(event_axis, ps.rank(x))\n sample_axis = _make_positive_axis(sample_axis, ps.rank(x))\n\n # If we get lucky and axis is statically defined, we can do some checks.\n if _is_list_like(event_axis) and _is_list_like(sample_axis):\n event_axis = tuple(map(int, event_axis))\n sample_axis = tuple(map(int, sample_axis))\n if set(event_axis).intersection(sample_axis):\n raise ValueError(\n 'sample_axis ({}) and event_axis ({}) overlapped'.format(\n sample_axis, event_axis))\n if (np.diff(np.array(sorted(event_axis))) > 1).any():\n raise ValueError(\n 'event_axis must be contiguous. 
Found: {}'.format(event_axis))\n batch_axis = list(\n sorted(\n set(range(tensorshape_util.rank(\n x.shape))).difference(sample_axis + event_axis)))\n else:\n batch_axis = ps.setdiff1d(\n ps.range(0, ps.rank(x)), ps.concat((sample_axis, event_axis), 0))\n\n event_axis = ps.cast(event_axis, dtype=tf.int32)\n sample_axis = ps.cast(sample_axis, dtype=tf.int32)\n batch_axis = ps.cast(batch_axis, dtype=tf.int32)\n\n # Permute x/y until shape = B + E + S\n perm_for_xy = ps.concat((batch_axis, event_axis, sample_axis), 0)\n x_permed = tf.transpose(a=x, perm=perm_for_xy)\n y_permed = tf.transpose(a=y, perm=perm_for_xy)\n\n batch_ndims = ps.size(batch_axis)\n batch_shape = ps.shape(x_permed)[:batch_ndims]\n event_ndims = ps.size(event_axis)\n event_shape = ps.shape(x_permed)[batch_ndims:batch_ndims + event_ndims]\n sample_shape = ps.shape(x_permed)[batch_ndims + event_ndims:]\n sample_ndims = ps.size(sample_shape)\n n_samples = ps.reduce_prod(sample_shape)\n n_events = ps.reduce_prod(event_shape)\n\n # Flatten sample_axis into one long dim.\n x_permed_flat = tf.reshape(\n x_permed, ps.concat((batch_shape, event_shape, [n_samples]), 0))\n y_permed_flat = tf.reshape(\n y_permed, ps.concat((batch_shape, event_shape, [n_samples]), 0))\n # Do the same for event_axis.\n x_permed_flat = tf.reshape(\n x_permed, ps.concat((batch_shape, [n_events], [n_samples]), 0))\n y_permed_flat = tf.reshape(\n y_permed, ps.concat((batch_shape, [n_events], [n_samples]), 0))\n\n # After matmul, cov.shape = batch_shape + [n_events, n_events]\n cov = tf.matmul(\n x_permed_flat, y_permed_flat, adjoint_b=True) / ps.cast(\n n_samples, x.dtype)\n\n # Insert some singletons to make\n # cov.shape = batch_shape + event_shape**2 + [1,...,1]\n # This is just like x_permed.shape, except the sample_axis is all 1's, and\n # the [n_events] became event_shape**2.\n cov = tf.reshape(\n cov,\n ps.concat(\n (\n batch_shape,\n # event_shape**2 used here because it is the same length as\n # event_shape, and has the same number of elements as one\n # batch of covariance.\n event_shape**2,\n ps.ones([sample_ndims], tf.int32)),\n 0))\n # Permuting by the argsort inverts the permutation, making\n # cov.shape have ones in the position where there were samples, and\n # [n_events * n_events] in the event position.\n cov = tf.transpose(a=cov, perm=ps.invert_permutation(perm_for_xy))\n\n # Now expand event_shape**2 into event_shape + event_shape.\n # We here use (for the first time) the fact that we require event_axis to be\n # contiguous.\n e_start = event_axis[0]\n e_len = 1 + event_axis[-1] - event_axis[0]\n cov = tf.reshape(\n cov,\n ps.concat((ps.shape(cov)[:e_start], event_shape, event_shape,\n ps.shape(cov)[e_start + e_len:]), 0))\n\n # tf.squeeze requires python ints for axis, not Tensor. 
This is enough to\n # require our axis args to be constants.\n if not keepdims:\n squeeze_axis = ps.where(sample_axis < e_start, sample_axis,\n sample_axis + e_len)\n cov = _squeeze(cov, axis=squeeze_axis)\n\n return cov\n\n\ndef correlation(x,\n y=None,\n sample_axis=0,\n event_axis=-1,\n keepdims=False,\n name=None):\n \"\"\"Sample correlation (Pearson) between observations indexed by `event_axis`.\n\n Given `N` samples of scalar random variables `X` and `Y`, correlation may be\n estimated as\n\n ```none\n Corr[X, Y] := Cov[X, Y] / Sqrt(Cov[X, X] * Cov[Y, Y]),\n where\n Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}\n Xbar := N^{-1} sum_{n=1}^N X_n\n Ybar := N^{-1} sum_{n=1}^N Y_n\n ```\n\n Correlation is always in the interval `[-1, 1]`, and `Corr[X, X] == 1`.\n\n For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,\n one is often interested in the correlation matrix, `C_{ij} := Corr[Xi, Yj]`.\n\n ```python\n x = tf.random.normal(shape=(100, 2, 3))\n y = tf.random.normal(shape=(100, 2, 3))\n\n # corr[i, j] is the sample correlation between x[:, i, j] and y[:, i, j].\n corr = tfp.stats.correlation(x, y, sample_axis=0, event_axis=None)\n\n # corr_matrix[i, m, n] is the sample correlation of x[:, i, m] and y[:, i, n]\n corr_matrix = tfp.stats.correlation(x, y, sample_axis=0, event_axis=-1)\n ```\n\n Notice we divide by `N` (the numpy default), which does not create `NaN`\n when `N = 1`, but is slightly biased.\n\n Args:\n x: A numeric `Tensor` holding samples.\n y: Optional `Tensor` with same `dtype` and `shape` as `x`.\n Default value: `None` (`y` is effectively set to `x`).\n sample_axis: Scalar or vector `Tensor` designating axis holding samples, or\n `None` (meaning all axis hold samples).\n Default value: `0` (leftmost dimension).\n event_axis: Scalar or vector `Tensor`, or `None` (scalar events).\n Axis indexing random events, whose correlation we are interested in.\n If a vector, entries must form a contiguous block of dims. `sample_axis`\n and `event_axis` should not intersect.\n Default value: `-1` (rightmost axis holds events).\n keepdims: Boolean. Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'correlation'`).\n\n Returns:\n corr: A `Tensor` of same `dtype` as the `x`, and rank equal to\n `rank(x) - len(sample_axis) + 2 * len(event_axis)`.\n\n Raises:\n AssertionError: If `x` and `y` are found to have different shape.\n ValueError: If `sample_axis` and `event_axis` are found to overlap.\n ValueError: If `event_axis` is found to not be contiguous.\n \"\"\"\n\n with tf.name_scope(name or 'correlation'):\n # Corr[X, Y] = Cov[X, Y] / (Stddev[X] * Stddev[Y])\n # = Cov[X / Stddev[X], Y / Stddev[Y]]\n # So we could compute covariance first then divide by stddev, or\n # divide by stddev and compute covariance.\n # Dividing by stddev then computing covariance is potentially more stable.\n # But... computing covariance first then dividing involves 2 fewer large\n # broadcasts. 
We choose to divide first, largely because it avoids\n # difficulties with the various options for sample/event axis kwargs.\n\n x /= stddev(x, sample_axis=sample_axis, keepdims=True)\n if y is not None:\n y /= stddev(y, sample_axis=sample_axis, keepdims=True)\n\n return covariance(\n x=x,\n y=y,\n event_axis=event_axis,\n sample_axis=sample_axis,\n keepdims=keepdims)\n\n\ndef stddev(x, sample_axis=0, keepdims=False, name=None):\n \"\"\"Estimate standard deviation using samples.\n\n Given `N` samples of scalar valued random variable `X`, standard deviation may\n be estimated as\n\n ```none\n Stddev[X] := Sqrt[Var[X]],\n Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)},\n Xbar := N^{-1} sum_{n=1}^N X_n\n ```\n\n ```python\n x = tf.random.normal(shape=(100, 2, 3))\n\n # stddev[i, j] is the sample standard deviation of the (i, j) batch member.\n stddev = tfp.stats.stddev(x, sample_axis=0)\n ```\n\n Scaling a unit normal by a standard deviation produces normal samples\n with that standard deviation.\n\n ```python\n observed_data = read_data_samples(...)\n stddev = tfp.stats.stddev(observed_data)\n\n # Make fake_data with the same standard deviation as observed_data.\n fake_data = stddev * tf.random.normal(shape=(100,))\n ```\n\n Notice we divide by `N` (the numpy default), which does not create `NaN`\n when `N = 1`, but is slightly biased.\n\n Args:\n x: A numeric `Tensor` holding samples.\n sample_axis: Scalar or vector `Tensor` designating axis holding samples, or\n `None` (meaning all axis hold samples).\n Default value: `0` (leftmost dimension).\n keepdims: Boolean. Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'stddev'`).\n\n Returns:\n stddev: A `Tensor` of same `dtype` as the `x`, and rank equal to\n `rank(x) - len(sample_axis)`\n \"\"\"\n with tf.name_scope(name or 'stddev'):\n return tf.sqrt(variance(x, sample_axis=sample_axis, keepdims=keepdims))\n\n\ndef variance(x, sample_axis=0, keepdims=False, name=None):\n \"\"\"Estimate variance using samples.\n\n Given `N` samples of scalar valued random variable `X`, variance may\n be estimated as\n\n ```none\n Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)}\n Xbar := N^{-1} sum_{n=1}^N X_n\n ```\n\n ```python\n x = tf.random.normal(shape=(100, 2, 3))\n\n # var[i, j] is the sample variance of the (i, j) batch member of x.\n var = tfp.stats.variance(x, sample_axis=0)\n ```\n\n Notice we divide by `N` (the numpy default), which does not create `NaN`\n when `N = 1`, but is slightly biased.\n\n Args:\n x: A numeric `Tensor` holding samples.\n sample_axis: Scalar or vector `Tensor` designating axis holding samples, or\n `None` (meaning all axis hold samples).\n Default value: `0` (leftmost dimension).\n keepdims: Boolean. 
Whether to keep the sample axis as singletons.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'variance'`).\n\n Returns:\n var: A `Tensor` of same `dtype` as the `x`, and rank equal to\n `rank(x) - len(sample_axis)`\n \"\"\"\n with tf.name_scope(name or 'variance'):\n return covariance(\n x, y=None, sample_axis=sample_axis, event_axis=None, keepdims=keepdims)\n\n\ndef cumulative_variance(x, sample_axis=0, name=None):\n \"\"\"Cumulative estimates of variance.\n\n Given `N` samples of a scalar-valued random variable `X`, we can compute\n cumulative variance estimates\n\n result[i] = variance(x[0:i+1])\n\n in O(N) work and O(log(N)) depth (the length of the longest series\n of operations that are performed sequentially), with O(1) TF kernel\n invocations. This implementation also arranges to do so in a\n numerically accurate manner, i.e., without incurring a subtraction\n of floating-point numbers of size quadratic in the data `x`. The\n underlying algorithm is from [1].\n\n Args:\n x: A numeric `Tensor` holding samples.\n sample_axis: Scalar `Tensor` designating the axis holding samples.\n Other axes are treated in batch. Default value: `0` (leftmost\n dimension).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'cumulative_variance'`).\n\n Returns:\n cum_var: A `Tensor` of same shape and dtype as `x` giving\n cumulative variance estimates. The zeroth element is the\n variance of a size-1 set of samples, so 0.\n\n #### References\n [1]: Philippe Pebay. Formulas for Robust, One-Pass Parallel Computation of\n Covariances and Arbitrary-Order Statistical Moments. _Technical Report\n SAND2008-6212_, 2008.\n https://prod-ng.sandia.gov/techlib-noauth/access-control.cgi/2008/086212.pdf\n\n \"\"\"\n with tf.name_scope(name or 'cumulative_variance'):\n # At each index, we are interested in\n # - The count of items up to that index (inclusive and exclusive);\n # - The sum of items up to that index (exclusive);\n # - From which we compute the mean of items up to that index (exclusive);\n # - The residual of items up to that index (inclusive), which is\n # the variance scaled by the count of items.\n #\n # The contribution from item i to the residual is that item's\n # squared discrepancy from the mean of all preceding items (i.e.,\n # the exclusive mean at the present item), adjusted by i-1/i.\n x = tf.convert_to_tensor(x)\n size = ps.shape(x)[sample_axis]\n counts_shp = ps.one_hot(\n sample_axis, depth=ps.rank(x), on_value=size, off_value=1)\n excl_counts = tf.reshape(tf.range(size, dtype=x.dtype), shape=counts_shp)\n incl_counts = excl_counts + 1\n excl_sums = tf.cumsum(x, axis=sample_axis, exclusive=True)\n discrepancies = (excl_sums / excl_counts - x)**2\n discrepancies = tf.where(excl_counts == 0, x**2, discrepancies)\n adjustments = excl_counts / incl_counts\n # The zeroth item's residual contribution is 0, because it has no\n # other items to vary from. 
The preceding expressions, however,\n # compute 0/0 at index 0, so we mask it out here.\n adjusted = tf.where(\n ~tf.equal(excl_counts, 0), adjustments * discrepancies, 0)\n incl_residual = tf.cumsum(adjusted, axis=sample_axis)\n return incl_residual / incl_counts\n\n\ndef windowed_variance(\n x, low_indices=None, high_indices=None, axis=0, name=None):\n \"\"\"Windowed estimates of variance.\n\n Computes variances among data in the Tensor `x` along the given windows:\n\n result[i] = variance(x[low_indices[i]:high_indices[i]+1])\n\n accurately and efficiently. To wit, if K is the size of\n `low_indices` and `high_indices`, and `N` is the size of `x` along\n the given `axis`, the computation takes O(K + N) work, O(log(N))\n depth (the length of the longest series of operations that are\n performed sequentially), and only uses O(1) TensorFlow kernel\n invocations. The underlying algorithm is an adaptation of the\n streaming reduction for accurate variance computations given in [1].\n\n This function can be useful for assessing the behavior over time of\n trailing-window estimators from some iterative process, such as the\n last half of an MCMC chain.\n\n Suppose `x` has shape `Bx + [N] + E`, where the `Bx` component has\n rank `axis`, and `low_indices` and `high_indices` broadcast to shape\n `[M]`. Then each element of `low_indices` and `high_indices`\n must be between 0 and N+1, and the shape of the output will be\n `Bx + [M] + E`. Batch shape in the indices is not currently supported.\n\n The default windows are\n `[0, 1), [1, 2), [1, 3), [2, 4), [2, 5), ...`\n This corresponds to analyzing `x` as though it were streaming, for\n example successive states of an MCMC sampler, and we were interested\n in the variance of the last half of the data at each point.\n\n Args:\n x: A numeric `Tensor` holding `N` samples along the given `axis`,\n whose windowed variances are desired.\n low_indices: An integer `Tensor` defining the lower boundary\n (inclusive) of each window. Default: elementwise half of\n `high_indices`.\n high_indices: An integer `Tensor` defining the upper boundary\n (exclusive) of each window. Must be broadcast-compatible with\n `low_indices`. Default: `tf.range(1, N+1)`, i.e., N windows\n that each end in the corresponding datum from `x` (inclusive)`.\n axis: Scalar `Tensor` designating the axis holding samples. This\n is the axis of `x` along which we take windows, and therefore\n the axis that `low_indices` and `high_indices` index into.\n Other axes are treated in batch. Default value: `0` (leftmost\n dimension).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'windowed_variance'`).\n\n Returns:\n variances: A numeric `Tensor` holding the windowed variances of\n `x` along the `axis` dimension.\n\n #### References\n [1]: Philippe Pebay. Formulas for Robust, One-Pass Parallel Computation of\n Covariances and Arbitrary-Order Statistical Moments. 
_Technical Report\n SAND2008-6212_, 2008.\n https://prod-ng.sandia.gov/techlib-noauth/access-control.cgi/2008/086212.pdf\n\n \"\"\"\n with tf.name_scope(name or 'windowed_variance'):\n x = tf.convert_to_tensor(x)\n low_indices, high_indices, low_counts, high_counts = _prepare_window_args(\n x, low_indices, high_indices, axis)\n\n # We have a problem with indexing: the standard convention demands\n # the low index be inclusive, and the high index be exclusive.\n # However, tf.cumsum and cumulative_variance both include the ith\n # element in the ith result, so to implement the standard convention\n # we have to either invoke exclusive variants of the above, or\n # index off by one element. Luckily, we can do the latter\n # without indexing off the beginning, because the value we fetch\n # when low_indices[i] == 0 or high_indices[i] == 0 is irrelevant,\n # because it gets multiplied by 0 later anyway.\n # Note that exclusive cumsum doesn't work either, because we\n # allow high_indices to take the value N+1 meaning \"all the data\".\n def index_for_cumulative(indices):\n return tf.maximum(indices - 1, 0)\n cum_sums = tf.cumsum(x, axis=axis)\n low_sums = tf.gather(\n cum_sums, index_for_cumulative(low_indices), axis=axis)\n high_sums = tf.gather(\n cum_sums, index_for_cumulative(high_indices), axis=axis)\n cum_variances = cumulative_variance(x, sample_axis=axis)\n low_variances = tf.gather(\n cum_variances, index_for_cumulative(low_indices), axis=axis)\n high_variances = tf.gather(\n cum_variances, index_for_cumulative(high_indices), axis=axis)\n\n # This formula is the binary accurate variance merge from [1],\n # adapted to subtract and batched across the indexed counts, sums,\n # and variances.\n # As a reminder, [1] shows the following for multisets A, B:\n # var(A u B) = (|A|*var(A) + |B|*var(B) + correction) / |A u B|\n # where\n # correction = (mean(A) - mean(B))**2 * |A| * |B| / |A u B|\n # For each high_indices[i] and low_indices[i], if we let\n # A be the multiset x[low_indices[i]:high_indices[i]] and B\n # be the multiset x[0:low_indices[i]], then\n # var(A u B) = cum_variances[high_indices[i]] = high_variances[i]\n # var(B) = cum_variances[low_indices[i]] = low_variances[i]\n # and the below solves for var(A).\n # This formula can also be read as implementing the above variance\n # computation by \"unioning\" A u B with a notional \"negative B\"\n # multiset.\n counts = high_counts - low_counts # |A|\n discrepancies = (\n _safe_average(high_sums, high_counts) -\n _safe_average(low_sums, low_counts))**2 # (mean(A u B) - mean(B))**2\n adjustments = high_counts * (-low_counts) / counts # |A u B| * -|B| / |A|\n residuals = (high_variances * high_counts -\n low_variances * low_counts +\n adjustments * discrepancies)\n return _safe_average(residuals, counts)\n\n\ndef windowed_mean(\n x, low_indices=None, high_indices=None, axis=0, name=None):\n \"\"\"Windowed estimates of mean.\n\n Computes means among data in the Tensor `x` along the given windows:\n\n result[i] = mean(x[low_indices[i]:high_indices[i]+1])\n\n efficiently. 
To wit, if K is the size of `low_indices` and\n `high_indices`, and `N` is the size of `x` along the given `axis`,\n the computation takes O(K + N) work, O(log(N)) depth (the length of\n the longest series of operations that are performed sequentially),\n and only uses O(1) TensorFlow kernel invocations.\n\n This function can be useful for assessing the behavior over time of\n trailing-window estimators from some iterative process, such as the\n last half of an MCMC chain.\n\n Suppose `x` has shape `Bx + [N] + E`, where the `Bx` component has\n rank `axis`, and `low_indices` and `high_indices` broadcast to shape\n `[M]`. Then each element of `low_indices` and `high_indices`\n must be between 0 and N+1, and the shape of the output will be\n `Bx + [M] + E`. Batch shape in the indices is not currently supported.\n\n The default windows are\n `[0, 1), [1, 2), [1, 3), [2, 4), [2, 5), ...`\n This corresponds to analyzing `x` as though it were streaming, for\n example successive states of an MCMC sampler, and we were interested\n in the variance of the last half of the data at each point.\n\n Args:\n x: A numeric `Tensor` holding `N` samples along the given `axis`,\n whose windowed means are desired.\n low_indices: An integer `Tensor` defining the lower boundary\n (inclusive) of each window. Default: elementwise half of\n `high_indices`.\n high_indices: An integer `Tensor` defining the upper boundary\n (exclusive) of each window. Must be broadcast-compatible with\n `low_indices`. Default: `tf.range(1, N+1)`, i.e., N windows\n that each end in the corresponding datum from `x` (inclusive).\n axis: Scalar `Tensor` designating the axis holding samples. This\n is the axis of `x` along which we take windows, and therefore\n the axis that `low_indices` and `high_indices` index into.\n Other axes are treated in batch. Default value: `0` (leftmost\n dimension).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'windowed_mean'`).\n\n Returns:\n means: A numeric `Tensor` holding the windowed means of `x` along\n the `axis` dimension.\n\n \"\"\"\n with tf.name_scope(name or 'windowed_mean'):\n x = tf.convert_to_tensor(x)\n low_indices, high_indices, low_counts, high_counts = _prepare_window_args(\n x, low_indices, high_indices, axis)\n\n raw_cumsum = tf.cumsum(x, axis=axis)\n cum_sums = tf.concat(\n [tf.zeros_like(tf.gather(raw_cumsum, [0], axis=axis)), raw_cumsum],\n axis=axis)\n low_sums = tf.gather(cum_sums, low_indices, axis=axis)\n high_sums = tf.gather(cum_sums, high_indices, axis=axis)\n\n counts = high_counts - low_counts\n return _safe_average(high_sums - low_sums, counts)\n\n\ndef _prepare_window_args(x, low_indices=None, high_indices=None, axis=0):\n \"\"\"Common argument defaulting logic for windowed statistics.\"\"\"\n if high_indices is None:\n high_indices = tf.range(ps.shape(x)[axis]) + 1\n else:\n high_indices = tf.convert_to_tensor(high_indices)\n if low_indices is None:\n low_indices = high_indices // 2\n else:\n low_indices = tf.convert_to_tensor(low_indices)\n # Broadcast indices together.\n high_indices = high_indices + tf.zeros_like(low_indices)\n low_indices = low_indices + tf.zeros_like(high_indices)\n\n # TODO(axch): Support batch low and high indices. That would\n # complicate this shape munging (though tf.gather should work\n # fine).\n\n # We want to place `low_counts` and `high_counts` at the `axis`\n # position, so we reshape them to shape `[1, 1, ..., 1, N, 1, ...,\n # 1]`, where the `N` is at `axis`. 
The `counts_shp`, below,\n # is this shape.\n size = ps.size(high_indices)\n counts_shp = ps.one_hot(\n axis, depth=ps.rank(x), on_value=size, off_value=1)\n\n low_counts = tf.reshape(tf.cast(low_indices, dtype=x.dtype),\n shape=counts_shp)\n high_counts = tf.reshape(tf.cast(high_indices, dtype=x.dtype),\n shape=counts_shp)\n return low_indices, high_indices, low_counts, high_counts\n\n\ndef _safe_average(totals, counts):\n # This tf.where protects `totals` from getting a gradient signal\n # when `counts` is 0.\n safe_totals = tf.where(~tf.equal(counts, 0), totals, 0)\n return tf.where(~tf.equal(counts, 0), safe_totals / counts, 0)\n\n\ndef log_average_probs(logits, sample_axis=0, event_axis=None, keepdims=False,\n validate_args=False, name=None):\n \"\"\"Computes `log(average(to_probs(logits)))` in a numerically stable manner.\n\n The meaning of `to_probs` is controlled by the `event_axis` argument. When\n `event_axis` is `None`, `to_probs = tf.math.sigmoid` and otherwise\n `to_probs = lambda x: tf.math.log_softmax(x, axis=event_axis)`.\n\n `sample_axis` and `event_axis` should have a null intersection. This\n requirement is always verified when `validate_args` is `True`.\n\n Args:\n logits: A `float` `Tensor` representing logits.\n sample_axis: Scalar or vector `Tensor` designating axis holding samples, or\n `None` (meaning all axis hold samples).\n Default value: `0` (leftmost dimension).\n event_axis: Scalar or vector `Tensor` designating the axis representing\n categorical logits.\n Default value: `None` (i.e., Bernoulli logits).\n keepdims: Boolean. Whether to keep the sample axis as singletons.\n Default value: `False` (i.e., squeeze the reduced dimensions).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False` (i.e., do not validate args).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., `'log_average_probs'`).\n\n Returns:\n log_avg_probs: The natural log of the average of probs computed from logits.\n \"\"\"\n with tf.name_scope(name or 'average_sigmoid'):\n logits = tf.convert_to_tensor(logits, dtype_hint=tf.float32, name='logits')\n if sample_axis is not None:\n sample_axis = ps.convert_to_shape_tensor(\n sample_axis, dtype_hint=tf.int32, name='sample_axis')\n if event_axis is not None:\n event_axis = ps.convert_to_shape_tensor(\n event_axis, dtype_hint=tf.int32, name='event_axis')\n if event_axis is None:\n # log(sigmoid(x)) = log(1 / (1 + exp(-x))) = -log1p(exp(-x)) = -sp(-x)\n log_probs = -tf.math.softplus(-logits)\n else:\n sample_axis, event_axis = _log_average_probs_process_args(\n logits, validate_args, sample_axis, event_axis)\n with tf.control_dependencies(_log_average_probs_maybe_check_args(\n sample_axis, event_axis, validate_args)):\n log_probs = _log_softmax(logits, axis=event_axis)\n return reduce_logmeanexp(log_probs, axis=sample_axis, keepdims=keepdims)\n\n\n# TODO(b/137873989): Use tf.log_softmax once it correctly supports axis arg.\ndef _log_softmax(x, axis, name=None):\n \"\"\"Alternative to `tf.log_softmax` which correctly supports axis arg.\"\"\"\n with tf.name_scope(name or 'log_softmax'):\n if axis is None:\n return tf.math.log_softmax(x, axis=None, name=name)\n rank = ps.rank(axis)\n if rank == 0:\n return tf.math.log_softmax(x, axis=axis, name=name)\n if rank == 1:\n return tf.math.log_softmax(x, axis=axis[0], name=name)\n # The following handles the case when axis is a vector and which is not\n # currently supported by tf.math.log_softmax.\n x = tf.convert_to_tensor(x, dtype_hint=tf.float32, name='x')\n return x - tf.reduce_logsumexp(x, axis=axis, keepdims=True)\n\n\ndef _log_average_probs_process_args(\n logits, validate_args, sample_axis, event_axis):\n \"\"\"Processes args for `log_average_probs`.\"\"\"\n rank = ps.rank(logits)\n if sample_axis is None or validate_args:\n event_axis = ps.reshape(\n ps.non_negative_axis(event_axis, rank),\n shape=[-1])\n if sample_axis is None:\n sample_axis = ps.setdiff1d(\n ps.range(rank), event_axis)\n elif validate_args:\n sample_axis = ps.reshape(\n ps.non_negative_axis(sample_axis, rank),\n shape=[-1])\n return sample_axis, event_axis\n\n\ndef _log_average_probs_maybe_check_args(sample_axis, event_axis, validate_args):\n \"\"\"Assertions for `log_average_probs`.\"\"\"\n assertions = []\n msg = 'Arguments `sample_axis` and `event_axis` must be distinct.'\n sample_setdiff = ps.setdiff1d(sample_axis, event_axis)\n if ps.is_numpy(sample_setdiff):\n if not np.array_equal(sample_setdiff, tf.get_static_value(sample_axis)):\n raise ValueError(msg)\n elif validate_args:\n assertions.append(_assert_array_equal(\n sample_setdiff, sample_axis,\n message=msg, name='sample_setdiff_rank_check'))\n event_setdiff = ps.setdiff1d(event_axis, sample_axis)\n if ps.is_numpy(event_setdiff):\n if not np.array_equal(event_setdiff, tf.get_static_value(event_axis)):\n raise ValueError(msg)\n elif validate_args:\n assertions.append(_assert_array_equal(\n event_setdiff, event_axis,\n message=msg, name='event_setdiff_rank_check'))\n return assertions\n\n\ndef _assert_array_equal(x, y, message, name=None):\n \"\"\"TF assertion similar to checking `np.array_equal`.\"\"\"\n with tf.name_scope(name or 
'array_equal_check'):\n rank_check = assert_util.assert_equal(\n tf.rank(x), tf.rank(y),\n message=message, name='rank_check')\n shape_check = assert_util.assert_equal(\n tf.shape(x), tf.shape(y),\n message=message, name='shape_check')\n with tf.control_dependencies([rank_check]):\n with tf.control_dependencies([shape_check]):\n return assert_util.assert_equal(\n x, y, message=message, name='value_check')\n\n\ndef _is_list_like(x):\n \"\"\"Helper which returns `True` if input is `list`-like.\"\"\"\n return isinstance(x, (tuple, list))\n\n\ndef _make_list_or_1d_tensor(values):\n \"\"\"Return a list (preferred) or 1d Tensor from values, if values.ndims < 2.\"\"\"\n values = ps.convert_to_shape_tensor(values, name='values')\n values_ = tf.get_static_value(values)\n\n # Static didn't work.\n if values_ is None:\n # Cheap way to bring to at least 1d.\n return values + tf.zeros([1], dtype=values.dtype)\n\n # Static worked!\n if values_.ndim > 1:\n raise ValueError('values had > 1 dim: {}'.format(values_.shape))\n # Cheap way to bring to at least 1d.\n values_ = values_ + np.zeros([1], dtype=values_.dtype)\n return list(values_)\n\n\ndef _make_positive_axis(axis, ndims):\n \"\"\"Rectify possibly negatively axis. Prefer return Python list.\"\"\"\n axis = _make_list_or_1d_tensor(axis)\n\n ndims = ps.convert_to_shape_tensor(ndims, name='ndims', dtype=tf.int32)\n ndims_ = tf.get_static_value(ndims)\n\n if _is_list_like(axis) and ndims_ is not None:\n # Static case\n positive_axis = []\n for a in axis:\n if a < 0:\n a = ndims_ + a\n positive_axis.append(a)\n else:\n # Dynamic case\n axis = tf.convert_to_tensor(axis, name='axis', dtype=tf.int32)\n positive_axis = tf.where(axis >= 0, axis, axis + ndims)\n\n return positive_axis\n\n\ndef _squeeze(x, axis):\n \"\"\"A version of squeeze that works with dynamic axis.\"\"\"\n x = tf.convert_to_tensor(x, name='x')\n if axis is None:\n return tf.squeeze(x, axis=None)\n axis = ps.convert_to_shape_tensor(axis, name='axis', dtype=tf.int32)\n axis = _make_list_or_1d_tensor(axis) # Ensure at least 1d.\n keep_axis = ps.setdiff1d(ps.range(0, ps.rank(x)), axis)\n return tf.reshape(x, ps.gather(ps.shape(x), keep_axis))\n"
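Several of the `tfp.stats` helpers defined in the source above (notably `auto_correlation`, `windowed_mean`, and `windowed_variance`) carry no usage example in their docstrings. The sketch below shows typical calls, assuming a recent `tensorflow_probability`; the tensor shapes and names are illustrative only and are not part of the dataset record.

```python
# Hedged usage sketch for the sample-statistics helpers defined above.
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.random.normal(shape=(100, 3))             # 100 samples of a 3-vector

var = tfp.stats.variance(x, sample_axis=0)        # shape [3]
cov = tfp.stats.covariance(x, sample_axis=0)      # shape [3, 3]
corr = tfp.stats.correlation(x, sample_axis=0)    # shape [3, 3], ones on the diagonal

series = x[:, 0]                                  # a single length-100 series
acf = tfp.stats.auto_correlation(series, max_lags=10)    # shape [11]
trailing_means = tfp.stats.windowed_mean(series)          # default "last half" windows, shape [100]
trailing_vars = tfp.stats.windowed_variance(series)       # same default windows, shape [100]
```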
] | [
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.linalg.diag_part",
"tensorflow.compat.v2.function",
"tensorflow.compat.v2.einsum",
"tensorflow.compat.v2.linalg.matmul",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.linalg.matvec",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.linspace"
],
[
"tensorflow.compat.v2.linalg.diag_part",
"numpy.linspace",
"numpy.meshgrid",
"numpy.reshape",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.ones",
"numpy.float64",
"tensorflow.compat.v2.nest.flatten",
"numpy.random.uniform",
"numpy.array"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.executing_eagerly",
"numpy.abs",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.reduce_variance",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.math.reduce_variance",
"tensorflow.compat.v2.minimum",
"numpy.ones",
"tensorflow.compat.v2.zeros",
"numpy.random.randn",
"tensorflow.compat.v2.ones",
"numpy.random.uniform",
"numpy.array",
"tensorflow.compat.v1.placeholder_with_default"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.math.log1p",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.math.expm1"
],
[
"tensorflow.compat.v2.cumsum",
"tensorflow.compat.v2.argsort",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.sort",
"tensorflow.compat.v2.compat.dimension_value"
],
[
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.less",
"tensorflow.compat.v2.math.ndtri",
"tensorflow.compat.v2.math.multiply_no_nan",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.math.log1p",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.constant"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.nest.is_nested",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.TensorShape"
],
[
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.name_scope"
],
[
"tensorflow.compat.v2.add_n",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.python.util.deprecation.silence",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.math.is_finite",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.nest.flatten",
"numpy.array",
"tensorflow.compat.v2.constant"
],
[
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.math.lgamma",
"tensorflow.compat.v2.math.softmax",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.reduce_logsumexp",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.math.log_softmax",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.compat.dimension_value",
"tensorflow.python.util.deprecation.deprecated"
],
[
"numpy.arange",
"numpy.array",
"tensorflow.compat.v2.zeros"
],
[
"numpy.array",
"numpy.finfo"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.reduce_all",
"tensorflow.compat.v2.size",
"tensorflow.compat.v2.clip_by_value",
"tensorflow.compat.v2.cast",
"tensorflow.python.util.nest.map_structure_up_to",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.stop_gradient",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.matmul"
],
[
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.name_scope"
],
[
"tensorflow.compat.v2.exp",
"numpy.ones_like",
"numpy.isfinite",
"tensorflow.compat.v2.rank",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.constant"
],
[
"tensorflow.compat.v2.is_tensor",
"numpy.issctype",
"tensorflow.compat.v2.as_dtype",
"numpy.dtype",
"numpy.finfo",
"numpy.ones",
"tensorflow.compat.v2.convert_to_tensor",
"numpy.iinfo",
"tensorflow.compat.v2.nest.flatten"
],
[
"tensorflow.compat.v2.nest.assert_same_structure",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.nest.map_structure"
],
[
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.python.framework.tensor_util.constant_value_as_shape",
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.framework.tensor_shape.as_shape"
],
[
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.keras.layers.Flatten",
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.keras.activations.get",
"tensorflow.compat.v2.clip_by_value",
"tensorflow.compat.v2.keras.Model",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.keras.layers.add",
"tensorflow.compat.v2.squeeze",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.nn.softplus",
"tensorflow.compat.v2.while_loop",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.tensor_scatter_nd_update",
"tensorflow.compat.v2.zeros",
"numpy.zeros",
"tensorflow.compat.v2.less",
"tensorflow.compat.v2.keras.layers.ELU",
"tensorflow.compat.v2.keras.layers.Dropout",
"tensorflow.compat.v2.round",
"tensorflow.compat.v2.keras.layers.Concatenate",
"tensorflow.compat.v2.keras.layers.multiply",
"tensorflow.compat.v2.split",
"tensorflow.compat.v2.assert_equal",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.keras.layers.Dense",
"tensorflow.compat.v2.keras.layers.Input",
"tensorflow.compat.v2.sigmoid"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.function",
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.nest.is_nested",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.debugging.assert_less_equal",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.TensorShape"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.einsum",
"numpy.ones",
"numpy.concatenate",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.linalg.matvec",
"tensorflow.compat.v2.zeros",
"numpy.random.randn",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.reduce_sum",
"numpy.array"
],
[
"numpy.triu_indices",
"numpy.linalg.inv",
"numpy.eye",
"numpy.random.uniform",
"numpy.ndindex",
"numpy.array"
],
[
"tensorflow.compat.v2.cumsum",
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.rank",
"tensorflow.compat.v2.linalg.cholesky",
"tensorflow.compat.v2.minimum",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.squeeze",
"tensorflow.compat.v2.math.log_softmax",
"tensorflow.compat.v2.math.softplus",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.signal.fft",
"numpy.zeros",
"numpy.log",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.math.conj",
"tensorflow.compat.v2.reduce_logsumexp",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.maximum",
"tensorflow.compat.v2.cast",
"numpy.float64",
"tensorflow.compat.v2.signal.ifft"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andybi7676/s3prl | [
"0e5acc5d499a629f946d561d87e8924ba3eb004b"
] | [
"s3prl/downstream/voxceleb1/expert.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ expert.py ]\n# Synopsis [ the phone linear downstream wrapper ]\n# Author [ S3PRL ]\n# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\n\n###############\n# IMPORTATION #\n###############\nimport os\nimport math\nimport torch\nimport random\nimport pathlib\n#-------------#\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, DistributedSampler\nfrom torch.distributed import is_initialized\nfrom torch.nn.utils.rnn import pad_sequence\n#-------------#\nfrom ..model import *\nfrom .dataset import SpeakerClassifiDataset\nfrom argparse import Namespace\nfrom pathlib import Path\n\n\nclass DownstreamExpert(nn.Module):\n \"\"\"\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n \"\"\"\n\n def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):\n super(DownstreamExpert, self).__init__()\n self.upstream_dim = upstream_dim\n self.downstream = downstream_expert\n self.datarc = downstream_expert['datarc']\n self.modelrc = downstream_expert['modelrc']\n\n root_dir = Path(self.datarc['file_path'])\n\n self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])\n self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])\n self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])\n \n model_cls = eval(self.modelrc['select'])\n model_conf = self.modelrc.get(self.modelrc['select'], {})\n self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])\n self.model = model_cls(\n input_dim = self.modelrc['projector_dim'],\n output_dim = self.train_dataset.speaker_num,\n **model_conf,\n )\n self.objective = nn.CrossEntropyLoss()\n \n self.logging = os.path.join(expdir, 'log.log')\n self.register_buffer('best_score', torch.zeros(1))\n\n def _get_train_dataloader(self, dataset):\n sampler = DistributedSampler(dataset) if is_initialized() else None\n return DataLoader(\n dataset, batch_size=self.datarc['train_batch_size'], \n shuffle=(sampler is None), sampler=sampler,\n num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n def _get_eval_dataloader(self, dataset):\n return DataLoader(\n dataset, batch_size=self.datarc['eval_batch_size'],\n shuffle=False, num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n def get_train_dataloader(self):\n return self._get_train_dataloader(self.train_dataset)\n\n def get_dev_dataloader(self):\n return self._get_eval_dataloader(self.dev_dataset)\n\n def get_test_dataloader(self):\n return self._get_eval_dataloader(self.test_dataset)\n\n # Interface\n def get_dataloader(self, mode):\n return eval(f'self.get_{mode}_dataloader')()\n\n # Interface\n def forward(self, mode, features, labels, records, **kwargs):\n device = features[0].device\n features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)\n features = pad_sequence(features, batch_first=True)\n features = self.projector(features)\n predicted, _ = self.model(features, features_len)\n\n labels = torch.LongTensor(labels).to(features.device)\n loss = self.objective(predicted, labels)\n\n predicted_classid = predicted.max(dim=-1).indices\n records['acc'] += (predicted_classid == 
labels).view(-1).cpu().float().tolist()\n records['loss'].append(loss.item())\n\n return loss\n\n # interface\n def log_records(self, mode, records, logger, global_step, **kwargs):\n save_names = []\n for key, values in records.items():\n average = torch.FloatTensor(values).mean().item()\n logger.add_scalar(\n f'voxceleb1/{mode}-{key}',\n average,\n global_step=global_step\n )\n with open(self.logging, 'a') as f:\n if key == 'acc':\n f.write(f'{mode} at step {global_step}: {average}\\n')\n if mode == 'dev' and average > self.best_score:\n self.best_score = torch.ones(1) * average\n f.write(f'New best on {mode} at step {global_step}: {average}\\n')\n save_names.append(f'{mode}-best.ckpt')\n return save_names\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.utils.data.DistributedSampler",
"torch.ones",
"torch.zeros",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.DataLoader",
"torch.distributed.is_initialized",
"torch.nn.Linear",
"torch.FloatTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
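A minimal, self-contained sketch of the distributed-aware dataloader pattern used in the expert.py blob above: a DistributedSampler is attached only when torch.distributed has an initialized process group, and shuffle is disabled whenever an explicit sampler is supplied (DataLoader rejects the combination). ToyDataset, the batch size, and the helper name are illustrative; only the torch calls come from the record above.

    # Sketch of the sampler-selection pattern from the DownstreamExpert wrapper above.
    # ToyDataset and make_train_loader are illustrative names; the torch APIs are real.
    import torch
    from torch.utils.data import DataLoader, Dataset, DistributedSampler
    from torch.distributed import is_initialized

    class ToyDataset(Dataset):
        def __init__(self, n=32):
            self.data = torch.randn(n, 10)
        def __len__(self):
            return len(self.data)
        def __getitem__(self, idx):
            return self.data[idx]

    def make_train_loader(dataset, batch_size=4):
        # Use a DistributedSampler only when a process group exists; otherwise
        # fall back to plain shuffling in a single process.
        sampler = DistributedSampler(dataset) if is_initialized() else None
        return DataLoader(dataset, batch_size=batch_size,
                          shuffle=(sampler is None), sampler=sampler)

    loader = make_train_loader(ToyDataset())
    print(next(iter(loader)).shape)  # torch.Size([4, 10]) in a single-process run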
arielclj/singa-easy | [
"fd4bc601a5501062936f874df14711a3cefa1346"
] | [
"singa_easy/modules/mod_modelslicing/utils/lr_scheduler.py"
] | [
"from torch.optim.lr_scheduler import _LRScheduler\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\nclass GradualWarmupScheduler(_LRScheduler):\n \"\"\" Gradually warm-up(increasing) learning rate in optimizer.\n Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n multiplier: target learning rate = base lr * multiplier\n warmup_epoch: target learning rate is linearly reached at the warmup_epoch\n scheduler: scheduler used after warmup_epoch (eg. ReduceLROnPlateau)\n \"\"\"\n\n def __init__(self, optimizer, warmup_epoch, multiplier=1.0, scheduler=None):\n assert multiplier > 1., 'multiplier should be greater than 1.'\n self.multiplier = multiplier\n self.warmup_epoch = warmup_epoch\n self.scheduler = scheduler\n self.finish_warmup = False\n super().__init__(optimizer)\n\n def get_lr(self):\n if self.last_epoch > self.warmup_epoch:\n if self.scheduler:\n if not self.finish_warmup:\n self.scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]\n self.finish_warmup = True\n return self.scheduler.get_lr()\n return [base_lr * self.multiplier for base_lr in self.base_lrs]\n\n return [base_lr*((self.multiplier-1.)*self.last_epoch/self.warmup_epoch+1.) for base_lr in self.base_lrs]\n\n def step(self, epoch=None, metrics=None):\n if self.finish_warmup and self.scheduler:\n if epoch is None:\n self.scheduler.step(None)\n else:\n self.scheduler.step(epoch - self.warmup_epoch)\n else:\n return super(GradualWarmupScheduler, self).step(epoch)\n\n\nif __name__ == '__main__':\n import torch\n v = torch.zeros(10, requires_grad=True)\n optim = torch.optim.SGD([v], lr=0.01)\n\n scheduler = CosineAnnealingLR(optim, 95)\n scheduler = GradualWarmupScheduler(optim, multiplier=10, warmup_epoch=5, scheduler=scheduler)\n\n for epoch in range(0, 100):\n scheduler.step(epoch)\n print(epoch, optim.param_groups[0]['lr'])\n\n"
] | [
[
"torch.optim.SGD",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
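The lr_scheduler record above implements warmup-then-decay by hand. As a point of comparison, a similar schedule can be expressed with the stock LinearLR/SequentialLR schedulers; this is a sketch under the assumption that the installed PyTorch is recent enough to ship them (>= 1.10), and its semantics differ slightly from GradualWarmupScheduler (it ramps up to the base learning rate rather than to base_lr * multiplier).

    # Warmup followed by cosine annealing using stock PyTorch schedulers.
    # Assumes PyTorch >= 1.10 for LinearLR and SequentialLR.
    import torch
    from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR

    v = torch.zeros(10, requires_grad=True)
    optim = torch.optim.SGD([v], lr=0.1)

    warmup = LinearLR(optim, start_factor=0.1, total_iters=5)  # ramp up over 5 epochs
    cosine = CosineAnnealingLR(optim, T_max=95)                # then decay for 95 epochs
    scheduler = SequentialLR(optim, schedulers=[warmup, cosine], milestones=[5])

    for epoch in range(100):
        optim.step()       # placeholder for a real training step
        scheduler.step()
        if epoch % 20 == 0:
            print(epoch, optim.param_groups[0]['lr'])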
ThomasHoppe/pyflux | [
"297f2afc2095acd97c12e827dd500e8ea5da0c0f",
"297f2afc2095acd97c12e827dd500e8ea5da0c0f",
"297f2afc2095acd97c12e827dd500e8ea5da0c0f"
] | [
"pyflux/arma/tests/test_arima_laplace.py",
"pyflux/results.py",
"pyflux/gas/tests/gas_llt_tests_skewt.py"
] | [
"import numpy as np\nfrom pyflux.arma import ARIMA\nfrom pyflux.families import Laplace\n\nnoise = np.random.normal(0,1,100)\ndata = np.zeros(100)\n\nfor i in range(1,len(data)):\n data[i] = 0.9*data[i-1] + noise[i]\n\ndef test_no_terms():\n \"\"\"\n Tests an ARIMA model with no AR or MA terms, and that\n the latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = ARIMA(data=data, ar=0, ma=0, family=Laplace())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 2)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_couple_terms():\n \"\"\"\n Tests an ARIMA model with 1 AR and 1 MA term and that\n the latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = ARIMA(data=data, ar=1, ma=1, family=Laplace())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_couple_terms_integ():\n \"\"\"\n Tests an ARIMA model with 1 AR and 1 MA term, integrated once, and that\n the latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = ARIMA(data=data, ar=1, ma=1, integ=1, family=Laplace())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_predict_length():\n \"\"\"\n Tests that the prediction dataframe length is equal to the number of steps h\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(model.predict(h=5).shape[0] == 5)\n\ndef test_predict_is_length():\n \"\"\"\n Tests that the prediction IS dataframe length is equal to the number of steps h\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(model.predict_is(h=5).shape[0] == 5)\n\ndef test_predict_nans():\n \"\"\"\n Tests that the predictions are not nans\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)\n\ndef test_predict_is_nans():\n \"\"\"\n Tests that the in-sample predictions are not nans\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)\n\ndef test_predict_nonconstant():\n \"\"\"\n We should not really have predictions that are constant (should be some difference)...\n This captures bugs with the predict function not iterating forward\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n predictions = model.predict(h=10, intervals=False)\n assert(not np.all(predictions.values==predictions.values[0]))\n \ndef test_predict_is_nonconstant():\n \"\"\"\n We should not really have predictions that are constant (should be some difference)...\n This captures bugs with the predict function not iterating forward\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n predictions = model.predict_is(h=10, intervals=False)\n assert(not np.all(predictions.values==predictions.values[0]))\n \ndef test_predict_intervals():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n 
predictions = model.predict(h=10, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_is_intervals():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_intervals_bbvi():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n predictions = model.predict(h=10, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_is_intervals_bbvi():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_intervals_mh():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('M-H', nsims=200, quiet_progress=True)\n predictions = model.predict(h=10, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_is_intervals_mh():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('M-H', nsims=200, quiet_progress=True)\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% 
Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_sample_model():\n \"\"\"\n Tests sampling function\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n sample = model.sample(nsims=100)\n assert(sample.shape[0]==100)\n assert(sample.shape[1]==len(data)-2)\n\ndef test_ppc():\n \"\"\"\n Tests PPC value\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n p_value = model.ppc()\n assert(0.0 <= p_value <= 1.0)\n",
"import numpy as np\n\nfrom .output import TablePrinter\nfrom .tests import find_p_value\nfrom .inference import norm_post_sim\n\nclass Results(object):\n\n def __init__(self):\n pass\n\n\nclass MLEResults(Results):\n\n def __init__(self,data_name,X_names,model_name,model_type,latent_variables, \n results,data,index, multivariate_model,objective_object,method,\n z_hide,max_lag,ihessian=None,signal=None,scores=None,states=None,\n states_var=None):\n\n self.data_name = data_name\n self.X_names = X_names\n self.max_lag = max_lag\n self.model_name = model_name\n self.model_type = model_type\n self.z = latent_variables\n self.z_values = latent_variables.get_z_values()\n self.results = results\n self.method = method\n\n self.ihessian = ihessian\n self.scores = scores\n self.states = states\n self.states_var = states_var\n self.data = data\n self.index = index\n self.signal = signal\n self.multivariate_model = multivariate_model\n self.z_hide = int(z_hide)\n\n if self.multivariate_model is True:\n self.data_length = self.data[0].shape[0]\n self.data_name = \",\".join(self.data_name)\n else:\n self.data_length = self.data.shape[0]\n\n self.objective_object = objective_object\n\n if self.method == 'MLE' or self.method == 'OLS':\n self.loglik = -self.objective_object(self.z_values)\n self.aic = 2*len(self.z_values)+2*self.objective_object(self.z_values)\n self.bic = 2*self.objective_object(self.z_values) + len(self.z_values)*np.log(self.data_length)\n elif self.method == 'PML':\n self.aic = 2*len(self.z_values)+2*self.objective_object(self.z_values)\n self.bic = 2*self.objective_object(self.z_values) + len(self.z_values)*np.log(self.data_length)\n\n if self.model_type in ['LLT','LLEV']:\n self.rounding_points = 10\n else:\n self.rounding_points = 4\n\n def __str__(self):\n if self.method == 'MLE':\n print(\"MLE Results Object\")\n elif self.method == 'OLS':\n print(\"OLS Results Object\")\n else:\n print(\"PML Results Object\")\n print(\"==========================\")\n print(\"Dependent variable: \" + self.data_name)\n print(\"Regressors: \" + str(self.X_names))\n print(\"==========================\")\n print(\"Latent Variable Attributes: \")\n if self.ihessian is not None:\n print(\".ihessian: Inverse Hessian\") \n print(\".z : LatentVariables() object\")\n if self.results is not None:\n print(\".results : optimizer results\")\n print(\"\")\n print(\"Implied Model Attributes: \")\n print(\".aic: Akaike Information Criterion\") \n print(\".bic: Bayesian Information Criterion\") \n print(\".data: Model Data\") \n print(\".index: Model Index\")\n if self.method == 'MLE' or self.method == 'OLS':\n print(\".loglik: Loglikelihood\") \n if self.scores is not None:\n print(\".scores: Model Scores\") \n if self.signal is not None:\n print(\".signal: Model Signal\") \n if self.states is not None:\n print(\".states: Model States\") \n if self.states_var is not None:\n print(\".states_var: Model State Variances\") \n print(\".results : optimizer results\")\n print(\"\") \n print(\"Methods: \")\n print(\".summary() : printed results\")\n return(\"\")\n\n def summary(self, transformed=True):\n if self.ihessian is not None:\n return self.summary_with_hessian(transformed)\n else:\n return self.summary_without_hessian()\n\n def summary_with_hessian(self, transformed=True):\n\n ses = np.power(np.abs(np.diag(self.ihessian)),0.5)\n t_z = self.z.get_z_values(transformed=True)\n t_p_std = ses.copy() # vector for transformed standard errors\n\n # Create transformed variables\n # for k in range(len(t_z)-self.z_hide):\n # 
z_temp = (self.z_values[k]/float(ses[k]))\n # t_p_std[k] = t_z[k] / z_temp\n\n data = []\n\n for i in range(len(self.z.z_list)-int(self.z_hide)):\n if self.z.z_list[i].prior.transform == np.array:\n data.append({\n 'z_name': self.z.z_list[i].name, \n 'z_value':np.round(self.z.z_list[i].prior.transform(self.z_values[i]),self.rounding_points), \n 'z_std': np.round(t_p_std[i],self.rounding_points),\n 'z_z': np.round(t_z[i]/float(t_p_std[i]),self.rounding_points),\n 'z_p': np.round(find_p_value(t_z[i]/float(t_p_std[i])),self.rounding_points),\n 'ci': \"(\" + str(np.round(t_z[i] - t_p_std[i]*1.96,self.rounding_points)) + \" | \" + str(np.round(t_z[i] + t_p_std[i]*1.96,self.rounding_points)) + \")\"})\n else:\n if transformed is True:\n data.append({\n 'z_name': self.z.z_list[i].name, \n 'z_value':np.round(self.z.z_list[i].prior.transform(self.z_values[i]),self.rounding_points)}) \n else:\n data.append({\n 'z_name': self.z.z_list[i].prior.itransform_name + '(' + self.z.z_list[i].name + ')', \n 'z_value':np.round(self.z_values[i],self.rounding_points), \n 'z_std': np.round(t_p_std[i],self.rounding_points),\n 'z_z': np.round(t_z[i]/float(t_p_std[i]),self.rounding_points),\n 'z_p': np.round(find_p_value(t_z[i]/float(t_p_std[i])),self.rounding_points),\n 'ci': \"(\" + str(np.round(t_z[i] - t_p_std[i]*1.96,self.rounding_points)) + \" | \" + str(np.round(t_z[i] + t_p_std[i]*1.96,self.rounding_points)) + \")\"}) \n \n fmt = [\n ('Latent Variable', 'z_name', 40),\n ('Estimate', 'z_value', 10),\n ('Std Error', 'z_std', 10),\n ('z', 'z_z', 8),\n ('P>|z|', 'z_p', 8),\n ('95% C.I.', 'ci', 25)\n ]\n\n model_details = []\n\n model_fmt = [\n (self.model_name, 'model_details', 55),\n ('', 'model_results', 50)\n ]\n\n if self.method == 'MLE' or self.method == 'OLS' :\n obj_desc = \"Log Likelihood: \" + str(np.round(-self.objective_object(self.z_values),4))\n else:\n obj_desc = \"Unnormalized Log Posterior: \" + str(np.round(-self.objective_object(self.z_values),4))\n\n model_details.append({'model_details': 'Dependent Variable: ' + str(self.data_name), \n 'model_results': 'Method: ' + str(self.method)})\n model_details.append({'model_details': 'Start Date: ' + str(self.index[self.max_lag]),\n 'model_results': obj_desc})\n model_details.append({'model_details': 'End Date: ' + str(self.index[-1]),\n 'model_results': 'AIC: ' + str(np.round(2*len(self.z_values)+2*self.objective_object(self.z_values),4))})\n model_details.append({'model_details': 'Number of observations: ' + str(self.data_length),\n 'model_results': 'BIC: ' + str(np.round(2*self.objective_object(self.z_values) + len(self.z_values)*np.log(self.data_length),4))})\n\n\n print( TablePrinter(model_fmt, ul='=')(model_details) )\n print(\"=\"*106)\n print( TablePrinter(fmt, ul='=')(data) )\n print(\"=\"*106)\n if 'Skewt' in self.model_name:\n print(\"WARNING: Skew t distribution is not well-suited for MLE or MAP inference\")\n print(\"Workaround 1: Use a t-distribution instead for MLE/MAP\")\n print(\"Workaround 2: Use M-H or BBVI inference for Skew t distribution\")\n\n def summary_without_hessian(self):\n\n t_z = self.z.get_z_values(transformed=True)\n\n print (\"Hessian not invertible! 
Consider a different model specification.\")\n print (\"\") \n\n data = []\n\n for i in range(len(self.z.z_list)):\n data.append({'z_name': self.z.z_list[i].name, 'z_value':np.round(self.z.z_list[i].prior.transform(self.results.x[i]),4)})\n\n fmt = [\n ('Latent Variable', 'z_name', 40),\n ('Estimate', 'z_value', 10)\n ]\n\n model_details = []\n\n model_fmt = [\n (self.model_name, 'model_details', 55),\n ('', 'model_results', 50)\n ]\n\n if self.method == 'MLE':\n obj_desc = \"Log Likelihood: \" + str(np.round(-self.objective_object(self.results.x),4))\n else:\n obj_desc = \"Unnormalized Log Posterior: \" + str(np.round(-self.objective_object(self.results.x),4))\n\n model_details.append({'model_details': 'Dependent Variable: ' + self.data_name, \n 'model_results': 'Method: ' + str(self.method)})\n model_details.append({'model_details': 'Start Date: ' + str(self.index[self.max_lag]),\n 'model_results': obj_desc})\n model_details.append({'model_details': 'End Date: ' + str(self.index[-1]),\n 'model_results': 'AIC: ' + str(self.aic)})\n model_details.append({'model_details': 'Number of observations: ' + str(self.data_length),\n 'model_results': 'BIC: ' + str(self.bic)})\n\n\n print( TablePrinter(model_fmt, ul='=')(model_details) )\n print(\"=\"*106)\n print( TablePrinter(fmt, ul='=')(data) )\n print(\"=\"*106)\n if 'Skewt' in self.model_name:\n print(\"WARNING: Skew t distribution is not well-suited for MLE or MAP inference\")\n print(\"Workaround 1: Use a t-distribution instead for MLE/MAP\")\n print(\"Workaround 2: Use M-H or BBVI inference for Skew t distribution\")\n\n\nclass BBVIResults(Results):\n\n def __init__(self, data_name, X_names, model_name, model_type, latent_variables, \n data,index, multivariate_model, objective_object, method, z_hide, max_lag, ses,\n signal=None, scores=None, elbo_records=None, states=None, states_var=None):\n\n self.data_name = data_name\n self.X_names = X_names\n self.max_lag = max_lag\n self.model_name = model_name\n self.model_type = model_type\n self.z = latent_variables\n self.method = method\n\n self.ses = ses\n self.scores = scores\n self.states = states\n self.states_var = states_var\n self.data = data\n self.index = index\n self.signal = signal\n self.multivariate_model = multivariate_model\n self.z_hide = z_hide\n self.elbo_records = elbo_records\n\n if self.multivariate_model is True:\n self.data_length = self.data[0].shape[0]\n self.data_name = \",\".join(self.data_name)\n else:\n self.data_length = self.data.shape[0]\n\n z_values = self.z.get_z_values(transformed=False)\n\n self.objective_object = objective_object\n self.aic = 2*len(z_values)+2*self.objective_object(z_values)\n self.bic = 2*self.objective_object(z_values) + len(z_values)*np.log(self.data_length)\n\n if self.model_type in ['LLT','LLEV']:\n self.rounding_points = 10\n else:\n self.rounding_points = 4\n\n def __str__(self):\n print(\"BBVI Results Object\")\n print(\"==========================\")\n print(\"Dependent variable: \" + self.data_name)\n print(\"Regressors: \" + str(self.X_names))\n print(\"==========================\")\n print(\"Latent Variables Attributes: \")\n print(\".z : LatentVariables() object\")\n print(\".results : optimizer results\")\n print(\"\")\n print(\"Implied Model Attributes: \")\n print(\".aic: Akaike Information Criterion\") \n print(\".bic: Bayesian Information Criterion\") \n print(\".data: Model Data\") \n print(\".index: Model Index\")\n if self.scores is not None:\n print(\".scores: Model Scores\") \n if self.signal is not None:\n 
print(\".signal: Model Signal\") \n if self.states is not None:\n print(\".states: Model States\") \n if self.states_var is not None:\n print(\".states_var: Model State Variances\") \n print(\"\") \n print(\"Methods: \")\n print(\".summary() : printed results\")\n return(\"\")\n\n def plot_elbo(self, figsize=(15,7)):\n \"\"\"\n Plots the ELBO progress (if present)\n \"\"\"\n import matplotlib.pyplot as plt\n\n plt.figure(figsize=figsize)\n plt.plot(self.elbo_records)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"ELBO\")\n plt.show()\n\n def summary(self, transformed=True):\n ihessian = np.diag(np.power(np.exp(self.ses),2))\n z_values = self.z.get_z_values(transformed=False)\n chain, mean_est, median_est, upper_95_est, lower_95_est = norm_post_sim(z_values,ihessian)\n\n if transformed is True:\n for k in range(len(chain)):\n chain[k] = self.z.z_list[k].prior.transform(chain[k])\n mean_est[k] = self.z.z_list[k].prior.transform(mean_est[k])\n median_est[k] = self.z.z_list[k].prior.transform(median_est[k])\n upper_95_est[k] = self.z.z_list[k].prior.transform(upper_95_est[k])\n lower_95_est[k] = self.z.z_list[k].prior.transform(lower_95_est[k]) \n\n mean_est = np.array(mean_est)\n self.chains = chain[:]\n\n data = []\n\n for i in range(len(self.z.z_list)-int(self.z_hide)):\n data.append({\n 'z_name': self.z.z_list[i].name, \n 'z_mean': np.round(mean_est[i],self.rounding_points),\n 'z_median': np.round(median_est[i],self.rounding_points),\n 'ci': \"(\" + str(np.round(lower_95_est[i],self.rounding_points)) + \" | \" + str(np.round(upper_95_est[i],self.rounding_points)) + \")\"\n }) \n\n fmt = [\n ('Latent Variable','z_name',40),\n ('Median','z_median',18),\n ('Mean', 'z_mean', 18),\n ('95% Credibility Interval','ci',25)]\n\n model_details = []\n\n model_fmt = [\n (self.model_name, 'model_details', 55),\n ('', 'model_results', 50)\n ]\n\n if self.method == 'MLE':\n obj_desc = \"Log Likelihood: \" + str(np.round(-self.objective_object(z_values),4))\n else:\n obj_desc = \"Unnormalized Log Posterior: \" + str(np.round(-self.objective_object(z_values),4))\n\n model_details.append({'model_details': 'Dependent Variable: ' + self.data_name, \n 'model_results': 'Method: ' + str(self.method)})\n model_details.append({'model_details': 'Start Date: ' + str(self.index[self.max_lag]),\n 'model_results': obj_desc})\n model_details.append({'model_details': 'End Date: ' + str(self.index[-1]),\n 'model_results': 'AIC: ' + str(self.aic)})\n model_details.append({'model_details': 'Number of observations: ' + str(self.data_length),\n 'model_results': 'BIC: ' + str(self.bic)})\n\n print( TablePrinter(model_fmt, ul='=')(model_details) )\n print(\"=\"*106)\n print( TablePrinter(fmt, ul='=')(data) )\n print(\"=\"*106)\n\n\nclass BBVISSResults(Results):\n\n def __init__(self,data_name,X_names,model_name,model_type,latent_variables, \n data,index,multivariate_model,objective,method,\n z_hide,max_lag,ses,signal=None,scores=None,states=None,\n states_var=None,elbo_records=None):\n\n self.data_name = data_name\n self.X_names = X_names\n self.max_lag = max_lag\n self.model_name = model_name\n self.model_type = model_type\n self.z = latent_variables\n self.method = method\n\n self.ses = ses\n self.scores = scores\n self.states = states\n self.states_var = states_var\n self.data = data\n self.index = index\n self.signal = signal\n self.multivariate_model = multivariate_model\n self.z_hide = z_hide\n self.elbo_records = elbo_records\n\n if self.multivariate_model is True:\n self.data_length = self.data[0].shape[0]\n self.data_name 
= \",\".join(self.data_name)\n else:\n self.data_length = self.data.shape[0]\n\n z_values = self.z.get_z_values(transformed=False)\n\n self.objective = objective\n self.aic = 2*len(z_values)+2*self.objective\n self.bic = 2*self.objective + len(z_values)*np.log(self.data_length)\n\n if self.model_type in ['LLT','LLEV']:\n self.rounding_points = 10\n else:\n self.rounding_points = 4\n\n def __str__(self):\n print(\"BBVI Results Object\")\n print(\"==========================\")\n print(\"Dependent variable: \" + self.data_name)\n print(\"Regressors: \" + str(self.X_names))\n print(\"==========================\")\n print(\"Latent Variable Attributes: \")\n print(\".z : LatentVariables() object\")\n print(\"\")\n print(\"Implied Model Attributes: \")\n print(\".aic: Akaike Information Criterion\") \n print(\".bic: Bayesian Information Criterion\") \n print(\".data: Model Data\") \n print(\".index: Model Index\")\n print(\".objective: Unnormalized Log Posterior\")\n if self.scores is not None:\n print(\".scores: Model Scores\") \n if self.signal is not None:\n print(\".signal: Model Signal\") \n if self.states is not None:\n print(\".states: Model States\") \n if self.states_var is not None:\n print(\".states_var: Model State Variances\") \n print(\"\") \n print(\"Methods: \")\n print(\".summary() : printed results\")\n return(\"\")\n\n def plot_elbo(self, figsize=(15,7)):\n \"\"\"\n Plots the ELBO progress (if present)\n \"\"\"\n import matplotlib.pyplot as plt\n\n plt.figure(figsize=figsize)\n plt.plot(self.elbo_records)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"ELBO\")\n plt.show()\n\n def summary(self, transformed=True):\n ihessian = np.diag(np.power(np.exp(self.ses),2))\n z_values = self.z.get_z_values(transformed=False)\n chain, mean_est, median_est, upper_95_est, lower_95_est = norm_post_sim(z_values,ihessian)\n\n if transformed is True:\n for k in range(len(z_values)):\n chain[k] = self.z.z_list[k].prior.transform(chain[k])\n mean_est[k] = self.z.z_list[k].prior.transform(mean_est[k])\n median_est[k] = self.z.z_list[k].prior.transform(median_est[k])\n upper_95_est[k] = self.z.z_list[k].prior.transform(upper_95_est[k])\n lower_95_est[k] = self.z.z_list[k].prior.transform(lower_95_est[k]) \n\n mean_est = np.array(mean_est)\n self.chains = chain[:]\n\n data = []\n\n for i in range(len(self.z.z_list)-int(self.z_hide)):\n data.append({\n 'z_name': self.z.z_list[i].name, \n 'z_mean': np.round(mean_est[i],self.rounding_points),\n 'z_median': np.round(median_est[i],self.rounding_points),\n 'ci': \"(\" + str(np.round(lower_95_est[i],self.rounding_points)) + \" | \" + str(np.round(upper_95_est[i],self.rounding_points)) + \")\"\n }) \n\n fmt = [\n ('Latent Variable','z_name',40),\n ('Median','z_median',18),\n ('Mean', 'z_mean', 18),\n ('95% Credibility Interval','ci',25)]\n\n model_details = []\n\n model_fmt = [\n (self.model_name, 'model_details', 55),\n ('', 'model_results', 50)\n ]\n\n obj_desc = \"Unnormalized Log Posterior: \" + str(np.round(-self.objective,4))\n\n model_details.append({'model_details': 'Dependent Variable: ' + self.data_name, \n 'model_results': 'Method: ' + str(self.method)})\n model_details.append({'model_details': 'Start Date: ' + str(self.index[self.max_lag]),\n 'model_results': obj_desc})\n model_details.append({'model_details': 'End Date: ' + str(self.index[-1]),\n 'model_results': 'AIC: ' + str(self.aic)})\n model_details.append({'model_details': 'Number of observations: ' + str(self.data_length),\n 'model_results': 'BIC: ' + str(self.bic)})\n\n print( 
TablePrinter(model_fmt, ul='=')(model_details) )\n print(\"=\"*106)\n print( TablePrinter(fmt, ul='=')(data) )\n print(\"=\"*106)\n\n\nclass LaplaceResults(Results):\n\n def __init__(self,data_name,X_names,model_name,model_type,latent_variables, \n data,index,multivariate_model,objective_object,method,\n z_hide,max_lag,ihessian,signal=None,scores=None,states=None,\n states_var=None):\n\n self.data_name = data_name\n self.X_names = X_names\n self.max_lag = max_lag\n self.model_name = model_name\n self.model_type = model_type\n self.z = latent_variables\n self.method = method\n\n self.ihessian = ihessian\n self.scores = scores\n self.states = states\n self.states_var = states_var\n self.data = data\n self.index = index\n self.signal = signal\n self.multivariate_model = multivariate_model\n self.z_hide = z_hide\n\n if self.multivariate_model is True:\n self.data_length = self.data[0].shape[0]\n self.data_name = \",\".join(self.data_name)\n else:\n self.data_length = self.data.shape[0]\n\n z_values = self.z.get_z_values(transformed=False)\n\n self.objective_object = objective_object\n self.aic = 2*len(z_values)+2*self.objective_object(z_values)\n self.bic = 2*self.objective_object(z_values) + len(z_values)*np.log(self.data_length)\n\n if self.model_type in ['LLT','LLEV']:\n self.rounding_points = 10\n else:\n self.rounding_points = 4\n\n def __str__(self):\n print(\"Laplace Results Object\")\n print(\"==========================\")\n print(\"Dependent variable: \" + self.data_name)\n print(\"Regressors: \" + str(self.X_names))\n print(\"==========================\")\n print(\"Latent Variables Attributes: \")\n if self.ihessian is not None:\n print(\".ihessian: Inverse Hessian\") \n print(\".z : LatentVariables() object\")\n print(\".results : optimizer results\")\n print(\"\")\n print(\"Implied Model Attributes: \")\n print(\".aic: Akaike Information Criterion\") \n print(\".bic: Bayesian Information Criterion\") \n print(\".data: Model Data\") \n print(\".index: Model Index\")\n if self.scores is not None:\n print(\".scores: Model Scores\") \n if self.signal is not None:\n print(\".signal: Model Signal\") \n if self.states is not None:\n print(\".states: Model States\") \n if self.states_var is not None:\n print(\".states_var: Model State Variances\") \n print(\"\") \n print(\"Methods: \")\n print(\".summary() : printed results\")\n return(\"\")\n\n def summary(self, transformed=True):\n z_values = self.z.get_z_values(transformed=False)\n chain, mean_est, median_est, upper_95_est, lower_95_est = norm_post_sim(z_values,self.ihessian)\n\n if transformed is True:\n for k in range(len(chain)):\n chain[k] = self.z.z_list[k].prior.transform(chain[k])\n mean_est[k] = self.z.z_list[k].prior.transform(mean_est[k])\n median_est[k] = self.z.z_list[k].prior.transform(median_est[k])\n upper_95_est[k] = self.z.z_list[k].prior.transform(upper_95_est[k])\n lower_95_est[k] = self.z.z_list[k].prior.transform(lower_95_est[k]) \n\n mean_est = np.array(mean_est)\n self.chains = chain[:]\n\n data = []\n\n for i in range(len(self.z.z_list)-int(self.z_hide)):\n data.append({\n 'z_name': self.z.z_list[i].name, \n 'z_mean': np.round(mean_est[i],self.rounding_points),\n 'z_median': np.round(median_est[i],self.rounding_points),\n 'ci': \"(\" + str(np.round(lower_95_est[i],self.rounding_points)) + \" | \" + str(np.round(upper_95_est[i],self.rounding_points)) + \")\"\n }) \n\n fmt = [\n ('Latent Variable','z_name',40),\n ('Median','z_median',18),\n ('Mean', 'z_mean', 18),\n ('95% Credibility Interval','ci',25)]\n\n 
model_details = []\n\n model_fmt = [\n (self.model_name, 'model_details', 55),\n ('', 'model_results', 50)\n ]\n\n if self.method == 'MLE':\n obj_desc = \"Log Likelihood: \" + str(np.round(-self.objective_object(z_values),4))\n else:\n obj_desc = \"Unnormalized Log Posterior: \" + str(np.round(-self.objective_object(z_values),4))\n\n model_details.append({'model_details': 'Dependent Variable: ' + self.data_name, \n 'model_results': 'Method: ' + str(self.method)})\n model_details.append({'model_details': 'Start Date: ' + str(self.index[self.max_lag]),\n 'model_results': obj_desc})\n model_details.append({'model_details': 'End Date: ' + str(self.index[-1]),\n 'model_results': 'AIC: ' + str(self.aic)})\n model_details.append({'model_details': 'Number of observations: ' + str(self.data_length),\n 'model_results': 'BIC: ' + str(self.bic)})\n\n print( TablePrinter(model_fmt, ul='=')(model_details) )\n print(\"=\"*106)\n print( TablePrinter(fmt, ul='=')(data) )\n print(\"=\"*106)\n\nclass MCMCResults(Results):\n\n def __init__(self,data_name,X_names,model_name,model_type,latent_variables, \n data,index,multivariate_model,objective_object,method,\n z_hide,max_lag,samples,mean_est,median_est,lower_95_est,upper_95_est,\n signal=None,scores=None,states=None,states_var=None):\n self.data_name = data_name\n self.X_names = X_names\n self.max_lag = max_lag\n self.model_name = model_name\n self.model_type = model_type\n self.z = latent_variables\n self.method = method\n\n self.samples = samples\n self.mean_est = mean_est\n self.median_est = median_est\n self.lower_95_est = lower_95_est\n self.upper_95_est = upper_95_est\n self.scores = scores\n self.states = states\n self.states_var = states_var\n self.data = data\n self.index = index\n self.signal = signal\n self.multivariate_model = multivariate_model\n self.z_hide = z_hide\n\n if self.multivariate_model is True:\n self.data_length = self.data[0].shape[0]\n self.data_name = \",\".join(self.data_name)\n else:\n self.data_length = self.data.shape[0]\n\n z_values = self.z.get_z_values(transformed=False)\n\n self.objective_object = objective_object\n self.aic = 2*len(z_values)+2*self.objective_object(z_values)\n self.bic = 2*self.objective_object(z_values) + len(z_values)*np.log(self.data_length)\n\n if self.model_type in ['LLT','LLEV']:\n self.rounding_points = 10\n else:\n self.rounding_points = 4\n\n def __str__(self):\n print(\"Metropolis Hastings Results Object\")\n print(\"==========================\")\n print(\"Dependent variable: \" + self.data_name)\n print(\"Regressors: \" + str(self.X_names))\n print(\"==========================\")\n print(\"Latent Variable Attributes: \") \n print(\".z : LatentVariables() object\")\n if self.samples is not None:\n print(\".samples: MCMC samples\") \n print(\"\")\n print(\"Implied Model Attributes: \")\n print(\".aic: Akaike Information Criterion\") \n print(\".bic: Bayesian Information Criterion\") \n print(\".data: Model Data\") \n print(\".index: Model Index\")\n if self.scores is not None:\n print(\".scores: Model Scores\") \n if self.signal is not None:\n print(\".signal: Model Signal\") \n if self.states is not None:\n print(\".states: Model States\") \n if self.states_var is not None:\n print(\".states_var: Model State Variances\") \n print(\"\") \n print(\"Methods: \")\n print(\".summary() : printed results\")\n return(\"\")\n\n def summary(self, transformed=True):\n z_values = self.z.get_z_values(transformed=False)\n\n data = []\n\n for i in range(len(self.z.z_list)-int(self.z_hide)):\n data.append({\n 
'z_name': self.z.z_list[i].name, \n 'z_mean': np.round(self.mean_est[i],self.rounding_points),\n 'z_median': np.round(self.median_est[i],self.rounding_points),\n 'ci': \"(\" + str(np.round(self.lower_95_est[i],self.rounding_points)) + \" | \" + str(np.round(self.upper_95_est[i],self.rounding_points)) + \")\"\n }) \n\n fmt = [\n ('Latent Variable','z_name',40),\n ('Median','z_median',18),\n ('Mean', 'z_mean', 18),\n ('95% Credibility Interval','ci',25)]\n\n model_details = []\n\n model_fmt = [\n (self.model_name, 'model_details', 55),\n ('', 'model_results', 50)\n ]\n\n if self.method == 'MLE':\n obj_desc = \"Log Likelihood: \" + str(np.round(-self.objective_object(z_values),4))\n else:\n obj_desc = \"Unnormalized Log Posterior: \" + str(np.round(-self.objective_object(z_values),4))\n\n model_details.append({'model_details': 'Dependent Variable: ' + self.data_name, \n 'model_results': 'Method: ' + str(self.method)})\n model_details.append({'model_details': 'Start Date: ' + str(self.index[self.max_lag]),\n 'model_results': obj_desc})\n model_details.append({'model_details': 'End Date: ' + str(self.index[-1]),\n 'model_results': 'AIC: ' + str(self.aic)})\n model_details.append({'model_details': 'Number of observations: ' + str(self.data_length),\n 'model_results': 'BIC: ' + str(self.bic)})\n\n print( TablePrinter(model_fmt, ul='=')(model_details) )\n print(\"=\"*106)\n print( TablePrinter(fmt, ul='=')(data) )\n print(\"=\"*106)\n",
"import numpy as np\nimport pyflux as pf\n\nnoise = np.random.normal(0,1,200)\ndata = np.zeros(200)\n\nfor i in range(1,len(data)):\n data[i] = 1.0*data[i-1] + noise[i]\n\ncountdata = np.random.poisson(3,200)\n\ndef test_skewt_couple_terms():\n \"\"\"\n Tests latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_skewt_couple_terms_integ():\n \"\"\"\n Tests latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = pf.GASLLT(data=data, integ=1, family=pf.Skewt())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_skewt_bbvi():\n \"\"\"\n Tests an GAS model estimated with BBVI and that the length of the latent variable\n list is correct, and that the estimated latent variables are not nan\n \"\"\"\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('BBVI',iterations=100)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n \"\"\"\n\ndef test_skewt_bbvi_mini_batch():\n \"\"\"\n Tests an ARIMA model estimated with BBVI and that the length of the latent variable\n list is correct, and that the estimated latent variables are not nan\n \"\"\"\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('BBVI',iterations=100, mini_batch=32)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n \"\"\"\n \ndef test_skewt_bbvi_elbo():\n \"\"\"\n Tests that the ELBO increases\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('BBVI',iterations=100, record_elbo=True)\n assert(x.elbo_records[-1]>x.elbo_records[0])\n\ndef test_skewt_bbvi_mini_batch_elbo():\n \"\"\"\n Tests that the ELBO increases\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)\n assert(x.elbo_records[-1]>x.elbo_records[0])\n\ndef test_skewt_mh():\n \"\"\"\n Tests an GAS model estimated with Metropolis-Hastings and that the length of the \n latent variable list is correct, and that the estimated latent variables are not nan\n \"\"\"\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('M-H',nsims=300)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n \"\"\"\n\n\"\"\" Uncomment in future if Skewt becomes more robust\ndef test_skewt_laplace():\n Tests an GAS model estimated with Laplace approximation and that the length of the \n latent variable list is correct, and that the estimated latent variables are not nan\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('Laplace')\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\"\"\"\n\ndef test_skewt_pml():\n \"\"\"\n Tests a PML model estimated with Laplace approximation and that the length of the \n latent variable list is correct, 
and that the estimated latent variables are not nan\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit('PML')\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_skewt_predict_length():\n \"\"\"\n Tests that the prediction dataframe length is equal to the number of steps h\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit()\n x.summary()\n assert(model.predict(h=5).shape[0] == 5)\n\ndef test_skewt_predict_is_length():\n \"\"\"\n Tests that the prediction IS dataframe length is equal to the number of steps h\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit()\n assert(model.predict_is(h=5).shape[0] == 5)\n\ndef test_skewt_predict_nans():\n \"\"\"\n Tests that the predictions are not nans\n model = pf.GASLLT(data=data, family=pf.Skewt())\n \"\"\"\n \"\"\"\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit()\n x.summary()\n assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)\n \"\"\"\n\"\"\"\n\ndef test_skewt_predict_is_nans():\n\n Tests that the in-sample predictions are not nans\n\n model = pf.GASLLT(data=data, family=pf.Skewt())\n x = model.fit()\n x.summary()\n assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)\n\"\"\"\n\n"
] | [
[
"numpy.isnan",
"numpy.all",
"numpy.random.normal",
"numpy.array",
"numpy.zeros"
],
[
"numpy.diag",
"numpy.log",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"numpy.round",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.isnan",
"numpy.random.poisson",
"numpy.random.normal",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
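The pyflux test files in the record above repeatedly exercise the same fit/predict cycle. The sketch below strings those calls together into one runnable example; it uses only the imports and methods that appear in the blobs (ARIMA, the Laplace family, fit, summary, predict with intervals) and assumes pyflux, numpy, and pandas are installed. The AR(1) data generation mirrors the test file.

    # Minimal fit/predict cycle exercised by the ARIMA tests above.
    import numpy as np
    from pyflux.arma import ARIMA
    from pyflux.families import Laplace

    np.random.seed(0)
    noise = np.random.normal(0, 1, 100)
    data = np.zeros(100)
    for i in range(1, len(data)):
        data[i] = 0.9 * data[i - 1] + noise[i]   # AR(1) process, as in the tests

    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    result = model.fit()                         # MLE by default
    result.summary()                             # prints the latent-variable table
    preds = model.predict(h=5, intervals=True)   # 5-step forecast with prediction intervals
    print(preds.head())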
modichirag/21cm_cleaning | [
"1615fea4e2d617bb6ef00770a49698901227daa8",
"1615fea4e2d617bb6ef00770a49698901227daa8"
] | [
"code/plotting/plot_evalrep.py",
"code/plotting/plot_lsstallic.py"
] | [
"#!/usr/bin/env python3\n#\n# Plots the power spectra and Fourier-space biases for the HI.\n#\nimport numpy as np\nimport os, sys\nimport matplotlib.pyplot as plt\nfrom pmesh.pm import ParticleMesh\nfrom scipy.interpolate import InterpolatedUnivariateSpline as ius\nfrom nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower\nfrom nbodykit.cosmology import Planck15, EHPower, Cosmology\n\nsys.path.append('../utils/')\nsys.path.append('../recon/')\nsys.path.append('../recon/cosmo4d/')\nfrom lab import mapbias as mapp\nfrom lab import report as rp\nfrom lab import dg\nfrom getbiasparams import getbias\nimport tools\n#\n\nfrom matplotlib import rc, rcParams, font_manager\nrcParams['font.family'] = 'serif'\nfsize = 12\nfontmanage = font_manager.FontProperties(family='serif', style='normal',\n size=fsize, weight='normal', stretch='normal')\nfont = {'family': fontmanage.get_family()[0],\n 'style': fontmanage.get_style(),\n 'weight': fontmanage.get_weight(),\n 'size': fontmanage.get_size(),\n }\n\nprint(font)\n\n\n#\nimport argparse\nparser = argparse.ArgumentParser()\n#parser.add_argument('-m', '--model', help='model name to use')\nparser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)\nparser.add_argument('-l', '--bs', help='boxsize', default=256, type=float)\nparser.add_argument('-n', '--nmesh', help='nmesh', default=128, type=int)\nparser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)\nparser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.01, type=float)\nargs = parser.parse_args()\n\nfigpath = './figs/'\n\nbs, nc, aa = args.bs, args.nmesh, args.aa\nzz = 1/aa- 1\nkmin = args.kmin\nang = args.angle\npm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])\nrank = pm.comm.rank\n\ndpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, kmin, ang)\ndpath += 'L%04d-N%04d/'%(bs, nc)\n\n################\ndef make_rep_plot():\n \"\"\"Does the work of making the real-space xi(r) and b(r) figure.\"\"\"\n \n\n noises = np.loadtxt('/global/u1/c/chmodi/Programs/21cm/21cm_cleaning/data/summaryHI.txt').T\n for i in range(noises[0].size):\n if noises[0][i] == np.round(1/aa-1, 2): noise = noises[3][i]\n print(noise)\n\n datap = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap')\n dataprsd = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap')\n try:\n datapup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap_up')\n dataprsdup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap_up')\n except Exception as e: print(e)\n\n fig, ax = plt.subplots(1, 2, figsize=(9, 4))\n\n def makeplot(bfit, datapp, lss, lww, cc, lbl=None):\n rpfit = rp.evaluate1(bfit, datapp, field='mapp')[:-2]\n ax[0].plot(rpfit[0]['k'], rpfit[0]['power']/(rpfit[1]['power']*rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc, label=lbl)\n ax[1].plot(rpfit[0]['k'], (rpfit[1]['power']/rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc)\n \n\n #fits\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier/%d-0.00/'%(nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n print(bpaths)\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = datap\n lss, lww, cc, lbl = '-', 2, 'C0', 'Fid'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n \n try:\n basepath = 
dpath+'ZA/opt_s999_h1massA_fourier/upsample1/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = datapup\n lss, lww, cc, lbl = '-', 2, 'C1', 'Up1'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier/upsample2/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = datapup\n lss, lww, cc, lbl = '-', 2, 'C2', 'Up2'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n #rsd\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/%d-0.00/'%(nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = dataprsd\n lss, lww, cc, lbl = '--', 2, 'C0', 'rsd'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample1/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = dataprsdup\n lss, lww, cc, lbl = '--', 2, 'C1', 'rsd up'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample2/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = dataprsdup\n lss, lww, cc, lbl = '--', 2, 'C2', 'rsd up2'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n\n ax[0].set_ylabel('$r_{cc}$', fontdict=font)\n ax[1].set_ylabel(r'$\\sqrt{P_{\\rm mod}/P_{hh}}$', fontdict=font)\n for axis in ax:\n axis.set_xlabel(r'$k\\quad [h\\,{\\rm Mpc}^{-1}]$', fontdict=font)\n axis.set_xscale('log')\n axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')\n axis.legend(prop=fontmanage)\n\n # Put on some more labels.\n for axis in ax:\n axis.set_xscale('log')\n for tick in axis.xaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n ##and finish\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n if rank == 0: plt.savefig(figpath + '/rep_L%04d_%04d.pdf'%(bs, aa*10000))\n\n\n\n################\n\n\nif __name__==\"__main__\":\n make_rep_plot()\n #\n",
"#!/usr/bin/env python3\n#\n# Plots the power spectra and Fourier-space biases for the HI.\n#\nimport numpy as np\nimport os, sys\nimport matplotlib.pyplot as plt\nfrom pmesh.pm import ParticleMesh\nfrom scipy.interpolate import InterpolatedUnivariateSpline as ius\nfrom nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower\nfrom nbodykit.cosmology import Planck15, EHPower, Cosmology\n\nsys.path.append('../utils/')\nsys.path.append('../recon/')\nsys.path.append('../recon/cosmo4d/')\nfrom lab import mapbias as mapp\nfrom lab import report as rp\nfrom lab import dg\nfrom getbiasparams import getbias\nimport tools\n#\n\nfrom matplotlib import rc, rcParams, font_manager\nrcParams['font.family'] = 'serif'\nfsize = 12\nfontmanage = font_manager.FontProperties(family='serif', style='normal',\n size=fsize, weight='normal', stretch='normal')\nfont = {'family': fontmanage.get_family()[0],\n 'style': fontmanage.get_style(),\n 'weight': fontmanage.get_weight(),\n 'size': fontmanage.get_size(),\n }\n\nprint(font)\n\n\n#\nimport argparse\nparser = argparse.ArgumentParser()\n#parser.add_argument('-m', '--model', help='model name to use')\nparser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)\nparser.add_argument('-l', '--bs', help='boxsize', default=256, type=float)\nparser.add_argument('-n', '--nmesh', help='nmesh', default=256, type=int)\nparser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)\nparser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.03, type=float)\nparser.add_argument( '--up', help='upsample', default=0) \nargs = parser.parse_args()\n\nfigpath = './figs/'\n\nbs, nc, aa = args.bs, args.nmesh, args.aa\nnc2 = nc*2\nzz = 1/aa- 1\nkmin = args.kmin\nang = args.angle\nif args.up: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc2, nc2, nc2])\nelse: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])\nrank = pm.comm.rank\n\n\n\ndef getps(f1, f2=None, p2=False):\n p1 = FFTPower(f1, mode='1d').power['power']\n if f2 is not None:\n px = FFTPower(f1, second=f2, mode='1d').power['power']\n if p2:\n p2 = FFTPower(f2, mode='1d').power['power']\n return p1, p2, px\n else: return p1, px\n else: return p1\n\n\n\ndef getps2D(f1, f2=None, p2=False):\n p1 = FFTPower(f1, mode='2d').power['power']\n if f2 is not None:\n px = FFTPower(f1, second=f2, mode='2d').power['power']\n if p2:\n p2 = FFTPower(f2, mode='2d').power['power']\n return p1, p2, px\n else: return p1, px\n else: return p1\n\n \ndef plot1D(nc=nc):\n\n aas = [0.2000, 0.5000, 0.5000]\n lsstns = [0.0035, 0.0500, 0.0500]\n elgns = [None, 0.0010, 0.0010]\n\n\n fig, axar = plt.subplots(3, 2, figsize = (9, 12), sharex=True, sharey=False)\n\n for i in range(3):\n aa, lsstn, elgn = aas[i], lsstns[i], elgns[i]\n ax = axar[i]\n \n if i== 0:dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2//fastpm_0.2000/wedge_kmin0.03_pess/L1024-N0256-R/thermal-reas-hex/ZA/opt_s777_h1massD_%s_rsdpos/'\n if i==1: dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2//fastpm_0.5000/wedge_kmin0.03_pess/L1024-N0256-R/thermal-reas-hex/ZA/opt_s777_h1massD_%s_rsdpos/'\n if i==2 :\n dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2//fastpm_0.5000/wedge_kmin0.03_pess/L1024-N0256-R/thermal-reas-hex-hirax/ZA/opt_s777_h1massD_%s_rsdpos/'\n \n if nc == 256:\n smesh = BigFileMesh(dpath%('lwt0') + '/datap', 's').paint()\n ps = FFTPower(smesh, mode='1d').power\n k, ps = ps['k'], ps['power']\n\n if i == 0: suff = 'lwt0'\n else: suff = 'lwt0-nob2'\n print(dpath%suff + 
'256-0.00/best-fit/')\n s0 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps0, ps0x = getps(s0, smesh)\n\n if i == 0: suff = 'lsst_ln%04d'%(lsstn * 1e4)\n else: suff = 'lsst-nob2_ln%04d'%(lsstn * 1e4)\n s1 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps1, ps1x = getps(s1, smesh)\n\n if i: \n suff = 'elg-nob2_ln%04d'%(elgn * 1e4)\n s2 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps2, ps2x = getps(s2, smesh)\n\n if i==1: \n suff = 'elg_noh1_ln%04d'%(elgn * 1e4)\n s3 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps3, ps3x = getps(s3, smesh)\n\n \n elif nc == 512:\n smesh = BigFileMesh(dpath%('lwt0') + '/datap_up', 's').paint()\n ps = FFTPower(smesh, mode='1d').power\n k, ps = ps['k'], ps['power']\n\n if i == 0: suff = 'lwt0'\n else: suff = 'lwt0-nob2'\n s0 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n ps0, ps0x = getps(s0, smesh)\n\n if i == 0: suff = 'lsst_ln%04d'%(lsstn * 1e4)\n else: suff = 'lsst-nob2_ln%04d'%(lsstn * 1e4)\n s1 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n ps1, ps1x = getps(s1, smesh)\n\n if i : \n suff = 'elg-nob2_ln%04d'%(elgn * 1e4)\n s2 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n ps2, ps2x = getps(s2, smesh)\n\n if i==1: \n suff = 'elg_noh1_ln%04d'%(elgn * 1e4)\n s3 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n #s3 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps3, ps3x = getps(s3, smesh)\n\n\n ax[0].plot(k, ps0x/(ps0*ps)**0.5, 'C0', label='HI data')\n ax[0].plot(k, ps1x/(ps1*ps)**0.5, 'C1', label='HI + LSST data')\n if i : ax[0].plot(k, ps2x/(ps2*ps)**0.5, 'C2', label='HI + DESI data')\n if i == 1 : ax[0].plot(k, ps3x/(ps3*ps)**0.5, 'C3', label='DESI data')\n\n \n ax[1].plot(k, (ps0/ps)**0.5, 'C0', label='HI')\n ax[1].plot(k, (ps1/ps)**0.5, 'C1')\n if i: ax[1].plot(k, (ps2/ps)**0.5, 'C2')\n if i == 1 : ax[1].plot(k, (ps3/ps)**0.5, 'C3')\n\n\n ax[0].set_ylabel('$r_{cc}$', fontdict=font)\n ax[1].set_ylabel('$t_{f}$', fontdict=font)\n ax[0].set_ylim(0.5, 1.07)\n ax[1].set_ylim(0.5, 2.05)\n if i == 2:\n for axis in ax: axis.set_xlabel(r'$k\\quad [h\\,{\\rm Mpc}^{-1}]$', fontdict=font)\n if i == 1:\n for axis in ax: axis.legend(prop=fontmanage)\n\n if i < 2: ax[1].text(1e-2, 1.82, 'z=%0.2f, PUMA'%(1/aa - 1), fontsize=fsize)\n else: ax[1].text(1e-2, 1.82, 'z=%0.2f, HIRAX'%(1/aa - 1), fontsize=fsize)\n \n for axis in ax.flatten():\n axis.set_xscale('log')\n axis.grid(which='both')\n # Put on some more labels.\n for axis in ax.flatten():\n axis.axhline(1, color='k', ls=\"--\")\n for tick in axis.xaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n if rank == 0: plt.savefig(figpath + '/lsst_N%04d-IC.pdf'%(nc))\n\n\n\n\n\ndef plot2D(nc=nc):\n\n aas = [0.2000, 0.5000, 0.5000]\n lsstns = [0.0035, 0.0500, 0.0500]\n elgns = [None, 0.0010, 0.0010]\n\n\n fig, axar = plt.subplots(3, 2, figsize = (9, 12), sharex=True, sharey=True)\n\n for i in range(3):\n aa, lsstn, elgn = aas[i], lsstns[i], elgns[i]\n ax = axar[i]\n \n if i== 0:dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2//fastpm_0.2000/wedge_kmin0.03_pess/L1024-N0256-R/thermal-reas-hex/ZA/opt_s777_h1massD_%s_rsdpos/'\n if i==1: dpath = 
'/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2//fastpm_0.5000/wedge_kmin0.03_pess/L1024-N0256-R/thermal-reas-hex/ZA/opt_s777_h1massD_%s_rsdpos/'\n if i==2 :\n dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/reconlsstv2//fastpm_0.5000/wedge_kmin0.03_pess/L1024-N0256-R/thermal-reas-hex-hirax/ZA/opt_s777_h1massD_%s_rsdpos/'\n \n \n if nc == 256:\n smesh = BigFileMesh(dpath%('lwt0') + '/datap', 's').paint()\n ps = FFTPower(smesh, mode='2d').power\n k, ps = ps['k'], ps['power']\n\n if i == 0: suff = 'lwt0'\n else: suff = 'lwt0-nob2'\n print(dpath%suff + '256-0.00/best-fit/')\n s0 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps0, ps0x = getps2D(s0, smesh)\n\n if i == 0: suff = 'lsst_ln%04d'%(lsstn * 1e4)\n else: suff = 'lsst-nob2_ln%04d'%(lsstn * 1e4)\n s1 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps1, ps1x = getps2D(s1, smesh)\n\n if i: \n suff = 'elg-nob2_ln%04d'%(elgn * 1e4)\n s2 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps2, ps2x = getps2D(s2, smesh)\n\n if i==1: \n suff = 'elg_noh1_ln%04d'%(elgn * 1e4)\n s3 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps3, ps3x = getps2D(s3, smesh)\n\n \n elif nc == 512:\n smesh = BigFileMesh(dpath%('lwt0') + '/datap_up', 's').paint()\n ps = FFTPower(smesh, mode='2d').power\n k, ps = ps['k'], ps['power']\n\n if i == 0: suff = 'lwt0'\n else: suff = 'lwt0-nob2'\n s0 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n ps0, ps0x = getps2D(s0, smesh)\n\n if i == 0: suff = 'lsst_ln%04d'%(lsstn * 1e4)\n else: suff = 'lsst-nob2_ln%04d'%(lsstn * 1e4)\n s1 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n ps1, ps1x = getps2D(s1, smesh)\n\n if i : \n suff = 'elg-nob2_ln%04d'%(elgn * 1e4)\n s2 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n ps2, ps2x = getps2D(s2, smesh)\n\n if i==1: \n suff = 'elg_noh1_ln%04d'%(elgn * 1e4)\n s3 = BigFileMesh(dpath%suff + 'upsample2/512-0.00/best-fit/', 's').paint()\n #s3 = BigFileMesh(dpath%suff + '256-0.00/best-fit/', 's').paint()\n ps3, ps3x = getps2D(s3, smesh)\n\n\n\n ax[0].plot(k[:, 0], ps0x[:, 0]/(ps0[:, 0]*ps[:, 0])**0.5, 'C0', label='HI data')\n ax[0].plot(k[:, 0], ps1x[:, 0]/(ps1[:, 0]*ps[:, 0])**0.5, 'C1', label='HI + LSST data')\n if i: ax[0].plot(k[:, 0], ps2x[:, 0]/(ps2[:, 0]*ps[:, 0])**0.5, 'C2', label='HI + DESI data')\n if i == 1: ax[0].plot(k[:, 0], ps3x[:, 0]/(ps3[:, 0]*ps[:, 0])**0.5, 'C3', label='DESI data')\n\n ax[-1].plot(k[:, 0], ps0x[:, -1]/(ps0[:, -1]*ps[:, -1])**0.5, 'C0')\n ax[-1].plot(k[:, 0], ps1x[:, -1]/(ps1[:, -1]*ps[:, -1])**0.5, 'C1')\n if i: ax[-1].plot(k[:, 0], ps2x[:, -1]/(ps2[:, -1]*ps[:, -1])**0.5, 'C2')\n if i == 1: ax[-1].plot(k[:, -1], ps3x[:, -1]/(ps3[:, -1]*ps[:, -1])**0.5, 'C3')\n\n #\n ax[0].set_ylabel('$r_{cc}$', fontdict=font)\n #ax[1].set_ylabel('$r_{cc},\\, \\mu=0.9$', fontdict=font)\n\n if i < 2: ax[1].text(0.15, 1.02, 'z=%0.2f, PUMA'%(1/aa - 1), fontsize=fsize)\n else: ax[1].text(0.15, 1.02, 'z=%0.2f, HIRAX'%(1/aa - 1), fontsize=fsize)\n \n if i == 0:\n ax[0].text(1e-2, 0.55, '$\\mu=0.1$', fontsize=fsize)\n ax[1].text(1e-2, 0.55, '$\\mu=0.9$', fontsize=fsize)\n \n for axis in ax: axis.set_ylim(0.3, 1.1)\n if i == 2:\n for axis in ax: axis.set_xlabel(r'$k\\quad [h\\,{\\rm Mpc}^{-1}]$', fontdict=font)\n if i ==1 :\n for axis in ax: axis.legend(prop=fontmanage)\n \n for axis in ax.flatten():\n axis.set_xscale('log')\n axis.grid(which='both')\n \n # Put on some more labels.\n for axis in ax.flatten():\n for tick in 
axis.xaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n if rank == 0:\n plt.savefig(figpath + '/lsst_N%04d-IC-2D.pdf'%(nc))\n\n################\n\n\nif __name__==\"__main__\":\n #plot1D()\n plot2D()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.round",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ashwanikumar04/ml-recommendation-engine | [
"57a7c0d5ac073b976e40c17d8892a4b7291d08ed"
] | [
"matrix_factorization/mf_keras.py"
] | [
"import pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Embedding, Dot, Add, Flatten\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.optimizers import Adam\n\n# df = pd.read_csv(\"./data/processed_rating.csv\")\n\n# N = df[\"user_idx\"].max() + 1\n# M = df[\"isbn_idx\"].max() + 1\n\n# df = shuffle(df)\n\n# cut_off = int(0.8 * len(df))\n\n# df_train = df.iloc[:cut_off]\n# df_test = df.iloc[cut_off:]\n\n# K = 15\n\n# mu = df_train[\"Book-Rating\"].mean()\n# epochs = 15\n# reg_penalty = 0.0\n\n# u = Input(shape=(1, ))\n# b = Input(shape=(1, ))\n\n# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)\n# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)\n\n# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)\n# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)\n\n# x = Dot(axes=2)([u_embedding, b_embedding])\n\n# x = Add()([x, u_bias, b_bias])\n# x = Flatten()(x)\n\n# model = Model(inputs=[u, b], outputs=x)\n# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=[\"mse\"])\n\n# r = model.fit(\n# x=[df_train[\"user_idx\"].values, df_train[\"isbn_idx\"].values],\n# y=df_train[\"Book-Rating\"].values - mu,\n# epochs=epochs,\n# batch_size=128,\n# validation_data=([df_test[\"user_idx\"].values,\n# df_test[\"isbn_idx\"].values], df_test[\"Book-Rating\"].values - mu))\n\n# plt.plot(r.history['loss'], label=\"train loss\")\n# plt.plot(r.history['val_loss'], label=\"test loss\")\n# plt.legend()\n# plt.show()\n\ndf = pd.read_csv(\"./data/archive/ratings.csv\")\n\n# N = len(set(df[\"user_id\"].values)) + 1\n# M = len(set(df[\"book_id\"].values)) + 1\n\n# df = shuffle(df)\n\n# cut_off = int(0.8 * len(df))\n\n# df_train = df.iloc[:cut_off]\n# df_test = df.iloc[cut_off:]\n\n# K = 15\n\n# mu = df_train[\"rating\"].mean()\n# epochs = 15\n# reg_penalty = 0.0\n\n# u = Input(shape=(1, ))\n# b = Input(shape=(1, ))\n\n# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)\n# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)\n\n# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)\n# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)\n\n# x = Dot(axes=2)([u_embedding, b_embedding])\n\n# x = Add()([x, u_bias, b_bias])\n# x = Flatten()(x)\n\n# model = Model(inputs=[u, b], outputs=x)\n# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=[\"mse\"])\n\n# r = model.fit(x=[df_train[\"user_id\"].values, df_train[\"book_id\"].values],\n# y=df_train[\"rating\"].values - mu,\n# epochs=epochs,\n# batch_size=128,\n# validation_data=([\n# df_test[\"user_id\"].values, df_test[\"book_id\"].values\n# ], df_test[\"rating\"].values - mu))\n\n# model.save('regression_model.h5')\n# plt.plot(r.history['loss'], label=\"train loss\")\n# plt.plot(r.history['val_loss'], label=\"test loss\")\n# plt.legend()\n# plt.show()\n\n\ndef predict(user_id):\n model = keras.models.load_model('regression_model.h5')\n book_data = np.array(list(set(df.book_id)))\n user = np.array([user_id for i in range(len(book_data))])\n predictions = model.predict([user, book_data])\n predictions = np.array([a[0] for a in predictions])\n recommended_book_ids = (-predictions).argsort()[:5]\n print(recommended_book_ids)\n 
print(predictions[recommended_book_ids])\n\npredict(1)\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
RandolphVI/Question-Difficulty-Prediction | [
"77b4b83b5bc747c5074926d7a37545a5d46ed343",
"77b4b83b5bc747c5074926d7a37545a5d46ed343"
] | [
"TF/TARNN/test_tarnn.py",
"TMLA/SVM/train_svm.py"
] | [
"# -*- coding:utf-8 -*-\n__author__ = 'Randolph'\n\nimport os\nimport sys\nimport time\nimport logging\n\nsys.path.append('../')\nlogging.getLogger('tensorflow').disabled = True\n\nimport tensorflow as tf\nfrom utils import checkmate as cm\nfrom utils import data_helpers as dh\nfrom utils import param_parser as parser\nfrom sklearn.metrics import mean_squared_error, r2_score\n\nargs = parser.parameter_parser()\nMODEL = dh.get_model_name()\nlogger = dh.logger_fn(\"tflog\", \"logs/Test-{0}.log\".format(time.asctime()))\n\nCPT_DIR = 'runs/' + MODEL + '/checkpoints/'\nBEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'\nSAVE_DIR = 'output/' + MODEL\n\n\ndef test_tarnn():\n \"\"\"Test TARNN model.\"\"\"\n # Print parameters used for the model\n dh.tab_printer(args, logger)\n\n # Load data\n logger.info(\"Loading data...\")\n logger.info(\"Data processing...\")\n test_data = dh.load_data_and_labels(args.test_file, args.word2vec_file, data_aug_flag=False)\n\n logger.info(\"Data padding...\")\n x_test_content, x_test_question, x_test_option, y_test = dh.pad_data(test_data, args.pad_seq_len)\n\n # Load tarnn model\n OPTION = dh.option(pattern=1)\n if OPTION == 'B':\n logger.info(\"Loading best model...\")\n checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)\n else:\n logger.info(\"Loading latest model...\")\n checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)\n logger.info(checkpoint_file)\n\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=args.allow_soft_placement,\n log_device_placement=args.log_device_placement)\n session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{0}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n input_x_content = graph.get_operation_by_name(\"input_x_content\").outputs[0]\n input_x_question = graph.get_operation_by_name(\"input_x_question\").outputs[0]\n input_x_option = graph.get_operation_by_name(\"input_x_option\").outputs[0]\n input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n is_training = graph.get_operation_by_name(\"is_training\").outputs[0]\n\n # Tensors we want to evaluate\n scores = graph.get_operation_by_name(\"output/scores\").outputs[0]\n loss = graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n # Split the output nodes name by '|' if you have several output nodes\n output_node_names = \"output/scores\"\n\n # Save the .pb model file\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,\n output_node_names.split(\"|\"))\n tf.train.write_graph(output_graph_def, \"graph\", \"graph-tarnn-{0}.pb\".format(MODEL), as_text=False)\n\n # Generate batches for one epoch\n batches = dh.batch_iter(list(zip(x_test_content, x_test_question, x_test_option, y_test)),\n args.batch_size, 1, shuffle=False)\n\n test_counter, test_loss = 0, 0.0\n\n # Collect the predictions here\n true_labels = []\n predicted_scores = []\n\n for batch_test in batches:\n x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_test)\n feed_dict = {\n input_x_content: x_batch_content,\n input_x_question: x_batch_question,\n input_x_option: x_batch_option,\n input_y: y_batch,\n dropout_keep_prob: 1.0,\n is_training: 
False\n }\n batch_scores, cur_loss = sess.run([scores, loss], feed_dict)\n\n # Prepare for calculating metrics\n for i in y_batch:\n true_labels.append(i)\n for j in batch_scores:\n predicted_scores.append(j)\n\n test_loss = test_loss + cur_loss\n test_counter = test_counter + 1\n\n # Calculate PCC & DOA\n pcc, doa = dh.evaluation(true_labels, predicted_scores)\n # Calculate RMSE\n rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5\n r2 = r2_score(true_labels, predicted_scores)\n\n test_loss = float(test_loss / test_counter)\n\n logger.info(\"All Test Dataset: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}\"\n .format(test_loss, pcc, doa, rmse, r2))\n\n # Save the prediction result\n if not os.path.exists(SAVE_DIR):\n os.makedirs(SAVE_DIR)\n dh.create_prediction_file(output_file=SAVE_DIR + \"/predictions.json\", all_id=test_data.id,\n all_labels=true_labels, all_predict_scores=predicted_scores)\n\n logger.info(\"All Done.\")\n\n\nif __name__ == '__main__':\n test_tarnn()\n",
"# -*- coding:utf-8 -*-\n__author__ = 'Randolph'\n\nimport sys\nimport time\n\nsys.path.append('../')\n\nfrom utils import data_process as dp\nfrom sklearn.svm import SVR\nfrom sklearn.externals import joblib\n\nlogger = dp.logger_fn(\"svm-log\", \"svm/train-{0}.log\".format(time.asctime()))\n\n# Data Parameters\nTRAININGSET_DIR = '../../data/Train_BOW_sample.json'\nMODEL_DIR = 'svm_model.m'\n\n\ndef train():\n # Load data\n logger.info(\"Loading data...\")\n\n x_train, y_train = dp.load_data(TRAININGSET_DIR)\n\n logger.info(\"Finish building BOW.\")\n\n model = SVR()\n\n logger.info(\"Training model...\")\n model.fit(x_train, y_train)\n\n logger.info(\"Finish training. Saving model...\")\n joblib.dump(model, MODEL_DIR)\n\n\nif __name__ == '__main__':\n train()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.train.latest_checkpoint",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_squared_error",
"tensorflow.ConfigProto",
"tensorflow.Session"
],
[
"sklearn.svm.SVR",
"sklearn.externals.joblib.dump"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ambattz/Intelligent_Traffic_Management_System | [
"51c3100ddb3479538d8a6accbcc0ea9f751481a7"
] | [
"test_model_images.py"
] | [
"import numpy as np\r\nimport os\r\nimport six.moves.urllib as urllib\r\nimport sys\r\nimport tarfile\r\nimport tensorflow.compat.v1 as tf\r\ntf.disable_v2_behavior()\r\nimport zipfile\r\n\r\nfrom collections import defaultdict\r\nfrom io import StringIO\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\n\r\nfrom object_detection.utils import label_map_util\r\n\r\nfrom object_detection.utils import visualization_utils as vis_util\r\n\r\n# This is needed since the notebook is stored in the object_detection folder.\r\nsys.path.append(\"..\")\r\n\r\n\r\n# script repurposed from sentdex's edits and TensorFlow's example script. Pretty messy as not all unnecessary\r\n# parts of the original have been removed\r\n\r\n\r\n\r\n\r\n# # Model preparation\r\n\r\n# ## Variables\r\n#\r\n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.\r\n#\r\n# By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\r\n\r\n\r\n\r\n# What model to download.\r\nMODEL_NAME = 'trained_model' # change to whatever folder has the new graph\r\n# MODEL_FILE = MODEL_NAME + '.tar.gz' # these lines not needed as we are using our own model\r\n# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\r\n\r\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\r\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\r\n\r\n# List of the strings that is used to add correct label for each box.\r\nPATH_TO_LABELS = os.path.join('training', 'label.pbtxt') # our labels are in training/object-detection.pbkt\r\n\r\nNUM_CLASSES = 3 # we only are using one class at the moment (mask at the time of edit)\r\n\r\n\r\n# ## Download Model\r\n\r\n\r\n# opener = urllib.request.URLopener() # we don't need to download model since we have our own\r\n# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\r\n# tar_file = tarfile.open(MODEL_FILE)\r\n# for file in tar_file.getmembers():\r\n# file_name = os.path.basename(file.name)\r\n# if 'frozen_inference_graph.pb' in file_name:\r\n# tar_file.extract(file, os.getcwd())\r\n\r\n\r\n# ## Load a (frozen) Tensorflow model into memory.\r\n\r\n\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n\r\n# ## Loading label map\r\n# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\r\n\r\n# In[7]:\r\n\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n\r\n\r\n\r\ndef load_image_into_numpy_array(image):\r\n (im_width, im_height) = image.size\r\n return np.array(image.getdata()).reshape(\r\n (im_height, im_width, 3)).astype(np.uint8)\r\n\r\n\r\n\r\n\r\n# For the sake of simplicity we will use only 2 images:\r\n# image1.jpg\r\n# image2.jpg\r\n# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\r\nPATH_TO_TEST_IMAGES_DIR = 'test'\r\nTEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(0, 60)] # adjust range for # of images in folder\r\n\r\n# Size, in inches, of the output images.\r\nIMAGE_SIZE = (12, 8)\r\n\r\n\r\nwith detection_graph.as_default():\r\n with tf.Session(graph=detection_graph) as sess:\r\n i = 0\r\n for image_path in TEST_IMAGE_PATHS:\r\n image = Image.open(image_path)\r\n # the array based representation of the image will be used later in order to prepare the\r\n # result image with boxes and labels on it.\r\n image_np = load_image_into_numpy_array(image)\r\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\r\n image_np_expanded = np.expand_dims(image_np, axis=0)\r\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n # Each box represents a part of the image where a particular object was detected.\r\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n # Each score represent how level of confidence for each of the objects.\r\n # Score is shown on the result image, together with the class label.\r\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n # Actual detection.\r\n (boxes, scores, classes, num_detections) = sess.run(\r\n [boxes, scores, classes, num_detections],\r\n feed_dict={image_tensor: image_np_expanded})\r\n # Visualization of the results of a detection.\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8)\r\n\r\n plt.figure(figsize=IMAGE_SIZE)\r\n plt.imshow(image_np) # matplotlib is configured for command line only so we save the outputs instead\r\n plt.savefig(\"outputs/detection_output{}.png\".format(i)) # create an outputs folder for the images to be saved\r\n i = i+1 # this was a quick fix for iteration, create a pull request if you'd like\r\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"tensorflow.compat.v1.import_graph_def",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.squeeze",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.compat.v1.GraphDef",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
intel-analytics/WorldBankPoC | [
"49c19268601ff1aa7e396ddc5a8a23abfe73880e"
] | [
"vegnoveg/vegnonveg-fulltraining-nnframe.py"
] | [
"# Databricks notebook source\n\nimport pandas as pd\nfrom os import listdir\nfrom os.path import join, basename\nimport struct\nimport pickle\nimport json\nimport os\nfrom scipy import misc\nimport datetime as dt\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import udf\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n# import matplotlib.pyplot as plt\n# %matplotlib inline\n\n# COMMAND ----------\n\n# %pylab inline\nfrom bigdl.nn.layer import *\nfrom bigdl.nn.criterion import *\nfrom bigdl.optim.optimizer import *\nfrom bigdl.util.common import *\nfrom bigdl.dataset.transformer import *\nfrom bigdl.dataset import mnist\nfrom bigdl.transform.vision.image import *\nfrom zoo.pipeline.nnframes.nn_image_reader import *\nfrom zoo.pipeline.nnframes.nn_image_transformer import *\nfrom zoo.pipeline.nnframes.nn_classifier import *\nfrom zoo.common.nncontext import *\nimport urllib\n\n\n# COMMAND ----------\n\n\ndef scala_T(input_T):\n \"\"\"\n Helper function for building Inception layers. Transforms a list of numbers to a dictionary with ascending keys \n and 0 appended to the front. Ignores dictionary inputs. \n \n :param input_T: either list or dict\n :return: dictionary with ascending keys and 0 appended to front {0: 0, 1: realdata_1, 2: realdata_2, ...}\n \"\"\" \n if type(input_T) is list:\n # insert 0 into first index spot, such that the real data starts from index 1\n temp = [0]\n temp.extend(input_T)\n return dict(enumerate(temp))\n # if dictionary, return it back\n return input_T\n\n# COMMAND ----------\n\ndef Inception_Layer_v1(input_size, config, name_prefix=\"\"):\n \"\"\"\n Builds the inception-v1 submodule, a local network, that is stacked in the entire architecture when building\n the full model. \n \n :param input_size: dimensions of input coming into the local network\n :param config: ?\n :param name_prefix: string naming the layers of the particular local network\n :return: concat container object with all of the Sequential layers' ouput concatenated depthwise\n \"\"\" \n \n '''\n Concat is a container who concatenates the output of it's submodules along the provided dimension: all submodules \n take the same inputs, and their output is concatenated.\n '''\n concat = Concat(2)\n \n \"\"\"\n In the above code, we first create a container Sequential. Then add the layers into the container one by one. The \n order of the layers in the model is same with the insertion order. 
\n \n \"\"\"\n conv1 = Sequential()\n \n #Adding layes to the conv1 model we jus created\n \n #SpatialConvolution is a module that applies a 2D convolution over an input image.\n conv1.add(SpatialConvolution(input_size, config[1][1], 1, 1, 1, 1).set_name(name_prefix + \"1x1\"))\n conv1.add(ReLU(True).set_name(name_prefix + \"relu_1x1\"))\n concat.add(conv1)\n \n conv3 = Sequential()\n conv3.add(SpatialConvolution(input_size, config[2][1], 1, 1, 1, 1).set_name(name_prefix + \"3x3_reduce\"))\n conv3.add(ReLU(True).set_name(name_prefix + \"relu_3x3_reduce\"))\n conv3.add(SpatialConvolution(config[2][1], config[2][2], 3, 3, 1, 1, 1, 1).set_name(name_prefix + \"3x3\"))\n conv3.add(ReLU(True).set_name(name_prefix + \"relu_3x3\"))\n concat.add(conv3)\n \n \n conv5 = Sequential()\n conv5.add(SpatialConvolution(input_size,config[3][1], 1, 1, 1, 1).set_name(name_prefix + \"5x5_reduce\"))\n conv5.add(ReLU(True).set_name(name_prefix + \"relu_5x5_reduce\"))\n conv5.add(SpatialConvolution(config[3][1], config[3][2], 5, 5, 1, 1, 2, 2).set_name(name_prefix + \"5x5\"))\n conv5.add(ReLU(True).set_name(name_prefix + \"relu_5x5\"))\n concat.add(conv5)\n \n \n pool = Sequential()\n pool.add(SpatialMaxPooling(3, 3, 1, 1, 1, 1, to_ceil=True).set_name(name_prefix + \"pool\"))\n pool.add(SpatialConvolution(input_size, config[4][1], 1, 1, 1, 1).set_name(name_prefix + \"pool_proj\"))\n pool.add(ReLU(True).set_name(name_prefix + \"relu_pool_proj\"))\n concat.add(pool).set_name(name_prefix + \"output\")\n return concat\n\n# COMMAND ----------\n\ndef Inception_v1(class_num):\n model = Sequential()\n model.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False).set_name(\"conv1/7x7_s2\"))\n model.add(ReLU(True).set_name(\"conv1/relu_7x7\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name(\"pool1/3x3_s2\"))\n model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name(\"pool1/norm1\"))\n model.add(SpatialConvolution(64, 64, 1, 1, 1, 1).set_name(\"conv2/3x3_reduce\"))\n model.add(ReLU(True).set_name(\"conv2/relu_3x3_reduce\"))\n model.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1).set_name(\"conv2/3x3\"))\n model.add(ReLU(True).set_name(\"conv2/relu_3x3\"))\n model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name(\"conv2/norm2\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name(\"pool2/3x3_s2\"))\n model.add(Inception_Layer_v1(192, scala_T([scala_T([64]), scala_T(\n [96, 128]), scala_T([16, 32]), scala_T([32])]), \"inception_3a/\"))\n model.add(Inception_Layer_v1(256, scala_T([scala_T([128]), scala_T(\n [128, 192]), scala_T([32, 96]), scala_T([64])]), \"inception_3b/\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))\n model.add(Inception_Layer_v1(480, scala_T([scala_T([192]), scala_T(\n [96, 208]), scala_T([16, 48]), scala_T([64])]), \"inception_4a/\"))\n model.add(Inception_Layer_v1(512, scala_T([scala_T([160]), scala_T(\n [112, 224]), scala_T([24, 64]), scala_T([64])]), \"inception_4b/\"))\n model.add(Inception_Layer_v1(512, scala_T([scala_T([128]), scala_T(\n [128, 256]), scala_T([24, 64]), scala_T([64])]), \"inception_4c/\"))\n model.add(Inception_Layer_v1(512, scala_T([scala_T([112]), scala_T(\n [144, 288]), scala_T([32, 64]), scala_T([64])]), \"inception_4d/\"))\n model.add(Inception_Layer_v1(528, scala_T([scala_T([256]), scala_T(\n [160, 320]), scala_T([32, 128]), scala_T([128])]), \"inception_4e/\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))\n model.add(Inception_Layer_v1(832, scala_T([scala_T([256]), scala_T(\n [160, 320]), scala_T([32, 128]), 
scala_T([128])]), \"inception_5a/\"))\n model.add(Inception_Layer_v1(832, scala_T([scala_T([384]), scala_T(\n [192, 384]), scala_T([48, 128]), scala_T([128])]), \"inception_5b/\"))\n model.add(SpatialAveragePooling(7, 7, 1, 1).set_name(\"pool5/7x7_s1\"))\n model.add(Dropout(0.4).set_name(\"pool5/drop_7x7_s1\"))\n model.add(View([1024], num_input_dims=3))\n model.add(Linear(1024, class_num).set_name(\"loss3/classifier\"))\n model.add(LogSoftMax().set_name(\"loss3/loss3\"))\n model.reset()\n return model\n\n# COMMAND ----------\n\n# MAGIC %md ## Download the images from Amazon s3\n# MAGIC \n# MAGIC Make sure you have AWS command line interface to recursively download all images in s3 folder. You can set up aws cli from this link: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html\n\n# COMMAND ----------\n\nimport urllib\nfrom os import path\nMODEL_ROOT = \"/mnt/nobigdl/few-inceptionv1\"\n# dbutils.fs.mkdirs(MODEL_ROOT)\n#local_folder = DATA_ROOT + '/vegnonveg-samples'\ncheckpoint_path = path.join(MODEL_ROOT, \"checkpoints\")\n\n# if not path.isdir(local_folder):\n# os.system('aws s3 cp --recursive s3://vegnonveg/vegnonveg-fewsamples %s' % local_folder)\n\n# COMMAND ----------\n\n# MAGIC %md ## Save images and load to Spark as BigDL ImageFrame\n# MAGIC \n# MAGIC save data to parquet files and load to spark. Add label to each image.\n\n# COMMAND ----------\n\nDATA_ROOT = \"/data/worldbank/\"\nsample_path = DATA_ROOT + 'samples/'\n# sample_path = DATA_ROOT + 'imagenet_samples/'\n# sample_path = '/mnt/nobigdl/vegnonveg-samples100/'\nlabel_path = DATA_ROOT + 'vegnonveg-samples_labels.csv'\nparquet_path = DATA_ROOT + 'sample_parquet/'\n# dbutils.fs.rm(parquet_path, True)\n\n\n\n# COMMAND ----------\nsparkConf = create_spark_conf().setMaster(\"local[2]\").setAppName(\"test_validation\")\nsc = get_spark_context(sparkConf)\nsqlContext = SQLContext(sc)\n#intializa bigdl\ninit_engine()\nredire_spark_logs()\n\n# This only runs at the first time to generate parquet files\nimage_frame = NNImageReader.readImages(sample_path, sc, minParitions=32)\n# save dataframe to parquet files\n# image_frame.write.parquet(parquet_path)\n# ImageFrame.write_parquet(sample_path, parquet_path, sc, partition_num=32)\n\n# COMMAND ----------\n\n# load parquet file into spark cluster\nimport time\nstart = time.time()\nimage_raw_DF = sqlContext.read.parquet(parquet_path)\nend = time.time()\nprint(\"Load data time is: \" + str(end-start) + \" seconds\")\n\n# COMMAND ----------\n\n# create dict from item_name to label\nlabels_csv = pd.read_csv(label_path)\nunique_labels = labels_csv['item_name'].unique().tolist()\nlabel_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))\nclass_num = len(label_dict)\n\n# COMMAND ----------\n\n# create label dataframe\nlabel_raw_DF = sqlContext.read.format(\"com.databricks.spark.csv\")\\\n .option(\"header\", \"true\")\\\n .option(\"mode\", \"DROPMALFORMED\")\\\n .load(label_path)\nget_label = udf(lambda item_name: float(label_dict[item_name]), FloatType())\nchange_name = udf(lambda uid: uid+\".jpg\", StringType())\nlabelDF = label_raw_DF.withColumn(\"label\", get_label(\"item_name\")).withColumn(\"image_name\", change_name(\"obs_uid\"))\nlabelDF.show(truncate=False)\n\n# COMMAND ----------\n\nget_name = udf(lambda row: row[0].split(\"/\")[-1], StringType())\nimageDF = image_raw_DF.withColumn(\"image_name\", get_name(\"image\"))\nimageDF.show(truncate=False)\ndataDF = imageDF.join(labelDF, \"image_name\", \"inner\").select(\"image\", \"image_name\", 
\"label\")\ndataDF.show(truncate=False)\n\n# COMMAND ----------\n\n# MAGIC %md ## Do Train/Test Split and preprocessing\n# MAGIC Split Train/Test split with some ratio and preprocess images.\n\n# COMMAND ----------\n\ndata = dataDF.randomSplit([0.8, 0.2], seed=10)\ntrain_image = data[0]\nval_image = data[1]\ntype(train_image)\n\n\n# COMMAND ----------\n\nIMAGE_SIZE = 224\n\ntrain_transformer = NNImageTransformer(\n Pipeline([Resize(256, 256), RandomCrop(IMAGE_SIZE, IMAGE_SIZE),\n ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),\n MatToTensor()])\n).setInputCol(\"image\").setOutputCol(\"features\")\n\ntrain_data = train_transformer.transform(train_image)\n\n\n# COMMAND ----------\n\ntrain_size = train_image.count()\n\n# COMMAND ----------\n\nprint(train_size)\n\n\n# COMMAND ----------\n\nval_transformer = NNImageTransformer(\n Pipeline([Resize(256,256),\n CenterCrop(IMAGE_SIZE, IMAGE_SIZE),\n ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),\n MatToTensor(to_rgb=True)]\n )\n).setInputCol(\"image\").setOutputCol(\"features\")\n\n# COMMAND ----------\n\ntest_data = val_transformer.transform(val_image)\n\n# COMMAND ----------\n\n# MAGIC %md ## Define Model\n\n# COMMAND ----------\n\n# Network Parameters\nn_classes = len(label_dict)# item_name categories\nmodel = Inception_v1(n_classes)\n\n# COMMAND ----------\n\n# Parameters\nlearning_rate = 0.2\n# parameters for \nbatch_size = 2 #depends on dataset\nno_epochs = 1 #stop when validation accuracy doesn't improve anymore\n\n# COMMAND ----------\n\ncriterion = ClassNLLCriterion()\nclassifier = NNClassifier(model, criterion, [3,IMAGE_SIZE,IMAGE_SIZE])\\\n .setBatchSize(batch_size)\\\n .setMaxEpoch(no_epochs)\\\n .setLearningRate(learning_rate)\nstart = time.time()\ntrained_model = classifier.fit(train_data)\nend = time.time()\nprint(\"Optimization Done.\")\nprint(\"Training time is: %s seconds\" % str(end-start))\n# + dt.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# COMMAND ----------\n\nthroughput = train_size * no_epochs / (end - start)\nprint(\"Average throughput is: %s\" % str(throughput))\n\n# COMMAND ----------\n\n#predict\npredict_model = trained_model.setBatchSize(batch_size)\npredictionDF = predict_model.transform(test_data)\npredictionDF.show()\n\n# COMMAND ----------\n\nnum_preds = 1\npreds = predictionDF.select(\"label\", \"prediction\").take(num_preds)\nfor idx in range(num_preds):\n# true_label = str(map_to_label(map_groundtruth_label(truth[idx].label)))\n true_label = preds[idx][0]\n pred_label = preds[idx][1]\n print(idx + 1, ')', 'Ground Truth label: ', true_label)\n print(idx + 1, ')', 'Predicted label: ', pred_label)\n print(\"correct\" if true_label == pred_label else \"wrong\")\n\n# COMMAND ----------\n\n'''\nMeasure Test Accuracy w/Test Set\n'''\nevaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\naccuracy = evaluator.evaluate(predictionDF)\n# expected error should be less than 10%\nprint(\"Accuracy = %g \" % accuracy)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
TUM-AAS/motron | [
"2f8800d1d6e297fc4baab555ceb2d37f55841406"
] | [
"motion/components/structural.py"
] | [
"from typing import Tuple, Optional, List, Union\n\nimport torch\nfrom torch.nn import *\nimport math\n\ndef gmm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:\n return torch.einsum('ndo,bnd->bno', w, x)\n\n\nclass GraphLinear(Module):\n def __init__(self, in_features: int, out_features: int):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n def reset_parameters(self) -> None:\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n #stdv = 1. / math.sqrt(self.weight.size(1))\n #self.weight.data.uniform_(-stdv, stdv)\n #if self.learn_influence:\n # self.G.data.uniform_(-stdv, stdv)\n if len(self.weight.shape) == 3:\n self.weight.data[1:] = self.weight.data[0]\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input: torch.Tensor, g: Optional[torch.Tensor] = None) -> torch.Tensor:\n if g is None and self.learn_influence:\n g = torch.nn.functional.normalize(self.G, p=1., dim=1)\n #g = torch.softmax(self.G, dim=1)\n elif g is None:\n g = self.G\n w = self.weight[self.node_type_index]\n output = self.mm(input, w.transpose(-2, -1))\n if self.bias is not None:\n bias = self.bias[self.node_type_index]\n output += bias\n output = g.matmul(output)\n\n return output\n\n\nclass DynamicGraphLinear(GraphLinear):\n def __init__(self, num_node_types: int = 1, *args):\n super().__init__(*args)\n\n def forward(self, input: torch.Tensor, g: torch.Tensor = None, t: torch.Tensor = None) -> torch.Tensor:\n assert g is not None or t is not None, \"Either Graph Influence Matrix or Node Type Vector is needed\"\n if g is None:\n g = self.G[t][:, t]\n return super().forward(input, g)\n\n\n\nclass StaticGraphLinear(GraphLinear):\n def __init__(self, *args, bias: bool = True, num_nodes: int = None, graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_influence: bool = False, node_types: torch.Tensor = None, weights_per_type: bool = False):\n \"\"\"\n :param in_features: Size of each input sample\n :param out_features: Size of each output sample\n :param num_nodes: Number of nodes.\n :param graph_influence: Graph Influence Matrix\n :param learn_influence: If set to ``False``, the layer will not learn an the Graph Influence Matrix.\n :param node_types: List of Type for each node. 
All nodes of same type will share weights.\n Default: All nodes have unique types.\n :param weights_per_type: If set to ``False``, the layer will not learn weights for each node type.\n :param bias: If set to ``False``, the layer will not learn an additive bias.\n \"\"\"\n super().__init__(*args)\n\n self.learn_influence = learn_influence\n\n if graph_influence is not None:\n assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'\n num_nodes = graph_influence.shape[0]\n if type(graph_influence) is Parameter:\n assert learn_influence, \"Graph Influence Matrix is a Parameter, therefore it must be learnable.\"\n self.G = graph_influence\n elif learn_influence:\n self.G = Parameter(graph_influence)\n else:\n self.register_buffer('G', graph_influence)\n else:\n assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'\n eye_influence = torch.eye(num_nodes, num_nodes)\n if learn_influence:\n self.G = Parameter(eye_influence)\n else:\n self.register_buffer('G', eye_influence)\n\n if weights_per_type and node_types is None:\n node_types = torch.tensor([i for i in range(num_nodes)])\n if node_types is not None:\n num_node_types = node_types.max() + 1\n self.weight = Parameter(torch.Tensor(num_node_types, self.out_features, self.in_features))\n self.mm = gmm\n self.node_type_index = node_types\n else:\n self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))\n self.mm = torch.matmul\n self.node_type_index = None\n\n if bias:\n if node_types is not None:\n self.bias = Parameter(torch.Tensor(num_node_types, self.out_features))\n else:\n self.bias = Parameter(torch.Tensor(self.out_features))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n\n\nGraphLSTMState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]\n\nclass BN(Module):\n def __init__(self, num_nodes, num_features):\n super().__init__()\n self.num_nodes = num_nodes\n self.num_features = num_features\n self.bn = BatchNorm1d(num_nodes * num_features)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.bn(x.view(-1, self.num_nodes * self.num_features)).view(-1, self.num_nodes, self.num_features)\n\nclass LinearX(Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n return input\n\nclass StaticGraphLSTMCell_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,\n recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,\n weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):\n \"\"\"\n\n :param input_size: The number of expected features in the input `x`\n :param hidden_size: The number of features in the hidden state `h`\n :param num_nodes:\n :param dropout:\n :param recurrent_dropout:\n :param graph_influence:\n :param learn_influence:\n :param additive_graph_influence:\n :param learn_additive_graph_influence:\n :param node_types:\n :param weights_per_type:\n :param bias:\n \"\"\"\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.learn_influence = learn_influence\n self.learn_additive_graph_influence = learn_additive_graph_influence\n if graph_influence is not None:\n assert 
num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'\n num_nodes = graph_influence.shape[0]\n if type(graph_influence) is Parameter:\n assert learn_influence, \"Graph Influence Matrix is a Parameter, therefore it must be learnable.\"\n self.G = graph_influence\n elif learn_influence:\n self.G = Parameter(graph_influence)\n else:\n self.register_buffer('G', graph_influence)\n else:\n assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'\n eye_influence = torch.eye(num_nodes, num_nodes)\n if learn_influence:\n self.G = Parameter(eye_influence)\n else:\n self.register_buffer('G', eye_influence)\n\n if additive_graph_influence is not None:\n if type(additive_graph_influence) is Parameter:\n self.G_add = additive_graph_influence\n elif learn_additive_graph_influence:\n self.G_add = Parameter(additive_graph_influence)\n else:\n self.register_buffer('G_add', additive_graph_influence)\n else:\n if learn_additive_graph_influence:\n self.G_add = Parameter(torch.zeros_like(self.G))\n else:\n self.G_add = 0.\n\n if weights_per_type and node_types is None:\n node_types = torch.tensor([i for i in range(num_nodes)])\n if node_types is not None:\n num_node_types = node_types.max() + 1\n self.weight_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, hidden_size))\n self.mm = gmm\n self.register_buffer('node_type_index', node_types)\n else:\n self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))\n self.mm = torch.matmul\n self.register_buffer('node_type_index', None)\n\n if bias:\n if node_types is not None:\n self.bias_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))\n else:\n self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))\n else:\n self.bias_ih = None\n self.bias_hh = None\n\n self.clockwork = clockwork\n if clockwork:\n phase = torch.arange(0., hidden_size)\n phase = phase - phase.min()\n phase = (phase / phase.max()) * 8.\n phase += 1.\n phase = torch.floor(phase)\n self.register_buffer('phase', phase)\n else:\n phase = torch.ones(hidden_size)\n self.register_buffer('phase', phase)\n\n self.dropout = Dropout(dropout)\n self.r_dropout = Dropout(recurrent_dropout)\n\n self.num_nodes = num_nodes\n\n self.init_weights()\n\n def init_weights(self):\n stdv = 1.0 / math.sqrt(self.hidden_size)\n for weight in self.parameters():\n if weight is self.G:\n continue\n if weight is self.G_add:\n continue\n weight.data.uniform_(-stdv, stdv)\n if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:\n weight.data[1:] = weight.data[0]\n\n def forward(self, input: torch.Tensor, state: GraphLSTMState, t: int = 0) -> Tuple[torch.Tensor, GraphLSTMState]:\n hx, cx, gx = state\n if hx is None:\n hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)\n if cx is None:\n cx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)\n if gx is None and self.learn_influence:\n gx = torch.nn.functional.normalize(self.G, p=1., dim=1)\n #gx = torch.softmax(self.G, dim=1)\n elif gx is None:\n gx = self.G\n\n hx = self.r_dropout(hx)\n\n weight_ih = 
self.weight_ih[self.node_type_index]\n weight_hh = self.weight_hh[self.node_type_index]\n if self.bias_hh is not None:\n bias_hh = self.bias_hh[self.node_type_index]\n else:\n bias_hh = 0.\n\n c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(cx)\n\n gates = (self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) +\n self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh)\n gates = torch.matmul(gx, gates)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 2)\n\n ingate = torch.sigmoid(ingate)\n forgetgate = torch.sigmoid(forgetgate)\n cellgate = torch.tanh(cellgate)\n outgate = torch.sigmoid(outgate)\n\n cy = c_mask * ((forgetgate * cx) + (ingate * cellgate)) + (1 - c_mask) * cx\n hy = outgate * torch.tanh(cy)\n\n gx = gx + self.G_add\n if self.learn_influence or self.learn_additive_graph_influence:\n gx = torch.nn.functional.normalize(gx, p=1., dim=1)\n #gx = torch.softmax(gx, dim=1)\n\n return hy, (hy, cy, gx)\n\n\nclass StaticGraphLSTM_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):\n super().__init__()\n self.layers = ModuleList([StaticGraphLSTMCell_(input_size, hidden_size, **kwargs)]\n + [StaticGraphLSTMCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])\n self.dropout = Dropout(layer_dropout)\n\n def forward(self, input: torch.Tensor, states: Optional[List[GraphLSTMState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphLSTMState]]:\n if states is None:\n n: Optional[torch.Tensor] = None\n states = [(n, n, n)] * len(self.layers)\n\n output_states: List[GraphLSTMState] = []\n output = input\n i = 0\n for rnn_layer in self.layers:\n state = states[i]\n inputs = output.unbind(1)\n outputs: List[torch.Tensor] = []\n for t, input in enumerate(inputs):\n out, state = rnn_layer(input, state, t_i+t)\n outputs += [out]\n output = torch.stack(outputs, dim=1)\n output = self.dropout(output)\n output_states += [state]\n i += 1\n return output, output_states\n\n\ndef StaticGraphLSTM(*args, **kwargs):\n return torch.jit.script(StaticGraphLSTM_(*args, **kwargs))\n\nGraphGRUState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]\n\n\nclass StaticGraphGRUCell_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,\n recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,\n weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):\n \"\"\"\n\n :param input_size: The number of expected features in the input `x`\n :param hidden_size: The number of features in the hidden state `h`\n :param num_nodes:\n :param dropout:\n :param recurrent_dropout:\n :param graph_influence:\n :param learn_influence:\n :param additive_graph_influence:\n :param learn_additive_graph_influence:\n :param node_types:\n :param weights_per_type:\n :param bias:\n \"\"\"\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.learn_influence = learn_influence\n self.learn_additive_graph_influence = learn_additive_graph_influence\n if graph_influence is not None:\n assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'\n num_nodes = graph_influence.shape[0]\n if type(graph_influence) 
is Parameter:\n assert learn_influence, \"Graph Influence Matrix is a Parameter, therefore it must be learnable.\"\n self.G = graph_influence\n elif learn_influence:\n self.G = Parameter(graph_influence)\n else:\n self.register_buffer('G', graph_influence)\n else:\n assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'\n eye_influence = torch.eye(num_nodes, num_nodes)\n if learn_influence:\n self.G = Parameter(eye_influence)\n else:\n self.register_buffer('G', eye_influence)\n\n if additive_graph_influence is not None:\n if type(additive_graph_influence) is Parameter:\n self.G_add = additive_graph_influence\n elif learn_additive_graph_influence:\n self.G_add = Parameter(additive_graph_influence)\n else:\n self.register_buffer('G_add', additive_graph_influence)\n else:\n if learn_additive_graph_influence:\n self.G_add = Parameter(torch.zeros_like(self.G))\n else:\n self.G_add = 0.\n\n if weights_per_type and node_types is None:\n node_types = torch.tensor([i for i in range(num_nodes)])\n if node_types is not None:\n num_node_types = node_types.max() + 1\n self.weight_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, hidden_size))\n self.mm = gmm\n self.register_buffer('node_type_index', node_types)\n else:\n self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))\n self.mm = torch.matmul\n self.register_buffer('node_type_index', None)\n\n if bias:\n if node_types is not None:\n self.bias_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))\n else:\n self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))\n else:\n self.bias_ih = None\n self.bias_hh = None\n\n self.clockwork = clockwork\n if clockwork:\n phase = torch.arange(0., hidden_size)\n phase = phase - phase.min()\n phase = (phase / phase.max()) * 8.\n phase += 1.\n phase = torch.floor(phase)\n self.register_buffer('phase', phase)\n else:\n phase = torch.ones(hidden_size)\n self.register_buffer('phase', phase)\n\n self.dropout = Dropout(dropout)\n self.r_dropout = Dropout(recurrent_dropout)\n\n self.num_nodes = num_nodes\n\n self.init_weights()\n\n def init_weights(self):\n stdv = 1.0 / math.sqrt(self.hidden_size)\n for weight in self.parameters():\n if weight is self.G:\n continue\n if weight is self.G_add:\n continue\n weight.data.uniform_(-stdv, stdv)\n #if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:\n # weight.data[1:] = weight.data[0]\n\n def forward(self, input: torch.Tensor, state: GraphGRUState, t: int = 0) -> Tuple[torch.Tensor, GraphGRUState]:\n hx, gx = state\n if hx is None:\n hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)\n if gx is None and self.learn_influence:\n gx = torch.nn.functional.normalize(self.G, p=1., dim=1)\n #gx = torch.softmax(self.G, dim=1)\n elif gx is None:\n gx = self.G\n\n hx = self.r_dropout(hx)\n\n weight_ih = self.weight_ih[self.node_type_index]\n weight_hh = self.weight_hh[self.node_type_index]\n if self.bias_hh is not None:\n bias_hh = self.bias_hh[self.node_type_index]\n else:\n bias_hh = 0.\n if self.bias_ih is not None:\n bias_ih = self.bias_ih[self.node_type_index]\n else:\n bias_ih = 0.\n\n c_mask = (torch.remainder(torch.tensor(t + 1., 
device=input.device), self.phase) < 0.01).type_as(hx)\n\n x_results = self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) + bias_ih\n h_results = self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh\n x_results = torch.matmul(gx, x_results)\n h_results = torch.matmul(gx, h_results)\n\n i_r, i_z, i_n = x_results.chunk(3, 2)\n h_r, h_z, h_n = h_results.chunk(3, 2)\n\n r = torch.sigmoid(i_r + h_r)\n z = torch.sigmoid(i_z + h_z)\n n = torch.tanh(i_n + r * h_n)\n\n hy = n - torch.mul(n, z) + torch.mul(z, hx)\n hy = c_mask * hy + (1 - c_mask) * hx\n\n gx = gx + self.G_add\n if self.learn_influence or self.learn_additive_graph_influence:\n gx = torch.nn.functional.normalize(gx, p=1., dim=1)\n #gx = torch.softmax(gx, dim=1)\n\n return hy, (hy, gx)\n\n\nclass StaticGraphGRU_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):\n super().__init__()\n self.layers = ModuleList([StaticGraphGRUCell_(input_size, hidden_size, **kwargs)]\n + [StaticGraphGRUCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])\n self.dropout = Dropout(layer_dropout)\n\n def forward(self, input: torch.Tensor, states: Optional[List[GraphGRUState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphGRUState]]:\n if states is None:\n n: Optional[torch.Tensor] = None\n states = [(n, n)] * len(self.layers)\n\n output_states: List[GraphGRUState] = []\n output = input\n i = 0\n for rnn_layer in self.layers:\n state = states[i]\n inputs = output.unbind(1)\n outputs: List[torch.Tensor] = []\n for t, input in enumerate(inputs):\n out, state = rnn_layer(input, state, t_i+t)\n outputs += [out]\n output = torch.stack(outputs, dim=1)\n output = self.dropout(output)\n output_states += [state]\n i += 1\n return output, output_states\n\n\ndef StaticGraphGRU(*args, **kwargs):\n return torch.jit.script(StaticGraphGRU_(*args, **kwargs))\n"
] | [
[
"torch.nn.functional.normalize",
"torch.sigmoid",
"torch.ones",
"torch.floor",
"torch.Tensor",
"torch.zeros",
"torch.einsum",
"torch.eye",
"torch.zeros_like",
"torch.tensor",
"torch.tanh",
"torch.matmul",
"torch.mul",
"torch.arange",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EternalImmortal/Real-time-emotion-classifier-mini-Xception | [
"161f295d4be511f7e4cc700399ca37c48ea81f6a"
] | [
"src/utils/preprocessor.py"
] | [
"import numpy as np\n# from scipy.misc import imread, imresize\nfrom scipy import misc\n\n\ndef preprocess_input(x, v2=True):\n x = x.astype('float32')\n x = x / 255.0\n if v2:\n x = x - 0.5\n x = x * 2.0\n return x\n\n\ndef _imread(image_name):\n return misc.imread(image_name)\n\n\ndef _imresize(image_array, size):\n return misc.imresize(image_array, size)\n\n\ndef to_categorical(integer_classes, num_classes=2):\n integer_classes = np.asarray(integer_classes, dtype='int')\n num_samples = integer_classes.shape[0]\n categorical = np.zeros((num_samples, num_classes))\n categorical[np.arange(num_samples), integer_classes] = 1\n return categorical\n"
] | [
[
"scipy.misc.imresize",
"numpy.asarray",
"numpy.arange",
"scipy.misc.imread",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.10",
"0.16",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"1.2"
],
"tensorflow": []
}
] |
randommm/pytorch-lightning | [
"10e87b7b7acbbad8fc12ec5c07638ed093547ef8"
] | [
"pytorch_lightning/plugins/training_type/parallel.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, List, Optional\n\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.overrides.base import unwrap_lightning_module\nfrom pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment\nfrom pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin\nfrom pytorch_lightning.utilities import _XLA_AVAILABLE\nfrom pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp\n\n\nclass ParallelPlugin(TrainingTypePlugin, ABC):\n \"\"\" Plugin for training with multiple processes in parallel. \"\"\"\n\n def __init__(\n self,\n parallel_devices: Optional[List[torch.device]] = None,\n cluster_environment: Optional[ClusterEnvironment] = None,\n ):\n super().__init__()\n self.parallel_devices = parallel_devices\n self.cluster_environment = cluster_environment\n\n @property\n @abstractmethod\n def root_device(self) -> torch.device:\n raise NotImplementedError\n\n @property\n def on_gpu(self) -> bool:\n return self.root_device.type == \"cuda\" and torch.cuda.is_available()\n\n @property\n def on_tpu(self) -> bool:\n return self.root_device.type == \"xla\" and _XLA_AVAILABLE\n\n @property\n def lightning_module(self):\n return unwrap_lightning_module(self._model)\n\n @property\n def global_rank(self) -> int:\n return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0\n\n @property\n def local_rank(self) -> int:\n return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0\n\n @property\n def node_rank(self) -> int:\n return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0\n\n @property\n def world_size(self) -> int:\n return self.cluster_environment.world_size() if self.cluster_environment is not None else 1\n\n @property\n def is_global_zero(self) -> bool:\n return self.global_rank == 0\n\n @property\n def distributed_sampler_kwargs(self):\n distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)\n return distributed_sampler_kwargs\n\n def reconciliate_processes(self, trace: str):\n \"\"\"\n Function to re-conciliate processes on failure\n \"\"\"\n\n def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:\n \"\"\"Perform a all_gather on all processes \"\"\"\n return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)\n\n def reduce_boolean_decision(self, decision: bool) -> bool:\n decision = torch.tensor(int(decision), device=self.lightning_module.device)\n decision = self.reduce(decision, reduce_op=ReduceOp.SUM)\n decision = bool(decision == self.world_size)\n return decision\n\n @property\n def torch_distributed_backend(self):\n 
torch_backend = os.getenv(\"PL_TORCH_DISTRIBUTED_BACKEND\")\n if torch_backend is None:\n torch_backend = \"nccl\" if self.on_gpu else \"gloo\"\n return torch_backend\n\n @staticmethod\n def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':\n \"\"\"\n Add global batchnorm for a model spread across multiple GPUs and nodes.\n\n Override to synchronize batchnorm between specific process groups instead\n of the whole world or use a different sync_bn like `apex`'s version.\n\n Args:\n model: pointer to current :class:`LightningModule`.\n\n Return:\n LightningModule with batchnorm layers synchronized between process groups\n \"\"\"\n return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n @contextmanager\n def block_backward_sync(self):\n \"\"\"\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n Returns: context manager with sync behaviour off\n \"\"\"\n if isinstance(self.model, DistributedDataParallel):\n with self.model.no_sync():\n yield None\n else:\n yield None\n\n def teardown(self) -> None:\n # Un-reference the wrapper if any was used.\n # todo (tchaton): Add support for all plugins.\n if isinstance(self.model, DistributedDataParallel):\n self.model = self.lightning_module\n\n if self.on_gpu:\n # GPU teardown\n self.lightning_module.cpu()\n # clean up memory\n torch.cuda.empty_cache()\n"
] | [
[
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cremerlab/useless_expression | [
"a6020674f0ae73b4cc6173de60a0ea93016ee562",
"a6020674f0ae73b4cc6173de60a0ea93016ee562",
"a6020674f0ae73b4cc6173de60a0ea93016ee562",
"a6020674f0ae73b4cc6173de60a0ea93016ee562",
"a6020674f0ae73b4cc6173de60a0ea93016ee562"
] | [
"code/processing/growth_rates/2021-08-14_r1_DoubleKO_acetate/analysis.py",
"code/processing/growth_rates/2021-08-12_r1_DoubleKO_acetate/processing.py",
"code/processing/growth_rates/2021-07-27_r2_SingleKO_acetate/analysis.py",
"code/processing/growth_rates/2021-07-23_r1_SingleKO_glucose/analysis.py",
"code/processing/growth_rates/2021-08-20_r1_DoubleKO_acetate/analysis.py"
] | [
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.viz\nimport altair as alt\nimport altair_saver\nimport scipy.stats\ncolors, palette = futileprot.viz.altair_style()\n\n# Add metadata\nDATE = '2021-08-14'\nRUN_NO = 1\nSTRAINS = 'DoubleKO'\nMEDIUM = 'acetate'\n\n# Load the measurement data\ndata = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')\n\n# Perform a simplistic inference of the growth rate to get a sense of what\n# the result is.\n# data = data.groupby(['strain', 'elapsed_time_hr']).mean().reset_index()\ndata = data[['strain', 'elapsed_time_hr', 'od_600nm']]\n\n# For each strain, infer the growth rate and compute the fit\nlayout = False\nfor g, d in data.groupby(['strain']):\n time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)\n\n # Perform the regression\n popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))\n slope, intercept, err = popt[0], popt[1], popt[-1]\n print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')\n # Compute the fit\n fit = np.exp(intercept + slope * time_range)\n fit_df = pd.DataFrame([])\n fit_df['elapsed_time_hr'] = time_range\n fit_df['od_600nm'] = fit\n\n # Generate the plot\n points = alt.Chart(\n data=d, \n width=300, \n height=150\n ).mark_point(\n color=colors['primary_blue']\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u]',\n scale=alt.Scale(type='log'))\n )\n\n fit = alt.Chart(data=fit_df,\n title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'\n ).mark_line( \n color=colors['primary_blue']\n ).encode(\n x='elapsed_time_hr:Q',\n y='od_600nm:Q'\n )\n merge = points + fit\n if layout == False:\n layout = merge\n else: \n layout &= merge\n\naltair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',\n scale_factor=2)\n# %%\n",
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.io\nimport futileprot.viz\nimport altair as alt \nimport altair_saver\ncolors, palette = futileprot.viz.altair_style()\n\n# Define experiment parameters\nDATE = '2021-08-12'\nSTRAINS = 'DoubleKO'\nMEDIUM = 'acetate'\nRUN_NO = 1\nROOT = '../../../..'\nSKIPROWS = 36 \nOD_BOUNDS = [0.03, 0.18]\n\n# Add the well identifiers\nMAP = {'GC073': ['C3', 'D3', 'E3'],\n 'GC069': ['C4', 'D4', 'E4'],\n 'GC075': ['C5', 'D5', 'E5'],\n 'GC070': ['C6', 'D6', 'E6'],\n 'GC065': ['C7', 'D7', 'E7'],\n 'GC098': ['C8', 'D8', 'E8'],\n 'GC074': ['C9', 'D9', 'E9'],\n 'GC097': ['C10', 'D10' ,'E10'],\n 'GC084': ['F3', 'F4', 'F5'],\n 'GC106': ['F6', 'F7', 'F8'],\n 'GC100': ['F9', 'F10', 'F11']} \n\n# Generate a list of all valid wells\nwells = [f'{letter}{number}' for letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] for number in np.arange(1,13)]\n\n# Load the data\ndata = pd.read_csv(f'{ROOT}/data/growth_rates/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}/{DATE}_r{RUN_NO}.csv', \n skiprows=SKIPROWS)\n\n# Melt and drop unnecessary stuff\nmelted = data.melt(id_vars=['Time'], var_name='well', value_name='od_600nm')\nmelted = melted.loc[melted['well'].isin(wells)]\nmelted.dropna(inplace=True)\n\n# Add strain identifier and replicates\nmelted['strain'] = 'blank'\nmelted['replicate'] = 0\nfor strain, wells in MAP.items():\n for idx, well in enumerate(wells):\n melted.loc[melted['well']==well, 'strain'] = strain\n melted.loc[melted['well']==well, 'replicate'] = idx + 1\n\n# Add information regarding date and growth medium\nmelted['growth_medium'] = MEDIUM\nmelted['date'] = DATE\nmelted['run_number'] = RUN_NO\n\n# Convert time to elapsed time\nmelted['time_sec'] = pd.to_timedelta(melted['Time'].values)\nmelted['time_sec'] = melted['time_sec'].dt.total_seconds()\nmelted['elapsed_time_hr'] = (melted['time_sec'] - melted['time_sec'].min())/3600\n\n# Drop unnecessary Time columns\nmelted.drop(columns=['Time', 'time_sec'], inplace=True)\n\n\n# Reformat blank value as average eentry per time\nmeasurement = []\nfor g, d in melted.groupby(['elapsed_time_hr']):\n d = d.copy()\n avg_blank = d[d['strain']=='blank']\n meas = d[d['strain']!='blank']\n meas['avg_blank_value'] = avg_blank['od_600nm'].mean()\n measurement.append(meas)\nmeasurement = pd.concat(measurement, sort=False)\nmeasurement.rename(columns={'strain':'identifier'}, inplace=True)\n\n# Add shorthand strain information and class identifier\nstrain_shorthand, _, strain_class = futileprot.io.standardize_strains(measurement['identifier'].values)\nmeasurement['strain'] = strain_shorthand\nmeasurement['class'] = strain_class\n\n# measurement = pd.concat(measurements, sort=False)\n# Save to disk\nmeasurement.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_measurements.csv', index=False)\n\n#%%\n# Perform the blank subtraction\nmeasurement['od_600nm_subtracted'] = measurement['od_600nm'].values - measurement['avg_blank_value'].values\n\n# Given truncation, recalculated elapsed time and save truncated data\ntrunc = []\nfor g, d in measurement.groupby(['strain', 'replicate']):\n d = d.copy()\n d = d[(d['od_600nm_subtracted'] >= OD_BOUNDS[0]) & \n (d['od_600nm_subtracted'] <= OD_BOUNDS[1])]\n d['elapsed_time_hr'] -= d['elapsed_time_hr'].min()\n trunc.append(d)\ntrunc = pd.concat(trunc, sort=False)\ntrunc = trunc[['strain', 'elapsed_time_hr', \n 'od_600nm_subtracted', 'replicate', 'growth_medium', \n 'date', 'run_number', 'identifier', 'class']]\ntrunc.rename(columns={'od_600nm_subtracted':'od_600nm',\n 
'replicate':'technical_replicate'}, inplace=True)\ntrunc.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv', index=False)\n\n# %%\n# Generate a figure of all of the raw traces\nraw_traces = alt.Chart(\n data=measurement, \n width=400, \n height=200\n ).mark_line(\n point=True,\n opacity=0.75\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u.]'),\n color=alt.Color('replicate:N', title='technical replicate')\n ).facet(\n row='strain'\n )\naltair_saver.save(raw_traces, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_raw_traces.png',\n scale_factor=2)\n\n# %%\n",
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.viz\nimport altair as alt\nimport altair_saver\nimport scipy.stats\ncolors, palette = futileprot.viz.altair_style()\n\n# Add metadata\nDATE = '2021-07-27'\nRUN_NO = 2\nSTRAINS = 'SingleKO'\nMEDIUM = 'acetate'\n\n# Load the measurement data\ndata = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')\n\n# Perform a simplistic inference of the growth rate to get a sense of what\n# the result is.\n# data = data.groupby(['strain', 'elapsed_time_hr']).mean().reset_index()\ndata = data[['strain', 'elapsed_time_hr', 'od_600nm']]\n\n# For each strain, infer the growth rate and compute the fit\nlayout = False\nfor g, d in data.groupby(['strain']):\n time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)\n\n # Perform the regression\n popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))\n slope, intercept, err = popt[0], popt[1], popt[-1]\n print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')\n # Compute the fit\n fit = np.exp(intercept + slope * time_range)\n fit_df = pd.DataFrame([])\n fit_df['elapsed_time_hr'] = time_range\n fit_df['od_600nm'] = fit\n\n # Generate the plot\n points = alt.Chart(\n data=d, \n width=300, \n height=150\n ).mark_point(\n color=colors['primary_blue']\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u]',\n scale=alt.Scale(type='log'))\n )\n\n fit = alt.Chart(data=fit_df,\n title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'\n ).mark_line( \n color=colors['primary_blue']\n ).encode(\n x='elapsed_time_hr:Q',\n y='od_600nm:Q'\n )\n merge = points + fit\n if layout == False:\n layout = merge\n else: \n layout &= merge\n\naltair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',\n scale_factor=2)\n# %%\n",
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.viz\nimport altair as alt\nimport altair_saver\nimport scipy.stats\ncolors, palette = futileprot.viz.altair_style()\n\n# Add metadata\nDATE = '2021-07-23'\nRUN_NO = 1\nSTRAINS = 'SingleKO'\nMEDIUM = 'glucose'\n\n# Load the measurement data\ndata = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')\n\n# Perform a simplistic inference of the growth rate to get a sense of what\n# the result is.\n# data = data.groupby(['strain', 'elapsed_time_hr']).mean().reset_index()\ndata = data[['strain', 'elapsed_time_hr', 'od_600nm']]\n\n# For each strain, infer the growth rate and compute the fit\nlayout = False\ntime_range = np.linspace(0, 3, 10)\nfor g, d in data.groupby(['strain']):\n\n # Perform the regression\n popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))\n slope, intercept, err = popt[0], popt[1], popt[-1]\n print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')\n # Compute the fit\n fit = np.exp(intercept + slope * time_range)\n fit_df = pd.DataFrame([])\n fit_df['elapsed_time_hr'] = time_range\n fit_df['od_600nm'] = fit\n\n # Generate the plot\n points = alt.Chart(\n data=d, \n width=300, \n height=150\n ).mark_point(\n color=colors['primary_blue']\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u]',\n scale=alt.Scale(type='log'))\n )\n\n fit = alt.Chart(data=fit_df,\n title=f'{g}, {MEDIUM}: µ = {slope:0.2f} ± {err:0.2f} per hr.'\n ).mark_line( \n color=colors['primary_blue']\n ).encode(\n x='elapsed_time_hr:Q',\n y='od_600nm:Q'\n )\n merge = points + fit\n if layout == False:\n layout = merge\n else: \n layout &= merge\n\naltair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',\n scale_factor=2)\n# %%\n",
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.viz\nimport altair as alt\nimport altair_saver\nimport scipy.stats\ncolors, palette = futileprot.viz.altair_style()\n\n# Add metadata\nDATE = '2021-08-20'\nRUN_NO = 1\nSTRAINS = 'DoubleKO'\nMEDIUM = 'acetate'\n\n# Load the measurement data\ndata = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')\n\n# Perform a simplistic inference of the growth rate to get a sense of what\n# the result is.\ndata = data[['strain', 'elapsed_time_hr', 'od_600nm']]\n\n# For each strain, infer the growth rate and compute the fit\nlayout = False\nfor g, d in data.groupby(['strain']):\n time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)\n\n # Perform the regression\n popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))\n slope, intercept, err = popt[0], popt[1], popt[-1]\n print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')\n # Compute the fit\n fit = np.exp(intercept + slope * time_range)\n fit_df = pd.DataFrame([])\n fit_df['elapsed_time_hr'] = time_range\n fit_df['od_600nm'] = fit\n\n # Generate the plot\n points = alt.Chart(\n data=d, \n width=300, \n height=150\n ).mark_point(\n color=colors['primary_blue']\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u]',\n scale=alt.Scale(type='log'))\n )\n\n fit = alt.Chart(data=fit_df,\n title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'\n ).mark_line( \n color=colors['primary_blue']\n ).encode(\n x='elapsed_time_hr:Q',\n y='od_600nm:Q'\n )\n merge = points + fit\n if layout == False:\n layout = merge\n else: \n layout &= merge\n\naltair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',\n scale_factor=2)\n# %%\n"
] | [
[
"numpy.exp",
"numpy.log",
"pandas.read_csv",
"pandas.DataFrame"
],
[
"pandas.to_timedelta",
"numpy.arange",
"pandas.concat",
"pandas.read_csv"
],
[
"numpy.exp",
"numpy.log",
"pandas.read_csv",
"pandas.DataFrame"
],
[
"numpy.log",
"pandas.read_csv",
"numpy.linspace",
"pandas.DataFrame",
"numpy.exp"
],
[
"numpy.exp",
"numpy.log",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Fred159/3D-Perception | [
"a23a42dc19d0a38e48beb5e7c0725e6d14c542f3"
] | [
"sensor_stick/src/sensor_stick/features.py"
] | [
"import matplotlib.colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pcl_helper import *\n\nprint('run features.py')\n\n\ndef rgb_to_hsv(rgb_list):\n rgb_normalized = [1.0 * rgb_list[0] / 255, 1.0 * rgb_list[1] / 255, 1.0 * rgb_list[2] / 255]\n hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]\n return hsv_normalized\n\n\ndef compute_color_histograms(cloud, using_hsv=False):\n # Compute histograms for the clusters\n point_colors_list = []\n\n # Step through each point in the point cloud\n for point in pc2.read_points(cloud, skip_nans=True):\n rgb_list = float_to_rgb(point[3])\n if using_hsv:\n point_colors_list.append(rgb_to_hsv(rgb_list) * 255)\n else:\n point_colors_list.append(rgb_list)\n\n # Populate lists with color values\n channel_1_vals = []\n channel_2_vals = []\n channel_3_vals = []\n\n for color in point_colors_list:\n channel_1_vals.append(color[0])\n channel_2_vals.append(color[1])\n channel_3_vals.append(color[2])\n\n # TODO: Compute histograms\n nbins = 32\n bins_range = (0, 256)\n # TODO: Concatenate and normalize the histograms\n channel_1_hist = np.histogram(channel_1_vals, bins=nbins, range=bins_range)\n channel_2_hist = np.histogram(channel_2_vals, bins=nbins, range=bins_range)\n channel_3_hist = np.histogram(channel_3_vals, bins=nbins, range=bins_range)\n hist_features = np.concatenate((channel_1_hist[0], channel_2_hist[0], channel_3_hist[0])).astype(np.float64)\n normed_features = hist_features / np.sum(hist_features)\n # Generate random features for demo mode.\n # Replace normed_features with your feature vectorl\n # normed_features = np.random.random(96)\n # print('run normed_features finished')\n return normed_features\n\n\ndef compute_normal_histograms(normal_cloud):\n norm_x_vals = []\n norm_y_vals = []\n norm_z_vals = []\n nbins = 32\n bins_range = (-1, 1)\n\n for norm_component in pc2.read_points(normal_cloud,\n field_names=('normal_x', 'normal_y', 'normal_z'),\n skip_nans=True):\n norm_x_vals.append(norm_component[0])\n norm_y_vals.append(norm_component[1])\n norm_z_vals.append(norm_component[2])\n\n # TODO: Compute histograms of normal values (just like with color)\n norm_x_hist = np.histogram(norm_x_vals, bins=nbins, range=bins_range)\n norm_y_hist = np.histogram(norm_y_vals, bins=nbins, range=bins_range)\n norm_z_hist = np.histogram(norm_z_vals, bins=nbins, range=bins_range)\n # TODO: Concatenate and normalize the histograms\n norm_hist_features = np.concatenate((norm_x_hist[0], norm_y_hist[0], norm_z_hist[0])).astype(np.float64)\n normed_features = norm_hist_features / np.sum(norm_hist_features)\n # Generate random features for demo mode.\n # Replace normed_features with your feature vector\n # normed_feature = np.random.random(96)\n # print('run compute_normal_histograms function finished')\n return normed_features\n\n"
] | [
[
"numpy.concatenate",
"numpy.histogram",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Complicateddd/Complicateddd-ROITransformer | [
"2adfbf98892d569c460d100c6e2169c5fa3a9b82"
] | [
"submit.py"
] | [
"from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections\nimport mmcv\nfrom mmcv import Config\nfrom mmdet.datasets import get_dataset\nimport cv2\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nimport DOTA_devkit.polyiou as polyiou\nimport math\nimport pdb\n\ndef py_cpu_nms_poly_fast_np(dets, thresh):\n obbs = dets[:, 0:-1]\n x1 = np.min(obbs[:, 0::2], axis=1)\n y1 = np.min(obbs[:, 1::2], axis=1)\n x2 = np.max(obbs[:, 0::2], axis=1)\n y2 = np.max(obbs[:, 1::2], axis=1)\n scores = dets[:, 8]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n polys = []\n for i in range(len(dets)):\n tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],\n dets[i][2], dets[i][3],\n dets[i][4], dets[i][5],\n dets[i][6], dets[i][7]])\n polys.append(tm_polygon)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n ovr = []\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n hbb_inter = w * h\n hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)\n h_inds = np.where(hbb_ovr > 0)[0]\n tmp_order = order[h_inds + 1]\n for j in range(tmp_order.size):\n iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])\n hbb_ovr[h_inds[j]] = iou\n\n try:\n if math.isnan(ovr[0]):\n pdb.set_trace()\n except:\n pass\n inds = np.where(hbb_ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\nclass DetectorModel():\n def __init__(self,\n config_file,\n checkpoint_file):\n # init RoITransformer\n self.config_file = config_file\n self.checkpoint_file = checkpoint_file\n self.cfg = Config.fromfile(self.config_file)\n self.data_test = self.cfg.data['test']\n self.dataset = get_dataset(self.data_test)\n # self.classnames = self.dataset.CLASSES\n self.classnames = ('1', '2', '3', '4', '5')\n\n self.model = init_detector(config_file, checkpoint_file, device='cuda:0')\n\n def inference_single(self, imagname):\n img = mmcv.imread(imagname)\n height, width, channel = img.shape\n # slide_h, slide_w = slide_size\n # hn, wn = chip_size\n # TODO: check the corner case\n # import pdb; pdb.set_trace()\n total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]\n # print(self.classnames)\n\n chip_detections = inference_detector(self.model, img)\n # nms\n for i in range(5):\n keep = py_cpu_nms_poly_fast_np(chip_detections[i], 0.1)\n chip_detections[i] = chip_detections[i][keep]\n return chip_detections\n\n def inference_single_vis(self, srcpath, dstpath):\n detections = self.inference_single(srcpath)\n print(detections)\n img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.3)\n cv2.imwrite(dstpath, img)\n\nif __name__ == '__main__':\n import tqdm\n roitransformer = DetectorModel(r'configs/Huojianjun/faster_rcnn_RoITrans_r101x_fpn_1x_anchors_augs_augfpn.py',\n r'work_dirs/faster_rcnn_RoITrans_r101_all_aug_rote_1333_crop_rote/epoch_278.pth')\n\n # roitransformer.inference_single_vis(r'demo/48.tif',\n # r'demo/48_out.tif',\n # (1024, 1024),\n # (1024, 1024))\n\n threshold=0.0001\n class_names=('1', '2', '3', '4', '5')\n import os\n path=\"/media/ubuntu/data/huojianjun/科目四/科目四/test2\"\n file_img_name=os.listdir(path)\n\n result_file=open(\"./科目四_莘莘学子.txt\",'w')\n\n # print(file_img_name)\n count=0\n def filer(x):\n x=int(x)\n if x>1024:\n return 1024\n if x<0:\n return 0\n else:\n return x\n\n for name in 
tqdm.tqdm(file_img_name):\n # count+=1\n path_img=os.path.join(path,name)\n detection_result=roitransformer.inference_single(path_img)\n for j, name_cls in enumerate(class_names):\n dets = detection_result[j]\n for det in dets:\n bbox = det[:8]\n score = round(det[-1],2)\n if score < threshold:\n continue\n bbox = list(map(filer, bbox))\n # print(bbox)\n # print(score)\n # print(name_cls)\n result_file.writelines(name+\" \"+str(name_cls)+\" \"+str(score)+\" \"\n +str(bbox[0])\n +\" \"+str(bbox[1])+\" \"+str(bbox[2])+\" \"+str(bbox[3])\n +\" \"+str(bbox[4])+\" \"+str(bbox[5])+\" \"+str(bbox[6])\n +\" \"+str(bbox[7]))\n result_file.writelines(\"\\n\")\n count+=1\n # if name==\"3.tif\":\n # print(count)\n # if count==3:\n\n # break\n\n # print(path_img)\n\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.min",
"numpy.max",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Isaac-JenkinsRA/Stone-Soup | [
"54c9c7dca8162dadaa58e85933cf10a0f86ce1e1"
] | [
"stonesoup/predictor/tests/test_kalman.py"
] | [
"# coding: utf-8\nimport datetime\nimport pytest\nimport numpy as np\n\nfrom ...models.transition.linear import ConstantVelocity\nfrom ...predictor.kalman import (\n KalmanPredictor, ExtendedKalmanPredictor, UnscentedKalmanPredictor,\n SqrtKalmanPredictor)\nfrom ...types.prediction import GaussianStatePrediction\nfrom ...types.state import GaussianState, SqrtGaussianState\nfrom ...types.track import Track\n\n\[email protected](\n \"PredictorClass, transition_model, prior_mean, prior_covar\",\n [\n ( # Standard Kalman\n KalmanPredictor,\n ConstantVelocity(noise_diff_coeff=0.1),\n np.array([[-6.45], [0.7]]),\n np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n ),\n ( # Extended Kalman\n ExtendedKalmanPredictor,\n ConstantVelocity(noise_diff_coeff=0.1),\n np.array([[-6.45], [0.7]]),\n np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n ),\n ( # Unscented Kalman\n UnscentedKalmanPredictor,\n ConstantVelocity(noise_diff_coeff=0.1),\n np.array([[-6.45], [0.7]]),\n np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n )\n ],\n ids=[\"standard\", \"extended\", \"unscented\"]\n)\ndef test_kalman(PredictorClass, transition_model,\n prior_mean, prior_covar):\n\n # Define time related variables\n timestamp = datetime.datetime.now()\n timediff = 2 # 2sec\n new_timestamp = timestamp + datetime.timedelta(seconds=timediff)\n time_interval = new_timestamp - timestamp\n\n # Define prior state\n prior = GaussianState(prior_mean,\n prior_covar,\n timestamp=timestamp)\n\n transition_model_matrix = transition_model.matrix(time_interval=time_interval)\n transition_model_covar = transition_model.covar(time_interval=time_interval)\n # Calculate evaluation variables\n eval_prediction = GaussianStatePrediction(\n transition_model_matrix @ prior.mean,\n [email protected]@transition_model_matrix.T + transition_model_covar)\n\n # Initialise a kalman predictor\n predictor = PredictorClass(transition_model=transition_model)\n\n # Perform and assert state prediction\n prediction = predictor.predict(prior=prior,\n timestamp=new_timestamp)\n\n assert np.allclose(prediction.mean,\n eval_prediction.mean, 0, atol=1.e-14)\n assert np.allclose(prediction.covar,\n eval_prediction.covar, 0, atol=1.e-14)\n assert prediction.timestamp == new_timestamp\n\n # TODO: Test with Control Model\n\n\ndef test_lru_cache():\n predictor = KalmanPredictor(ConstantVelocity(noise_diff_coeff=0))\n\n timestamp = datetime.datetime.now()\n state = GaussianState([[0.], [1.]], np.diag([1., 1.]), timestamp)\n track = Track([state])\n\n prediction_time = timestamp + datetime.timedelta(seconds=1)\n prediction1 = predictor.predict(track, prediction_time)\n assert np.array_equal(prediction1.state_vector, np.array([[1.], [1.]]))\n\n prediction2 = predictor.predict(track, prediction_time)\n assert prediction2 is prediction1\n\n track.append(GaussianState([[1.], [1.]], np.diag([1., 1.]), prediction_time))\n prediction3 = predictor.predict(track, prediction_time)\n assert prediction3 is not prediction1\n\n\ndef test_sqrt_kalman():\n # Define time related variables\n timestamp = datetime.datetime.now()\n timediff = 2 # 2sec\n new_timestamp = timestamp + datetime.timedelta(seconds=timediff)\n\n # Define prior state\n prior_mean = np.array([[-6.45], [0.7]])\n prior_covar = np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n prior = GaussianState(prior_mean,\n prior_covar,\n timestamp=timestamp)\n sqrt_prior_covar = np.linalg.cholesky(prior_covar)\n sqrt_prior = SqrtGaussianState(prior_mean, sqrt_prior_covar,\n timestamp=timestamp)\n\n transition_model = 
ConstantVelocity(noise_diff_coeff=0.1)\n\n # Initialise a kalman predictor\n predictor = KalmanPredictor(transition_model=transition_model)\n sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model)\n # Can swap out this method\n sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model, qr_method=True)\n\n # Perform and assert state prediction\n prediction = predictor.predict(prior=prior, timestamp=new_timestamp)\n sqrt_prediction = sqrt_predictor.predict(prior=sqrt_prior,\n timestamp=new_timestamp)\n\n assert np.allclose(prediction.mean, sqrt_prediction.mean, 0, atol=1.e-14)\n assert np.allclose(prediction.covar,\n sqrt_prediction.sqrt_covar@sqrt_prediction.sqrt_covar.T, 0,\n atol=1.e-14)\n assert np.allclose(prediction.covar, sqrt_prediction.covar, 0, atol=1.e-14)\n assert prediction.timestamp == sqrt_prediction.timestamp\n"
] | [
[
"numpy.diag",
"numpy.array",
"numpy.allclose",
"numpy.linalg.cholesky"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jials/CS4243-project | [
"100d7ed1cbd379de3b2e65c16e037bf4afec0fb1"
] | [
"changeDetection.py"
] | [
"import numpy as np\nimport cv2\nimport imageMarker\n\nlucas_kanade_params = dict(\n winSize= (4, 4),\n maxLevel= 3, #level of pyramids used\n criteria= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n)\n\ndef mark_features_on_all_images(images, features_coordinates):\n marked_images = []\n marked_frame_coordinates = []\n\n last_gs_img = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)\n\n p0 = []\n for coordinate in features_coordinates:\n p0.append([coordinate,])\n p0 = np.float32(p0)\n\n mask = np.zeros_like(images[0])\n status_arr = []\n for fr in range(1, len(images)):\n marked_coordinates = []\n if images[fr] is None:\n print('change detection problematic frame', fr)\n print('len of given images', len(images))\n frame = images[fr].copy()\n gs_img = cv2.cvtColor(images[fr], cv2.COLOR_BGR2GRAY)\n\n p1, st, err = cv2.calcOpticalFlowPyrLK(last_gs_img, gs_img, p0, None, **lucas_kanade_params)\n\n status_arr.append(st)\n\n if p1 is None:\n marked_images.append(frame)\n marked_frame_coordinates.append(features_coordinates if len(images) == 1 else marked_frame_coordinates[-1])\n continue\n\n new_points = []\n for index in range(len(p1)):\n if st[index] == 1:\n new_points.append(p1[index])\n else:\n new_points.append(p0[index])\n new_points = np.array(new_points)\n\n for index, point in enumerate(new_points):\n x, y = point.ravel()\n marked_coordinates.append([x,y])\n imageMarker.mark_image_at_point(frame, int(y), int(x), 9, imageMarker.colors[index])\n marked_frame_coordinates.append(marked_coordinates)\n\n img = cv2.add(frame,mask)\n marked_images.append(img)\n\n # update last frame and point\n last_gs_img = gs_img.copy()\n p0 = new_points.reshape(-1,1,2)\n\n return marked_images, marked_frame_coordinates, status_arr\n"
] | [
[
"numpy.array",
"numpy.zeros_like",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zmlabe/ModelBiasesANN | [
"df28842a8594870db3282682b1261af5058af832",
"df28842a8594870db3282682b1261af5058af832",
"df28842a8594870db3282682b1261af5058af832"
] | [
"Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestWarmthGFDL.py",
"Dark_Scripts/ANN_AllAnalysis_ClimateModels_v4-LINEAR-SMOOTHER_RandomNoise.py",
"Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-StandarizeMethodsSeparate.py"
] | [
"\"\"\"\nANN for evaluating model biases, differences, and other thresholds using \nexplainable AI (add warmth/cool GFDL-CM3 model only)\n\nReference : Barnes et al. [2020, JAMES]\nAuthor : Zachary M. Labe\nDate : 20 July 2021\nVersion : 4 - subsamples random weight class (#8) for mmmean\n\"\"\"\n\n### Import packages\nimport sys\nimport math\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Dense, Activation\nfrom keras import regularizers\nfrom keras import metrics\nfrom keras import optimizers\nfrom keras.models import Sequential\nimport tensorflow.keras as keras\nimport tensorflow as tf\nimport pandas as pd\nimport random\nimport scipy.stats as stats\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport palettable.cubehelix as cm\nimport cmocean as cmocean\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport calc_Stats as dSS\nimport calc_LRPclass as LRP\nimport innvestigate\nfrom sklearn.metrics import accuracy_score\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n### Prevent tensorflow 2.+ deprecation warnings\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n### LRP param\nDEFAULT_NUM_BWO_ITERATIONS = 200\nDEFAULT_BWO_LEARNING_RATE = .001\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n###############################################################################\n###############################################################################\n###############################################################################\n### Data preliminaries \ndirectorydataLLL = '/Users/zlabe/Data/LENS/monthly'\ndirectorydataENS = '/Users/zlabe/Data/SMILE/'\ndirectorydataBB = '/Users/zlabe/Data/BEST/'\ndirectorydataEE = '/Users/zlabe/Data/ERA5/'\ndirectoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'\n###############################################################################\n###############################################################################\nmodelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',\n 'GFDL_CM3','GFDL_ESM2M','lens']\ndatasetsingle = ['SMILE']\ndataset_obs = 'ERA5BE'\nseasons = ['annual']\nvariq = 'T2M'\nreg_name = 'LowerArctic'\ntimeper = 'historical'\n###############################################################################\n###############################################################################\n# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',\n# 'GFDL_ESM2M','lens']\n# pickSMILE = ['CCCma_canesm2','MPI','lens']\npickSMILE = []\nif len(pickSMILE) >= 1:\n lenOfPicks = len(pickSMILE)\nelse:\n lenOfPicks = len(modelGCMs)\n###############################################################################\n###############################################################################\nland_only = False\nocean_only = False\nif land_only == True:\n maskNoiseClass = 'land'\nelif ocean_only == True:\n maskNoiseClass = 'ocean'\nelse:\n maskNoiseClass = 'none'\n\n###############################################################################\n###############################################################################\nrm_merid_mean = False\nrm_annual_mean = 
False\n###############################################################################\n###############################################################################\nrm_ensemble_mean = False\nrm_observational_mean = False\n###############################################################################\n###############################################################################\ncalculate_anomalies = False\nif calculate_anomalies == True:\n if timeper == 'historical': \n baseline = np.arange(1951,1980+1,1)\n elif timeper == 'future':\n baseline = np.arange(2021,2050+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n###############################################################################\n###############################################################################\nwindow = 0\nensTypeExperi = 'ENS'\n# shuffletype = 'TIMEENS'\n# shuffletype = 'ALLENSRAND'\n# shuffletype = 'ALLENSRANDrmmean'\nshuffletype = 'RANDGAUSS'\nsizeOfTwin = 4 # name of experiment for adding noise class #8\nif sizeOfTwin > 0:\n sizeOfTwinq = 1\nelse:\n sizeOfTwinq = sizeOfTwin\n###############################################################################\n###############################################################################\nfactorObs = 10 # factor to add to obs\n###############################################################################\n###############################################################################\nif ensTypeExperi == 'ENS':\n if window == 0:\n rm_standard_dev = False\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950+window,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020+window,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\nelif ensTypeExperi == 'GCM':\n if window == 0:\n rm_standard_dev = False\n yearsall = np.arange(1950,2019+1,1)\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\n###############################################################################\n###############################################################################\nnumOfEns = 16\nlensalso = True\nif len(pickSMILE) == 0:\n if modelGCMs[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nelif len(pickSMILE) != 0:\n if pickSMILE[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nlentime = len(yearsall)\n###############################################################################\n###############################################################################\nravelyearsbinary = False\nravelbinary = False\nnum_of_class = lenOfPicks + sizeOfTwinq\n###############################################################################\n###############################################################################\nlrpRule = 'z'\nnormLRP = 
True\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Picking experiment to save\ntypeOfAnalysis = 'issueWithExperiment'\n\n# Experiment #1\nif rm_ensemble_mean == True:\n if window > 1:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-1'\n# Experiment #2\nif rm_ensemble_mean == True:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-2'\n# Experiment #3 (raw data)\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-3'\n if variq == 'T2M':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 20 # random noise value to add/subtract from each grid point\n# Experiment #4\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-4'\n if variq == 'T2M':\n integer = 25 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 15 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #5\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-5'\n# Experiment #6\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-6'\n# Experiment #7\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-7'\n# Experiment #8\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-8'\n if variq == 'T2M':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #9\nif rm_ensemble_mean == False:\n if window > 1:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-9'\n \nprint('\\n<<<<<<<<<<<< Analysis == %s (%s) ! 
>>>>>>>>>>>>>>>\\n' % (typeOfAnalysis,timeper))\nif typeOfAnalysis == 'issueWithExperiment':\n sys.exit('Wrong parameters selected to analyze')\n \n### Select how to save files\nif land_only == True:\n saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelif ocean_only == True:\n saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelse:\n saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nprint('*Filename == < %s >' % saveData) \n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Create sample class labels for each model for my own testing\n### Appends a twin set of classes for the random noise class \nif seasons != 'none':\n classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))\n for i in range(lenOfPicks):\n classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i) \n \n if sizeOfTwin > 0: \n ### Add random noise models\n randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)\n classesl = np.append(classesl,randomNoiseClass,axis=0)\n \n if ensTypeExperi == 'ENS':\n classeslnew = np.swapaxes(classesl,0,1)\n elif ensTypeExperi == 'GCM':\n classeslnew = classesl\n###############################################################################\n###############################################################################\n###############################################################################\n############################################################################### \n### Begin ANN and the entire script\nfor sis,singlesimulation in enumerate(datasetsingle):\n lrpsns = []\n for seas in range(len(seasons)):\n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### ANN preliminaries\n simuqq = datasetsingle[0]\n monthlychoice = seasons[seas]\n lat_bounds,lon_bounds = UT.regions(reg_name)\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',\n 'RMSE Train','RMSE Test',\n 'ridge penalty','zero mean',\n 'zero merid mean','land only?','ocean only?']) \n \n ### Define primary dataset to use\n dataset = singlesimulation\n modelType = dataset\n \n ### Whether to test and plot the results using obs data\n if dataset_obs == '20CRv3':\n year_obsall = np.arange(yearsall[sis].min(),2015+1,1)\n elif dataset_obs == 'ERA5':\n year_obsall = np.arange(1979+window,2019+1,1)\n if rm_standard_dev == False:\n year_obsall = np.arange(1979,2019+1,1)\n elif dataset_obs == 'ERA5BE':\n year_obsall = np.arange(1950+window,2019+1,1)\n if rm_standard_dev == 
False:\n year_obsall = np.arange(1950,2019+1,1)\n if monthlychoice == 'DJF':\n obsyearstart = year_obsall.min()+1\n year_obs = year_obsall[1:]\n else:\n obsyearstart = year_obsall.min()\n year_obs = year_obsall\n \n ### Remove the annual mean? True to subtract it from dataset ##########\n if rm_annual_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Rove the ensemble mean? True to subtract it from dataset ##########\n if rm_ensemble_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Split the data into training and testing sets? value of 1 will use all \n ### data as training\n segment_data_factor = .75\n \n ### Hiddens corresponds to the number of hidden layers the nnet will use - 0 \n ### for linear model, or a list [10, 20, 5] for multiple layers of nodes \n ### (10 nodes in first layer, 20 in second, etc); The \"loop\" part \n ### allows you to loop through multiple architectures. For example, \n ### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the \n ### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,\n ### and the next would be 3 hidden layers of 1 node each.\n \n ### Set useGPU to True to use the GPU, but only if you selected the GPU \n ### Runtime in the menu at the top of this page\n useGPU = False\n \n ### Set Cascade to True to utilize the nnet's cascade function\n cascade = False\n \n ### Plot within the training loop - may want to set to False when testing out \n ### larget sets of parameters\n plot_in_train = False\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Read in model and observational/reanalysis data\n \n def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)\n print('\\nOur dataset: ',dataset,' is shaped',data.shape)\n return datar,lats,lons\n \n def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,\n lat_bounds,lon_bounds)\n \n print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)\n return data_obs,lats_obs,lons_obs\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Select data to test, train on \n def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):\n \n global random_segment_seed,trainIndices,testIndices\n if random_segment_seed == None:\n random_segment_seed = int(int(np.random.randint(1, 100000)))\n np.random.seed(random_segment_seed)\n\n############################################################################### 
\n############################################################################### \n############################################################################### \n ###################################################################\n ### Large Ensemble experiment\n if ensTypeExperi == 'ENS':\n \n ### Flip GCM and ensemble member axes\n datanew = np.swapaxes(data,0,1)\n classeslnew = np.swapaxes(classesl,0,1)\n \n if fac < 1 :\n nrows = datanew.shape[0]\n segment_train = int(np.round(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'ensembles, testing on',segment_test)\n \n ### Picking out random ensembles\n i = 0\n trainIndices = list()\n while i < segment_train:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n trainIndices.append(line)\n i += 1\n else:\n pass\n \n i = 0\n testIndices = list()\n while i < segment_test:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n if line not in testIndices:\n testIndices.append(line)\n i += 1\n else:\n pass\n \n ### Training segment----------\n data_train = np.empty((len(trainIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(trainIndices):\n data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytrain[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on ensembles: ',trainIndices)\n print('Testing on ensembles: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((len(testIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytest = np.empty((len(testIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(testIndices):\n data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytest[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('Training on ensembles: %s' % len(trainIndices))\n print('Testing on ensembles: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n\n###############################################################################\n############################################################################### \n############################################################################### \n 
###################################################################\n ### GCM type experiments without ensembles\n elif ensTypeExperi == 'GCM':\n if data.ndim == 5:\n datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))\n classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))\n else:\n datanew = data\n classeslnew = classesl\n \n if fac < 1 :\n nrows = datanew.shape[1]\n segment_train = int(np.floor(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'years, testing on',segment_test)\n \n ### Picking out random ensembles\n firstyears = int(np.floor(segment_test/2))\n lastyears = -int(np.floor(segment_test/2))\n trainIndices = np.arange(firstyears,firstyears+segment_train,1)\n testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)\n \n ### Training segment----------\n data_train = np.empty((datanew.shape[0],len(trainIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))\n for index,ensemble in enumerate(trainIndices):\n data_train[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytrain[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on years: ',trainIndices)\n print('Testing on years: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((datanew.shape[0],len(testIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytest = np.empty((classeslnew.shape[0],len(testIndices)))\n for index,ensemble in enumerate(testIndices):\n data_test[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytest[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('Training on years: %s' % len(trainIndices))\n print('Testing on years: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n \n else:\n print(ValueError('WRONG EXPERIMENT!'))\n return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Plotting functions \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in 
spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n\n ###############################################################################\n ###############################################################################\n ############################################################################### \n ### Create a class weight dictionary to help if the classes are unbalanced\n def class_weight_creator(Y):\n class_dict = {}\n weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)\n for i in range( Y.shape[-1] ):\n class_dict[i] = weights[i] \n return class_dict\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Neural Network Creation & Training \n class TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.times = []\n \n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_time_start = time.time()\n \n def on_epoch_end(self, epoch, logs={}):\n self.times.append(time.time() - self.epoch_time_start)\n \n def defineNN(hidden, input_shape, output_shape, ridgePenalty): \n \n model = Sequential()\n ### Initialize first layer\n ### Model is a single node with activation function\n model.add(Dense(hidden[0],input_shape=(input_shape,),\n activation=actFun, use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Initialize other layers\n for layer in hidden[1:]:\n model.add(Dense(layer,activation=actFun,\n use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n print('\\nTHIS IS AN ANN!\\n')\n \n #### Initialize output layer\n model.add(Dense(output_shape,activation=None,use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Add softmax layer at the end\n model.add(Activation('softmax'))\n \n return model\n \n def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):\n \n global lr_here, batch_size\n lr_here = 0.001\n model.compile(optimizer=optimizers.SGD(lr=lr_here,\n momentum=0.9,nesterov=True), \n loss = 'categorical_crossentropy',\n metrics=[metrics.categorical_accuracy])\n # model.compile(optimizer=optimizers.Nadam(lr=lr_here), \n # loss = 'categorical_crossentropy',\n # metrics=[metrics.categorical_accuracy])\n \n ### Declare the relevant model parameters\n batch_size = 24 \n \n print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----') \n \n ### Callbacks\n time_callback = TimeHistory()\n early_stopping = keras.callbacks.EarlyStopping(monitor='loss',\n patience=2,\n verbose=1,\n mode='auto')\n \n history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,\n shuffle=True,verbose=verbose,\n callbacks=[time_callback,early_stopping],\n validation_split=0.)\n print('******** done training ***********')\n \n 
return model, history\n \n def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):\n \"\"\"or loops to iterate through training iterations, ridge penalty, \n and hidden layer list\n \"\"\"\n results = {}\n global nnet,random_network_seed\n \n for niter in iterations:\n for penalty in ridge_penalty:\n for hidden in hiddens:\n \n ### Check / use random seed\n if random_network_seed == None:\n np.random.seed(None)\n random_network_seed = int(np.random.randint(1, 100000))\n np.random.seed(random_network_seed)\n random.seed(random_network_seed)\n tf.set_random_seed(0)\n \n ### Standardize the data\n Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)\n Xmean,Xstd = stdVals\n \n ### Define the model\n model = defineNN(hidden,\n input_shape=np.shape(Xtrain)[1],\n output_shape=np.shape(Ytrain)[1],\n ridgePenalty=penalty) \n \n ### Train the net\n model, history = trainNN(model,Xtrain,\n Ytrain,niter,class_weight,verbose=1)\n \n ### After training, use the network with training data to \n ### check that we don't have any errors and output RMSE\n rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))\n if type(Ytest) != bool:\n rmse_test = 0.\n rmse_test = dSS.rmse(Ytest,model.predict(Xtest))\n else:\n rmse_test = False\n \n this_result = {'iters': niter, \n 'hiddens' : hidden, \n 'RMSE Train' : rmse_train, \n 'RMSE Test' : rmse_test, \n 'ridge penalty': penalty, \n 'zero mean' : rm_annual_mean,\n 'zero merid mean' : rm_merid_mean,\n 'land only?' : land_only,\n 'ocean only?' : ocean_only,\n 'Segment Seed' : random_segment_seed,\n 'Network Seed' : random_network_seed }\n results.update(this_result)\n \n global experiment_result\n experiment_result = experiment_result.append(results,\n ignore_index=True)\n \n #if True to plot each iter's graphs.\n if plot_in_train == True:\n plt.figure()\n \n plt.subplot(1,1,1)\n plt.plot(history.history['loss'],label = 'training')\n plt.title(history.history['loss'][-1])\n plt.xlabel('epoch')\n plt.xlim(2,len(history.history['loss'])-1)\n plt.legend()\n \n plt.grid(True)\n plt.show()\n \n #'unlock' the random seed\n np.random.seed(None)\n random.seed(None)\n tf.set_random_seed(None)\n \n return experiment_result, model\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Results\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n \n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n ### Parameters\n debug = True\n NNType = 'ANN'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[10,10]]\n ridge_penalty = [0.1]\n # hiddensList = [[8,8]]\n # ridge_penalty = [0.2]\n actFun = 'relu'\n \n if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):\n debug = True\n NNType = 'ANN'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[8,8]]\n ridge_penalty = [0.10]\n actFun = 'relu'\n \n expList = [(0)] # (0,1)\n expN = np.size(expList)\n \n iterations = [100] \n random_segment = True\n foldsN = 1\n \n for avgHalfChunk in (0,): \n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n for loop in ([0]): \n 
### Get info about the region\n lat_bounds,lon_bounds = UT.regions(reg_name)\n data_all,lats,lons = read_primary_dataset(variq,dataset,\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,\n dataset_obs,\n numOfEns,\n lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n for exp in expList: \n ### Get the data together\n data, data_obs, = data_all, data_obs_all,\n###############################################################################\n if len(pickSMILE) >= 1:\n data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)\n print('\\n*Pick models to analysis from %s*\\n' % pickSMILE)\n###############################################################################\n if calculate_anomalies == True:\n data, data_obs = dSS.calculate_anomalies(data,data_obs,\n lats,lons,baseline,yearsall)\n print('\\n*Calculate anomalies for %s-%s*\\n' % (baseline.min(),baseline.max()))\n############################################################################### \n if rm_annual_mean == True:\n data, data_obs = dSS.remove_annual_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed annual mean*\\n')\n############################################################################### \n if rm_merid_mean == True:\n data, data_obs = dSS.remove_merid_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed meridional mean*\\n')\n############################################################################### \n if rm_ensemble_mean == True:\n data = dSS.remove_ensemble_mean(data,ravel_modelens,\n ravelmodeltime,\n rm_standard_dev,\n numOfEns)\n print('\\n*Removed ensemble mean*')\n############################################################################### \n if rm_standard_dev == True:\n data = dSS.rm_standard_dev(data,window,ravelmodeltime,\n numOfEns)\n print('\\n*Removed standard deviation*')\n############################################################################### \n if rm_observational_mean == True:\n data = dSS.remove_observations_mean(data,data_obs,lats,lons)\n print('\\n*Removed observational data*')\n############################################################################### \n if land_only == True:\n data, data_obs = dSS.remove_ocean(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed ocean data*')\n###############################################################################\n if ocean_only == True:\n data, data_obs = dSS.remove_land(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed land data*') \n###############################################################################\n ### Adding random data\n if sizeOfTwin > 0:\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n data = 
dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Modify the GFDL-CM3 model for warmth and cooling that model only\n print('\\n <<< FACTOR FOR OBS IS %s! >>>\\n' % factorObs)\n if factorObs == 0:\n data = data\n elif factorObs == 1: # warm its mean state\n GFDL = data[4,:,:,:,:]\n GFDLwarmer = GFDL + 3\n data[4,:,:,:,:] = GFDLwarmer\n elif factorObs == 2: # cool its mean state\n GFDL = data[4,:,:,:,:]\n GFDLcooler = GFDL - 3\n data[4,:,:,:,:] = GFDLcooler\n elif factorObs == 3: # warm recent 10 years\n GFDL = data[4,:,:,:,:] \n GFDLbefore = GFDL[:,:-10,:,:]\n GFDLafter = GFDL[:,-10:,:,:] + 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq\n elif factorObs == 4: # cool recent 10 years\n GFDL = data[4,:,:,:,:] \n GFDLbefore = GFDL[:,:-10,:,:]\n GFDLafter = GFDL[:,-10:,:,:] - 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq \n elif factorObs == 5: # warm the North Pole\n sizeofNP = 10\n GFDL = data[4,:,:,:,:] \n warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5\n addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP\n GFDL[:,:,sizeofNP:,:] = addtoclimoNP\n data[4,:,:,:,:] = GFDL\n elif factorObs == 6: # cool the North Pole\n sizeofNP = 10\n GFDL = data[4,:,:,:,:] \n coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5\n addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP\n GFDL[:,:,sizeofNP:,:] = addtoclimoNP\n data[4,:,:,:,:] = GFDL\n elif factorObs == 7: # warm the Lower Arctic\n sizeofLA = 5\n GFDL = data[4,:,:,:,:] \n warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5\n addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA\n GFDL[:,:,:sizeofLA,:] = addtoclimoLA\n data[4,:,:,:,:] = GFDL\n elif factorObs == 8: # cool the Lower Arctic\n sizeofLA = 5\n GFDL = data[4,:,:,:,:] \n coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5\n addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA\n GFDL[:,:,:sizeofLA,:] = addtoclimoLA\n data[4,:,:,:,:] = GFDL\n elif factorObs == 9: # warm early 50 years\n GFDL = data[4,:,:,:,:] \n GFDLafter = GFDL[:,50:,:,:]\n GFDLbefore = GFDL[:,:50,:,:] + 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq\n elif factorObs == 10: # cool early 50 years\n GFDL = data[4,:,:,:,:] \n GFDLafter = GFDL[:,50:,:,:]\n GFDLbefore = GFDL[:,:50,:,:] - 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq \n \n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n\n###############################################################################\n###############################################################################\n###############################################################################\n ### Loop over folds\n for loop in np.arange(0,foldsN): \n \n K.clear_session()\n #---------------------------\n # 
random_segment_seed = 34515\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n #---------------------------\n Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)\n \n YtrainClassMulti = Ytrain \n YtestClassMulti = Ytest \n \n # For use later\n XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)\n Xmean, Xstd = stdVals \n \n #---------------------------\n random_network_seed = 87750\n #---------------------------\n \n # Create and train network\n exp_result,model = test_train_loopClass(Xtrain,\n YtrainClassMulti,\n Xtest,\n YtestClassMulti,\n iterations=iterations,\n ridge_penalty=ridge_penalty,\n hiddens=hiddensList,class_weight=class_weight,\n plot_in_train = True)\n model.summary() \n \n ################################################################################################################################################ \n # save the model\n dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'\n savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed) \n savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)\n \n if(reg_name=='Globe'):\n regSave = ''\n else:\n regSave = '_' + reg_name\n \n if(rm_annual_mean==True):\n savename = savename + '_AnnualMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'\n if(rm_ensemble_mean==True):\n savename = savename + '_EnsembleMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'\n \n savename = savename + regSave \n # model.save(dirname + savename + '.h5')\n # np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)\n \n print('saving ' + savename)\n \n ###############################################################\n ### Make final plot\n ### Get obs\n dataOBSERVATIONS = data_obs\n latsOBSERVATIONS = lats_obs\n lonsOBSERVATIONS = lons_obs\n \n Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])\n \n annType = 'class'\n if monthlychoice == 'DJF':\n startYear = yearsall[sis].min()+1\n endYear = yearsall[sis].max()\n else:\n startYear = yearsall[sis].min()\n endYear = yearsall[sis].max()\n years = np.arange(startYear,endYear+1,1) \n Xmeanobs = np.nanmean(Xobs,axis=0)\n Xstdobs = np.nanstd(Xobs,axis=0) \n \n XobsS = (Xobs-Xmeanobs)/Xstdobs\n XobsS[np.isnan(XobsS)] = 0\n \n xtrainpred = (Xtrain-Xmean)/Xstd\n xtrainpred[np.isnan(xtrainpred)] = 0\n xtestpred = (Xtest-Xmean)/Xstd\n xtestpred[np.isnan(xtestpred)] = 0\n \n if(annType=='class'):\n YpredObs = model.predict(XobsS)\n YpredTrain = model.predict(xtrainpred)\n YpredTest = model.predict(xtestpred)\n \n #######################################################\n #######################################################\n #######################################################\n ### Check null hypothesis of random data!\n randarray,latsra,lonsra = 
read_primary_dataset(variq,'RANDOM',\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])\n randarraymean = np.nanmean(randarrayn,axis=0)\n randarraystd = np.nanstd(randarrayn,axis=0)\n randarrayS = (randarrayn-randarraymean)/randarraystd\n \n ### Prediction on random data\n YpredRand = model.predict(randarrayS)\n #######################################################\n #######################################################\n #######################################################\n \n ### Get output from model\n trainingout = YpredTrain\n testingout = YpredTest\n \n if ensTypeExperi == 'ENS':\n classesltrain = classeslnew[trainIndices,:,:].ravel()\n classesltest = classeslnew[testIndices,:,:].ravel()\n elif ensTypeExperi == 'GCM':\n classesltrain = classeslnew[:,:,trainIndices].ravel()\n classesltest = classeslnew[:,:,testIndices].ravel()\n \n ### Random data tests\n randout = YpredRand\n labelsrand = np.argmax(randout,axis=1)\n uniquerand,countrand = np.unique(labelsrand,return_counts=True)\n np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)\n np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)\n \n ### Observations\n obsout = YpredObs\n labelsobs = np.argmax(obsout,axis=1)\n uniqueobs,countobs = np.unique(labelsobs,return_counts=True)\n print(labelsobs)\n np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)\n np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)\n \n def truelabel(data):\n \"\"\"\n Calculate argmax\n \"\"\"\n maxindexdata= np.argmax(data[:,:],axis=1) \n \n return maxindexdata\n \n def accuracyTotalTime(data_pred,data_true):\n \"\"\"\n Compute accuracy for the entire time series\n \"\"\"\n \n data_truer = data_true\n data_predr = data_pred\n accdata_pred = accuracy_score(data_truer,data_predr)\n \n return accdata_pred\n\n ##############################################################################\n ##############################################################################\n ############################################################################## \n indextrain = truelabel(trainingout)\n acctrain = accuracyTotalTime(indextrain,classesltrain)\n indextest = truelabel(testingout)\n acctest = accuracyTotalTime(indextest,classesltest)\n print('\\n\\nAccuracy Training == ',acctrain)\n print('Accuracy Testing == ',acctest)\n \n ## Save the output for plotting\n np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)\n np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)\n \n np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)\n np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)\n \n np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)\n np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)\n \n ### See more more details\n model.layers[0].get_config()\n \n ## Define variable for analysis\n print('\\n\\n------------------------')\n print(variq,'= Variable!')\n print(monthlychoice,'= Time!')\n print(reg_name,'= Region!')\n print(lat_bounds,lon_bounds)\n print(dataset,'= Model!')\n print(dataset_obs,'= Observations!\\n')\n print(rm_annual_mean,'= rm_annual_mean') \n print(rm_merid_mean,'= rm_merid_mean') \n print(rm_ensemble_mean,'= rm_ensemble_mean') 
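# --- Illustrative aside (not from the original files) ------------------------
# truelabel() and accuracyTotalTime() above reduce the softmax confidences to a
# predicted class per sample and a single accuracy number. A tiny synthetic
# sketch of that reduction:
import numpy as np
from sklearn.metrics import accuracy_score

Ypred = np.array([[0.7, 0.2, 0.1],              # predicted class 0
                  [0.1, 0.8, 0.1],              # predicted class 1
                  [0.3, 0.3, 0.4]])             # predicted class 2
true_classes = np.array([0, 1, 1])

pred_classes = np.argmax(Ypred, axis=1)         # [0 1 2]
print(accuracy_score(true_classes, pred_classes))   # 0.666...
# ------------------------------------------------------------------------------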
\n print(land_only,'= land_only')\n print(ocean_only,'= ocean_only')\n \n ## Variables for plotting\n lons2,lats2 = np.meshgrid(lons,lats) \n observations = data_obs\n modeldata = data\n modeldatamean = np.nanmean(modeldata,axis=1)\n \n spatialmean_obs = UT.calc_weightedAve(observations,lats2)\n spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)\n spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)\n plt.figure()\n plt.plot(yearsall,spatialmean_modmean.transpose())\n plt.plot(yearsall,spatialmean_modmean.transpose()[:,4],linewidth=3,color='red',label=r'GFDL-CM3 - %s-Experiment' % factorObs)\n plt.xlabel('Years')\n plt.ylabel('Average Arctic Temperature')\n plt.legend()\n plt.ylim([-14.5,-1])\n plt.savefig('/Users/zlabe/Desktop/factor-%s.png' % factorObs,dpi=300)\n plt.figure()\n plt.plot(spatialmean_obs)\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n ## Visualizing through LRP\n numLats = lats.shape[0]\n numLons = lons.shape[0] \n numDim = 3\n\n ##############################################################################\n ##############################################################################\n ##############################################################################\n \n lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),\n np.append(Ytrain,Ytest,axis=0),\n biasBool,annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n meanlrp = np.nanmean(lrpall,axis=0)\n fig=plt.figure()\n plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)\n \n ### For training data only\n lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n ### For training data only\n lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n \n ### For observations data only\n lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n\n ### For random data only\n lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n def netcdfLRP(lats,lons,var,directory,typemodel,saveData):\n print('\\n>>> Using netcdfLRP function!')\n \n from netCDF4 import Dataset\n import numpy as np\n \n name = 'LRPMap' + typemodel + '_' + saveData + '.nc'\n filename = directory + name\n ncfile = Dataset(filename,'w',format='NETCDF4')\n ncfile.description = 'LRP maps for using selected seed' \n \n ### Dimensions\n ncfile.createDimension('years',var.shape[0])\n ncfile.createDimension('lat',var.shape[1])\n ncfile.createDimension('lon',var.shape[2])\n \n ### Variables\n years = ncfile.createVariable('years','f4',('years'))\n latitude = ncfile.createVariable('lat','f4',('lat'))\n longitude = ncfile.createVariable('lon','f4',('lon'))\n varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))\n \n ### Units\n varns.units = 'unitless relevance'\n ncfile.title = 'LRP relevance'\n ncfile.instituion = 'Colorado State University'\n ncfile.references = 
'Barnes et al. [2020]'\n \n ### Data\n years[:] = np.arange(var.shape[0])\n latitude[:] = lats\n longitude[:] = lons\n varns[:] = var\n \n ncfile.close()\n print('*Completed: Created netCDF4 File!')\n \n netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)\n netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)\n netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)\n netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)",
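The netcdfLRP calls above close out the first script by writing the relevance maps to disk. As a usage note (an illustrative sketch, not part of either file), the same netCDF4 package can read those LRPMap*.nc files back for later plotting; the filename below is a hypothetical stand-in for whatever saveData resolves to in a given run.

from netCDF4 import Dataset
import numpy as np

# Hypothetical filename following the 'LRPMap' + typemodel + '_' + saveData
# convention used by netcdfLRP() above; substitute the name printed by the script.
filename = '/Users/zlabe/Documents/Research/ModelComparison/Data/LRPMapObs_example.nc'

ncfile = Dataset(filename, 'r')
lat = ncfile.variables['lat'][:]
lon = ncfile.variables['lon'][:]
lrp = ncfile.variables['LRP'][:]          # (years, lat, lon) relevance maps
ncfile.close()

lrp_timemean = np.nanmean(lrp, axis=0)    # time-mean relevance for a quick map
print(lrp_timemean.shape)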
"\"\"\"\nANN-LINEAR for evaluating model biases, differences, and other thresholds using \nexplainable AI\n\nReference : Barnes et al. [2020, JAMES]\nAuthor : Zachary M. Labe\nDate : 19 May 2021\nVersion : 4 -LINEAR-SMOOTHER\n\"\"\"\n\n### Import packages\nimport sys\nimport math\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Dense, Activation\nfrom keras import regularizers\nfrom keras import metrics\nfrom keras import optimizers\nfrom keras.models import Sequential\nimport tensorflow.keras as keras\nimport tensorflow as tf\nimport pandas as pd\nimport random\nimport scipy.stats as stats\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport palettable.cubehelix as cm\nimport cmocean as cmocean\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport calc_Stats as dSS\nimport calc_LRPclass as LRP\nimport innvestigate\nfrom sklearn.metrics import accuracy_score\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n### Prevent tensorflow 2.+ deprecation warnings\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n### LRP param\nDEFAULT_NUM_BWO_ITERATIONS = 200\nDEFAULT_BWO_LEARNING_RATE = .001\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n###############################################################################\n###############################################################################\n###############################################################################\n### Data preliminaries \ndirectorydataLLL = '/Users/zlabe/Data/LENS/monthly'\ndirectorydataENS = '/Users/zlabe/Data/SMILE/'\ndirectorydataBB = '/Users/zlabe/Data/BEST/'\ndirectorydataEE = '/Users/zlabe/Data/ERA5/'\ndirectoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'\n###############################################################################\n###############################################################################\nmodelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',\n 'GFDL_CM3','GFDL_ESM2M','lens']\ndatasetsingle = ['SMILE']\ndataset_obs = 'ERA5BE'\nseasons = ['annual']\nvariq = 'T2M'\nreg_name = 'Arctic'\ntimeper = 'historical'\n###############################################################################\n###############################################################################\npickSMILE = []\nif len(pickSMILE) >= 1:\n lenOfPicks = len(pickSMILE)\nelse:\n lenOfPicks = len(modelGCMs)\n###############################################################################\n###############################################################################\nland_only = True\nocean_only = False\nif land_only == True:\n maskNoiseClass = 'land'\nelif ocean_only == True:\n maskNoiseClass = 'ocean'\nelse:\n maskNoiseClass = 'none'\n\n###############################################################################\n###############################################################################\nrm_merid_mean = False\nrm_annual_mean = False\n###############################################################################\n###############################################################################\nsmoother = True\n###############################################################################\n###############################################################################\nrm_ensemble_mean 
= False\nrm_observational_mean = False\n###############################################################################\n###############################################################################\ncalculate_anomalies = False\nif calculate_anomalies == True:\n if timeper == 'historical': \n baseline = np.arange(1951,1980+1,1)\n elif timeper == 'future':\n baseline = np.arange(2021,2050+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n###############################################################################\n###############################################################################\nwindow = 0\nensTypeExperi = 'ENS'\n# shuffletype = 'TIMEENS'\n# shuffletype = 'ALLENSRAND'\n# shuffletype = 'ALLENSRANDrmmean'\nshuffletype = 'RANDGAUSS'\nsizeOfTwin = 4 # name of experiment for adding noise class #8\nif sizeOfTwin > 0:\n sizeOfTwinq = 1\nelse:\n sizeOfTwinq = sizeOfTwin\n###############################################################################\n###############################################################################\nif ensTypeExperi == 'ENS':\n if window == 0:\n rm_standard_dev = False\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950+window,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020+window,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\nelif ensTypeExperi == 'GCM':\n if window == 0:\n rm_standard_dev = False\n yearsall = np.arange(1950,2019+1,1)\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\n###############################################################################\n###############################################################################\nnumOfEns = 16\nlensalso = True\nif len(pickSMILE) == 0:\n if modelGCMs[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nelif len(pickSMILE) != 0:\n if pickSMILE[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nlentime = len(yearsall)\n###############################################################################\n###############################################################################\nravelyearsbinary = False\nravelbinary = False\nnum_of_class = lenOfPicks + sizeOfTwinq\n###############################################################################\n###############################################################################\nlrpRule = 'z'\nnormLRP = True\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Picking experiment to save\ntypeOfAnalysis = 'issueWithExperiment'\n\n# Experiment #1\nif rm_ensemble_mean == True:\n if window > 1:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if 
rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-1'\n# Experiment #2\nif rm_ensemble_mean == True:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-2'\n# Experiment #3 (raw data)\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-3'\n if variq == 'T2M':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 20 # random noise value to add/subtract from each grid point\n# Experiment #4\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-4'\n if variq == 'T2M':\n integer = 25 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 15 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #5\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-5'\n# Experiment #6\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-6'\n# Experiment #7\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-7'\n# Experiment #8\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-8'\n if variq == 'T2M':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #9\nif rm_ensemble_mean == False:\n if window > 1:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-9'\n \nprint('\\n<<<<<<<<<<<< Linear-Analysis == %s (%s) ! 
>>>>>>>>>>>>>>>\\n' % (typeOfAnalysis,timeper))\nif typeOfAnalysis == 'issueWithExperiment':\n sys.exit('Wrong parameters selected to analyze')\n \n### Select how to save files\nif land_only == True:\n saveData = timeper + '_LAND' + '_LINEAR_SMOOTHER_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelif ocean_only == True:\n saveData = timeper + '_OCEAN' + '_LINEAR_SMOOTHER_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelse:\n saveData = timeper + '_LINEAR_SMOOTHER_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nprint('*Filename == < %s >' % saveData) \n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Create sample class labels for each model for my own testing\n### Appends a twin set of classes for the random noise class \nif seasons != 'none':\n classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))\n for i in range(lenOfPicks):\n classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i) \n \n if sizeOfTwin > 0: \n ### Add random noise models\n randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)\n classesl = np.append(classesl,randomNoiseClass,axis=0)\n \n if ensTypeExperi == 'ENS':\n classeslnew = np.swapaxes(classesl,0,1)\n elif ensTypeExperi == 'GCM':\n classeslnew = classesl\n###############################################################################\n###############################################################################\n###############################################################################\n############################################################################### \n### Begin ANN and the entire script\nfor sis,singlesimulation in enumerate(datasetsingle):\n lrpsns = []\n for seas in range(len(seasons)):\n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### ANN preliminaries\n simuqq = datasetsingle[0]\n monthlychoice = seasons[seas]\n lat_bounds,lon_bounds = UT.regions(reg_name)\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',\n 'RMSE Train','RMSE Test',\n 'ridge penalty','zero mean',\n 'zero merid mean','land only?','ocean only?']) \n \n ### Define primary dataset to use\n dataset = singlesimulation\n modelType = dataset\n \n ### Whether to test and plot the results using obs data\n if dataset_obs == '20CRv3':\n year_obsall = np.arange(yearsall[sis].min(),2015+1,1)\n elif dataset_obs == 'ERA5':\n year_obsall = np.arange(1979+window,2019+1,1)\n if rm_standard_dev == False:\n year_obsall = np.arange(1979,2019+1,1)\n elif dataset_obs == 'ERA5BE':\n year_obsall = np.arange(1950+window,2019+1,1)\n if rm_standard_dev == False:\n year_obsall = np.arange(1950,2019+1,1)\n if monthlychoice == 'DJF':\n obsyearstart = year_obsall.min()+1\n year_obs = year_obsall[1:]\n else:\n obsyearstart 
= year_obsall.min()\n year_obs = year_obsall\n \n ### Remove the annual mean? True to subtract it from dataset ##########\n if rm_annual_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Rove the ensemble mean? True to subtract it from dataset ##########\n if rm_ensemble_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Split the data into training and testing sets? value of 1 will use all \n ### data as training\n segment_data_factor = .75\n \n ### Hiddens corresponds to the number of hidden layers the nnet will use - 0 \n ### for linear model, or a list [10, 20, 5] for multiple layers of nodes \n ### (10 nodes in first layer, 20 in second, etc); The \"loop\" part \n ### allows you to loop through multiple architectures. For example, \n ### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the \n ### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,\n ### and the next would be 3 hidden layers of 1 node each.\n \n ### Set useGPU to True to use the GPU, but only if you selected the GPU \n ### Runtime in the menu at the top of this page\n useGPU = False\n \n ### Set Cascade to True to utilize the nnet's cascade function\n cascade = False\n \n ### Plot within the training loop - may want to set to False when testing out \n ### larget sets of parameters\n plot_in_train = False\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Read in model and observational/reanalysis data\n \n def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)\n print('\\nOur dataset: ',dataset,' is shaped',data.shape)\n return datar,lats,lons\n \n def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,\n lat_bounds,lon_bounds)\n \n print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)\n return data_obs,lats_obs,lons_obs\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Select data to test, train on \n def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):\n \n global random_segment_seed,trainIndices,testIndices\n if random_segment_seed == None:\n random_segment_seed = int(int(np.random.randint(1, 100000)))\n np.random.seed(random_segment_seed)\n\n############################################################################### \n############################################################################### \n############################################################################### \n 
###################################################################\n ### Large Ensemble experiment\n if ensTypeExperi == 'ENS':\n \n ### Flip GCM and ensemble member axes\n datanew = np.swapaxes(data,0,1)\n classeslnew = np.swapaxes(classesl,0,1)\n \n if fac < 1 :\n nrows = datanew.shape[0]\n segment_train = int(np.round(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'ensembles, testing on',segment_test)\n \n ### Picking out random ensembles\n i = 0\n trainIndices = list()\n while i < segment_train:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n trainIndices.append(line)\n i += 1\n else:\n pass\n \n i = 0\n testIndices = list()\n while i < segment_test:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n if line not in testIndices:\n testIndices.append(line)\n i += 1\n else:\n pass\n \n ### Training segment----------\n data_train = np.empty((len(trainIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(trainIndices):\n data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytrain[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on ensembles: ',trainIndices)\n print('Testing on ensembles: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((len(testIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytest = np.empty((len(testIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(testIndices):\n data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytest[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('Training on ensembles: %s' % len(trainIndices))\n print('Testing on ensembles: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n\n###############################################################################\n############################################################################### \n############################################################################### \n ###################################################################\n ### GCM type experiments without ensembles\n elif ensTypeExperi == 'GCM':\n if data.ndim == 5:\n datanew = 
np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))\n classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))\n else:\n datanew = data\n classeslnew = classesl\n \n if fac < 1 :\n nrows = datanew.shape[1]\n segment_train = int(np.floor(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'years, testing on',segment_test)\n \n ### Picking out random ensembles\n firstyears = int(np.floor(segment_test/2))\n lastyears = -int(np.floor(segment_test/2))\n trainIndices = np.arange(firstyears,firstyears+segment_train,1)\n testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)\n \n ### Training segment----------\n data_train = np.empty((datanew.shape[0],len(trainIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))\n for index,ensemble in enumerate(trainIndices):\n data_train[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytrain[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on years: ',trainIndices)\n print('Testing on years: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((datanew.shape[0],len(testIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytest = np.empty((classeslnew.shape[0],len(testIndices)))\n for index,ensemble in enumerate(testIndices):\n data_test[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytest[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('Training on years: %s' % len(trainIndices))\n print('Testing on years: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n \n else:\n print(ValueError('WRONG EXPERIMENT!'))\n return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Plotting functions \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) 
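# --- Illustrative aside (not from the original files) ------------------------
# The 'GCM' branch of segment_data above trains on a contiguous middle block of
# years and tests on the first and last years. A minimal numeric sketch of how
# trainIndices/testIndices are built for 70 years and fac = 0.75:
import numpy as np

nrows = 70                                      # years per model
fac = 0.75                                      # segment_data_factor
segment_train = int(np.floor(nrows * fac))      # 52 training years
segment_test = nrows - segment_train            # 18 testing years
firstyears = int(np.floor(segment_test / 2))    # 9 years held out at the start

trainIndices = np.arange(firstyears, firstyears + segment_train, 1)
testIndices = np.append(np.arange(firstyears),
                        np.arange(trainIndices[-1] + 1, nrows, 1), axis=0)

print(trainIndices[0], trainIndices[-1])        # 9 60
print(testIndices)                              # years 0-8 and 61-69
# ------------------------------------------------------------------------------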
\n\n ###############################################################################\n ###############################################################################\n ############################################################################### \n ### Create a class weight dictionary to help if the classes are unbalanced\n def class_weight_creator(Y):\n class_dict = {}\n weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)\n for i in range( Y.shape[-1] ):\n class_dict[i] = weights[i] \n return class_dict\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Neural Network Creation & Training \n class TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.times = []\n \n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_time_start = time.time()\n \n def on_epoch_end(self, epoch, logs={}):\n self.times.append(time.time() - self.epoch_time_start)\n \n def defineNN(hidden, input_shape, output_shape, ridgePenalty): \n \n model = Sequential()\n \n ### Initialize first layer\n if hidden[0]==0:\n ### Model is linear\n model.add(Dense(1,input_shape=(input_shape,),\n activation='linear',use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n print('\\nTHIS IS A LINEAR NN!\\n')\n else:\n ### Initialize first layer\n ### Model is a single node with activation function\n model.add(Dense(hidden[0],input_shape=(input_shape,),\n activation=actFun, use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Initialize other layers\n for layer in hidden[1:]:\n model.add(Dense(layer,activation=actFun,\n use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n print('\\nTHIS IS AN ANN!\\n')\n \n #### Initialize output layer\n model.add(Dense(output_shape,activation=None,use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Add softmax layer at the end\n model.add(Activation('softmax'))\n \n return model\n \n def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):\n \n global lr_here, batch_size\n lr_here = 0.001\n model.compile(optimizer=optimizers.SGD(lr=lr_here,\n momentum=0.9,nesterov=True), \n loss = 'categorical_crossentropy',\n metrics=[metrics.categorical_accuracy])\n # model.compile(optimizer=optimizers.Nadam(lr=lr_here), \n # loss = 'categorical_crossentropy',\n # metrics=[metrics.categorical_accuracy])\n # model.compile(optimizer=optimizers.Adam(lr=lr_here), \n # loss = 'categorical_crossentropy',\n # metrics=[metrics.categorical_accuracy])\n \n ### Declare the relevant model parameters\n batch_size = 24 \n \n print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + 
'----') \n \n ### Callbacks\n time_callback = TimeHistory()\n early_stopping = keras.callbacks.EarlyStopping(monitor='loss',\n patience=2,\n verbose=1,\n mode='auto')\n \n history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,\n shuffle=True,verbose=verbose,\n callbacks=[time_callback,early_stopping],\n validation_split=0.)\n print('******** done training ***********')\n \n return model, history\n \n def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):\n \"\"\"or loops to iterate through training iterations, ridge penalty, \n and hidden layer list\n \"\"\"\n results = {}\n global nnet,random_network_seed\n \n for niter in iterations:\n for penalty in ridge_penalty:\n for hidden in hiddens:\n \n ### Check / use random seed\n if random_network_seed == None:\n np.random.seed(None)\n random_network_seed = int(np.random.randint(1, 100000))\n np.random.seed(random_network_seed)\n random.seed(random_network_seed)\n tf.set_random_seed(0)\n \n ### Standardize the data\n Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)\n Xmean,Xstd = stdVals\n \n ### Define the model\n model = defineNN(hidden,\n input_shape=np.shape(Xtrain)[1],\n output_shape=np.shape(Ytrain)[1],\n ridgePenalty=penalty) \n \n ### Train the net\n model, history = trainNN(model,Xtrain,\n Ytrain,niter,class_weight,verbose=1)\n \n ### After training, use the network with training data to \n ### check that we don't have any errors and output RMSE\n rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))\n if type(Ytest) != bool:\n rmse_test = 0.\n rmse_test = dSS.rmse(Ytest,model.predict(Xtest))\n else:\n rmse_test = False\n \n this_result = {'iters': niter, \n 'hiddens' : hidden, \n 'RMSE Train' : rmse_train, \n 'RMSE Test' : rmse_test, \n 'ridge penalty': penalty, \n 'zero mean' : rm_annual_mean,\n 'zero merid mean' : rm_merid_mean,\n 'land only?' : land_only,\n 'ocean only?' 
: ocean_only,\n 'Segment Seed' : random_segment_seed,\n 'Network Seed' : random_network_seed }\n results.update(this_result)\n \n global experiment_result\n experiment_result = experiment_result.append(results,\n ignore_index=True)\n \n #if True to plot each iter's graphs.\n if plot_in_train == True:\n plt.figure()\n \n plt.subplot(1,1,1)\n plt.plot(history.history['loss'],label = 'training')\n plt.title(history.history['loss'][-1])\n plt.xlabel('epoch')\n plt.xlim(2,len(history.history['loss'])-1)\n plt.legend()\n \n plt.grid(True)\n plt.show()\n \n #'unlock' the random seed\n np.random.seed(None)\n random.seed(None)\n tf.set_random_seed(None)\n \n return experiment_result, model\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Results\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n \n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n ### Parameters\n debug = True\n NNType = 'linear'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[0]]\n ridge_penalty = [0]\n actFun = 'linear'\n \n expList = [(0)] # (0,1)\n expN = np.size(expList)\n \n iterations = [500] \n random_segment = True\n foldsN = 1\n \n for avgHalfChunk in (0,): # ([1,5,10]):#([1,2,5,10]):\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n for loop in ([0]): # (0,1,2,3,4,5):\n ### Get info about the region\n lat_bounds,lon_bounds = UT.regions(reg_name)\n data_all,lats,lons = read_primary_dataset(variq,dataset,\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,\n dataset_obs,\n numOfEns,\n lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n for exp in expList: \n ### Get the data together\n data, data_obs, = data_all, data_obs_all,\n###############################################################################\n if len(pickSMILE) >= 1:\n data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)\n print('\\n*Pick models to analysis from %s*\\n' % pickSMILE)\n###############################################################################\n if calculate_anomalies == True:\n data, data_obs = dSS.calculate_anomalies(data,data_obs,\n lats,lons,baseline,yearsall)\n print('\\n*Calculate anomalies for %s-%s*\\n' % (baseline.min(),baseline.max()))\n############################################################################### \n if rm_annual_mean == True:\n data, data_obs = dSS.remove_annual_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed annual mean*\\n')\n############################################################################### \n if rm_merid_mean == True:\n data, data_obs = dSS.remove_merid_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed meridional 
mean*\\n')\n############################################################################### \n if rm_ensemble_mean == True:\n data = dSS.remove_ensemble_mean(data,ravel_modelens,\n ravelmodeltime,\n rm_standard_dev,\n numOfEns)\n print('\\n*Removed ensemble mean*')\n############################################################################### \n if rm_standard_dev == True:\n data = dSS.rm_standard_dev(data,window,ravelmodeltime,\n numOfEns)\n print('\\n*Removed standard deviation*')\n############################################################################### \n if rm_observational_mean == True:\n data = dSS.remove_observations_mean(data,data_obs,lats,lons)\n print('\\n*Removed observational data*')\n############################################################################### \n if land_only == True:\n data, data_obs = dSS.remove_ocean(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed ocean data*')\n###############################################################################\n if ocean_only == True:\n data, data_obs = dSS.remove_land(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed land data*') \n###############################################################################\n ### Adding random data\n if sizeOfTwin > 0:\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)\n###############################################################################\n ### Smooth other ensembles\n if smoother == True:\n data = dSS.smoothedEnsembles(data,lat_bounds,lon_bounds)\n###############################################################################\n###############################################################################\n###############################################################################\n ### Loop over folds\n for loop in np.arange(0,foldsN): \n \n K.clear_session()\n #---------------------------\n # random_segment_seed = 34515\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n #---------------------------\n Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)\n \n YtrainClassMulti = Ytrain \n YtestClassMulti = Ytest \n \n # For use later\n XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)\n Xmean, Xstd = stdVals \n \n #---------------------------\n random_network_seed = 87750\n #---------------------------\n \n # Create and train network\n exp_result,model = test_train_loopClass(Xtrain,\n YtrainClassMulti,\n Xtest,\n YtestClassMulti,\n iterations=iterations,\n ridge_penalty=ridge_penalty,\n hiddens=hiddensList,class_weight=class_weight,\n plot_in_train = True)\n model.summary() \n \n ################################################################################################################################################ \n # save the model\n dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'\n savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ 
str(random_network_seed) \n savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)\n \n if(reg_name=='Globe'):\n regSave = ''\n else:\n regSave = '_' + reg_name\n \n if(rm_annual_mean==True):\n savename = savename + '_AnnualMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'\n if(rm_ensemble_mean==True):\n savename = savename + '_EnsembleMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'\n \n savename = savename + regSave \n # model.save(dirname + savename + '.h5')\n # np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)\n \n print('saving ' + savename)\n \n ###############################################################\n ### Make final plot\n ### Get obs\n dataOBSERVATIONS = data_obs\n latsOBSERVATIONS = lats_obs\n lonsOBSERVATIONS = lons_obs\n \n Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])\n \n annType = 'class'\n if monthlychoice == 'DJF':\n startYear = yearsall[sis].min()+1\n endYear = yearsall[sis].max()\n else:\n startYear = yearsall[sis].min()\n endYear = yearsall[sis].max()\n years = np.arange(startYear,endYear+1,1) \n Xmeanobs = np.nanmean(Xobs,axis=0)\n Xstdobs = np.nanstd(Xobs,axis=0) \n \n XobsS = (Xobs-Xmeanobs)/Xstdobs\n XobsS[np.isnan(XobsS)] = 0\n \n xtrainpred = (Xtrain-Xmean)/Xstd\n xtrainpred[np.isnan(xtrainpred)] = 0\n xtestpred = (Xtest-Xmean)/Xstd\n xtestpred[np.isnan(xtestpred)] = 0\n \n if(annType=='class'):\n YpredObs = model.predict(XobsS)\n YpredTrain = model.predict(xtrainpred)\n YpredTest = model.predict(xtestpred)\n \n #######################################################\n #######################################################\n #######################################################\n ### Check null hypothesis of random data!\n randarray,latsra,lonsra = read_primary_dataset(variq,'RANDOM',\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])\n randarraymean = np.nanmean(randarrayn,axis=0)\n randarraystd = np.nanstd(randarrayn,axis=0)\n randarrayS = (randarrayn-randarraymean)/randarraystd\n \n ### Prediction on random data\n YpredRand = model.predict(randarrayS)\n #######################################################\n #######################################################\n #######################################################\n \n ### Get output from model\n trainingout = YpredTrain\n testingout = YpredTest\n \n if ensTypeExperi == 'ENS':\n classesltrain = classeslnew[trainIndices,:,:].ravel()\n classesltest = classeslnew[testIndices,:,:].ravel()\n elif ensTypeExperi == 'GCM':\n classesltrain = classeslnew[:,:,trainIndices].ravel()\n classesltest = classeslnew[:,:,testIndices].ravel() \n \n ### Adding new file name for linear model\n saveData = saveData + '_L2-%s' % ridge_penalty[0]\n print('\\n>>>NEW FILE NAME = %s\\n' % saveData)\n \n ### Looking at linear model weights and biases\n weights = model.layers[0].get_weights()[0][:,0].reshape(lats.shape[0],lons.shape[0])\n biases = model.layers[0].get_weights()[1]\n np.savetxt(directoryoutput + 'weights_' + saveData + '.txt',weights)\n np.savetxt(directoryoutput + 'biases_' + saveData + 
'.txt',biases)\n fig=plt.figure()\n plt.contourf(weights,300,cmap=cmocean.cm.thermal)\n \n ### Random data tests\n randout = YpredRand\n labelsrand = np.argmax(randout,axis=1)\n uniquerand,countrand = np.unique(labelsrand,return_counts=True)\n np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)\n np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)\n \n ### Observations\n obsout = YpredObs\n labelsobs = np.argmax(obsout,axis=1)\n uniqueobs,countobs = np.unique(labelsobs,return_counts=True)\n np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)\n np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)\n \n def truelabel(data):\n \"\"\"\n Calculate argmax\n \"\"\"\n maxindexdata= np.argmax(data[:,:],axis=1) \n \n return maxindexdata\n \n def accuracyTotalTime(data_pred,data_true):\n \"\"\"\n Compute accuracy for the entire time series\n \"\"\"\n \n data_truer = data_true\n data_predr = data_pred\n accdata_pred = accuracy_score(data_truer,data_predr)\n \n return accdata_pred\n\n ##############################################################################\n ##############################################################################\n ############################################################################## \n indextrain = truelabel(trainingout)\n acctrain = accuracyTotalTime(indextrain,classesltrain)\n indextest = truelabel(testingout)\n acctest = accuracyTotalTime(indextest,classesltest)\n print('\\n\\nAccuracy Training == ',acctrain)\n print('Accuracy Testing == ',acctest)\n \n ## Save the output for plotting\n np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)\n np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)\n \n np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)\n np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)\n \n np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)\n np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)\n \n ### See more more details\n model.layers[0].get_config()\n \n ## Define variable for analysis\n print('\\n\\n------------------------')\n print(variq,'= Variable!')\n print(monthlychoice,'= Time!')\n print(reg_name,'= Region!')\n print(lat_bounds,lon_bounds)\n print(dataset,'= Model!')\n print(dataset_obs,'= Observations!\\n')\n print(rm_annual_mean,'= rm_annual_mean') \n print(rm_merid_mean,'= rm_merid_mean') \n print(rm_ensemble_mean,'= rm_ensemble_mean') \n print(land_only,'= land_only')\n print(ocean_only,'= ocean_only')\n \n ## Variables for plotting\n lons2,lats2 = np.meshgrid(lons,lats) \n observations = data_obs\n modeldata = data\n modeldatamean = np.nanmean(modeldata,axis=1)\n \n spatialmean_obs = UT.calc_weightedAve(observations,lats2)\n spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)\n spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)\n plt.figure()\n plt.plot(spatialmean_modmean.transpose())\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n ## Visualizing through LRP\n numLats = lats.shape[0]\n numLons = lons.shape[0] \n numDim = 3\n\n ##############################################################################\n 
##############################################################################\n ##############################################################################\n \n lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),\n np.append(Ytrain,Ytest,axis=0),\n biasBool,annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n meanlrp = np.nanmean(lrpall,axis=0)\n fig=plt.figure()\n plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)\n \n ### For training data only\n lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n ### For training data only\n lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n \n ### For observations data only\n lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n\n ### For random data only\n lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n def netcdfLRP(lats,lons,var,directory,typemodel,saveData):\n print('\\n>>> Using netcdfLRP function!')\n \n from netCDF4 import Dataset\n import numpy as np\n \n name = 'LRPMap' + typemodel + '_' + saveData + '.nc'\n filename = directory + name\n ncfile = Dataset(filename,'w',format='NETCDF4')\n ncfile.description = 'LRP maps for using selected seed' \n \n ### Dimensions\n ncfile.createDimension('years',var.shape[0])\n ncfile.createDimension('lat',var.shape[1])\n ncfile.createDimension('lon',var.shape[2])\n \n ### Variables\n years = ncfile.createVariable('years','f4',('years'))\n latitude = ncfile.createVariable('lat','f4',('lat'))\n longitude = ncfile.createVariable('lon','f4',('lon'))\n varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))\n \n ### Units\n varns.units = 'unitless relevance'\n ncfile.title = 'LRP relevance'\n ncfile.instituion = 'Colorado State University'\n ncfile.references = 'Barnes et al. [2020]'\n \n ### Data\n years[:] = np.arange(var.shape[0])\n latitude[:] = lats\n longitude[:] = lons\n varns[:] = var\n \n ncfile.close()\n print('*Completed: Created netCDF4 File!')\n \n netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)\n netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)\n netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)\n netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)",
"\"\"\"\nANN for evaluating model biases, differences, and other thresholds using \nexplainable AI\n\nReference : Barnes et al. [2020, JAMES]\nAuthor : Zachary M. Labe\nDate : 26 April 2021\nVersion : 4 - subsamples random weight class (#8), but tries different noise\n\"\"\"\n\n### Import packages\nimport sys\nimport math\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Dense, Activation\nfrom keras import regularizers\nfrom keras import metrics\nfrom keras import optimizers\nfrom keras.models import Sequential\nimport tensorflow.keras as keras\nimport tensorflow as tf\nimport pandas as pd\nimport random\nimport scipy.stats as stats\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport palettable.cubehelix as cm\nimport cmocean as cmocean\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport calc_Stats as dSS\nimport calc_LRPclass as LRP\nimport innvestigate\nfrom sklearn.metrics import accuracy_score\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n### Prevent tensorflow 2.+ deprecation warnings\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n### LRP param\nDEFAULT_NUM_BWO_ITERATIONS = 200\nDEFAULT_BWO_LEARNING_RATE = .001\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n###############################################################################\n###############################################################################\n###############################################################################\n### Data preliminaries \ndirectorydataLLL = '/Users/zlabe/Data/LENS/monthly'\ndirectorydataENS = '/Users/zlabe/Data/SMILE/'\ndirectorydataBB = '/Users/zlabe/Data/BEST/'\ndirectorydataEE = '/Users/zlabe/Data/ERA5/'\ndirectoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'\n###############################################################################\n###############################################################################\nmodelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',\n 'GFDL_CM3','GFDL_ESM2M','lens']\ndatasetsingle = ['SMILE']\ndataset_obs = 'ERA5BE'\nseasons = ['annual']\nvariq = 'T2M'\nreg_name = 'SMILEGlobe'\ntimeper = 'historical'\n###############################################################################\n###############################################################################\n# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',\n# 'GFDL_ESM2M','lens']\n# pickSMILE = ['CCCma_canesm2','MPI','lens']\npickSMILE = []\nif len(pickSMILE) >= 1:\n lenOfPicks = len(pickSMILE)\nelse:\n lenOfPicks = len(modelGCMs)\n###############################################################################\n###############################################################################\nland_only = False\nocean_only = False\nif land_only == True:\n maskNoiseClass = 'land'\nelif ocean_only == True:\n maskNoiseClass = 'ocean'\nelse:\n maskNoiseClass = 'none'\n\n###############################################################################\n###############################################################################\nrm_merid_mean = False\nrm_annual_mean = False\n###############################################################################\n###############################################################################\nrm_ensemble_mean 
= False\nrm_observational_mean = False\n###############################################################################\n###############################################################################\ncalculate_anomalies = False\nif calculate_anomalies == True:\n if timeper == 'historical': \n baseline = np.arange(1951,1980+1,1)\n elif timeper == 'future':\n baseline = np.arange(2021,2050+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n###############################################################################\n###############################################################################\nwindow = 0\nensTypeExperi = 'ENS'\n# shuffletype = 'TIMEENS'\n# shuffletype = 'ALLENSRAND'\n# shuffletype = 'ALLENSRANDrmmean'\nshuffletype = 'RANDGAUSS'\nsizeOfTwin = 4 # name of experiment for adding noise class #8\nif sizeOfTwin > 0:\n sizeOfTwinq = 1\nelse:\n sizeOfTwinq = sizeOfTwin\n###############################################################################\n###############################################################################\nif ensTypeExperi == 'ENS':\n if window == 0:\n rm_standard_dev = False\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950+window,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020+window,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\nelif ensTypeExperi == 'GCM':\n if window == 0:\n rm_standard_dev = False\n yearsall = np.arange(1950,2019+1,1)\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\n###############################################################################\n###############################################################################\nnumOfEns = 16\nlensalso = True\nif len(pickSMILE) == 0:\n if modelGCMs[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nelif len(pickSMILE) != 0:\n if pickSMILE[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nlentime = len(yearsall)\n###############################################################################\n###############################################################################\nravelyearsbinary = False\nravelbinary = False\nnum_of_class = lenOfPicks + sizeOfTwinq\n###############################################################################\n###############################################################################\nlrpRule = 'z'\nnormLRP = True\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Picking experiment to save\ntypeOfAnalysis = 'issueWithExperiment'\n\n# Experiment #1\nif rm_ensemble_mean == True:\n if window > 1:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if 
rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-1'\n# Experiment #2\nif rm_ensemble_mean == True:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-2'\n# Experiment #3 (raw data)\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-3'\n if variq == 'T2M':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 20 # random noise value to add/subtract from each grid point\n# Experiment #4\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-4'\n if variq == 'T2M':\n integer = 25 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 15 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #5\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-5'\n# Experiment #6\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-6'\n# Experiment #7\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-7'\n# Experiment #8\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-8'\n if variq == 'T2M':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #9\nif rm_ensemble_mean == False:\n if window > 1:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-9'\n \nprint('\\n<<<<<<<<<<<< Analysis == %s (%s) ! 
>>>>>>>>>>>>>>>\\n' % (typeOfAnalysis,timeper))\nif typeOfAnalysis == 'issueWithExperiment':\n sys.exit('Wrong parameters selected to analyze')\n \n### Select how to save files\nif land_only == True:\n saveData = timeper + '_' + seasons[0] + '_LAND' + '_StandMethodsSEP_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelif ocean_only == True:\n saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_StandMethodsSEP_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelse:\n saveData = timeper + '_' + seasons[0] + '_StandMethodsSEP_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nprint('*Filename == < %s >' % saveData) \n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Create sample class labels for each model for my own testing\n### Appends a twin set of classes for the random noise class \nif seasons != 'none':\n classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))\n for i in range(lenOfPicks):\n classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i) \n \n if sizeOfTwin > 0: \n ### Add random noise models\n randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)\n classesl = np.append(classesl,randomNoiseClass,axis=0)\n \n if ensTypeExperi == 'ENS':\n classeslnew = np.swapaxes(classesl,0,1)\n elif ensTypeExperi == 'GCM':\n classeslnew = classesl\n###############################################################################\n###############################################################################\n###############################################################################\n############################################################################### \n### Begin ANN and the entire script\nfor sis,singlesimulation in enumerate(datasetsingle):\n lrpsns = []\n for seas in range(len(seasons)):\n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### ANN preliminaries\n simuqq = datasetsingle[0]\n monthlychoice = seasons[seas]\n lat_bounds,lon_bounds = UT.regions(reg_name)\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',\n 'RMSE Train','RMSE Test',\n 'ridge penalty','zero mean',\n 'zero merid mean','land only?','ocean only?']) \n \n ### Define primary dataset to use\n dataset = singlesimulation\n modelType = dataset\n \n ### Whether to test and plot the results using obs data\n if dataset_obs == '20CRv3':\n year_obsall = np.arange(yearsall[sis].min(),2015+1,1)\n elif dataset_obs == 'ERA5':\n year_obsall = np.arange(1979+window,2019+1,1)\n if rm_standard_dev == False:\n year_obsall = np.arange(1979,2019+1,1)\n elif dataset_obs == 'ERA5BE':\n year_obsall = np.arange(1950+window,2019+1,1)\n if rm_standard_dev == False:\n year_obsall = np.arange(1950,2019+1,1)\n if monthlychoice == 'DJF':\n obsyearstart = year_obsall.min()+1\n year_obs = 
year_obsall[1:]\n else:\n obsyearstart = year_obsall.min()\n year_obs = year_obsall\n \n ### Remove the annual mean? True to subtract it from dataset ##########\n if rm_annual_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Rove the ensemble mean? True to subtract it from dataset ##########\n if rm_ensemble_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Split the data into training and testing sets? value of 1 will use all \n ### data as training\n segment_data_factor = .75\n \n ### Hiddens corresponds to the number of hidden layers the nnet will use - 0 \n ### for linear model, or a list [10, 20, 5] for multiple layers of nodes \n ### (10 nodes in first layer, 20 in second, etc); The \"loop\" part \n ### allows you to loop through multiple architectures. For example, \n ### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the \n ### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,\n ### and the next would be 3 hidden layers of 1 node each.\n \n ### Set useGPU to True to use the GPU, but only if you selected the GPU \n ### Runtime in the menu at the top of this page\n useGPU = False\n \n ### Set Cascade to True to utilize the nnet's cascade function\n cascade = False\n \n ### Plot within the training loop - may want to set to False when testing out \n ### larget sets of parameters\n plot_in_train = False\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Read in model and observational/reanalysis data\n \n def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)\n print('\\nOur dataset: ',dataset,' is shaped',data.shape)\n return datar,lats,lons\n \n def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,\n lat_bounds,lon_bounds)\n \n print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)\n return data_obs,lats_obs,lons_obs\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Select data to test, train on \n def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):\n \n global random_segment_seed,trainIndices,testIndices\n if random_segment_seed == None:\n random_segment_seed = int(int(np.random.randint(1, 100000)))\n np.random.seed(random_segment_seed)\n\n############################################################################### \n############################################################################### \n############################################################################### \n 
###################################################################\n ### Large Ensemble experiment\n if ensTypeExperi == 'ENS':\n \n ### Flip GCM and ensemble member axes\n datanew = np.swapaxes(data,0,1)\n classeslnew = np.swapaxes(classesl,0,1)\n \n if fac < 1 :\n nrows = datanew.shape[0]\n segment_train = int(np.round(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'ensembles, testing on',segment_test)\n \n ### Picking out random ensembles\n i = 0\n trainIndices = list()\n while i < segment_train:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n trainIndices.append(line)\n i += 1\n else:\n pass\n \n i = 0\n testIndices = list()\n while i < segment_test:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n if line not in testIndices:\n testIndices.append(line)\n i += 1\n else:\n pass\n \n ### Training segment----------\n data_train = np.empty((len(trainIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(trainIndices):\n data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytrain[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on ensembles: ',trainIndices)\n print('Testing on ensembles: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((len(testIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytest = np.empty((len(testIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(testIndices):\n data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytest[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('Training on ensembles: %s' % len(trainIndices))\n print('Testing on ensembles: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n\n###############################################################################\n############################################################################### \n############################################################################### \n ###################################################################\n ### GCM type experiments without ensembles\n elif ensTypeExperi == 'GCM':\n if data.ndim == 5:\n datanew = 
np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))\n classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))\n else:\n datanew = data\n classeslnew = classesl\n \n if fac < 1 :\n nrows = datanew.shape[1]\n segment_train = int(np.floor(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'years, testing on',segment_test)\n \n ### Picking out random ensembles\n firstyears = int(np.floor(segment_test/2))\n lastyears = -int(np.floor(segment_test/2))\n trainIndices = np.arange(firstyears,firstyears+segment_train,1)\n testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)\n \n ### Training segment----------\n data_train = np.empty((datanew.shape[0],len(trainIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))\n for index,ensemble in enumerate(trainIndices):\n data_train[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytrain[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on years: ',trainIndices)\n print('Testing on years: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((datanew.shape[0],len(testIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytest = np.empty((classeslnew.shape[0],len(testIndices)))\n for index,ensemble in enumerate(testIndices):\n data_test[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytest[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('Training on years: %s' % len(trainIndices))\n print('Testing on years: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n \n else:\n print(ValueError('WRONG EXPERIMENT!'))\n return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Plotting functions \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) 
\n\n ###############################################################################\n ###############################################################################\n ############################################################################### \n ### Create a class weight dictionary to help if the classes are unbalanced\n def class_weight_creator(Y):\n class_dict = {}\n weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)\n for i in range( Y.shape[-1] ):\n class_dict[i] = weights[i] \n return class_dict\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Neural Network Creation & Training \n class TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.times = []\n \n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_time_start = time.time()\n \n def on_epoch_end(self, epoch, logs={}):\n self.times.append(time.time() - self.epoch_time_start)\n \n def defineNN(hidden, input_shape, output_shape, ridgePenalty): \n \n model = Sequential()\n ### Initialize first layer\n ### Model is a single node with activation function\n model.add(Dense(hidden[0],input_shape=(input_shape,),\n activation=actFun, use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Initialize other layers\n for layer in hidden[1:]:\n model.add(Dense(layer,activation=actFun,\n use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n print('\\nTHIS IS AN ANN!\\n')\n \n #### Initialize output layer\n model.add(Dense(output_shape,activation=None,use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Add softmax layer at the end\n model.add(Activation('softmax'))\n \n return model\n \n def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):\n \n global lr_here, batch_size\n lr_here = 0.001\n model.compile(optimizer=optimizers.SGD(lr=lr_here,\n momentum=0.9,nesterov=True), \n loss = 'categorical_crossentropy',\n metrics=[metrics.categorical_accuracy])\n # model.compile(optimizer=optimizers.Nadam(lr=lr_here), \n # loss = 'categorical_crossentropy',\n # metrics=[metrics.categorical_accuracy])\n \n ### Declare the relevant model parameters\n batch_size = 24 \n \n print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----') \n \n ### Callbacks\n time_callback = TimeHistory()\n early_stopping = keras.callbacks.EarlyStopping(monitor='loss',\n patience=2,\n verbose=1,\n mode='auto')\n \n history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,\n shuffle=True,verbose=verbose,\n callbacks=[time_callback,early_stopping],\n validation_split=0.)\n print('******** done training ***********')\n \n return model, history\n \n def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):\n \"\"\"or loops to iterate through 
training iterations, ridge penalty, \n and hidden layer list\n \"\"\"\n results = {}\n global nnet,random_network_seed\n \n for niter in iterations:\n for penalty in ridge_penalty:\n for hidden in hiddens:\n \n ### Check / use random seed\n if random_network_seed == None:\n np.random.seed(None)\n random_network_seed = int(np.random.randint(1, 100000))\n np.random.seed(random_network_seed)\n random.seed(random_network_seed)\n tf.set_random_seed(0)\n \n ### Standardize the data\n Xtrain,Xtest,stdVals = dSS.standardize_dataSEPARATE(Xtrain,Xtest)\n Xmean,Xstd = stdVals\n \n ### Define the model\n model = defineNN(hidden,\n input_shape=np.shape(Xtrain)[1],\n output_shape=np.shape(Ytrain)[1],\n ridgePenalty=penalty) \n \n ### Train the net\n model, history = trainNN(model,Xtrain,\n Ytrain,niter,class_weight,verbose=1)\n \n ### After training, use the network with training data to \n ### check that we don't have any errors and output RMSE\n rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))\n if type(Ytest) != bool:\n rmse_test = 0.\n rmse_test = dSS.rmse(Ytest,model.predict(Xtest))\n else:\n rmse_test = False\n \n this_result = {'iters': niter, \n 'hiddens' : hidden, \n 'RMSE Train' : rmse_train, \n 'RMSE Test' : rmse_test, \n 'ridge penalty': penalty, \n 'zero mean' : rm_annual_mean,\n 'zero merid mean' : rm_merid_mean,\n 'land only?' : land_only,\n 'ocean only?' : ocean_only,\n 'Segment Seed' : random_segment_seed,\n 'Network Seed' : random_network_seed }\n results.update(this_result)\n \n global experiment_result\n experiment_result = experiment_result.append(results,\n ignore_index=True)\n \n #if True to plot each iter's graphs.\n if plot_in_train == True:\n plt.figure()\n \n plt.subplot(1,1,1)\n plt.plot(history.history['loss'],label = 'training')\n plt.title(history.history['loss'][-1])\n plt.xlabel('epoch')\n plt.xlim(2,len(history.history['loss'])-1)\n plt.legend()\n \n plt.grid(True)\n plt.show()\n \n #'unlock' the random seed\n np.random.seed(None)\n random.seed(None)\n tf.set_random_seed(None)\n \n return experiment_result, model\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Results\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n \n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n ### Parameters\n debug = True\n NNType = 'ANN'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[10,10]]\n ridge_penalty = [0.1]\n # hiddensList = [[8,8]]\n # ridge_penalty = [0.2]\n actFun = 'relu'\n \n if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):\n debug = True\n NNType = 'ANN'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[8,8]]\n ridge_penalty = [0.10]\n actFun = 'relu'\n \n expList = [(0)] # (0,1)\n expN = np.size(expList)\n \n iterations = [100] \n random_segment = True\n foldsN = 1\n \n for avgHalfChunk in (0,): # ([1,5,10]):#([1,2,5,10]):\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n for loop in ([0]): # (0,1,2,3,4,5):\n ### Get info about the region\n lat_bounds,lon_bounds = UT.regions(reg_name)\n data_all,lats,lons = 
read_primary_dataset(variq,dataset,\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,\n dataset_obs,\n numOfEns,\n lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n for exp in expList: \n ### Get the data together\n data, data_obs, = data_all, data_obs_all,\n###############################################################################\n if len(pickSMILE) >= 1:\n data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)\n print('\\n*Pick models to analysis from %s*\\n' % pickSMILE)\n###############################################################################\n if calculate_anomalies == True:\n data, data_obs = dSS.calculate_anomalies(data,data_obs,\n lats,lons,baseline,yearsall)\n print('\\n*Calculate anomalies for %s-%s*\\n' % (baseline.min(),baseline.max()))\n############################################################################### \n if rm_annual_mean == True:\n data, data_obs = dSS.remove_annual_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed annual mean*\\n')\n############################################################################### \n if rm_merid_mean == True:\n data, data_obs = dSS.remove_merid_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed meridional mean*\\n')\n############################################################################### \n if rm_ensemble_mean == True:\n data = dSS.remove_ensemble_mean(data,ravel_modelens,\n ravelmodeltime,\n rm_standard_dev,\n numOfEns)\n print('\\n*Removed ensemble mean*')\n############################################################################### \n if rm_standard_dev == True:\n data = dSS.rm_standard_dev(data,window,ravelmodeltime,\n numOfEns)\n print('\\n*Removed standard deviation*')\n############################################################################### \n if rm_observational_mean == True:\n data = dSS.remove_observations_mean(data,data_obs,lats,lons)\n print('\\n*Removed observational data*')\n############################################################################### \n if land_only == True:\n data, data_obs = dSS.remove_ocean(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed ocean data*')\n###############################################################################\n if ocean_only == True:\n data, data_obs = dSS.remove_land(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed land data*') \n###############################################################################\n ### Adding random data\n if sizeOfTwin > 0:\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n ### Loop over folds\n for loop in np.arange(0,foldsN): \n \n K.clear_session()\n 
#---------------------------\n # random_segment_seed = 34515\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n #---------------------------\n Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)\n \n YtrainClassMulti = Ytrain \n YtestClassMulti = Ytest \n \n # For use later\n XtrainS,XtestS,stdVals = dSS.standardize_dataSEPARATE(Xtrain,Xtest)\n Xmean, Xstd = stdVals \n \n #---------------------------\n random_network_seed = 87750\n #---------------------------\n \n # Create and train network\n exp_result,model = test_train_loopClass(Xtrain,\n YtrainClassMulti,\n Xtest,\n YtestClassMulti,\n iterations=iterations,\n ridge_penalty=ridge_penalty,\n hiddens=hiddensList,class_weight=class_weight,\n plot_in_train = True)\n model.summary() \n \n ################################################################################################################################################ \n # save the model\n dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'\n savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed) \n savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)\n \n if(reg_name=='Globe'):\n regSave = ''\n else:\n regSave = '_' + reg_name\n \n if(rm_annual_mean==True):\n savename = savename + '_AnnualMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'\n if(rm_ensemble_mean==True):\n savename = savename + '_EnsembleMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'\n \n savename = savename + regSave \n # model.save(dirname + savename + '.h5')\n # np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)\n \n print('saving ' + savename)\n \n ###############################################################\n ### Make final plot\n ### Get obs\n dataOBSERVATIONS = data_obs\n latsOBSERVATIONS = lats_obs\n lonsOBSERVATIONS = lons_obs\n \n Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])\n \n annType = 'class'\n if monthlychoice == 'DJF':\n startYear = yearsall[sis].min()+1\n endYear = yearsall[sis].max()\n else:\n startYear = yearsall[sis].min()\n endYear = yearsall[sis].max()\n years = np.arange(startYear,endYear+1,1) \n Xmeanobs = np.nanmean(Xobs,axis=0)\n Xstdobs = np.nanstd(Xobs,axis=0) \n \n XobsS = (Xobs-Xmeanobs)/Xstdobs\n XobsS[np.isnan(XobsS)] = 0\n # XobsS = (Xobs-Xmean)/Xstd\n # XobsS[np.isnan(XobsS)] = 0\n \n # xtrainpred = (Xtrain-Xmean)/Xstd\n # xtrainpred[np.isnan(xtrainpred)] = 0\n # xtestpred = (Xtest-Xmean)/Xstd\n # xtestpred[np.isnan(xtestpred)] = 0\n \n if(annType=='class'):\n YpredObs = model.predict(XobsS)\n YpredTrain = model.predict(XtrainS)\n YpredTest = model.predict(XtestS)\n \n #######################################################\n #######################################################\n 
#######################################################\n ### Check null hypothesis of random data!\n randarray,latsra,lonsra = read_primary_dataset(variq,'RANDOM',\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])\n randarraymean = np.nanmean(randarrayn,axis=0)\n randarraystd = np.nanstd(randarrayn,axis=0)\n randarrayS = (randarrayn-randarraymean)/randarraystd\n \n ### Prediction on random data\n YpredRand = model.predict(randarrayS)\n #######################################################\n #######################################################\n #######################################################\n \n ### Get output from model\n trainingout = YpredTrain\n testingout = YpredTest\n \n if ensTypeExperi == 'ENS':\n classesltrain = classeslnew[trainIndices,:,:].ravel()\n classesltest = classeslnew[testIndices,:,:].ravel()\n elif ensTypeExperi == 'GCM':\n classesltrain = classeslnew[:,:,trainIndices].ravel()\n classesltest = classeslnew[:,:,testIndices].ravel()\n \n ### Random data tests\n randout = YpredRand\n labelsrand = np.argmax(randout,axis=1)\n uniquerand,countrand = np.unique(labelsrand,return_counts=True)\n np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)\n np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)\n \n ### Observations\n obsout = YpredObs\n labelsobs = np.argmax(obsout,axis=1)\n uniqueobs,countobs = np.unique(labelsobs,return_counts=True)\n np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)\n np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)\n \n def truelabel(data):\n \"\"\"\n Calculate argmax\n \"\"\"\n maxindexdata= np.argmax(data[:,:],axis=1) \n \n return maxindexdata\n \n def accuracyTotalTime(data_pred,data_true):\n \"\"\"\n Compute accuracy for the entire time series\n \"\"\"\n \n data_truer = data_true\n data_predr = data_pred\n accdata_pred = accuracy_score(data_truer,data_predr)\n \n return accdata_pred\n\n ##############################################################################\n ##############################################################################\n ############################################################################## \n indextrain = truelabel(trainingout)\n acctrain = accuracyTotalTime(indextrain,classesltrain)\n indextest = truelabel(testingout)\n acctest = accuracyTotalTime(indextest,classesltest)\n print('\\n\\nAccuracy Training == ',acctrain)\n print('Accuracy Testing == ',acctest)\n \n ## Save the output for plotting\n np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)\n np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)\n \n np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)\n np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)\n \n np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)\n np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)\n \n ### See more more details\n model.layers[0].get_config()\n \n ## Define variable for analysis\n print('\\n\\n------------------------')\n print(variq,'= Variable!')\n print(monthlychoice,'= Time!')\n print(reg_name,'= Region!')\n print(lat_bounds,lon_bounds)\n print(dataset,'= Model!')\n print(dataset_obs,'= Observations!\\n')\n 
print(rm_annual_mean,'= rm_annual_mean') \n print(rm_merid_mean,'= rm_merid_mean') \n print(rm_ensemble_mean,'= rm_ensemble_mean') \n print(land_only,'= land_only')\n print(ocean_only,'= ocean_only')\n \n ## Variables for plotting\n lons2,lats2 = np.meshgrid(lons,lats) \n observations = data_obs\n modeldata = data\n modeldatamean = np.nanmean(modeldata,axis=1)\n \n spatialmean_obs = UT.calc_weightedAve(observations,lats2)\n spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)\n spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)\n plt.figure()\n plt.plot(spatialmean_modmean.transpose())\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n ## Visualizing through LRP\n numLats = lats.shape[0]\n numLons = lons.shape[0] \n numDim = 3\n\n ##############################################################################\n ##############################################################################\n ##############################################################################\n \n lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),\n np.append(Ytrain,Ytest,axis=0),\n biasBool,annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n meanlrp = np.nanmean(lrpall,axis=0)\n fig=plt.figure()\n plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)\n \n ### For training data only\n lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n ### For training data only\n lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n \n ### For observations data only\n lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n\n ### For random data only\n lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n def netcdfLRP(lats,lons,var,directory,typemodel,saveData):\n print('\\n>>> Using netcdfLRP function!')\n \n from netCDF4 import Dataset\n import numpy as np\n \n name = 'LRPMap' + typemodel + '_' + saveData + '.nc'\n filename = directory + name\n ncfile = Dataset(filename,'w',format='NETCDF4')\n ncfile.description = 'LRP maps for using selected seed' \n \n ### Dimensions\n ncfile.createDimension('years',var.shape[0])\n ncfile.createDimension('lat',var.shape[1])\n ncfile.createDimension('lon',var.shape[2])\n \n ### Variables\n years = ncfile.createVariable('years','f4',('years'))\n latitude = ncfile.createVariable('lat','f4',('lat'))\n longitude = ncfile.createVariable('lon','f4',('lon'))\n varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))\n \n ### Units\n varns.units = 'unitless relevance'\n ncfile.title = 'LRP relevance'\n ncfile.instituion = 'Colorado State University'\n ncfile.references = 'Barnes et al. 
[2020]'\n \n ### Data\n years[:] = np.arange(var.shape[0])\n latitude[:] = lats\n longitude[:] = lons\n varns[:] = var\n \n ncfile.close()\n print('*Completed: Created netCDF4 File!')\n \n netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)\n netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)\n netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)\n netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.round",
"numpy.nanmean",
"tensorflow.get_default_graph",
"numpy.nanstd",
"numpy.random.randint",
"numpy.swapaxes",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"tensorflow.ConfigProto",
"numpy.size",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.isnan",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"numpy.append",
"numpy.floor",
"numpy.savetxt",
"tensorflow.set_random_seed",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.random.seed",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.shape",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.utils.to_categorical",
"sklearn.metrics.accuracy_score"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"numpy.round",
"matplotlib.pyplot.plot",
"numpy.nanmean",
"tensorflow.get_default_graph",
"numpy.nanstd",
"numpy.random.randint",
"numpy.swapaxes",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"tensorflow.ConfigProto",
"numpy.size",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.isnan",
"numpy.genfromtxt",
"numpy.append",
"numpy.floor",
"numpy.savetxt",
"tensorflow.set_random_seed",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.seed",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.shape",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.utils.to_categorical",
"sklearn.metrics.accuracy_score"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"numpy.round",
"matplotlib.pyplot.plot",
"numpy.nanmean",
"tensorflow.get_default_graph",
"numpy.nanstd",
"numpy.random.randint",
"numpy.swapaxes",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"tensorflow.ConfigProto",
"numpy.size",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.isnan",
"numpy.genfromtxt",
"numpy.append",
"numpy.floor",
"numpy.savetxt",
"tensorflow.set_random_seed",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.seed",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.shape",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.utils.to_categorical",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
fengxia41103/stock | [
"1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1"
] | [
"backend/stock/workers/get_valuation_ratio.py"
] | [
"import logging\n\nimport pandas as pd\n\nfrom stock.models import MyStock\nfrom stock.models import ValuationRatio\nfrom yahooquery import Ticker\n\nlogger = logging.getLogger(\"stock\")\n\n\nclass MyValuationRatio:\n def __init__(self, symbol):\n self.stock = MyStock.objects.get(symbol=symbol)\n\n def get(self):\n s = Ticker(self.stock.symbol, timeout=15)\n\n # all numbers convert to million\n df = s.valuation_measures\n if \"unavailable\" in df or \"error\" in df:\n logger.error(\"{}: {}\".format(self.stock.symbol, df))\n return\n\n # DB doesn't like NaN\n df = df.where(pd.notnull(df), 0)\n\n mapping = {\n \"forward_pe\": \"ForwardPeRatio\",\n \"pb\": \"PbRatio\",\n \"pe\": \"PeRatio\",\n \"peg\": \"PegRatio\",\n \"ps\": \"PsRatio\",\n }\n\n # enumerate data frame\n for row in df.itertuples(index=False):\n i, created = ValuationRatio.objects.get_or_create(\n stock=self.stock, on=row.asOfDate.date()\n )\n\n for key, val in mapping.items():\n try:\n tmp = float(getattr(row, val))\n except AttributeError:\n tmp = 0\n\n # set value\n setattr(i, key, tmp)\n i.save()\n\n # if all values are 0, discard the record\n ValuationRatio.objects.filter(\n forward_pe=0, pb=0, pe=0, peg=0, ps=0\n ).delete()\n"
] | [
[
"pandas.notnull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ss18/shapenet | [
"5a605bee6b2750f3a586ca9a740165e66b5dd7d8",
"5a605bee6b2750f3a586ca9a740165e66b5dd7d8"
] | [
"shapenet/networks/utils.py",
"shapenet/scripts/train_single_shapenet.py"
] | [
"# author: Justus Schock ([email protected])\n\nimport torch\n\n\nclass CustomGroupNorm(torch.nn.Module):\n \"\"\"\n Custom Group Norm which adds n_groups=2 as default parameter\n \"\"\"\n\n def __init__(self, n_features, n_groups=2):\n \"\"\"\n\n Parameters\n ----------\n n_features : int\n number of input features\n n_groups : int\n number of normalization groups\n \"\"\"\n super().__init__()\n self.norm = torch.nn.GroupNorm(n_groups, n_features)\n\n def forward(self, x):\n \"\"\"\n Forward batch through network\n\n Parameters\n ----------\n x : :class:`torch.Tensor`\n batch to forward\n\n Returns\n -------\n :class:`torch.Tensor`\n normalized results\n\n \"\"\"\n return self.norm(x)\n",
"# author: Justus Schock ([email protected])\n\n\ndef train_shapenet():\n \"\"\"\n Trains a single shapenet with config file from comandline arguments\n\n See Also\n --------\n :class:`delira.training.PyTorchNetworkTrainer`\n \n \"\"\"\n\n import logging\n import numpy as np\n import torch\n from shapedata.single_shape import SingleShapeDataset\n from delira.training import PyTorchNetworkTrainer\n from ..utils import Config\n from ..layer import HomogeneousShapeLayer\n from ..networks import SingleShapeNetwork\n from delira.logging import TrixiHandler\n from trixi.logger import PytorchVisdomLogger\n from delira.training.callbacks import ReduceLROnPlateauCallbackPyTorch\n from delira.data_loading import BaseDataManager, RandomSampler, \\\n SequentialSampler\n import os\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", type=str,\n help=\"Path to configuration file\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n config = Config()\n\n config_dict = config(os.path.abspath(args.config))\n\n shapes = np.load(os.path.abspath(config_dict[\"layer\"].pop(\"pca_path\"))\n )[\"shapes\"][:config_dict[\"layer\"].pop(\"num_shape_params\") + 1]\n\n# layer_cls = HomogeneousShapeLayer\n\n net = SingleShapeNetwork(\n HomogeneousShapeLayer, {\"shapes\": shapes,\n **config_dict[\"layer\"]},\n img_size=config_dict[\"data\"][\"img_size\"],\n **config_dict[\"network\"])\n\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n\n if args.verbose:\n print(\"Number of Parameters: %d\" % num_params)\n\n criterions = {\"L1\": torch.nn.L1Loss()}\n metrics = {\"MSE\": torch.nn.MSELoss()}\n\n mixed_prec = config_dict[\"training\"].pop(\"mixed_prec\", False)\n\n config_dict[\"training\"][\"save_path\"] = os.path.abspath(\n config_dict[\"training\"][\"save_path\"])\n\n trainer = PyTorchNetworkTrainer(\n net, criterions=criterions, metrics=metrics,\n lr_scheduler_cls=ReduceLROnPlateauCallbackPyTorch,\n lr_scheduler_params=config_dict[\"scheduler\"],\n optimizer_cls=torch.optim.Adam,\n optimizer_params=config_dict[\"optimizer\"],\n mixed_precision=mixed_prec,\n **config_dict[\"training\"])\n\n if args.verbose:\n print(trainer.input_device)\n\n print(\"Load Data\")\n dset_train = SingleShapeDataset(\n os.path.abspath(config_dict[\"data\"][\"train_path\"]),\n config_dict[\"data\"][\"img_size\"], config_dict[\"data\"][\"crop\"],\n config_dict[\"data\"][\"landmark_extension_train\"],\n cached=config_dict[\"data\"][\"cached\"],\n rotate=config_dict[\"data\"][\"rotate_train\"],\n random_offset=config_dict[\"data\"][\"offset_train\"]\n )\n\n if config_dict[\"data\"][\"test_path\"]:\n dset_val = SingleShapeDataset(\n os.path.abspath(config_dict[\"data\"][\"test_path\"]),\n config_dict[\"data\"][\"img_size\"], config_dict[\"data\"][\"crop\"],\n config_dict[\"data\"][\"landmark_extension_test\"],\n cached=config_dict[\"data\"][\"cached\"],\n rotate=config_dict[\"data\"][\"rotate_test\"],\n random_offset=config_dict[\"data\"][\"offset_test\"]\n )\n\n else:\n dset_val = None\n\n mgr_train = BaseDataManager(\n dset_train,\n batch_size=config_dict[\"data\"][\"batch_size\"],\n n_process_augmentation=config_dict[\"data\"][\"num_workers\"],\n transforms=None,\n sampler_cls=RandomSampler\n )\n mgr_val = BaseDataManager(\n dset_val,\n batch_size=config_dict[\"data\"][\"batch_size\"],\n n_process_augmentation=config_dict[\"data\"][\"num_workers\"],\n transforms=None,\n sampler_cls=SequentialSampler\n )\n\n if 
args.verbose:\n print(\"Data loaded\")\n if config_dict[\"logging\"].pop(\"enable\", False):\n logger_cls = PytorchVisdomLogger\n\n logging.basicConfig(level=logging.INFO,\n handlers=[\n TrixiHandler(\n logger_cls, **config_dict[\"logging\"])\n ])\n\n else:\n logging.basicConfig(level=logging.INFO,\n handlers=[logging.NullHandler()])\n\n logger = logging.getLogger(\"Test Logger\")\n logger.info(\"Start Training\")\n\n if args.verbose:\n print(\"Start Training\")\n\n trainer.train(config_dict[\"training\"][\"num_epochs\"], mgr_train, mgr_val,\n config_dict[\"training\"][\"val_score_key\"],\n val_score_mode='lowest')\n\n\nif __name__ == '__main__':\n train_shapenet()\n"
] | [
[
"torch.nn.GroupNorm"
],
[
"torch.nn.MSELoss",
"torch.nn.L1Loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DevD1092/Retinaface_DLIB | [
"455e393f1bd688cf2d1cc41960105af9ea8a26c6"
] | [
"test_widerface.py"
] | [
"from __future__ import print_function\nimport os\nimport sys\nimport argparse\nimport torch\nimport torch.backends.cudnn as cudnn\nimport numpy as np\nfrom data import cfg_mnet, cfg_re50\nfrom layers.functions.prior_box import PriorBox\nfrom utils.nms.py_cpu_nms import py_cpu_nms\nimport cv2\nfrom models.retinaface import RetinaFace\nfrom utils.box_utils import decode, decode_landm\nfrom utils.timer import Timer\n\n\nparser = argparse.ArgumentParser(description='Retinaface')\nparser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',\n type=str, help='Trained state_dict file path to open')\nparser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')\nparser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')\nparser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')\nparser.add_argument('--cpu', action=\"store_true\", default=False, help='Use cpu inference')\nparser.add_argument('--dataset_folder', default='./data/widerface/widerface/val/images/', type=str, help='dataset path')\nparser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')\nparser.add_argument('--top_k', default=5000, type=int, help='top_k')\nparser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')\nparser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')\nparser.add_argument('-s', '--save_image', action=\"store_true\", default=False, help='show detection results')\nparser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')\nargs = parser.parse_args()\n\n\ndef check_keys(model, pretrained_state_dict):\n ckpt_keys = set(pretrained_state_dict.keys())\n model_keys = set(model.state_dict().keys())\n used_pretrained_keys = model_keys & ckpt_keys\n unused_pretrained_keys = ckpt_keys - model_keys\n missing_keys = model_keys - ckpt_keys\n print('Missing keys:{}'.format(len(missing_keys)))\n print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))\n print('Used keys:{}'.format(len(used_pretrained_keys)))\n assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'\n return True\n\n\ndef remove_prefix(state_dict, prefix):\n ''' Old style model is stored with all names of parameters sharing common prefix 'module.' 
'''\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x\n return {f(key): value for key, value in state_dict.items()}\n\n\ndef load_model(model, pretrained_path, load_to_cpu):\n print('Loading pretrained model from {}'.format(pretrained_path))\n if load_to_cpu:\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)\n else:\n device = torch.cuda.current_device()\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))\n if \"state_dict\" in pretrained_dict.keys():\n pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')\n else:\n pretrained_dict = remove_prefix(pretrained_dict, 'module.')\n check_keys(model, pretrained_dict)\n model.load_state_dict(pretrained_dict, strict=False)\n return model\n\n\nif __name__ == '__main__':\n torch.set_grad_enabled(False)\n \n cfg = None\n if args.network == \"mobile0.25\":\n cfg = cfg_mnet\n elif args.network == \"resnet50\":\n cfg = cfg_re50\n # net and model\n net = RetinaFace(cfg=cfg, phase = 'test')\n net = load_model(net, args.trained_model, args.cpu)\n net.eval()\n print('Finished loading model!')\n print(net)\n cudnn.benchmark = True\n device = torch.device(\"cpu\" if args.cpu else \"cuda\")\n net = net.to(device)\n \n # testing dataset\n testset_folder = args.dataset_folder\n print (testset_folder)\n testset_list = args.dataset_folder + \"test_list.txt\"\n test_dataset = []\n #print (testset_list)\n\n with open(testset_list, 'r') as fr:\n content = fr.readlines()\n test_dataset = [line.strip() for line in content]\n num_images = len(test_dataset)\n print (num_images)\n\n _t = {'forward_pass': Timer(), 'misc': Timer()}\n\n # testing begin\n for i, img_name in enumerate(test_dataset):\n image_path = testset_folder + img_name\n print (image_path)\n img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = np.float32(img_raw)\n\n # testing scale\n target_size = 1600\n max_size = 2150\n im_shape = img.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n resize = float(target_size) / float(im_size_min)\n # prevent bigger axis from being more than max_size:\n if np.round(resize * im_size_max) > max_size:\n resize = float(max_size) / float(im_size_max)\n if args.origin_size:\n resize = 1\n\n if resize != 1:\n img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)\n im_height, im_width, _ = img.shape\n scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\n img -= (104, 117, 123)\n img = img.transpose(2, 0, 1)\n img = torch.from_numpy(img).unsqueeze(0)\n img = img.to(device)\n scale = scale.to(device)\n\n _t['forward_pass'].tic()\n loc, conf, landms = net(img) # forward pass\n _t['forward_pass'].toc()\n _t['misc'].tic()\n priorbox = PriorBox(cfg, image_size=(im_height, im_width))\n priors = priorbox.forward()\n priors = priors.to(device)\n prior_data = priors.data\n boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])\n boxes = boxes * scale / resize\n boxes = boxes.cpu().numpy()\n scores = conf.squeeze(0).data.cpu().numpy()[:, 1]\n landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])\n scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],\n img.shape[3], img.shape[2], img.shape[3], img.shape[2],\n img.shape[3], img.shape[2]])\n scale1 = scale1.to(device)\n landms = landms * scale1 / resize\n landms = landms.cpu().numpy()\n\n # ignore low 
scores\n inds = np.where(scores > args.confidence_threshold)[0]\n boxes = boxes[inds]\n landms = landms[inds]\n scores = scores[inds]\n\n # keep top-K before NMS\n order = scores.argsort()[::-1]\n # order = scores.argsort()[::-1][:args.top_k]\n boxes = boxes[order]\n landms = landms[order]\n scores = scores[order]\n\n # do NMS\n dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\n keep = py_cpu_nms(dets, args.nms_threshold)\n # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)\n dets = dets[keep, :]\n landms = landms[keep]\n\n # keep top-K faster NMS\n # dets = dets[:args.keep_top_k, :]\n # landms = landms[:args.keep_top_k, :]\n\n dets = np.concatenate((dets, landms), axis=1)\n _t['misc'].toc()\n\n # --------------------------------------------------------------------\n save_name = args.save_folder + img_name[:-4] + \".txt\"\n dirname = os.path.dirname(save_name)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(save_name, \"w\") as fd:\n bboxs = dets\n file_name = os.path.basename(save_name)[:-4] + \"\\n\"\n bboxs_num = str(len(bboxs)) + \"\\n\"\n fd.write(file_name)\n fd.write(bboxs_num)\n for box in bboxs:\n x = int(box[0])\n y = int(box[1])\n w = int(box[2]) - int(box[0])\n h = int(box[3]) - int(box[1])\n confidence = str(box[4])\n line = str(x) + \" \" + str(y) + \" \" + str(w) + \" \" + str(h) + \" \" + confidence + \" \\n\"\n fd.write(line)\n\n print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))\n\n # save image\n if args.save_image:\n for b in dets:\n if b[4] < args.vis_thres:\n continue\n text = \"{:.4f}\".format(b[4])\n b = list(map(int, b))\n cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)\n cx = b[0]\n cy = b[1] + 12\n cv2.putText(img_raw, text, (cx, cy),\n cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))\n\n # landms\n cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)\n cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)\n cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)\n cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)\n cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)\n # save image\n if not os.path.exists(\"./results_handtask/\"):\n os.makedirs(\"./results_handtask/\")\n name = \"./results_handtask/%05d.jpg\" % i\n cv2.imwrite(name, img_raw)\n\n"
] | [
[
"numpy.hstack",
"torch.Tensor",
"torch.cuda.current_device",
"torch.load",
"numpy.min",
"torch.from_numpy",
"numpy.concatenate",
"numpy.max",
"numpy.round",
"torch.set_grad_enabled",
"numpy.float32",
"torch.device",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bluetyson/archai | [
"50f70ccccf536466cc0370c8a63401e05dec33fd"
] | [
"archai/datasets/providers/svhn_provider.py"
] | [
"# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT license.\r\n\r\nfrom typing import List, Tuple, Union, Optional\r\n\r\nfrom overrides import overrides, EnforceOverrides\r\nfrom torch.utils.data.dataset import Dataset\r\n\r\nimport torchvision\r\nfrom torchvision.transforms import transforms\r\nfrom torch.utils.data import ConcatDataset\r\n\r\nfrom archai.datasets.dataset_provider import DatasetProvider, register_dataset_provider, TrainTestDatasets\r\nfrom archai.common.config import Config\r\nfrom archai.common import utils\r\n\r\n\r\nclass SvhnProvider(DatasetProvider):\r\n def __init__(self, conf_dataset:Config):\r\n super().__init__(conf_dataset)\r\n self._dataroot = utils.full_path(conf_dataset['dataroot'])\r\n\r\n @overrides\r\n def get_datasets(self, load_train:bool, load_test:bool,\r\n transform_train, transform_test)->TrainTestDatasets:\r\n trainset, testset = None, None\r\n\r\n if load_train:\r\n trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',\r\n download=True, transform=transform_train)\r\n extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',\r\n download=True, transform=transform_train)\r\n trainset = ConcatDataset([trainset, extraset])\r\n if load_test:\r\n testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',\r\n download=True, transform=transform_test)\r\n\r\n return trainset, testset\r\n\r\n @overrides\r\n def get_transforms(self)->tuple:\r\n MEAN = [0.4914, 0.4822, 0.4465]\r\n STD = [0.2023, 0.1994, 0.20100]\r\n transf = [\r\n transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip()\r\n ]\r\n\r\n normalize = [\r\n transforms.ToTensor(),\r\n transforms.Normalize(MEAN, STD)\r\n ]\r\n\r\n train_transform = transforms.Compose(transf + normalize)\r\n test_transform = transforms.Compose(normalize)\r\n\r\n return train_transform, test_transform\r\n\r\nregister_dataset_provider('svhn', SvhnProvider)"
] | [
[
"torch.utils.data.ConcatDataset"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shinh/dldt | [
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f"
] | [
"model-optimizer/extensions/middle/Reduce_test.py",
"model-optimizer/mo/front/caffe/extractors/inner_product_test.py",
"model-optimizer/mo/middle/passes/infer_test.py",
"tools/accuracy_checker/tests/test_segmentation_metrics.py",
"model-optimizer/mo/front/common/partial_infer/split.py",
"model-optimizer/mo/front/common/partial_infer/squeeze.py",
"model-optimizer/extensions/front/onnx/reduce_sum_ext.py",
"model-optimizer/mo/middle/passes/fusing/fuse_linear_seq.py",
"model-optimizer/extensions/middle/NormalizeFullyConnected_test.py",
"model-optimizer/extensions/middle/GemmToFullyConnected.py"
] | [
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.middle.Reduce import ReduceReplacer\nfrom mo.middle.passes.eliminate_test import build_graph\nfrom mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs\n\n# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the\n# dictionary with node attributes.\nnodes_attributes = {\n # Placeholder layers\n 'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},\n 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n\n # Reshape layers\n 'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},\n 'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Reshape layers\n 'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},\n 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n 'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},\n 'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Pooling\n 'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},\n 'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Power\n 'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},\n 'power_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Concat\n 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},\n}\n\n\nclass ReduceReplacerTest(unittest.TestCase):\n def test1(self):\n # Original graph\n # data(1,64,1)-->Reduce(axis=1,keep_dims=True)-->data(1,1,1)\n #\n # Reference graph\n # data(1,61,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reduce_1': {'axis': np.array([1]), 'keep_dims': True, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reshape_1': {'dim': np.array([1, 1, 64, 1])},\n 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},\n 'pooling': {'window': np.array([1, 1, 64, 1])},\n 'pooling_data': {'shape': np.array([1, 1, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 1, 1])},\n 
'reshape_2_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test2(self):\n # Original graph\n # data(1,3,64,64)-->Reduce(axis=2,keep_dims=True)-->data(1,3,1,64)\n #\n # Reference graph\n # data(1,3,64,64)->Reshape->Pool(1,3,1,64)->Reshape(1,3,1,64)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reduce_1': {'axis': np.array([2]), 'keep_dims': True, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([1, 3, 1, 64])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reshape_1': {'dim': np.array([1, 3, 64, 64])},\n 'reshape_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'pooling': {'window': np.array([1, 1, 64, 1])},\n 'pooling_data': {'shape': np.array([1, 3, 1, 64])},\n 'reshape_2': {'dim': np.array([1, 3, 1, 64])},\n 'reshape_2_data': {'shape': np.array([1, 3, 1, 64])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test3(self):\n # Original graph\n # data(1,3,64,64)-->Reduce(axis=[2,3],keep_dims=True)-->data(1,3,1,1)\n #\n # Reference graph\n # data(1,3,64,64)->Reshape->Pool(1,3,1,1)->Reshape(1,3,1,1)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reduce_1': {'axis': np.array([2, 3]), 'keep_dims': True, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([1, 3, 1, 1])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reshape_1': {'dim': np.array([1, 3, 64 * 64, 1])},\n 'reshape_1_data': {'shape': np.array([1, 3, 64 * 64, 1])},\n 'pooling': {'window': np.array([1, 1, 64 * 64, 1])},\n 'pooling_data': {'shape': np.array([1, 3, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 3, 1, 1])},\n 'reshape_2_data': {'shape': np.array([1, 3, 1, 1])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test4(self):\n # Original graph\n # data(2,3,64,64)-->Reduce(axis=[1,2,3],keep_dims=False)-->data(2)\n #\n # Reference graph\n # data(2,3,64,64)->Reshape(2,1,3*64*64,1)->Pool(2,1,1,1)->Reshape(2)\n #\n graph = build_graph(nodes_attributes,\n 
[('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},\n 'reduce_1': {'axis': np.array([1, 2, 3]), 'keep_dims': False, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([2])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},\n 'reshape_1': {'dim': np.array([2, 1, 3 * 64 * 64, 1])},\n 'reshape_1_data': {'shape': np.array([2, 1, 3 * 64 * 64, 1])},\n 'pooling': {'window': np.array([1, 1, 3 * 64 * 64, 1])},\n 'pooling_data': {'shape': np.array([2, 1, 1, 1])},\n 'reshape_2': {'dim': np.array([2])},\n 'reshape_2_data': {'shape': np.array([2])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test5(self):\n # Original graph\n # data(1, 16, 64, 64, 64, 4)-->Reduce(axis=[5],keep_dims=False)-->data(1, 16, 64, 64, 64)\n #\n # Reference graph\n # data(1, 16, 64, 64, 64, 4)->Reshape(1*16*64*64, 64, 4, 1)->Pool(1, 1, 4, 1)->Reshape(1, 16, 64, 64, 64)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},\n 'reduce_1': {'axis': np.array([5]), 'keep_dims': False, 'reduce_type': 'max'},\n 'reduce_1_data': {'shape': np.array([1, 16, 64, 64, 64])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},\n 'reshape_1': {'dim': np.array([65536, 64, 4, 1])},\n 'reshape_1_data': {'shape': np.array([65536, 64, 4, 1])},\n 'pooling': {'window': np.array([1, 1, 4, 1])},\n 'pooling_data': {'shape': np.array([65536, 64, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 16, 64, 64, 64])},\n 'reshape_2_data': {'shape': np.array([1, 16, 64, 64, 64])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test6(self):\n # Original graph\n # data(1,64,1)-->Reduce(axis=-2,keep_dims=True, reduce_type=Sum)-->data(1,1,1)\n #\n # Reference graph\n # data(1,61,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)->Power(scale=64)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reduce_1': {'axis': np.array([-2]), 'keep_dims': True, 'reduce_type': 'Sum'},\n 'reduce_1_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = 
build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'power'),\n ('power', 'power_data'),\n ('power_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reshape_1': {'dim': np.array([1, 1, 64, 1])},\n 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},\n 'pooling': {'window': np.array([1, 1, 64, 1])},\n 'pooling_data': {'shape': np.array([1, 1, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 1, 1])},\n 'reshape_2_data': {'shape': np.array([1, 1, 1])},\n 'power': {'scale': 64.0},\n 'power_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom mo.front.caffe.extractors.inner_product import inner_product_ext\nfrom mo.front.common.partial_infer.inner_product import caffe_inner_product\nfrom mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer\n\n\nclass FakeProtoLayer:\n def __init__(self, val):\n self.inner_product_param = val\n\n\nclass TestInnerProduct(unittest.TestCase):\n def test_inner_product_ext(self):\n params = {\n 'num_output': 10,\n 'bias_term': True\n }\n mean_blob = np.array([1., 2.])\n variance_blob = np.array([3., 4.])\n blobs = [mean_blob, variance_blob]\n res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),\n FakeModelLayer(blobs))\n exp_res = {\n 'type': 'FullyConnected',\n 'out-size': 10,\n 'infer': caffe_inner_product,\n 'weights': mean_blob,\n 'biases': variance_blob,\n 'embedded_inputs': [\n (1, 'weights', {\n 'bin': 'weights'\n }),\n (2, 'biases', {\n 'bin': 'biases'\n })\n ]\n }\n for i in exp_res:\n if i in ('weights', 'biases'):\n np.testing.assert_array_equal(res[i], exp_res[i])\n else:\n self.assertEqual(res[i], exp_res[i])\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom mo.front.common.partial_infer.concat import concat_infer\nfrom mo.graph.graph import Node\nfrom mo.middle.passes.infer import override_placeholder_shapes, partial_infer\nfrom mo.utils.error import Error\nfrom mo.utils.unittest.graph import build_graph\n\nnodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_1_data': {'value': None, 'kind': 'data', 'data_type': None},\n 'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'concat': {'type': 'Concat', 'value': None, 'kind': 'op'},\n 'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_3_data': {'value': None, 'kind': 'data', 'data_type': None},\n # Placeholders\n 'placeholder_1': {'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},\n 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_2': {'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},\n 'pl_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},\n 'pl_1_data': {'value': None, 'kind': 'data', 'data_type': None},\n 'pl_2': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},\n 'pl_2_data': {'value': None, 'kind': 'data', 'data_type': None},\n 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n # ScaleShift layer\n 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},\n 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},\n 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Mul op\n 'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'},\n 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'op_output': { 'kind': 'op', 'op': 'OpOutput', 'infer': lambda x: None}\n }\n\n\nclass TestInferPass(unittest.TestCase):\n def test_override_placeholder_shapes(self):\n \"\"\"\n Test for overriding shape in placeholder by shape from user_shapes.\n \"\"\"\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_2'),\n ('node_2', 'op_output')\n ],\n {'node_2': {'shape': None},\n 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}\n },\n nodes_with_edges_only=True)\n\n ph_shape = np.array([1, 3, 224, 224])\n user_dict = {'node_1': [{'shape': ph_shape}]}\n override_placeholder_shapes(graph, user_dict)\n res_shape = graph.node['node_1']['shape']\n self.assertTrue(np.array_equal(ph_shape, res_shape))\n\n def test_override_placeholder_no_shape(self):\n \"\"\"\n Test for case when user_shapes is not defined.\n \"\"\"\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_2'),\n ('node_2', 'op_output')\n ],\n {'node_2': {'shape': None, 'op': 'Placeholder'},\n 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}\n },\n 
nodes_with_edges_only=True)\n out = override_placeholder_shapes(graph, None)\n res_shape = graph.node['node_1']['shape']\n placeholder_shape = np.array([1, 3, 227, 227])\n self.assertIsNone(out)\n self.assertTrue(np.array_equal(placeholder_shape, res_shape))\n\n def test_override_placeholder_shapes(self):\n \"\"\"\n Test for case when user_shapes is not None, but it shouldn't rewrite shapes.\n \"\"\"\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_2'),\n ('node_2', 'op_output')\n ],\n {'node_2': {'shape': None},\n 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}\n },\n nodes_with_edges_only=True)\n\n node_1_shape = np.array([1, 3, 227, 227])\n user_dict = {'some_node': [{'shape': np.zeros((3))}]}\n override_placeholder_shapes(graph, user_dict)\n res_shape = graph.node['node_1']['shape']\n self.assertTrue(np.array_equal(node_1_shape, res_shape))\n\n def test_override_placeholder_shapes_dict(self):\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_2'),\n ('node_2', 'op_output')\n ],\n {'node_2': {'shape': None, 'op': 'Placeholder'},\n 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}\n },\n nodes_with_edges_only=True)\n\n placeholder_shape = np.array([1, 3, 224, 224])\n user_shapes = {\n 'node_1': [{'shape': placeholder_shape}],\n 'node_2': [{'shape': placeholder_shape}],\n }\n override_placeholder_shapes(graph, user_shapes)\n res_shape = graph.node['node_1']['shape']\n res_shape2 = graph.node['node_2']['shape']\n self.assertTrue(np.array_equal(placeholder_shape, res_shape))\n self.assertTrue(np.array_equal(placeholder_shape, res_shape2))\n\n nodes = {\n 'placeholder_1': {'name': 'placeholder_1', 'shape': [1, 2, 3, 4], 'type': 'Placeholder', 'value': None,\n 'kind': 'op', 'op': 'Placeholder'},\n 'placeholder_2': {'name': 'placeholder_2', 'shape': [5, 6, 7, 8], 'type': 'Placeholder', 'value': None,\n 'kind': 'op', 'op': 'Placeholder'},\n '1': {'name': 'node_1', 'type': 'Identity', 'value': None, 'kind': 'op'},\n '2': {'name': 'node_2', 'type': 'Identity', 'value': None, 'kind': 'op'},\n '3': {'name': 'concat', 'type': 'Identity', 'value': None, 'kind': 'op'},\n '4': {'name': 'output', 'type': 'SoftMax', 'value': None, 'kind': 'op'}\n }\n edges = [\n ('placeholder_1', '1'),\n ('1', '3'),\n ('placeholder_2', '2'),\n ('2', '3'),\n ('3', '4')\n ]\n\n def test_override_placeholder_shapes_batch_is_not_set(self):\n \"\"\"\n Test case when batch is not set. 
(shapes shouldn't change)\n \"\"\"\n graph = build_graph(self.nodes, self.edges)\n shapes = {}\n batch = None\n override_placeholder_shapes(graph, shapes, batch)\n res_shape_1 = graph.node['placeholder_1']['shape']\n res_shape_2 = graph.node['placeholder_2']['shape']\n self.assertTrue(np.array_equal(self.nodes['placeholder_1']['shape'], res_shape_1))\n self.assertTrue(np.array_equal(self.nodes['placeholder_2']['shape'], res_shape_2))\n\n def test_override_placeholder_shapes_real_inputs_and_batch(self):\n \"\"\"\n Test case when batch is set and shapes should overwrite by user shapes.\n \"\"\"\n graph = build_graph(self.nodes, self.edges)\n shapes = {'placeholder_1': [{'shape': np.array([1, 2, 3, 4])}],\n 'placeholder_2': [{'shape': np.array([1, 5, 6, 7])}]}\n batch = 4\n override_placeholder_shapes(graph, shapes, batch)\n res_shape_1 = graph.node['placeholder_1']['shape']\n res_shape_2 = graph.node['placeholder_2']['shape']\n self.assertTrue(np.array_equal(res_shape_1, np.array([4, 2, 3, 4])))\n self.assertTrue(np.array_equal(res_shape_2, np.array([4, 5, 6, 7])))\n\n def test_override_placeholder_shapes_real_inputs_and_batch_2(self):\n \"\"\"\n Test case when batch is set, but shapes in user_shapes is None.\n \"\"\"\n graph = build_graph(self.nodes, self.edges)\n shapes = {'placeholder_1': [{'shape': None}], 'placeholder_2': [{'shape': None}]}\n batch = 4\n graph.node['placeholder_2']['shape'] = np.array([1, 2, 3, 4])\n graph.node['placeholder_2']['shape'] = np.array([1, 5, 6, 7])\n override_placeholder_shapes(graph, shapes, batch)\n np.testing.assert_array_equal(graph.node['placeholder_1']['shape'], np.array([4, 2, 3, 4]))\n np.testing.assert_array_equal(graph.node['placeholder_2']['shape'], np.array([4, 5, 6, 7]))\n\n def test_partial_infer(self):\n graph = build_graph(nodes_attributes,\n [('node_1', 'concat'),\n ('node_2', 'concat'),\n ('concat', 'node_3'),\n ('node_3', 'op_output')\n ],\n {'node_3': {'kind': 'data', 'shape': None, 'infer': None},\n 'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},\n 'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},\n 'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer}\n },\n nodes_with_edges_only=True)\n\n start_node = 'concat'\n partial_infer(graph, start_node)\n node = Node(graph, start_node)\n self.assertTrue(node.is_partial_inferred)\n self.assertTrue(node.out_node().is_partial_inferred)\n\n # check if previous nodes are not inferred\n node = Node(graph, start_node)\n while True:\n # collect nodes in a list\n if isinstance(node.in_nodes(), list):\n in_nodes = node.in_nodes()\n else:\n in_nodes = [y for x, y in node.in_nodes().items()]\n\n # check parents and find next parent\n for n in in_nodes:\n if 'embedded_input_' not in n.id:\n node = n\n self.assertFalse(n.has('is_partial_inferred'))\n\n if not len(in_nodes):\n break\n\n def test_partial_infer_no_shape(self):\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_2'),\n ('node_2', 'op_output')\n ],\n {'node_2': {'shape': None, 'infer': None},\n 'node_1': {'shape': None, 'infer': None}\n },\n nodes_with_edges_only=True)\n self.assertRaises(Error, partial_infer, graph, 'node_1')\n\n def test_partial_infer_cycle(self):\n graph = build_graph(nodes_attributes,\n [('node_1', 'concat'),\n ('node_2', 'concat'),\n ('concat', 'node_3'),\n ('node_3', 'concat'),\n ('node_3', 'op_output')\n ],\n {'node_3': {'kind': 'data', 'shape': None, 'infer': None},\n 'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': 
None},\n 'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},\n 'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer}\n },\n nodes_with_edges_only=True)\n\n start_node = 'concat'\n self.assertRaises(Error, partial_infer, graph, start_node)\n\n\nclass CycleTest(unittest.TestCase):\n def test_is_not_fully_inferred_param(self):\n # Node that have is_not_fully_inferred=True\n graph = build_graph(nodes_attributes,\n [('node_1', 'concat'),\n ('node_2', 'concat'),\n ('concat', 'node_3'),\n ('node_3', 'op_output')\n ],\n {'node_3': {'kind': 'data', 'shape': None, 'infer': None},\n 'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},\n 'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},\n 'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer, 'is_not_fully_inferred': True}\n },\n nodes_with_edges_only=True)\n\n start_node = 'concat'\n try:\n partial_infer(graph, start_node)\n except Error:\n self.fail(\"Unexpected Error raised\")\n node = Node(graph, start_node)\n self.assertTrue(node.is_partial_inferred)\n self.assertTrue(node.out_node().is_partial_inferred)\n\n def test_for_is_cyclic1(self):\n # Test for case of cyclic graph without is_cyclic attrs\n graph = build_graph(nodes_attributes,\n [('node_1', 'node_1_data'),\n ('node_1_data', 'node_3'),\n ('node_3', 'node_3_data'),\n ('node_3_data', 'node_1')],\n nodes_with_edges_only=True)\n with self.assertRaisesRegex(Error, 'Graph contains a cycle. Can not proceed.*'):\n partial_infer(graph)\n",
"\"\"\"\nCopyright (c) 2019 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport pytest\nimport numpy as np\nfrom accuracy_checker.metrics import MetricsExecutor\nfrom accuracy_checker.presenters import EvaluationResult\nfrom .common import single_class_dataset, multi_class_dataset, make_segmentation_representation\n\n\ndef create_config(metric_name, use_argmax=False):\n return {'annotation': 'mocked', 'metrics': [{'type': metric_name, 'use_argmax': use_argmax}]}\n\n\ndef generate_expected_result(values, metric_name, labels=None):\n meta = {'names': list(labels.values())} if labels else {}\n\n return EvaluationResult(pytest.approx(values), None, metric_name, None, meta)\n\n\nclass TestPixelAccuracy:\n name = 'segmentation_accuracy'\n\n def test_one_class(self):\n annotations = make_segmentation_representation(np.array([[0, 0], [0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0], [0, 0]]), False)\n dispatcher = MetricsExecutor(create_config(self.name), single_class_dataset())\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result(1.0, self.name)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class_not_matched(self):\n annotations = make_segmentation_representation(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]), False)\n dispatcher = MetricsExecutor(create_config(self.name), multi_class_dataset())\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result(0.0, self.name)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class(self):\n annotations = make_segmentation_representation(np.array([[1, 0, 3, 0, 0], [0, 0, 0, 0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[1, 2, 3, 2, 3], [0, 0, 0, 0, 0]]), False)\n dispatcher = MetricsExecutor(create_config(self.name), multi_class_dataset())\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result((5.0+1.0+1.0)/(8.0+1.0+1.0), self.name)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n\nclass TestMeanAccuracy:\n name = 'mean_accuracy'\n\n def test_one_class(self):\n annotations = make_segmentation_representation(np.array([[0, 0], [0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0], [0, 0]]), False)\n dataset = single_class_dataset()\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result([1.0, 0.0], self.name, dataset.labels)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def 
test_multi_class_not_matched(self):\n annotations = make_segmentation_representation(np.array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]), False)\n dataset = multi_class_dataset()\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result([0.0, 0.0, 0.0, 0.0], self.name, dataset.labels)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class(self):\n dataset = multi_class_dataset()\n annotations = make_segmentation_representation(np.array([[1, 2, 3, 2, 3], [0, 0, 0, 0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[1, 0, 3, 0, 0], [0, 0, 0, 0, 0]]), False)\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result([1.0, 1.0, 0.0, 0.5], self.name, dataset.labels)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n\nclass TestMeanIOU:\n name = 'mean_iou'\n\n def test_one_class(self):\n annotations = make_segmentation_representation(np.array([[0, 0], [0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0], [0, 0]]), False)\n dataset = single_class_dataset()\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result([1.0, 0.0], self.name, dataset.labels)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class_not_matched(self):\n annotations = make_segmentation_representation(np.array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]), False)\n dataset = multi_class_dataset()\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result([0.0, 0.0, 0.0, 0.0], self.name, dataset.labels)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class(self):\n dataset = multi_class_dataset()\n annotations = make_segmentation_representation(np.array([[1, 2, 3, 2, 3], [0, 0, 0, 0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[1, 0, 3, 0, 0], [0, 0, 0, 0, 0]]), False)\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result([0.625, 1.0, 0.0, 0.5], self.name, dataset.labels)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n\nclass TestSegmentationFWAcc:\n name = 'frequency_weighted_accuracy'\n\n def test_one_class(self):\n annotations = make_segmentation_representation(np.array([[0, 0], [0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0], [0, 0]]), False)\n dataset = single_class_dataset()\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result(1.0, self.name)\n for _, 
evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class_not_matched(self):\n annotations = make_segmentation_representation(np.array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]), True)\n predictions = make_segmentation_representation(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]), False)\n dataset = multi_class_dataset()\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result(0.0, self.name)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n\n def test_multi_class(self):\n dataset = multi_class_dataset()\n annotations = make_segmentation_representation(np.array([[1, 2, 3, 2, 3], [0, 0, 0, 0, 0]]), True)\n predictions = make_segmentation_representation(np.array([[1, 0, 3, 0, 0], [0, 0, 0, 0, 0]]), False)\n dispatcher = MetricsExecutor(create_config(self.name), dataset)\n dispatcher.update_metrics_on_batch(annotations, predictions)\n expected = generate_expected_result(0.5125, self.name)\n for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):\n assert evaluation_result == expected\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\n\nimport numpy as np\n\nfrom mo.ops.op import PermuteAttrs\nfrom mo.graph.graph import Node\n\n\ndef part_sizes_to_indices(part_sizes: list):\n \"\"\"\n Calculates indices of splits in the array based on part sizes for the split.\n Output list can be used as the second argument for np.split function.\n \"\"\"\n idx = 0\n indices = []\n for part_size in part_sizes:\n idx += part_size\n indices.append(idx)\n # the last element should equal to the size of original array and it is redundant to numpy\n log.debug(\"part_sizes: {} --> indices: {}\".format(part_sizes, indices))\n del indices[-1]\n log.debug(\"part_sizes: {} --> indices: {}\".format(part_sizes, indices))\n return np.array(indices)\n\n\ndef split(input_data_node: Node, node: Node, axis: int, part_sizes: list):\n \"\"\"\n Partial inference of generic split node.\n\n Args:\n @input: input tensor node, subject to split\n @node: node of one of the Split types\n @axis: split dimension index\n @part_sizes: a NumPy array with sizes of all pieces that we split to\n\n Returns:\n int: normalized axis index\n\n \"\"\"\n\n if input_data_node.shape is None:\n return\n\n # normalize axis\n if axis < 0:\n axis = input_data_node.shape.size + axis\n\n if axis < 0 or axis >= input_data_node.shape.size:\n log.error('Model is incorrect: axis for split node is out of range')\n return\n\n undef_indices = np.argwhere(part_sizes == -1)\n if undef_indices.size > 1:\n log.error('Desired split part sizes have more than one -1 element -- cannot deduce real sizes for them')\n return\n\n if undef_indices.size == 1:\n undef_index = undef_indices[0]\n part_sizes[undef_index] = 0\n deduced_dim = input_data_node.shape[axis] - np.add.reduce(part_sizes)\n if deduced_dim < 0:\n log.error('Just deduced dimension for the split has negative value that means that split input shape and '\n 'desired parts are not compatible')\n return\n\n all_parts_size = np.add.reduce(part_sizes)\n if all_parts_size != input_data_node.shape[axis]:\n log.error(\"input.shape[{}] = {} != {} = sum of all parts in part_sizes\".format(axis,\n input_data_node.shape[axis],\n all_parts_size))\n return\n\n splitted = None\n if input_data_node.value is not None:\n splitted = np.split(input_data_node.value, part_sizes_to_indices(part_sizes), axis)\n\n # not all outputs from the split could be used so it is necessary to iterate over output edges and infer shape for\n # necessary nodes only\n for _, dst, edge_attrs in node.graph.out_edges(node.id, data=True):\n out_port = edge_attrs['out']\n out_node = node.out_node(out_port)\n\n new_out_shape = input_data_node.shape.copy()\n new_out_shape[axis] = part_sizes[out_port]\n node.out_node(out_port).shape = new_out_shape\n if splitted is not None:\n out_node.value = splitted[out_port]\n assert all(out_node.value.shape == out_node.shape)\n\n assert not node.has_valid('axis') or node.axis == axis\n node.axis = axis\n # WARNING: != 4 is 
supposed to work for NHWC to NCHW translation only.\n # if other global permutations happen this will fail\n # TODO: redesign it to have this logic built in NHWC to NCHW translation pass; it requires\n # additional attributes with layout to be propagated through the network\n if len(input_data_node.shape) != 4 and node.has_valid('dim_attrs') and 'axis' in node.dim_attrs:\n log.warning('Removed \"axis\" attribute from the scope of the model relayout pass because len(input.shape) == {} '\n '!= 4 for node {}'.format(len(input_data_node.shape), node.soft_get('name')))\n node.dim_attrs.remove('axis')\n assert 'axis' not in node.dim_attrs\n log.debug('output shapes after split: {}'.format([v.shape for k, v in node.out_nodes().items()]))\n\n\ndef tf_split_infer(node):\n \"\"\"\n Partial infer of split node similar to Split op of TF.\n \"\"\"\n # Two inputs: [split_dim, input]\n assert len(node.in_nodes()) == 2, 'Node \"{}\" must have exactly two inputs'.format(node.soft_get('name'))\n split_dim = node.in_node(0).value\n if split_dim is None:\n log.error('split_dim value for node {} is None. Cannot do shape inference.')\n return\n\n assert split_dim.ndim == 0, 'The split dimension for node \"{}\" must be a scalar.'.format(node.soft_get('name'))\n split_dim = split_dim.item()\n input = node.in_node(1)\n\n if input.shape is None:\n log.error('Input shape for node {} is not defined'.format(node.soft_get('name')))\n return\n\n log.debug('input shape for split: {}, should be split along {} dim'.format(input.shape, split_dim))\n split_dim_size = input.shape[split_dim]\n log.debug('split_dim_size type = {}'.format(type(split_dim_size)))\n\n if split_dim_size % node.num_split != 0:\n log.error(\"split_dim cannot be evenly divided by a given number of parts\")\n return\n\n # split_dim is a numpy array, axis is split_dim[0]\n log.debug('split_dim_size = {}, node.num_split = {}, div = {}, typeof div = {}'.format(\n split_dim_size, node.num_split, split_dim_size / node.num_split, type(split_dim_size / node.num_split)))\n split(input, node, split_dim, [int(split_dim_size / node.num_split)] * node.num_split)\n node.graph.remove_edge(node.in_node(0).id, node.id)\n node['input_port'] = 1\n\n PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:1')])\n\n\ndef tf_split_v_infer(node: Node):\n \"\"\"\n Partial infer of split node similar to SplitV op of TF.\n \"\"\"\n\n if len(node.in_nodes()) == 1 and not (node.has_valid('axis') and node.has_valid('size_splits')):\n return\n\n if len(node.in_nodes()) == 3 and (node.has_valid('axis') or node.has_valid('size_splits')):\n return\n\n # Three inputs: [input, size_splits, split_dim)\n if len(node.in_nodes()) == 3:\n split_dim = node.in_node(2).value\n assert split_dim.ndim == 0\n split_dim = split_dim.item()\n size_splits = node.in_node(1).value\n node.graph.remove_edge(node.in_node(1).id, node.id)\n node.graph.remove_edge(node.in_node(2).id, node.id)\n else:\n split_dim = node.axis\n size_splits = node.size_splits\n \n if split_dim is None:\n log.error('split_dim value for node {} is None. 
Cannot do shape inference.')\n return\n \n input = node.in_node(0)\n if input.shape is None or size_splits is None:\n log.error('input shape or size of splits are not defined for node {}'.format(node.soft_get('name')))\n return\n\n log.debug('split_dim = {}, input.shape = {}, size_splits.value = {}'.format(split_dim, input.shape, size_splits))\n\n # split_dim is a numpy array, axis is split_dim\n split(input, node, split_dim, size_splits)\n\n PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])\n\n\ndef tf_unpack_infer(node: Node):\n if len(node.in_nodes()) != 1:\n log.debug('Unpack node \"{}\" must have one input.'.format(node.name))\n return\n\n in_shape = node.in_node().shape\n if in_shape is None:\n log.debug('Unpack node \"{}\" input node shape is not defined.'.format(node.name))\n return\n\n split_dim = node.axis\n log.debug('input shape for unpack: {}, should be split along {} dim'.format(in_shape, split_dim))\n split_dim_size = in_shape[split_dim]\n log.debug('split_dim_size type = {}'.format(type(split_dim_size)))\n\n if node.num_split is not None and node.num_split != split_dim_size:\n log.debug('The unpack where num to unpack is not equal to the size of the dimension to unpack is not supported')\n return\n\n if node.num_split is None:\n node.num_split = split_dim_size\n\n if split_dim_size % node.num_split != 0:\n log.error(\"split_dim cannot be evenly divided by a given number of parts\")\n return\n\n split(node.in_node(), node, split_dim, [int(split_dim_size / node.num_split)] * node.num_split)\n # node shapes will be squeezed in the separate pass\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport logging as log\n\nimport numpy as np\n\nfrom mo.front.caffe.extractors.utils import get_canonical_axis_index\nfrom mo.front.common.layout import get_height_dim, get_width_dim, get_depth_dim\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.ops.op import PermuteAttrs\nfrom mo.utils.error import Error\n\n\ndef is_spatial_squeeze(layout: str, input_shape: np.ndarray, squeeze_dims: np.ndarray):\n \"\"\"\n Checks that the squeeze operation removes all spatial dimensions.\n :param layout: graph layout.\n :param input_shape: numpy array with input shape.\n :param squeeze_dims: numpy array with dims to squeeze.\n :return: result of the check.\n \"\"\"\n if len(input_shape) < 4 or len(input_shape) > 5:\n return False\n spatial_dims = [get_height_dim(layout, len(input_shape)), get_width_dim(layout, len(input_shape))]\n if len(input_shape) == 5:\n spatial_dims.append(get_depth_dim(layout, len(input_shape)))\n for dim in spatial_dims:\n if input_shape[dim] != 1:\n log.debug('The reshape from \"{}\" with squeezed dims \"{}\" is not a spatial squeeze'.format(input_shape,\n squeeze_dims))\n return False\n if len(squeeze_dims) != len(spatial_dims):\n log.debug('The reshape from \"{}\" with squeezed dims \"{}\" is not a spatial squeeze'.format(input_shape,\n squeeze_dims))\n return False\n log.debug('The reshape from \"{}\" with squeezed dims \"{}\" is not a spatial squeeze'.format(input_shape,\n squeeze_dims))\n return True\n\n\ndef tf_squeeze_infer(node):\n if node.squeeze_dims is None:\n # TODO: implement; there is no implementation now because no test\n return\n\n real_squeeze_dims = []\n input_shape = node.in_node().shape\n if input_shape is None:\n return\n # UGLY\n output_shape = input_shape.copy()\n for n in node.squeeze_dims:\n if output_shape[n] == 1:\n real_squeeze_dims.append(get_canonical_axis_index(output_shape, n))\n else:\n raise Error('Trying to squeeze dimension not equal to 1 for node \"{}\"'.format(node.soft_get('name')))\n\n output_shape = np.delete(output_shape, real_squeeze_dims)\n node.out_node().shape = output_shape\n\n if is_spatial_squeeze(node.graph.graph['layout'], input_shape, output_shape):\n output_shape = int64_array([0, -1])\n node['dim'] = output_shape\n if node.in_node().value is not None:\n node.out_node().value = np.array(np.reshape(node.in_node().value, output_shape))\n\n PermuteAttrs.create_permute_attrs(node, attrs=[('dim', 'output:0')])\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport numpy as np\nfrom mo.front.extractor import FrontExtractorOp\nfrom mo.front.onnx.extractors.utils import onnx_attr\nfrom mo.graph.graph import Node\nfrom mo.ops.reduce import Reduce\n\n\nclass ReduceSumFrontExtractor(FrontExtractorOp):\n op = 'ReduceSum'\n enabled = True\n\n @staticmethod\n def extract(node: Node):\n axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type= lambda x: np.array(x, dtype=np.int64))\n keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)\n Reduce.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims, 'reduce_type': 'sum'})\n return __class__.enabled\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\nfrom collections import deque\n\nimport networkx as nx\nimport numpy as np\n\nfrom mo.front.extractor import add_attrs_props\nfrom mo.middle.passes.eliminate import graph_clean_up\nfrom mo.utils.graph import pseudo_topological_sort\nfrom mo.ops.lin_op import Mul, Add\nfrom mo.middle.passes.eliminate import merge_data_nodes\nfrom mo.ops.op import Op\nfrom mo.graph.graph import Node, Graph\nfrom mo.middle.passes.fusing.helpers import backward_bfs, forward_bfs, get_tensor_id, get_value_id\n\n\ndef _fuse_linear_sequence(graph: Graph, start_node: Node):\n \"\"\"\n This function finds the sequence of Mul/Add operations and replaces this sequence with two ops (Mul->Add).\n :param graph:\n :param start_node: The first operation of the sequence\n \"\"\"\n fnodes = [start_node]\n while True:\n node = fnodes[-1]\n data_node = node.out_node()\n if (len(data_node.out_nodes()) != 1):\n break\n if (data_node.out_node().op in ['Mul', 'Add']) and get_value_id(data_node.out_node()) is not None and data_node.out_node().soft_get('can_be_fused') == True:\n fnodes.append(data_node.out_node())\n else:\n break\n\n if len(fnodes) == 1 or (len(fnodes) == 2 and fnodes[0].op == 'Mul' and fnodes[1].op == 'Add'):\n return False\n\n input_shape = start_node.in_node(get_tensor_id(start_node)).shape\n\n init_dims_cnt = len(input_shape) - 2 if graph.graph['layout'] == 'NCHW' else 1\n\n mul = np.ones([1 for x in range(init_dims_cnt)])\n add = np.zeros([1 for x in range(init_dims_cnt)])\n\n first_mul_name = None\n first_add_name = None\n\n for idx in range(len(fnodes)):\n node = fnodes[idx]\n const_node = get_value_id(node)\n if node.op == 'Mul':\n if first_mul_name is None:\n first_mul_name = node.name\n mul = mul * node.in_node(const_node).value\n add = add * node.in_node(const_node).value\n elif node.op == 'Add':\n if first_add_name is None:\n first_add_name = node.name\n add = add + node.in_node(const_node).value\n\n # If mul is scalar we broadcast it to biases shape\n if mul.shape != add.shape and len(mul.shape) == 1 and mul.shape[0] == 1:\n mul = np.array([mul[0] for x in range(add.shape[0])])\n\n assert (np.array_equal(fnodes[0].in_node(get_tensor_id(fnodes[0])).shape, fnodes[-1].out_node().shape))\n\n mul_node = Mul(graph, dict(name=first_mul_name + '/Fused_Mul_' if first_mul_name is not None else ''))\n add_node = Add(graph, dict(name=first_add_name + '/Fused_Add_' if first_add_name is not None else ''))\n\n in_node = fnodes[0].in_node(get_tensor_id(fnodes[0]))\n out_node = fnodes[-1].out_node()\n\n graph.remove_edge(in_node.id, fnodes[0].id)\n graph.remove_edge(fnodes[-1].id, out_node.id)\n\n # Remove deleted subgraph\n for node in fnodes:\n for tmp_node in node.in_nodes().values():\n # Remove node only if it has one consumer (for case with shared weights)\n if len(tmp_node.out_nodes()) == 1:\n graph.remove_node(tmp_node.id)\n for tmp_node in node.out_nodes().values():\n 
graph.remove_node(tmp_node.id)\n graph.remove_node(node.id)\n\n \"\"\"\n Four cases considered below:\n 1. Mul and Add have valid values (mul value != 1 and add value != 0)\n 2. Only Mul has valid values, so we add only Mul node\n 3. Only Add has valid values, so we add only Add node\n 4. When Mul and Add has not valid values we just merge two data nodes\n \"\"\"\n if any([x != 0 for x in np.nditer(add)]) and any([x != 1 for x in np.nditer(mul)]):\n data_mul = Op.create_input_data_node(graph, \"data_mul_\", np.array(mul))\n data_add = Op.create_input_data_node(graph, \"data_add_\", np.array(add))\n add_node.create_node_with_data(inputs=[mul_node.create_node_with_data([in_node, data_mul]), data_add],\n data_nodes=out_node)\n elif any([x != 1 for x in np.nditer(mul)]):\n data_mul = Op.create_input_data_node(graph, \"data_mul_\", np.array(mul))\n mul_node.create_node_with_data(inputs=[in_node, data_mul], data_nodes=out_node)\n elif any([x != 0 for x in np.nditer(add)]):\n data_add = Op.create_input_data_node(graph, \"data_add_\", np.array(add))\n add_node.create_node_with_data(inputs=[in_node, data_add], data_nodes=out_node)\n else:\n merge_data_nodes(graph,out_node, in_node)\n graph.remove_node(in_node.id)\n\n log.debug('Fused {} operations'.format(len(fnodes)))\n return True\n\n\ndef fuse_mul_add_sequence(graph: Graph):\n \"\"\"\n This function finds first valid Mul/Add node and pass it to fuse_linear_sequence where full sequence will be found\n \"\"\"\n while True:\n is_fused = False\n for idx in list(pseudo_topological_sort(graph)):\n if idx in graph:\n node = Node(graph, idx)\n if node.soft_get('op') in ['Mul','Add'] and get_value_id(node) is not None and node.soft_get('can_be_fused') == True:\n is_fused |= _fuse_linear_sequence(graph, node)\n if not is_fused:\n break",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.middle.FusePermutesSequence import FusePermutesSequence\nfrom extensions.middle.NormalizeFullyConnected import NormalizeFullyConnected\nfrom mo.middle.passes.eliminate_test import build_graph\nfrom mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs\n\n# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the\n# dictionary with node attributes.\nnodes_attributes = {\n 'placeholder_1': {'name': 'placeholder_1', 'value': None, 'shape': None, 'type': 'Placeholder', 'kind': 'op',\n 'op': 'Placeholder'},\n 'placeholder_1_data': {'name': 'placeholder_1_data', 'value': None, 'shape': None, 'kind': 'data',\n 'data_type': None},\n 'reshape_1': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},\n 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n 'fc': {'type': 'FullyConnected', 'value': None, 'kind': 'op', 'op': 'MatMul'},\n 'fc_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'fc_weights': {'value': None, 'shape': None, 'kind': 'data'},\n\n 'reshape_2': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},\n 'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n}\n\n\nclass NormalizeFullyConnectedTest(unittest.TestCase):\n def test_1(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'fc'),\n ('fc_weights', 'fc'),\n ('fc', 'fc_data'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 16, 512])},\n 'fc': {'out-size': 101},\n 'fc_weights': {'shape': np.array([512,101]), 'value': np.ones([512, 101]), 'input_channel_dim': 1},\n 'fc_data': {'shape': np.array([1, 16, 101])},\n }, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'fc'),\n ('fc_weights', 'fc'),\n ('fc', 'fc_data'),\n ('fc_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 16, 512])},\n 'reshape_1_data': {'shape': np.array([16, 512])},\n 'reshape_2_data': {'shape': np.array([1, 16, 101])},\n 'fc_weights': {'shape': np.array([512,101]), 'value': np.ones([512, 101])},\n 'fc': {'out-size': 101},\n 'fc_data': {'shape': np.array([16, 101])},\n }, nodes_with_edges_only=True)\n\n pattern = NormalizeFullyConnected()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', 'placeholder_1_data', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n\n def test_2(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'fc'),\n ('fc_weights', 'fc'),\n ('fc', 'fc_data'),\n ],\n {'placeholder_1_data': {'shape': np.array([2, 32, 16, 512])},\n 'fc': 
{'out-size': 101},\n 'fc_weights': {'shape': np.array([512,101]), 'value': np.ones([512, 101]), 'input_channel_dim': 1},\n 'fc_data': {'shape': np.array([2, 32, 16, 101])},\n }, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'fc'),\n ('fc_weights', 'fc'),\n ('fc', 'fc_data'),\n ('fc_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ],\n {'placeholder_1_data': {'shape': np.array([2, 32, 16, 512])},\n 'reshape_1_data': {'shape': np.array([2 * 32 * 16, 512])},\n 'reshape_2_data': {'shape': np.array([2, 32, 16, 101])},\n 'fc_weights': {'shape': np.array([512,101]), 'value': np.ones([512, 101])},\n 'fc': {'out-size': 101},\n 'fc_data': {'shape': np.array([2 * 32 * 16, 101])},\n }, nodes_with_edges_only=True)\n\n pattern = NormalizeFullyConnected()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', 'placeholder_1_data', check_op_attrs=True)\n self.assertTrue(flag, resp)\n",
"\"\"\"\n Copyright (c) 2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\n\nimport numpy as np\n\nfrom typing import Dict\nfrom mo.front.common.partial_infer.utils import assign_dims_to_weights\nfrom mo.graph.graph import Graph, Node\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.lin_op import Add\n\n\nclass GemmToFullyConnected(MiddleReplacementPattern):\n enabled = True\n graph_condition = [lambda graph: graph.graph['fw'] == 'onnx']\n\n def run_after(self):\n from extensions.middle.pass_separator import MiddleStart\n return [MiddleStart]\n\n def run_before(self):\n from extensions.middle.pass_separator import MiddleFinish\n return [MiddleFinish]\n\n def pattern(self):\n return dict(\n nodes=[\n ('gemm', dict(kind='op', op='Gemm')),\n ('output', dict(kind='data'))],\n edges=[('gemm', 'output')]\n )\n\n def replace_pattern(self, graph: Graph, match: Dict[str, Node]):\n log.debug('GemmToFullyConnected is triggered')\n gemm = match['gemm']\n A = gemm.in_node(0)\n B = gemm.in_node(1)\n B_consumers = graph.out_edges(B.node)\n C = gemm.in_node(2)\n\n if not (B.value is not None and\n C.value is not None and\n A.shape is not None and\n not gemm.transpose_a and\n (len(B_consumers) == 1 or not gemm.transpose_b)):\n log.warning('Cannot convert Gemm to FullyConnected')\n return\n\n if gemm.transpose_b:\n # B.value = B.value.transpose()\n # B.shape = np.array(B.value.shape, dtype=np.int64)\n gemm.transpose_b = 0\n else:\n B.value = B.value.transpose()\n B.shape = np.array(B.value.shape, dtype=np.int64)\n\n gemm['out-size'] = gemm.out_port(0).data.get_shape()[-1]\n gemm['type'] = 'FullyConnected'\n gemm['channel_dims'] = len(match['output'].shape) - 1\n gemm['bias_addable'] = True\n gemm['input_channel_dim'] = 1 # MatMul weights in IO\n gemm['output_channel_dim'] = 0\n gemm['layout'] = 'NCHW'\n\n gemm.in_port(1).bin = 'weights'\n\n bias_node = Add(graph, {}).create_node()\n gemm.out_port(0).get_connection().set_source(bias_node.out_port(0))\n gemm.in_port(2).get_connection().set_destination(bias_node.in_port(1))\n gemm.out_port(0).connect(bias_node.in_port(0))\n\n assign_dims_to_weights(gemm.in_node(1), None, 1, 0, 2)\n # Do not transpose weights in this pass, it will be done as a separate pass\n"
] | [
[
"numpy.array"
],
[
"numpy.testing.assert_array_equal",
"numpy.array"
],
[
"numpy.array",
"numpy.zeros",
"numpy.array_equal"
],
[
"numpy.array"
],
[
"numpy.add.reduce",
"numpy.array",
"numpy.argwhere"
],
[
"numpy.delete"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.nditer"
],
[
"numpy.array",
"numpy.ones"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
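The Model Optimizer row above builds split indices from part sizes before calling `np.split`. As a minimal, self-contained sketch (an illustration on my part, not code from the dataset row), the same mapping can be expressed with a cumulative sum, dropping the final cut point because it equals the array length:

```python
# Sketch of the part_sizes -> np.split indices mapping used above.
import numpy as np

def part_sizes_to_indices(part_sizes):
    # np.split expects cut points; these are the cumulative sums of all
    # parts except the last one, which is redundant for numpy.
    return np.cumsum(part_sizes)[:-1]

data = np.arange(10)
pieces = np.split(data, part_sizes_to_indices([3, 3, 4]))
print([p.tolist() for p in pieces])  # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
```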
Yfyangd/Computer_Vision_CS665 | [
"59dca3ce42f43b4aea446497a578f4a0eb93995d"
] | [
"Homography/hw2-2/homography.py"
] | [
"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\ndef get_homograph(u,v):\n A = np.array([[u[0][0], u[0][1], 1, 0, 0, 0, -1 * u[0][0] * v[0][0], -1 * u[0][1] * v[0][0]],\n [0, 0, 0, u[0][0], u[0][1], 1, -1 * u[0][0] * v[0][1], -1 * u[0][1] * v[0][1]],\n [u[1][0], u[1][1], 1, 0, 0, 0, -1 * u[1][0] * v[1][0], -1 * u[1][1] * v[1][0]],\n [0, 0, 0, u[1][0], u[1][1], 1, -1 * u[1][0] * v[1][1], -1 * u[1][1] * v[1][1]],\n [u[2][0], u[2][1], 1, 0, 0, 0, -1 * u[2][0] * v[2][0], -1 * u[2][1] * v[2][0]],\n [0, 0, 0, u[2][0], u[2][1], 1, -1 * u[2][0] * v[2][1], -1 * u[2][1] * v[2][1]],\n [u[3][0], u[3][1], 1, 0, 0, 0, -1 * u[3][0] * v[3][0], -1 * u[3][1] * v[3][0]],\n [0, 0, 0, u[3][0], u[3][1], 1, -1 * u[3][0] * v[3][1], -1 * u[3][1] * v[3][1]]\n ])\n b = np.array([[v[0][0]],\n [v[0][1]],\n [v[1][0]],\n [v[1][1]],\n [v[2][0]],\n [v[2][1]],\n [v[3][0]],\n [v[3][1]]\n ])\n tmp = np.dot(np.linalg.inv(A), b)\n H = np.array([[tmp[0][0], tmp[1][0], tmp[2][0]],\n [tmp[3][0], tmp[4][0], tmp[5][0]],\n [tmp[6][0], tmp[7][0], 1]\n ])\n return H\n\n\ndef interpolation(img, new_x, new_y):\n fx = round(new_x - int(new_x), 2)\n fy = round(new_y - int(new_y), 2)\n p = np.zeros((3,))\n p += (1 - fx) * (1 - fy) * img[int(new_y), int(new_x)]\n p += (1 - fx) * fy * img[int(new_y) + 1, int(new_x)]\n p += fx * (1 - fy) * img[int(new_y), int(new_x) + 1]\n p += fx * fy * img[int(new_y) + 1, int(new_x) + 1]\n return p\n\ndef forward_warping(u,v,input_image,canvas):\n matrix = get_homograph(u,v)\n i0_max = u[0:4,0:1].max()\n i0_min = u[0:4,0:1].min()\n i1_max = u[0:4,1:2].max()\n i1_min = u[0:4,1:2].min()\n i0_range = i0_max-i0_min\n i1_range = i1_max-i1_min\n \n for i in range(i1_range):\n for j in range(i0_range):\n tmp2 = np.dot(matrix, np.array([[j+i0_min, i+i1_min, 1]]).T)\n x, y = int(tmp2[0][0] / tmp2[2][0]), int(tmp2[1][0] / tmp2[2][0])\n canvas[y][x] = input_image[i+i1_min][j+i0_min]\n return canvas\n\ndef backward_warping(u,v,input_image,canvas):\n matrix = get_homograph(u,v) # v: output, u: input\n i0_max = u[0:4,0:1].max()\n i0_min = u[0:4,0:1].min()\n i1_max = u[0:4,1:2].max()\n i1_min = u[0:4,1:2].min()\n i0_range = i0_max-i0_min\n i1_range = i1_max-i1_min\n for j in range(i1_range):\n for i in range(i0_range):\n new_pos = np.dot(matrix, np.array([[i+i0_min, j+i1_min, 1]]).T)\n new_x, new_y = new_pos[0][0] / new_pos[2][0], new_pos[1][0] / new_pos[2][0]\n res = interpolation(input_image, new_x, new_y)\n canvas[j+i1_min][i+i0_min] = res\n return canvas"
] | [
[
"numpy.linalg.inv",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
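The homography row above assembles the standard 8x8 DLT system for four point correspondences and inverts it explicitly. The sketch below (my own example data, not from the repository) solves the same system with `np.linalg.solve`, which avoids forming the inverse:

```python
# Four-point homography via the same DLT equations, solved directly.
import numpy as np

def get_homography(u, v):
    # u, v: 4x2 arrays of corresponding points; solve A h = b for the 8
    # unknown entries of H (H[2, 2] is fixed to 1).
    A, b = [], []
    for (x, y), (xp, yp) in zip(u, v):
        A.append([x, y, 1, 0, 0, 0, -x * xp, -y * xp]); b.append(xp)
        A.append([0, 0, 0, x, y, 1, -x * yp, -y * yp]); b.append(yp)
    h = np.linalg.solve(np.array(A, dtype=float), np.array(b, dtype=float))
    return np.append(h, 1.0).reshape(3, 3)

u = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
v = np.array([[0, 0], [2, 0], [2, 2], [0, 2]], dtype=float)
print(get_homography(u, v).round(3))  # a pure scaling by 2
```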
The-SocialLion/Speech-Emotion-Recognition-using-MLP-Classifier | [
"5c4101ebbe2b43db28dbb97f94dc3001bdf56ff8"
] | [
"sp.py"
] | [
"import librosa\r\nimport soundfile\r\nimport os, glob, pickle\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ndef extract_feature(file_name, mfcc, chroma, mel):\r\n with soundfile.SoundFile(file_name) as sound_file:\r\n X = sound_file.read(dtype=\"float32\")\r\n sample_rate=sound_file.samplerate\r\n if chroma:\r\n stft=np.abs(librosa.stft(X))\r\n result=np.array([])\r\n if mfcc:\r\n mfccs=np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)\r\n result=np.hstack((result, mfccs))\r\n if chroma:\r\n chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)\r\n result=np.hstack((result, chroma))\r\n if mel:\r\n mel=np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\r\n result=np.hstack((result, mel))\r\n return result\r\n\r\nemotions={\r\n '01':'neutral',\r\n '02':'calm',\r\n '03':'happy',\r\n '04':'sad',\r\n '05':'angry',\r\n '06':'fearful',\r\n '07':'disgust',\r\n '08':'surprised'\r\n}\r\n#DataFlair - Emotions to observe\r\nobserved_emotions=['calm', 'happy', 'fearful', 'disgust'] \r\n\r\ndef load_data(ts):\r\n tr=abs(1-ts)\r\n x,y=[],[]\r\n for file in glob.glob(\"D:\\\\python\\\\dl programs\\\\SP\\\\DATA\\\\Actor_*\\\\*.wav\"):\r\n file_name=os.path.basename(file)\r\n emotion=emotions[file_name.split(\"-\")[2]]\r\n print(emotion)\r\n if emotion not in observed_emotions:\r\n continue\r\n feature=extract_feature(file, mfcc=True, chroma=True, mel=True)\r\n x.append(feature)\r\n y.append(emotion)\r\n return train_test_split(np.array(x), y, test_size=ts, train_size=tr ,random_state=9)\r\nts=0.25\r\nload_data(ts)\r\nx_train,x_test,y_train,y_test=load_data(ts)\r\nprint((x_train.shape[0], x_test.shape[0]))\r\nprint(f'Features extracted: {x_train.shape[1]}')\r\n#DataFlair - Initialize the Multi Layer Perceptron Classifier\r\nmodel=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\naccuracy=accuracy_score(y_true=y_test, y_pred=y_pred)\r\n#DataFlair - Print the accuracy\r\nprint(\"Accuracy: {:.2f}%\".format(accuracy*100))\r\n"
] | [
[
"sklearn.neural_network.MLPClassifier",
"numpy.hstack",
"numpy.array",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
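The speech-emotion row above extracts MFCC/chroma/mel features and fits an `MLPClassifier`. A hedged, dependency-light sketch of the same pipeline shape (synthetic features standing in for the RAVDESS audio files, which are assumed unavailable here):

```python
# Feature matrix -> train/test split -> MLPClassifier -> accuracy, no audio I/O.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 40))            # stand-in for 40 MFCC features
y = (X[:, 0] + X[:, 1] > 0).astype(int)   # toy binary "emotion" label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=9)
model = MLPClassifier(hidden_layer_sizes=(64,), max_iter=500, random_state=0)
model.fit(X_train, y_train)
print(f"Accuracy: {accuracy_score(y_test, model.predict(X_test)):.2f}")
```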
brohrer/nn_methods | [
"acf3d1369e240971e5ee05696610c59c4c993a30"
] | [
"cottonwood/core/layers/dense.py"
] | [
"import numpy as np\nfrom cottonwood.core.activation import Tanh\nfrom cottonwood.core.initializers import LSUV\nfrom cottonwood.core.layers.generic_layer import GenericLayer\nfrom cottonwood.core.optimizers import SGD\nimport cottonwood.core.toolbox as tb\n\n\nclass Dense(GenericLayer):\n def __init__(\n self,\n n_outputs,\n m_inputs=None,\n activation_function=None,\n dropout_rate=0,\n initializer=None,\n previous_layer=None,\n optimizer=None,\n ):\n self.previous_layer = previous_layer\n if m_inputs is not None:\n self.m_inputs = m_inputs\n else:\n self.m_inputs = self.previous_layer.y.size\n self.n_outputs = int(n_outputs)\n self.activation_function = activation_function\n self.dropout_rate = dropout_rate\n\n if activation_function is None:\n self.activation_function = Tanh()\n else:\n self.activation_function = activation_function\n\n if initializer is None:\n self.initializer = LSUV()\n else:\n self.initializer = initializer\n\n if optimizer is None:\n self.optimizer = SGD()\n else:\n self.optimizer = optimizer\n\n # Choose random weights.\n # Inputs match to rows. Outputs match to columns.\n # Add one to m_inputs to account for the bias term.\n self.weights = self.initializer.initialize(\n self.m_inputs + 1, self.n_outputs)\n\n self.reset()\n self.regularizers = []\n\n def __str__(self):\n \"\"\"\n Make a descriptive, human-readable string for this layer.\n \"\"\"\n str_parts = [\n \"fully connected\",\n f\"number of inputs: {self.m_inputs}\",\n f\"number of outputs: {self.n_outputs}\",\n \"activation function:\" + tb.indent(\n self.activation_function.__str__()),\n \"initialization:\" + tb.indent(self.initializer.__str__()),\n \"optimizer:\" + tb.indent(self.optimizer.__str__()),\n ]\n for regularizer in self.regularizers:\n str_parts.append(\n \"regularizer:\" + tb.indent(regularizer.__str__()))\n return \"\\n\".join(str_parts)\n\n def add_regularizer(self, new_regularizer):\n self.regularizers.append(new_regularizer)\n\n def reset(self):\n self.x = np.zeros((1, self.m_inputs))\n self.y = np.zeros((1, self.n_outputs))\n self.de_dx = np.zeros((1, self.m_inputs))\n self.de_dy = np.zeros((1, self.n_outputs))\n\n def forward_pass(self, evaluating=False, **kwargs):\n \"\"\"\n Propagate the inputs forward through the network.\n\n evaluating: boolean\n Is this part of a training run or an evaluation run?\n \"\"\"\n if self.previous_layer is not None:\n self.x += self.previous_layer.y\n # Apply dropout only during training runs.\n if evaluating:\n dropout_rate = 0\n else:\n dropout_rate = self.dropout_rate\n\n if dropout_rate > 0:\n self.i_dropout = np.zeros(self.x.size, dtype=bool)\n self.i_dropout[np.where(\n np.random.uniform(size=self.x.size) < dropout_rate)] = True\n self.x[:, self.i_dropout] = 0\n self.x[:, np.logical_not(self.i_dropout)] *= 1 / (1 - dropout_rate)\n else:\n self.i_dropout = None\n\n bias = np.ones((1, 1))\n x_w_bias = np.concatenate((self.x, bias), axis=1)\n v = x_w_bias @ self.weights\n self.y = self.activation_function.calc(v)\n\n def backward_pass(self):\n \"\"\"\n Propagate the outputs back through the layer.\n \"\"\"\n bias = np.ones((1, 1))\n x_w_bias = np.concatenate((self.x, bias), axis=1)\n\n dy_dv = self.activation_function.calc_d(self.y)\n # v = self.x @ self.weights\n dv_dw = x_w_bias.transpose()\n dv_dx = self.weights.transpose()\n\n dy_dw = dv_dw @ dy_dv\n self.de_dw = self.de_dy * dy_dw\n\n for regularizer in self.regularizers:\n regularizer.pre_optim_update(self)\n\n self.optimizer.update(self)\n\n for regularizer in self.regularizers:\n 
regularizer.post_optim_update(self)\n\n self.de_dx = (self.de_dy * dy_dv) @ dv_dx\n\n # Remove the dropped-out inputs from this run.\n de_dx_no_bias = self.de_dx[:, :-1]\n\n if self.i_dropout is not None:\n de_dx_no_bias[:, self.i_dropout] = 0\n\n # Remove the bias node from the gradient vector.\n self.previous_layer.de_dy += de_dx_no_bias\n"
] | [
[
"numpy.logical_not",
"numpy.ones",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
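The `Dense.forward_pass` in the row above applies inverted dropout: it zeroes a random subset of inputs during training and rescales the survivors by 1 / (1 - rate) so the expected activation matches evaluation time. A small NumPy-only sketch of that trick (names and signature are mine, not the library's):

```python
import numpy as np

def dropout(x, rate, rng, evaluating=False):
    if evaluating or rate <= 0:
        return x                                 # no-op outside training
    mask = rng.uniform(size=x.shape) >= rate     # True = keep this unit
    return np.where(mask, x / (1.0 - rate), 0.0)

rng = np.random.default_rng(0)
x = np.ones((1, 8))
print(dropout(x, 0.25, rng))          # some zeros, survivors scaled to ~1.33
print(dropout(x, 0.25, rng, True))    # unchanged during evaluation
```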
TinghuiWang/pyActLearn | [
"d858136e86324fac51b0943765ef60bd405e31d1",
"d858136e86324fac51b0943765ef60bd405e31d1"
] | [
"pyActLearn/sensors/sensor2vec.py",
"pyActLearn/learning/gcforest.py"
] | [
"import math\nimport numpy as np\nimport tensorflow as tf\nfrom ..learning.nn.injectors import SkipGramInjector\n\n\ndef sensor2vec(num_sensors, sensor_event_list, embedding_size=20,\n batch_size=128, num_skips=8, skip_window=5,\n num_neg_samples=64, learning_rate=1.0):\n \"\"\"Sensor to Vector\n \"\"\"\n if num_neg_samples > num_sensors:\n num_neg_samples = num_sensors\n # Initialize a SkipGram Injector\n injector = SkipGramInjector(sensor_event_list, batch_size, num_skips, skip_window)\n # Build Training Model\n graph = tf.Graph()\n with graph.as_default():\n # Input Place Holder\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n # As we normally do not have too many sensors - it is OK to use all of them\n valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)\n # Only CPU supports NCE loss\n with tf.device('/cpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([num_sensors, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([num_sensors]))\n\n # Compute the average NCE loss for the batch.\n # tf.nce_loss automatically draws a new sample of the negative labels each\n # time we evaluate the loss.\n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=embed,\n num_sampled=num_neg_samples,\n num_classes=num_sensors))\n\n # Construct the Optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n\n # Add variable initializer.\n init = tf.initialize_all_variables()\n\n # Begin training.\n num_steps = 100001\n\n with tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n print(\"Initialized\")\n\n average_loss = 0\n for step in range(num_steps):\n batch_inputs, batch_labels = injector.next_batch()\n feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % 2000 == 0:\n if step > 0:\n average_loss /= 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n\n final_embeddings = normalized_embeddings.eval()\n final_similarity = 1 - similarity.eval()\n distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None]\n return final_embeddings, distance_matrix\n\n\n\ndef sensor2vec_data(sensor_list, event_list, embedding_size=20,\n batch_size=128, num_skips=8, skip_window=5,\n num_neg_samples=64, learning_rate=1.0, ignore_off=True):\n \"\"\"Transform sensor to high dimensional space\n\n Similar to word embedding used in natural language processing 
system, we want\n to represent sensors using in a synthesized vector space as well, instead of\n using an arbitrary labels for each sensors without any useful information.\n\n The methods used to find word embeddings can be classified into two categories:\n count-based methods (Latent Semantic Analysis) and predictive models.\n In this implementation for mapping sensor into high dimension vector space, we\n use skip-gram negative sampling models.\n\n Args:\n sensor_list (:obj:`list` of :obj:`dict`): List of dictionary containing\n sensor information.\n event_list (:obj:`list` of :obj:`dict`): List of events.\n embedding_size (:obj:`int`): The size of embedding vector.\n batch_size (:obj:`int`): The number of batch used in training\n num_skips (:obj:`int`): How many times to re-use an input to generate a label\n in skip-gram model.\n skip_window (:obj:`int`): How many items to consider left or right in skip-gram\n model.\n num_neg_samples (:obj:`int`): Number of negative samples to draw from the vocabulary.\n ignore_off (:obj:`bool`): Ignore motion-sensor with ``Off`` state in event.rst list.\n\n Please refer to :func:`sensor_distance` for an example of ``sensor_list``.\n Please refer to :func:`sensor_mi_distance` for an example of ``event_list``.\n \"\"\"\n # Put sensor in hash table for fast fetch of index\n num_sensors = len(sensor_list)\n # Negative samples cannot exceed sensor numbers\n if num_neg_samples > num_sensors:\n num_neg_samples = num_sensors\n # Store sensor ID in hash table for faster access\n sensor_dict = {}\n for i in range(num_sensors):\n sensor_dict[sensor_list[i]['name']] = i\n # Generate event.rst sensor list\n event_sensor_list = []\n for event_entry in event_list:\n if ignore_off and event_entry['sensor_status'].upper() == \"OFF\":\n continue\n event_sensor_list.append(sensor_dict[event_entry['sensor_id']])\n # Initialize a SkipGram Injector\n injector = SkipGramInjector(event_sensor_list, batch_size, num_skips, skip_window)\n # Build Training Model\n graph = tf.Graph()\n with graph.as_default():\n # Input Place Holder\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n # As we normally do not have too many sensors - it is OK to use all of them\n valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)\n # Only CPU supports NCE loss\n with tf.device('/cpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([num_sensors, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([num_sensors]))\n\n # Compute the average NCE loss for the batch.\n # tf.nce_loss automatically draws a new sample of the negative labels each\n # time we evaluate the loss.\n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=embed,\n num_sampled=num_neg_samples,\n num_classes=num_sensors))\n\n # Construct the Optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, 
valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n\n # Add variable initializer.\n init = tf.initialize_all_variables()\n\n # Begin training.\n num_steps = 100001\n\n with tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n print(\"Initialized\")\n\n average_loss = 0\n for step in range(num_steps):\n batch_inputs, batch_labels = injector.next_batch()\n feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % 2000 == 0:\n if step > 0:\n average_loss /= 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n\n # Note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n sim = similarity.eval()\n for i in range(num_sensors):\n valid_sensor = sensor_list[i]['name']\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n log_str = \"Nearest to %s:\" % valid_sensor\n for k in range(top_k):\n close_sensor = sensor_list[nearest[k]]['name']\n log_str = \"%s %s,\" % (log_str, close_sensor)\n print(log_str)\n final_embeddings = normalized_embeddings.eval()\n final_similarity = 1 - similarity.eval()\n distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:,None]\n\n # try:\n # from sklearn.manifold import TSNE\n # import matplotlib.pyplot as plt\n #\n # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n # low_dim_embs = tsne.fit_transform(final_embeddings)\n # labels = [sensor_list[i]['name'] for i in range(num_sensors)]\n #\n # assert low_dim_embs.shape[0] >= len(labels), \"More labels than embeddings\"\n # plt.figure(figsize=(18, 18)) # in inches\n # for i, label in enumerate(labels):\n # x, y = low_dim_embs[i, :]\n # plt.scatter(x, y)\n # plt.annotate(label,\n # xy=(x, y),\n # xytext=(5, 2),\n # textcoords='offset points',\n # ha='right',\n # va='bottom')\n # plt.show()\n # except ImportError:\n # print(\"Please install sklearn, matplotlib, and scipy to visualize embeddings.\")\n\n return final_embeddings, distance_matrix\n",
"#!usr/bin/env python\nimport itertools\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\nclass gcForest(object):\n max_acc = 0.0\n max_pred_layer = []\n\n def __init__(self, n_mgsRFtree=30, cascade_test_size=0.2, n_cascadeRF=2,\n n_cascadeRFtree=101, cascade_layer=np.inf,\n min_samples_cascade=0.05, tolerance=0.0):\n setattr(self, 'n_layer', 0)\n setattr(self, '_n_samples', 0)\n setattr(self, 'n_cascadeRF', int(n_cascadeRF))\n setattr(self, 'cascade_test_size', cascade_test_size)\n setattr(self, 'n_mgsRFtree', int(n_mgsRFtree))\n setattr(self, 'n_cascadeRFtree', int(n_cascadeRFtree))\n setattr(self, 'cascade_layer', cascade_layer)\n setattr(self, 'min_samples_cascade', min_samples_cascade)\n setattr(self, 'tolerance', tolerance)\n\n def fit(self, X, y):\n _ = self.cascade_forest(X, y)\n\n def predict_proba(self, X):\n cascade_all_pred_prob = self.cascade_forest(X)\n predict_proba = np.mean(cascade_all_pred_prob, axis=0)\n return predict_proba\n\n def predict(self, X):\n pred_proba = self.predict_proba(X=X)\n predictions = np.argmax(pred_proba, axis=1)\n return predictions\n\n def cascade_forest(self, X, y=None):\n if y is not None:\n setattr(self, 'n_layer', 0)\n test_size = getattr(self, 'cascade_test_size')\n max_layers = getattr(self, 'cascade_layer')\n tol = getattr(self, 'tolerance')\n # test_size = int(np.floor(X.shape[0] * test_size))\n # train_size = X.shape[0] - test_size\n # X_train = X[0:train_size, :]\n # y_train = y[0:train_size]\n # X_test = X[train_size:train_size + test_size, :]\n # y_test = y[train_size:train_size + test_size]\n # X_train, X_test, y_train, y_test = \\\n # train_test_split(X, y, test_size=test_size)\n X_train = X\n X_test = X\n y_train = y\n y_test = y\n self.n_layer += 1\n prf_pred_ref = self._cascade_layer(X_train, y_train)\n accuracy_ref = self._cascade_evaluation(X_test, y_test)\n feat_arr = self._create_feat_arr(X_train, prf_pred_ref)\n\n self.n_layer += 1\n prf_pred_layer = self._cascade_layer(feat_arr, y_train)\n accuracy_layer = self._cascade_evaluation(X_test, y_test)\n max_acc = accuracy_ref\n max_pred_layer = prf_pred_layer\n\n while accuracy_layer > (accuracy_ref + tol) and self.n_layer <= max_layers:\n #while accuracy_layer > (accuracy_ref - 0.000001) and \\\n # self.n_layer <= max_layers:\n if accuracy_layer > max_acc:\n max_acc = accuracy_layer\n max_pred_layer = prf_pred_layer\n accuracy_ref = accuracy_layer\n prf_pred_ref = prf_pred_layer\n feat_arr = self._create_feat_arr(X_train, prf_pred_ref)\n self.n_layer += 1\n prf_pred_layer = self._cascade_layer(feat_arr, y_train)\n accuracy_layer = self._cascade_evaluation(X_test, y_test)\n\n if accuracy_layer < accuracy_ref:\n n_cascadeRF = getattr(self, 'n_cascadeRF')\n for irf in range(n_cascadeRF):\n delattr(self, '_casprf{}_{}'.format(self.n_layer, irf))\n delattr(self, '_cascrf{}_{}'.format(self.n_layer, irf))\n self.n_layer -= 1\n\n print(\"layer %d - accuracy %f ref %f\" % (self.n_layer, accuracy_layer, accuracy_ref))\n else:\n at_layer = 1\n prf_pred_ref = self._cascade_layer(X, layer=at_layer)\n while at_layer < getattr(self, 'n_layer'):\n at_layer += 1\n feat_arr = self._create_feat_arr(X, prf_pred_ref)\n prf_pred_ref = self._cascade_layer(feat_arr, layer=at_layer)\n\n return prf_pred_ref\n\n def _cascade_layer(self, X, y=None, layer=0):\n n_tree = getattr(self, 'n_cascadeRFtree')\n n_cascadeRF = getattr(self, 
'n_cascadeRF')\n min_samples = getattr(self, 'min_samples_cascade')\n\n prf = RandomForestClassifier(\n n_estimators=100, max_features=8,\n bootstrap=True, criterion=\"entropy\", min_samples_split=20,\n max_depth=None, class_weight='balanced', oob_score=True)\n crf = ExtraTreesClassifier(\n n_estimators=100, max_depth=None,\n bootstrap=True, oob_score=True)\n\n prf_pred = []\n if y is not None:\n # print('Adding/Training Layer, n_layer={}'.format(self.n_layer))\n for irf in range(n_cascadeRF):\n prf.fit(X, y)\n crf.fit(X, y)\n setattr(self, '_casprf{}_{}'.format(self.n_layer, irf), prf)\n setattr(self, '_cascrf{}_{}'.format(self.n_layer, irf), crf)\n probas = prf.oob_decision_function_\n probas += crf.oob_decision_function_\n prf_pred.append(probas)\n elif y is None:\n for irf in range(n_cascadeRF):\n prf = getattr(self, '_casprf{}_{}'.format(layer, irf))\n crf = getattr(self, '_cascrf{}_{}'.format(layer, irf))\n probas = prf.predict_proba(X)\n probas += crf.predict_proba(X)\n prf_pred.append(probas)\n\n return prf_pred\n\n def _cascade_evaluation(self, X_test, y_test):\n casc_pred_prob = np.mean(self.cascade_forest(X_test), axis=0)\n casc_pred = np.argmax(casc_pred_prob, axis=1)\n casc_accuracy = accuracy_score(y_true=y_test, y_pred=casc_pred)\n #print('Layer validation accuracy = {}'.format(casc_accuracy))\n\n return casc_accuracy\n\n def _create_feat_arr(self, X, prf_pred):\n swap_pred = np.swapaxes(prf_pred, 0, 1)\n add_feat = swap_pred.reshape([np.shape(X)[0], -1])\n feat_arr = np.concatenate([add_feat, X], axis=1)\n\n return feat_arr\n"
] | [
[
"tensorflow.Graph",
"tensorflow.matmul",
"tensorflow.device",
"tensorflow.zeros",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.initialize_all_variables",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.nn.nce_loss",
"tensorflow.random_uniform",
"tensorflow.nn.embedding_lookup"
],
[
"numpy.swapaxes",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.ensemble.ExtraTreesClassifier",
"numpy.concatenate",
"numpy.argmax",
"numpy.mean",
"numpy.shape",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
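The sensor2vec code above trains skip-gram embeddings with NCE loss via a `SkipGramInjector`. The framework-agnostic part is how (center, context) pairs are drawn from the event stream; the sketch below is an assumed re-creation of that sampling step, not the repository's injector:

```python
# Draw num_skips context targets within skip_window positions of each event.
import random

def skip_gram_pairs(sequence, skip_window=2, num_skips=2, seed=0):
    rng = random.Random(seed)
    pairs = []
    for i, center in enumerate(sequence):
        lo, hi = max(0, i - skip_window), min(len(sequence), i + skip_window + 1)
        context = [j for j in range(lo, hi) if j != i]
        for j in rng.sample(context, min(num_skips, len(context))):
            pairs.append((center, sequence[j]))
    return pairs

events = [0, 3, 1, 3, 2, 0, 1]   # toy sensor-id stream
print(skip_gram_pairs(events))
```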
Yelloooowww/Deep-Reinforcement-Learning-Hands-On | [
"d1a3a1272d7ceff8796fe412deb4e4d5bd6665a5"
] | [
"Chapter03/03_atari_gan.py"
] | [
"#!/usr/bin/env python\nimport random\nimport argparse\nimport cv2\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nimport torchvision.utils as vutils\n\nimport gym\nimport gym.spaces\n\nimport numpy as np\n\nlog = gym.logger\nlog.set_level(gym.logger.INFO)\n\nLATENT_VECTOR_SIZE = 100\nDISCR_FILTERS = 64\nGENER_FILTERS = 64\nBATCH_SIZE = 16\n\n# dimension input image will be rescaled\nIMAGE_SIZE = 64\n\nLEARNING_RATE = 0.0001\nREPORT_EVERY_ITER = 25\nSAVE_IMAGE_EVERY_ITER = 1000\n\n\nclass InputWrapper(gym.ObservationWrapper):\n \"\"\"\n Preprocessing of input numpy array:\n 1. resize image into predefined size\n 2. move color channel axis to a first place\n \"\"\"\n def __init__(self, *args):\n super(InputWrapper, self).__init__(*args)\n assert isinstance(self.observation_space, gym.spaces.Box)\n old_space = self.observation_space\n self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),\n dtype=np.float32)\n\n def observation(self, observation):\n # resize image\n new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))\n # transform (210, 160, 3) -> (3, 210, 160)\n new_obs = np.moveaxis(new_obs, 2, 0)\n return new_obs.astype(np.float32)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_shape):\n super(Discriminator, self).__init__()\n # this pipe converges image into the single number\n self.conv_pipe = nn.Sequential(\n nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,\n kernel_size=4, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(DISCR_FILTERS*2),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(DISCR_FILTERS * 4),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(DISCR_FILTERS * 8),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,\n kernel_size=4, stride=1, padding=0),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n conv_out = self.conv_pipe(x)\n return conv_out.view(-1, 1).squeeze(dim=1)\n\n\nclass Generator(nn.Module):\n def __init__(self, output_shape):\n super(Generator, self).__init__()\n # pipe deconvolves input vector into (3, 64, 64) image\n self.pipe = nn.Sequential(\n nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,\n kernel_size=4, stride=1, padding=0),\n nn.BatchNorm2d(GENER_FILTERS * 8),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(GENER_FILTERS * 4),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(GENER_FILTERS * 2),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(GENER_FILTERS),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],\n kernel_size=4, stride=2, padding=1),\n nn.Tanh()\n )\n\n def forward(self, x):\n return self.pipe(x)\n\n\ndef iterate_batches(envs, batch_size=BATCH_SIZE):\n batch = [e.reset() for e in envs]\n env_gen = iter(lambda: random.choice(envs), None)\n\n while True:\n e = next(env_gen)\n obs, reward, 
is_done, _ = e.step(e.action_space.sample())\n if np.mean(obs) > 0.01:\n batch.append(obs)\n if len(batch) == batch_size:\n # Normalising input between -1 to 1\n batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0\n yield torch.tensor(batch_np)\n batch.clear()\n if is_done:\n e.reset()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--cuda\", default=False, action='store_true', help=\"Enable cuda computation\")\n parser.add_argument(\"--cuda\", default=True, action='store_true', help=\"Enable cuda computation\")\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]\n input_shape = envs[0].observation_space.shape\n\n net_discr = Discriminator(input_shape=input_shape).to(device)\n net_gener = Generator(output_shape=input_shape).to(device)\n\n objective = nn.BCELoss()\n gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\n dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\n writer = SummaryWriter()\n\n gen_losses = []\n dis_losses = []\n iter_no = 0\n\n true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)\n fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)\n\n for batch_v in iterate_batches(envs):\n # generate extra fake samples, input is 4D: batch, filters, x, y\n gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)\n batch_v = batch_v.to(device)\n gen_output_v = net_gener(gen_input_v)\n\n # train discriminator\n dis_optimizer.zero_grad()\n dis_output_true_v = net_discr(batch_v)\n dis_output_fake_v = net_discr(gen_output_v.detach())\n dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)\n dis_loss.backward()\n dis_optimizer.step()\n dis_losses.append(dis_loss.item())\n\n # train generator\n gen_optimizer.zero_grad()\n dis_output_v = net_discr(gen_output_v)\n gen_loss_v = objective(dis_output_v, true_labels_v)\n gen_loss_v.backward()\n gen_optimizer.step()\n gen_losses.append(gen_loss_v.item())\n\n iter_no += 1\n if iter_no % REPORT_EVERY_ITER == 0:\n log.info(\"Iter %d: gen_loss=%.3e, dis_loss=%.3e\", iter_no, np.mean(gen_losses), np.mean(dis_losses))\n writer.add_scalar(\"gen_loss\", np.mean(gen_losses), iter_no)\n writer.add_scalar(\"dis_loss\", np.mean(dis_losses), iter_no)\n gen_losses = []\n dis_losses = []\n if iter_no % SAVE_IMAGE_EVERY_ITER == 0:\n writer.add_image(\"fake\", vutils.make_grid(gen_output_v.data[:64], normalize=True), iter_no)\n writer.add_image(\"real\", vutils.make_grid(batch_v.data[:64], normalize=True), iter_no)\n"
] | [
[
"torch.ones",
"torch.nn.ConvTranspose2d",
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.tensor",
"numpy.mean",
"torch.FloatTensor",
"torch.nn.BatchNorm2d",
"numpy.moveaxis",
"torch.device",
"torch.nn.ReLU",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
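The Atari GAN row above preprocesses observations by moving the color channel first and rescaling pixels into [-1, 1] to match the generator's Tanh output. An illustrative sketch with a toy array instead of a real Atari frame (no gym or cv2 assumed):

```python
import numpy as np

frame = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # HWC uint8
chw = np.moveaxis(frame.astype(np.float32), 2, 0)                    # -> (3, 64, 64)
normalized = chw * 2.0 / 255.0 - 1.0                                 # values in [-1, 1]
print(chw.shape, float(normalized.min()), float(normalized.max()))
```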
csjtx1021/CAGG | [
"67fde2f1488ee6e2ff137e87860b5243c5b5fe7c"
] | [
"CAGG-NAS/tools/nn/nn_visualise.py"
] | [
"\"\"\"\n Harness for visualising a neural network.\n -- [email protected]\n\"\"\"\n\n# pylint: disable=invalid-name\n\nimport functools\nimport graphviz as gv\nimport os\nimport networkx as nx\nimport numpy as np\n\n# Parameters for plotting\n_SAVE_FORMAT = 'eps'\n# _SAVE_FORMAT = 'png'\n_LAYER_SHAPE = 'rectangle'\n_IPOP_SHAPE = 'circle'\n_LAYER_FONT = 'DejaVuSans'\n_IPOP_FONT = 'Helvetica'\n_LAYER_FONTSIZE = '16'\n_FILLCOLOR = 'transparent'\n_IPOP_FONTSIZE = '12'\n_IPOP_FILLCOLOR = '#ffc0cb'\n_DECISION_FILLCOLOR = '#98fb98'\n_GRAPH_STYLES = {\n 'graph': {\n 'fontsize': _LAYER_FONTSIZE,\n 'rankdir': 'TB',\n 'label': None,\n },\n 'nodes': {\n },\n 'edges': {\n 'arrowhead': 'open',\n 'fontsize': '12',\n }\n}\n\nGV_GRAPH = functools.partial(gv.Graph, format=_SAVE_FORMAT)\nGV_DIGRAPH = functools.partial(gv.Digraph, format=_SAVE_FORMAT)\n\n# Utilities for adding nodes, edges and styles -------------------------------------------\ndef add_nodes(graph, nodes):\n \"\"\" Adds nodes to the graph. \"\"\"\n for n in nodes:\n if isinstance(n, tuple):\n graph.node(n[0], **n[1])\n else:\n graph.node(n)\n return graph\n\ndef add_edges(graph, edges):\n \"\"\" Adds edges to the graph. \"\"\"\n # pylint: disable=star-args\n for e in edges:\n if isinstance(e[0], tuple):\n graph.edge(*e[0], **e[1])\n else:\n graph.edge(*e)\n return graph\n\ndef apply_styles(graph, styles):\n \"\"\" Applies styles to the graph. \"\"\"\n graph.graph_attr.update(\n ('graph' in styles and styles['graph']) or {}\n )\n graph.node_attr.update(\n ('nodes' in styles and styles['nodes']) or {}\n )\n graph.edge_attr.update(\n ('edges' in styles and styles['edges']) or {}\n )\n return graph\n\n# Wrappers for tedious routines ----------------------------------------------------------\ndef _get_ip_layer(layer_idx):\n \"\"\" Returns a tuple representing the input layer. \"\"\"\n return (str(layer_idx), {'label': 'i/p', 'shape': 'circle', 'style': 'filled',\n 'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,\n 'fontname': _IPOP_FONT})\n\ndef _get_op_layer(layer_idx):\n \"\"\" Returns a tuple representing the output layer. \"\"\"\n return (str(layer_idx), {'label': 'o/p', 'shape': 'circle', 'style': 'filled',\n 'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,\n 'fontname': _IPOP_FONT})\n\ndef _get_layer(layer_idx, nn, for_pres):\n \"\"\" Returns a tuple representing the layer label. \"\"\"\n if nn.layer_labels[layer_idx] in ['ip', 'op']:\n fill_colour = _IPOP_FILLCOLOR\n elif nn.layer_labels[layer_idx] in ['softmax', 'linear']:\n fill_colour = _DECISION_FILLCOLOR\n else:\n fill_colour = _FILLCOLOR\n label = nn.get_layer_descr(layer_idx, for_pres)\n return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour,\n 'style': 'filled', 'fontname': _LAYER_FONT}),((layer_idx), nn.layer_labels[layer_idx],(nn.num_units_in_each_layer[layer_idx]))\n\n\n\n\n\ndef _get_edge(layer_idx_start, layer_idx_end):\n \"\"\" Returns a tuple which is an edge. \"\"\"\n return (str(layer_idx_start), str(layer_idx_end))\n\ndef _get_edges(conn_mat):\n \"\"\" Returns all edges. \"\"\"\n starts, ends = conn_mat.nonzero()\n return [_get_edge(starts[i], ends[i]) for i in range(len(starts))]\n\n# Main API ------------------------------------------------------------------------------\ndef visualise_nn(nn, save_file_prefix, fig_label=None, for_pres=True):\n \"\"\" The main API which will be used to visualise the network. 
\"\"\"\n # First create nodes in the order\n nodes = [_get_layer(i, nn, for_pres)[0] for i in range(nn.num_layers)]\n nodes_my = [_get_layer(i, nn, for_pres)[1] for i in range(nn.num_layers)]\n #print(\"nodes_my=\",nodes_my)\n edges = _get_edges(nn.conn_mat)\n edges_my = [(int(s),int(t)) for s,t in edges]\n #print(\"edges_my=\",edges_my)\n nn_graph = GV_DIGRAPH()\n add_nodes(nn_graph, nodes)\n add_edges(nn_graph, edges)\n graph_styles = _GRAPH_STYLES\n graph_styles['graph']['label'] = fig_label\n apply_styles(nn_graph, graph_styles)\n nn_graph.render(save_file_prefix)\n \n if os.path.exists(save_file_prefix):\n # graphviz also creates another file in the name of the prefix. delete it.\n os.remove(save_file_prefix)\n\n return tonxgraph(nodes_my,edges_my)\n\nNODE_TYPES = ['ip', 'op', 'linear']\nhidden_list = [8,16,32,64,128,256,512,1024]\nfor i in hidden_list:\n NODE_TYPES.append(\"relu-%s\"%i)\n NODE_TYPES.append(\"crelu-%s\"%i)\n NODE_TYPES.append(\"leaky-relu-%s\"%i)\n NODE_TYPES.append(\"softplus-%s\"%i)\n NODE_TYPES.append(\"elu-%s\"%i)\n NODE_TYPES.append(\"logistic-%s\"%i)\n NODE_TYPES.append(\"tanh-%s\"%i)\n\n\ndef tonxgraph(nodes_my,edges_my):\n g = {\"x\":[],\"edge_index\":[],\"edge_attr\":[]}\n \n for n_idx, type, num_hidden in nodes_my:\n n_idx = int(n_idx)\n if type=='ip' or type=='op' or type=='linear':\n g[\"x\"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(type)])\n else:\n num_hidden = np.random.choice(hidden_list)\n g[\"x\"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(\"%s-%s\"%(type,num_hidden))])\n row = []\n col = []\n for s, t in edges_my:\n row.append(s)\n col.append(t)\n g[\"edge_attr\"].append(np.ones(1))\n g[\"edge_index\"].append(row)\n g[\"edge_index\"].append(col)\n\n g[\"x\"]=np.array(g[\"x\"])\n g[\"edge_attr\"]=np.array(g[\"edge_attr\"])\n\n print(\"+\",g[\"x\"].shape)\n assert g[\"x\"].shape[0] <= 20\n \n return g\n\n\n\n\n #g_nx = nx.nx_agraph.from_agraph(nn_graph)\n #A = nx.nx_agraph.to_agraph(g_nx) # convert to a graphviz graph\n #A.layout() # neato layout\n #A.draw(\"a.ps\")\n\ndef visualise_list_of_nns(list_of_nns, save_dir, fig_labels=None, fig_file_names=None,\n for_pres=False):\n \"\"\" Visualises a list of neural networks. \"\"\"\n g_list = []\n if fig_labels is None:\n fig_labels = [None] * len(list_of_nns)\n if fig_file_names is None:\n fig_file_names = [str(idx) for idx in range(len(list_of_nns))]\n for idx, nn in enumerate(list_of_nns):\n save_file_prefix = os.path.join(save_dir, fig_file_names[idx])\n g = visualise_nn(nn, save_file_prefix, fig_labels[idx], for_pres)\n g_list.append(g)\n return g_list\n\n"
] | [
[
"numpy.ones",
"numpy.array",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
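The `nn_visualise.py` record above converts each layer of a sampled network into a one-hot vector over its `NODE_TYPES` list before assembling the `x`/`edge_index` graph dict. As a hedged, standalone sketch of that encoding step only (using a shortened, hypothetical type list and a toy three-node chain rather than the repo's full `NODE_TYPES` and real networks):

```python
import numpy as np

# Hypothetical, shortened type list; the repo builds a much longer one per hidden size.
NODE_TYPES = ["ip", "op", "linear", "relu-64", "tanh-128"]

def one_hot_node(node_type):
    """One-hot row identifying node_type within NODE_TYPES."""
    return np.eye(len(NODE_TYPES))[NODE_TYPES.index(node_type)]

# Toy chain ip -> relu-64 -> op, encoded the same way tonxgraph fills g["x"] and g["edge_index"].
x = np.array([one_hot_node(t) for t in ("ip", "relu-64", "op")])
edges = [(0, 1), (1, 2)]
edge_index = np.array([[s for s, _ in edges], [t for _, t in edges]])
print(x.shape, edge_index.tolist())  # (3, 5) [[0, 1], [1, 2]]
```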
songzy12/MatchZoo | [
"a43dc3b1d43b3f2a1b43b11d3fc4009616507e23"
] | [
"matchzoo/layers/matching_layer.py"
] | [
"\"\"\"An implementation of Matching Layer.\"\"\"\nimport typing\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n\nclass MatchingLayer(layers.Layer):\n \"\"\"\n Layer that computes a matching matrix between samples in two tensors.\n\n :param normalize: Whether to L2-normalize samples along the\n dot product axis before taking the dot product.\n If set to True, then the output of the dot product\n is the cosine proximity between the two samples.\n :param matching_type: the similarity function for matching\n :param kwargs: Standard layer keyword arguments.\n\n Examples:\n >>> import matchzoo as mz\n >>> layer = mz.layers.MatchingLayer(matching_type='dot',\n ... normalize=True)\n >>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10\n >>> layer.build([[num_batch, left_len, num_dim],\n ... [num_batch, right_len, num_dim]])\n\n \"\"\"\n\n def __init__(self, normalize: bool = False,\n matching_type: str = 'dot', **kwargs):\n \"\"\":class:`MatchingLayer` constructor.\"\"\"\n super().__init__(**kwargs)\n self._normalize = normalize\n self._validate_matching_type(matching_type)\n self._matching_type = matching_type\n self._shape1 = None\n self._shape2 = None\n\n @classmethod\n def _validate_matching_type(cls, matching_type: str = 'dot'):\n valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']\n if matching_type not in valid_matching_type:\n raise ValueError(f\"{matching_type} is not a valid matching type, \"\n f\"{valid_matching_type} expected.\")\n\n def build(self, input_shape: list):\n \"\"\"\n Build the layer.\n\n :param input_shape: the shapes of the input tensors,\n for MatchingLayer we need tow input tensors.\n \"\"\"\n # Used purely for shape validation.\n if not isinstance(input_shape, list) or len(input_shape) != 2:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on a list of 2 inputs.')\n self._shape1 = input_shape[0]\n self._shape2 = input_shape[1]\n for idx in 0, 2:\n if self._shape1[idx] != self._shape2[idx]:\n raise ValueError(\n 'Incompatible dimensions: '\n f'{self._shape1[idx]} != {self._shape2[idx]}.'\n f'Layer shapes: {self._shape1}, {self._shape2}.'\n )\n\n def call(self, inputs: list, **kwargs) -> typing.Any:\n \"\"\"\n The computation logic of MatchingLayer.\n\n :param inputs: two input tensors.\n \"\"\"\n x1 = inputs[0]\n x2 = inputs[1]\n if self._matching_type == 'dot':\n if self._normalize:\n x1 = tf.math.l2_normalize(x1, axis=2)\n x2 = tf.math.l2_normalize(x2, axis=2)\n return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)\n else:\n if self._matching_type == 'mul':\n def func(x, y):\n return x * y\n elif self._matching_type == 'plus':\n def func(x, y):\n return x + y\n elif self._matching_type == 'minus':\n def func(x, y):\n return x - y\n elif self._matching_type == 'concat':\n def func(x, y):\n return tf.concat([x, y], axis=3)\n else:\n raise ValueError(f\"Invalid matching type.\"\n f\"{self._matching_type} received.\"\n f\"Mut be in `dot`, `mul`, `plus`, \"\n f\"`minus` and `concat`.\")\n x1_exp = tf.stack([x1] * self._shape2[1], 2)\n x2_exp = tf.stack([x2] * self._shape1[1], 1)\n return func(x1_exp, x2_exp)\n\n def compute_output_shape(self, input_shape: list) -> tuple:\n \"\"\"\n Calculate the layer output shape.\n\n :param input_shape: the shapes of the input tensors,\n for MatchingLayer we need tow input tensors.\n \"\"\"\n if not isinstance(input_shape, list) or len(input_shape) != 2:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on a list of 2 inputs.')\n shape1 = list(input_shape[0])\n 
shape2 = list(input_shape[1])\n if len(shape1) != 3 or len(shape2) != 3:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on 2 inputs with 3 dimensions.')\n if shape1[0] != shape2[0] or shape1[2] != shape2[2]:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on 2 inputs with same 0,2 dimensions.')\n\n if self._matching_type in ['mul', 'plus', 'minus']:\n return shape1[0], shape1[1], shape2[1], shape1[2]\n elif self._matching_type == 'dot':\n return shape1[0], shape1[1], shape2[1], 1\n elif self._matching_type == 'concat':\n return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]\n else:\n raise ValueError(f\"Invalid `matching_type`.\"\n f\"{self._matching_type} received.\"\n f\"Must be in `mul`, `plus`, `minus` \"\n f\"`dot` and `concat`.\")\n\n def get_config(self) -> dict:\n \"\"\"Get the config dict of MatchingLayer.\"\"\"\n config = {\n 'normalize': self._normalize,\n 'matching_type': self._matching_type,\n }\n base_config = super(MatchingLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"tensorflow.stack",
"tensorflow.math.l2_normalize",
"tensorflow.concat",
"tensorflow.einsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
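The `matching_layer.py` record's `'dot'` branch reduces to an `einsum` over the embedding axis, optionally after L2 normalisation. A minimal standalone sketch of just that branch on toy tensors (TensorFlow 2 eager mode assumed, not the full Keras layer):

```python
import tensorflow as tf

# Toy inputs: (batch, left_len, dim) and (batch, right_len, dim).
x1 = tf.random.normal([5, 3, 10])
x2 = tf.random.normal([5, 2, 10])

# normalize=True behaviour: cosine similarity between every left/right position pair.
x1n = tf.math.l2_normalize(x1, axis=2)
x2n = tf.math.l2_normalize(x2, axis=2)
match = tf.expand_dims(tf.einsum('abd,acd->abc', x1n, x2n), 3)
print(match.shape)  # (5, 3, 2, 1), matching compute_output_shape for 'dot'
```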
738844605/DualResidualNetworks | [
"6d025e074d4c914fae86f51cd8b93569a2c05335",
"6d025e074d4c914fae86f51cd8b93569a2c05335"
] | [
"test/noise.py",
"train/haze.py"
] | [
"# python 2.7, pytorch 0.3.1\n\nimport os, sys\nsys.path.insert(1, '../')\nimport torch\nimport cv2\nimport shutil\nimport torchvision\nimport numpy as np\nimport itertools\nimport subprocess\nimport random\n\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\n\nfrom pietorch import data_convertors\nfrom pietorch.DuRN_P import cleaner as cleaner\nfrom pietorch.DuRN_P_no_norm import cleaner as cleaner_no_norm\nfrom pietorch.pytorch_ssim import ssim as ssim\nfrom skimage.measure import compare_psnr as psnr\nfrom skimage.measure import compare_ssim as ski_ssim\n\n#------ Options -------\ntag = 'DuRN_P_no_norm' # 'DuRN_P' or 'DuRN_P_no_norm' for gaussion or real-world noise removal\ndata_name = 'RealNoiseHKPoly' # 'BSD_gray' or 'RealNoiseHKPoly'\n\n# Gaussian noise level. Comment it if you set data_name = 'RealNoiseHKPoly'.\n#noise_level = 70 # choose one from [30, 50, 70]\n#----------------------\n\nif data_name == 'BSD_gray': \n testroot = \"../data/\"+data_name+\"/test/\"\n test_list_pth = '../lists/'+data_name+'/testlist.txt'\nelse:\n testroot = \"../data/\"+data_name+\"/test1/\"\n test_list_pth = '../lists/'+data_name+'/test1_list.txt'\n\nPretrained = '../trainedmodels/'+data_name+'/'+tag+'_model.pt' \nshow_dst = '../cleaned_images/'+data_name+'/'+tag+'/'\nsubprocess.check_output(['mkdir', '-p', show_dst])\n\n# Make the transformer and the network\nif data_name == 'BSD_gray':\n transform = [transforms.ToTensor(), noise_level] \n cleaner = cleaner().cuda()\nelse:\n transform = transforms.ToTensor() \n cleaner = cleaner_no_norm().cuda()\n \ncleaner.load_state_dict(torch.load(Pretrained))\ncleaner.eval()\n\n# Make the dataloader\nconvertor = data_convertors.ConvertImageSet(testroot, test_list_pth, data_name,\n transform=transform)\ndataloader = DataLoader(convertor, batch_size=1, shuffle=False, num_workers=1)\n\nave_psnr = 0 \nave_ssim = 0 \nct_num = 0\nfor i, data in enumerate(dataloader):\n ct_num+= 1.0\n im_input, label, im_name = data \n im_input = Variable(im_input, requires_grad=False).cuda()\n res = cleaner(im_input)\n res = res.data.cpu().numpy()\n res[res>1] = 1\n res[res<0] = 0\n res*= 255\n if data_name == 'BSD_gray':\n res = res.astype(np.uint8)[0,0]\n label = label.numpy()[0,0]\n label*= 255\n label = label.astype(np.uint8) \n cv2.imwrite(show_dst+im_name[0].split('.')[0]+'_'+str(noise_level)+'.png', res)\n ave_psnr+= psnr(res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=False)\n \n elif data_name == 'RealNoiseHKPoly':\n res = res.astype(np.uint8)[0]\n res = res.transpose((1,2,0))\n label = label.numpy()[0].transpose((1,2,0))\n label*= 255\n label = label.astype(np.uint8) \n Image.fromarray(res).save(show_dst+im_name[0].split('real')[0]+'.png')\n ave_psnr+= psnr(res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=True)\n \n else:\n print('Unknown dataset name.')\n \nprint('psnr: '+str(ave_psnr/ct_num))\nprint('ssim: '+str(ave_ssim/ct_num))\nprint('Test done.')\n",
"import os, sys\nsys.path.insert(1, '../')\nimport torch\nimport cv2\nimport shutil\nimport torchvision\nimport numpy as np\nimport itertools\nimport subprocess\nimport random\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nfrom pietorch import data_convertors\nfrom pietorch.DuRN_US import cleaner as cleaner\nfrom pietorch.pytorch_ssim import ssim as ssim\n\n#------ Options -------\ntag = 'DuRN_US'\ndata_name = 'RESIDE'\nbch_size = 40\nbase_lr = 0.0001\nepoch_size = 3000\ngpus = 1\ncrop_size = 256\n\nssim_weight = 1.1\nl1_loss_weight = 0.75\nwith_data_aug = False\n#----------------------\n\n# Set pathes\ndata_root = '../data/' +data_name+'/indoor_train/'\nimlist_pth = '../lists/'+data_name+'_indoor/train_list.txt'\n\n# dstroot for saving models. \n# logroot for writting some log(s), if is needed.\ndstroot = './trainedmodels/'+data_name+'/'+tag+'/'\nlogroot = './logs/'+data_name+'/'+tag+'/'\nsubprocess.check_output(['mkdir', '-p', dstroot])\nsubprocess.check_output(['mkdir', '-p', logroot])\n\n# Transform\ntransform = transforms.ToTensor()\n# Dataloader\nconvertor = data_convertors.ConvertImageSet(data_root, imlist_pth, data_name,\n transform=transform, is_train=True,\n with_aug=with_data_aug, crop_size=crop_size)\ndataloader = DataLoader(convertor, batch_size=bch_size, shuffle=False, num_workers=5)\n\n# Make network\ncleaner = cleaner().cuda()\ncleaner.train()\n\n# Optimizer and Loss\noptimizer = optim.Adam(cleaner.parameters(), lr=base_lr)\nL1_loss = nn.L1Loss()\n\n# Start training\nprint('Start training...')\nfor epoch in range(epoch_size): \n for iteration, data in enumerate(dataloader):\n img, label, _ = data\n img_var = Variable(img, requires_grad=False).cuda()\n label_var = Variable(label, requires_grad=False).cuda()\n\n # Cleaning noisy images\n cleaned = cleaner(img_var)\n\n # Compute ssim loss (not used)\n ssim_loss = -ssim(cleaned, label_var)\n ssim_loss = ssim_loss*ssim_weight\n\n # Compute L1 loss (not used)\n l1_loss = L1_loss(cleaned, label_var)\n l1_loss = l1_loss*l1_loss_weight\n\n loss = ssim_loss + l1_loss\n # Backward and update params \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Check \n# torchvision.utils.save_image(img[:16], logroot+'input_images.png')\n# torchvision.utils.save_image(label[:16], logroot+'label_images.png')\n# torchvision.utils.save_image(cleaned[:16], logroot+'temp_res.png' )\n print('Epoch('+str(epoch+1)+'), iteration('+str(iteration+1)+'): '+str(loss.item()))\n\n if epoch%10 == 9:\n if gpus == 1: \n torch.save(cleaner.state_dict(), dstroot+'epoch_'+str(epoch+1)+'_model.pt')\n else:\n torch.save(cleaner.module.state_dict(), dstroot+'epoch_'+str(epoch+1)+'_model.pt') \n\n if epoch in [700, 1400]:\n for param_group in optimizer.param_groups:\n param_group['lr']*= 0.1 \n\n"
] | [
[
"torch.autograd.Variable",
"torch.utils.data.DataLoader",
"torch.load"
],
[
"torch.utils.data.DataLoader",
"torch.nn.L1Loss",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
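`test/noise.py` above imports `compare_psnr` and `compare_ssim` from `skimage.measure`, which only works on older scikit-image releases; in current versions the same metrics live in `skimage.metrics` under new names. A hedged sketch of the equivalent calls (assuming scikit-image >= 0.16 and toy uint8 images in place of the model output):

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

rng = np.random.default_rng(0)
label = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)              # toy ground truth
noise = rng.integers(-5, 6, size=label.shape)
res = np.clip(label.astype(np.int16) + noise, 0, 255).astype(np.uint8)   # toy "cleaned" image

print(peak_signal_noise_ratio(label, res, data_range=255))  # replaces compare_psnr
print(structural_similarity(label, res, data_range=255))    # replaces compare_ssim (grayscale)
```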
sayabiws/simple-image-recommender | [
"27162c544fc08b5774049039694f0fa7c7faac3f"
] | [
"main.py"
] | [
"# Simple image recommender\n#\n# required:\n# data/images: a folder containing your images dataset\n# data/users: can be empty, but the folder needs to exist (for now ?)\n# \n# optional:\n# data/tags.csv: a comma-separated list containing the names of your \n# images and the corresponding semicolon-separated tags\n# (eg. \"37.png,sky;blue;cliff\")\n\n# Libraries import\nfrom PIL import Image\nfrom sklearn.cluster import MiniBatchKMeans\nfrom operator import itemgetter\nimport pandas\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\nimport pandas as pd\nimport json\nimport math\nimport os\nimport json\nimport csv\n\n\n# User data gathering\ndef user_data_gathering():\n\tname = input(\"Please enter your username: \")\n\tuser_favs = []\n\tuser_dislikes = []\n\ttry:\n\t\twith open(\"data/users/\" + name + \".txt\", \"r\") as userfile:\n\t\t\tuser_favs = userfile.readline().rstrip().split(\",\")\n\t\t\tuser_dislikes = userfile.readline().rstrip().split(\",\")\n\texcept FileNotFoundError:\n\t\tprint(\"This user doesn't exist. Creating it...\")\n\tif not user_favs:\n\t\tprint(\"No favourite images defined!\")\n\tif not user_dislikes:\n\t\tprint(\"No disliked images defined!\")\n\tdo_fav = input(\"Would you like to define your favourite images? ([y]es/[n]o/[a]dd): \")\n\tif do_fav == \"y\":\n\t\tuser_favs = input(\"Please enter your favourite images, separated by a comma: \").split(\",\")\n\telif do_fav == \"a\":\n\t\tuser_favs += input(\"Please enter the images you want to add, separated by a comma: \").split(\",\")\n\telif do_fav == \"n\":\n\t\tpass\n\telse:\n\t\tprint(\"Incorrect choice. Exiting\")\n\t\texit()\n\n\tdo_dislike = input(\"Would you like to define your disliked images? ([y]es/[n]o/[a]dd): \")\n\tif do_dislike == \"y\":\n\t\tuser_dislikes = input(\"Please enter your disliked images, separated by a comma: \").split(\",\")\n\telif do_dislike == \"a\":\n\t\tuser_dislikes += input(\"Please enter the images you want to add, separated by a comma: \").split(\",\")\n\telif do_dislike == \"n\":\n\t\tpass\n\telse:\n\t\tprint(\"Incorrect choice. 
Exiting\")\n\t\texit()\n\tuserfile = open(\"data/users/\" + name + \".txt\", \"w+\")\n\tuserfile.write(\",\".join(user_favs) + \"\\n\")\n\tuserfile.write(\",\".join(user_dislikes) + \"\\n\")\n\tuserfile.close()\n\n\treturn user_favs,user_dislikes\n\n# Get all images filenames in data/images/\ndef get_image_list():\n\timagelist = []\n\tfor file in os.listdir(\"data/images\"):\n\t\tif file.endswith(\".png\") or file.endswith(\".jpg\") or file.endswith(\".gif\") or file.endswith(\".tif\") or file.endswith(\".bmp\"):\n\t\t\timagelist.append(file)\n\treturn imagelist\n\n# Get color clusters per image\ndef get_clusters(filename, n_clusters):\n\timgfile = Image.open(\"data/images/\" + filename).convert('RGBA')\n\tnumarray = np.array(imgfile.getdata(), np.uint8)\n\n\tclusters = MiniBatchKMeans(n_clusters=n_clusters)\n\tclusters.fit(numarray)\n\n\tnpbins = np.arange(0, n_clusters + 1)\n\thistogram = np.histogram(clusters.labels_, bins=npbins)\n\n\t# Sort histogram\n\tpairs = sorted(zip(histogram[0], histogram[1]), key=itemgetter(0))\n\thistogram = (np.array([v for v, i in pairs]),\n\t\t\t\t np.array([i for v, i in pairs]))\n\n\tcolors = []\n\n\tfor i in range(n_clusters):\n\t\tj = histogram[1][i]\n\t\tcolors.append(\n\t\t\t(\n\t\t\t\tmath.ceil(clusters.cluster_centers_[j][0]),\n\t\t\t\tmath.ceil(clusters.cluster_centers_[j][1]),\n\t\t\t\tmath.ceil(clusters.cluster_centers_[j][2])\n\t\t\t)\n\t\t)\n\n\treturn colors\n\n# Returns a pandas dataframe with the tags info\ndef get_tags(filename):\n\ttry:\n\t\ttags_df = pd.read_csv(filename)\n\texcept FileNotFoundError:\n\t\tprint(\"No tags have been defined. Ignoring tags.\")\n\n\ttags_df[\"tags\"] = tags_df.tags.str.split(\";\")\n\treturn tags_df\n\n# Clean the clusters data\ndef clean_data(clusters):\n\tfor image in clusters:\n\t\ttmp = []\n\t\tfor color in image[\"colors\"]: \n\t\t\ttmp.append(((color[0])<<16)|((color[1])<<8)|(color[2]))\n\t\timage[\"colors\"] = tmp\n\t\ttmp = []\n\n\treturn clusters\n\n# The actual prediction algorithm\ndef predict(clusters, user_fav, user_dislikes):\n\timages = sorted(clusters, key=lambda x: x['name'])\n\tcolor_clusters = [image[\"colors\"] for image in images]\n\n\t# Build training data\n\ttraining_data = color_clusters\n\tresult_data = [(image['name'] in user_fav) for image in images]\n\t\n\t# Build dataframes\n\ttraining_df = pandas.DataFrame(training_data, columns=['color1', 'color2', 'color3'])\n\tresult_df = pandas.DataFrame(result_data, columns=['favorite'])\n\n\t# Train decision tree\n\tclassifier = RandomForestClassifier(n_estimators=10, max_depth=10)\n\tclassifier = classifier.fit(training_df, result_df.values.ravel())\n\n\tpredicted = classifier.predict(list(map(lambda x: x['colors'], images)))\n\n\tprint(\"# Predicted as favorites\")\n\n\tfor index, favorite in enumerate(predicted):\n\t\tname = images[index]['name']\n\t\t# Only print new images\n\t\tif favorite and name not in user_fav and name not in user_dislikes:\n\t\t\tprint(name)\n\n# Main function\ndef main():\n\tprint(\"Loading...\")\n\tprint(\" -- Looking up images...\")\n\timagelist = get_image_list()\n\tprint(\" -- Calculating color clusters (this can take some time if it has never been done before)...\")\n\tn_clusters = 3\n\n\ttry:\n\t\tclustersData = open(\"data/clusters.json\", \"r\")\n\t\tclusters = json.load(clustersData)\n\texcept:\n\t\tclusters = [{\"name\":filename, \"colors\":get_clusters(filename, n_clusters)} for filename in imagelist]\n\t\tr = json.dumps(clusters)\n\t\tclusersfile = open(\"data/clusters.json\", 
\"w\")\n\t\tclusersfile.write(r)\n\t\tclusersfile.close()\n\n\tprint(\" -- Extracting tags...\")\n\ttags = get_tags(\"data/tags.csv\")\n\tprint(\"Loading done!\")\n\n\t# Gathering user data\n\tprint(\"Gathering user data...\")\n\t(user_favs, user_dislikes) = user_data_gathering()\n\n\t# Recommendation system\n\tprint(\"Computing recommendation...\")\n\tcleanedclusters = clean_data(clusters)\n\tpredict(cleanedclusters, user_favs, user_dislikes)\n\nif __name__ == \"__main__\":\n\tmain()"
] | [
[
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"numpy.arange",
"pandas.DataFrame",
"sklearn.cluster.MiniBatchKMeans",
"numpy.array",
"numpy.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
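`main.py` in the record above derives each image's three dominant colours with `MiniBatchKMeans` and ranks the clusters by a label histogram before feeding them to a random forest. A standalone sketch of just that colour-clustering step, on random toy pixels instead of PIL image data (an assumption made for self-containment):

```python
import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.default_rng(42)
pixels = rng.integers(0, 256, size=(1000, 4), dtype=np.uint8)  # toy RGBA pixel list

n_clusters = 3
km = MiniBatchKMeans(n_clusters=n_clusters)
km.fit(pixels)

# Rank clusters by how many pixels they cover (smallest first, as in the script's sort).
counts, _ = np.histogram(km.labels_, bins=np.arange(0, n_clusters + 1))
order = np.argsort(counts)
colors = [tuple(int(np.ceil(c)) for c in km.cluster_centers_[j][:3]) for j in order]
print(colors)  # three approximate dominant RGB colours
```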
Nickmeagan70/tensorflow | [
"6bfedde8466daced9f40a0e11840f5ce274abc7d",
"6bfedde8466daced9f40a0e11840f5ce274abc7d"
] | [
"tensorflow/python/pywrap_tensorflow.py",
"tensorflow/compiler/mlir/tfrt/jit/python_binding/tfrt_fallback.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"A Python wrapper that loads _pywrap_tensorflow_internal.so.\"\"\"\n\nimport ctypes\nimport sys\nimport traceback\n\nfrom tensorflow.python.platform import self_check\n\n# TODO(mdan): Cleanup antipattern: import for side effects.\n\n# Perform pre-load sanity checks in order to produce a more actionable error.\nself_check.preload_check()\n\n# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long\n\ntry:\n # This import is expected to fail if there is an explicit shared object\n # dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.\n from tensorflow.python import pywrap_dlopen_global_flags\n _use_dlopen_global_flags = True\nexcept ImportError:\n _use_dlopen_global_flags = False\n\n# On UNIX-based platforms, pywrap_tensorflow is a python library that\n# dynamically loads _pywrap_tensorflow.so.\n_can_set_rtld_local = (\n hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))\nif _can_set_rtld_local:\n _default_dlopen_flags = sys.getdlopenflags()\n\ntry:\n if _use_dlopen_global_flags:\n pywrap_dlopen_global_flags.set_dlopen_flags()\n elif _can_set_rtld_local:\n # Ensure RTLD_LOCAL behavior for platforms where it isn't the default\n # (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not\n # override an RTLD_GLOBAL in _default_dlopen_flags).\n sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)\n\n # Python2.7 does not have a ModuleNotFoundError.\n try:\n ModuleNotFoundError\n except NameError:\n ModuleNotFoundError = ImportError # pylint: disable=redefined-builtin\n\n # pylint: disable=wildcard-import,g-import-not-at-top,line-too-long,undefined-variable\n try:\n from tensorflow.python._pywrap_tensorflow_internal import *\n # This try catch logic is because there is no bazel equivalent for py_extension.\n # Externally in opensource we must enable exceptions to load the shared object\n # by exposing the PyInit symbols with pybind. This error will only be\n # caught internally or if someone changes the name of the target _pywrap_tensorflow_internal.\n\n # This logic is used in other internal projects using py_extension.\n except ModuleNotFoundError:\n pass\n\n if _use_dlopen_global_flags:\n pywrap_dlopen_global_flags.reset_dlopen_flags()\n elif _can_set_rtld_local:\n sys.setdlopenflags(_default_dlopen_flags)\nexcept ImportError:\n raise ImportError(\n f'{traceback.format_exc()}'\n f'\\n\\nFailed to load the native TensorFlow runtime.\\n'\n f'See https://www.tensorflow.org/install/errors '\n f'for some common causes and solutions.\\n'\n f'If you need help, create an issue '\n f'at https://github.com/tensorflow/tensorflow/issues '\n f'and include the entire stack trace above this error message.')\n\n# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Minimal Python binding for TFRT Tensorflow fallback.\n\nThis is only for testing.\n\"\"\"\n\nfrom tensorflow.compiler.mlir.tfrt.jit.python_binding import _tfrt_fallback\n\n\ndef run_tfrt_fallback(module_ir, entrypoint, arguments):\n return _tfrt_fallback.run_tfrt_fallback(module_ir, entrypoint, arguments)\n"
] | [
[
"tensorflow.python.platform.self_check.preload_check",
"tensorflow.python.pywrap_dlopen_global_flags.reset_dlopen_flags",
"tensorflow.python.pywrap_dlopen_global_flags.set_dlopen_flags"
],
[
"tensorflow.compiler.mlir.tfrt.jit.python_binding._tfrt_fallback.run_tfrt_fallback"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nvaytet/scipp | [
"f14f56ed19cccb4162d55b1123df7225eeedb395",
"f14f56ed19cccb4162d55b1123df7225eeedb395",
"f14f56ed19cccb4162d55b1123df7225eeedb395",
"f14f56ed19cccb4162d55b1123df7225eeedb395"
] | [
"src/scipp/plotting/tools.py",
"lib/core/test/generate_arithmetic_parameters.py",
"tests/datasetslice_test.py",
"src/scipp/plotting/figure1d.py"
] | [
"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)\n# @author Neil Vaytet\n\nfrom .. import config\nfrom ..core import concatenate, values, dtype, units, nanmin, nanmax, histogram, \\\n full_like\nfrom ..core import Variable, DataArray\nfrom ..core import abs as abs_\nimport numpy as np\nfrom copy import copy\nimport io\n\n\ndef get_line_param(name=None, index=None):\n \"\"\"\n Get the default line parameter from the config.\n If an index is supplied, return the i-th item in the list.\n \"\"\"\n param = getattr(config.plot, name)\n return param[index % len(param)]\n\n\ndef to_bin_centers(x, dim):\n \"\"\"\n Convert array edges to centers\n \"\"\"\n return 0.5 * (x[dim, 1:] + x[dim, :-1])\n\n\ndef to_bin_edges(x, dim):\n \"\"\"\n Convert array centers to edges\n \"\"\"\n idim = x.dims.index(dim)\n if x.shape[idim] < 2:\n one = 1.0 * x.unit\n return concatenate(x[dim, 0:1] - one, x[dim, 0:1] + one, dim)\n else:\n center = to_bin_centers(x, dim)\n # Note: use range of 0:1 to keep dimension dim in the slice to avoid\n # switching round dimension order in concatenate step.\n left = center[dim, 0:1] - (x[dim, 1] - x[dim, 0])\n right = center[dim, -1] + (x[dim, -1] - x[dim, -2])\n return concatenate(concatenate(left, center, dim), right, dim)\n\n\ndef parse_params(params=None, defaults=None, globs=None, array=None):\n \"\"\"\n Construct the colorbar settings using default and input values\n \"\"\"\n from matplotlib.colors import Normalize, LogNorm, LinearSegmentedColormap\n from matplotlib import cm\n\n parsed = dict(config.plot.params)\n if defaults is not None:\n for key, val in defaults.items():\n parsed[key] = val\n if globs is not None:\n for key, val in globs.items():\n # Global parameters need special treatment because by default they\n # are set to None, and we don't want to overwrite the defaults.\n if val is not None:\n parsed[key] = val\n if params is not None:\n if isinstance(params, bool):\n params = {\"show\": params}\n for key, val in params.items():\n parsed[key] = val\n\n if parsed[\"norm\"] == \"log\":\n norm = LogNorm\n elif parsed[\"norm\"] == \"linear\":\n norm = Normalize\n else:\n raise RuntimeError(\"Unknown norm. Expected 'linear' or 'log', \"\n \"got {}.\".format(parsed[\"norm\"]))\n vmin = parsed[\"vmin\"]\n vmax = parsed[\"vmax\"]\n parsed[\"norm\"] = norm(vmin=vmin.value if vmin is not None else None,\n vmax=vmax.value if vmax is not None else None)\n\n # Convert color into custom colormap\n if parsed[\"color\"] is not None:\n parsed[\"cmap\"] = LinearSegmentedColormap.from_list(\n \"tmp\", [parsed[\"color\"], parsed[\"color\"]])\n else:\n parsed[\"cmap\"] = copy(cm.get_cmap(parsed[\"cmap\"]))\n\n if parsed[\"under_color\"] is None:\n parsed[\"cmap\"].set_under(parsed[\"cmap\"](0.0))\n else:\n parsed[\"cmap\"].set_under(parsed[\"under_color\"])\n if parsed[\"over_color\"] is None:\n parsed[\"cmap\"].set_over(parsed[\"cmap\"](1.0))\n else:\n parsed[\"cmap\"].set_over(parsed[\"over_color\"])\n\n return parsed\n\n\ndef vars_to_err(v):\n \"\"\"\n Convert variances to errors.\n \"\"\"\n with np.errstate(invalid=\"ignore\"):\n v = np.sqrt(v)\n np.nan_to_num(v, copy=False)\n return v\n\n\ndef find_log_limits(x):\n \"\"\"\n To find log scale limits, we histogram the data between 1.0-30\n and 1.0e+30 and include only bins that are non-zero.\n \"\"\"\n from .. 
import flatten, ones\n volume = np.product(x.shape)\n pixel = flatten(values(x.astype(dtype.float64)), to='pixel')\n weights = ones(dims=['pixel'], shape=[volume], unit='counts')\n hist = histogram(DataArray(data=weights, coords={'order': pixel}),\n bins=Variable(dims=['order'],\n values=np.geomspace(1e-30, 1e30, num=61),\n unit=x.unit))\n # Find the first and the last non-zero bins\n inds = np.nonzero((hist.data > 0.0 * units.counts).values)\n ar = np.arange(hist.data.shape[0])[inds]\n # Safety check in case there are no values in range 1.0e-30:1.0e+30:\n # fall back to the linear method and replace with arbitrary values if the\n # limits are negative.\n if len(ar) == 0:\n [vmin, vmax] = find_linear_limits(x)\n if vmin.value <= 0.0:\n if vmax.value <= 0.0:\n vmin = full_like(vmin, 0.1)\n vmax = full_like(vmax, 1.0)\n else:\n vmin = 1.0e-3 * vmax\n else:\n vmin = hist.coords['order']['order', ar.min()]\n vmax = hist.coords['order']['order', ar.max() + 1]\n return [vmin, vmax]\n\n\ndef find_linear_limits(x):\n \"\"\"\n Find variable min and max.\n \"\"\"\n return [\n values(nanmin(x).astype(dtype.float64)),\n values(nanmax(x).astype(dtype.float64))\n ]\n\n\ndef find_limits(x, scale=None, flip=False):\n \"\"\"\n Find sensible limits, depending on linear or log scale.\n \"\"\"\n if scale is not None:\n if scale == \"log\":\n lims = {\"log\": find_log_limits(x)}\n else:\n lims = {\"linear\": find_linear_limits(x)}\n else:\n lims = {\"log\": find_log_limits(x), \"linear\": find_linear_limits(x)}\n if flip:\n for key in lims:\n lims[key] = np.flip(lims[key]).copy()\n return lims\n\n\ndef fix_empty_range(lims, replacement=None):\n \"\"\"\n Range correction in case xmin == xmax\n \"\"\"\n dx = 0.0 * lims[0].unit\n if lims[0].value == lims[1].value:\n if replacement is not None:\n dx = 0.5 * replacement\n elif lims[0].value == 0.0:\n dx = 0.5 * lims[0].unit\n else:\n dx = 0.5 * abs_(lims[0])\n return [lims[0] - dx, lims[1] + dx]\n\n\ndef fig_to_pngbytes(fig):\n \"\"\"\n Convert figure to png image bytes.\n We also close the figure to prevent it from showing up again in\n cells further down the notebook.\n \"\"\"\n import matplotlib.pyplot as plt\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n plt.close(fig)\n buf.seek(0)\n return buf.getvalue()\n\n\ndef to_dict(meta):\n \"\"\"\n Convert a coords, meta, attrs or masks object to a python dict.\n \"\"\"\n return {name: var for name, var in meta.items()}\n",
"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)\n\"\"\"\nThis script generates input parameters to test whether arithmetic\noperations are consistent with Python.\n\nIt takes the output file as its only command line argument.\n\"\"\"\n\nfrom itertools import product\nimport sys\n\nimport numpy as np\n\n\ndef format_number(x):\n if np.isposinf(x):\n return 'INFINITY'\n if np.isneginf(x):\n return '-INFINITY'\n if np.isnan(x):\n return f'{\"-\" if np.sign(x) == -1 else \"\"}NAN'\n return f'{x}'\n\n\ndef build_param(a, b):\n # implement behavior of numpy 1.20\n sign = -1 if np.sign(a) == -1 or np.sign(b) == -1 else 1\n fd = sign * np.inf if ((isinstance(a, float) or isinstance(b, float))\n and b == 0 and a != 0) \\\n else np.floor_divide(a, b)\n return f'Params{{{a}, {b}, {format_number(np.true_divide(a, b))},' + \\\n f' {format_number(fd)}, {format_number(np.remainder(a, b))}}}'\n\n\ndef gen_values(dtype):\n return np.r_[np.arange(3, -4, -1), np.random.uniform(-10, 10, 5)] \\\n .astype(dtype)\n\n\ndef main():\n np.random.seed(14653503)\n with open(sys.argv[1], 'w') as outf:\n outf.write('// SPDX-License-Identifier: BSD-3-Clause\\n')\n outf.write('// Copyright (c) 2021 Scipp contributors '\n '(https://github.com/scipp)\\n')\n outf.write('// clang-format off\\n')\n outf.write('/*\\n')\n outf.write(' * This file was automatically generated\\n')\n outf.write(' * DO NOT CHANGE!\\n')\n outf.write(' */\\n\\n')\n\n outf.write('#include <array>\\n\\n')\n outf.write('#include <cmath>\\n\\n')\n\n outf.write('namespace {\\n')\n\n name_and_dtype = ((\"int\", int), (\"float\", float))\n for (a_name, a_dtype), (b_name, b_dtype) in product(name_and_dtype,\n name_and_dtype):\n outf.write('template <class Params>\\n')\n outf.write('constexpr inline auto '\n f'division_params_{a_name}_{b_name} = std::array{{\\n')\n for a, b in product(gen_values(a_dtype), gen_values(b_dtype)):\n outf.write(build_param(a, b) + ',\\n')\n outf.write('};\\n')\n\n outf.write('} // namespace\\n')\n outf.write('// clang-format on\\n')\n\n\nif __name__ == \"__main__\":\n main()\n",
"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)\n# @file\n# @author Simon Heybrock\nimport scipp as sc\nimport numpy as np\nfrom .common import assert_export\n\n\nclass TestDatasetSlice:\n def setup_method(self):\n var = sc.Variable(dims=['x'], values=np.arange(5, dtype=np.int64))\n self._d = sc.Dataset(data={'a': var, 'b': var}, coords={'x': var})\n\n def test_slice_with_range_datasetview_then_dataarrayview(self):\n sl = self._d['x', 1:-1]['a'].data\n ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3], dtype=np.int64))\n assert sc.identical(ref, sl)\n # omitting range end\n sl = self._d['x', 1:]['a'].data\n ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3, 4], dtype=np.int64))\n assert sc.identical(ref, sl)\n # omitting range begin\n sl = self._d['x', :-1]['a'].data\n ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3], dtype=np.int64))\n assert sc.identical(ref, sl)\n # omitting range both begin and end\n sl = self._d['x', :]['b'].data\n ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3, 4], dtype=np.int64))\n assert sc.identical(ref, sl)\n\n def test_slice_with_range_dataarrayview_then_dataarrayview(self):\n sl = self._d['a']['x', 1:-1].data\n ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3], dtype=np.int64))\n assert sc.identical(ref, sl)\n # omitting range end\n sl = self._d['a']['x', 1:].data\n ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3, 4], dtype=np.int64))\n assert sc.identical(ref, sl)\n # omitting range begin\n sl = self._d['a']['x', :-1].data\n ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3], dtype=np.int64))\n assert sc.identical(ref, sl)\n # omitting range both begin and end\n sl = self._d['b']['x', :].data\n ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3, 4], dtype=np.int64))\n assert sc.identical(ref, sl)\n\n def test_slice_single_index(self):\n assert sc.identical(self._d['x', -2]['a'], self._d['x', 3]['a'])\n assert sc.identical(self._d['a']['x', -2], self._d['a']['x', 3])\n\n def _test_copy_exports_on(self, x):\n assert_export(x.copy)\n assert_export(x.__copy__)\n assert_export(x.__deepcopy__, {})\n\n def test_copy_dataarrayview_exports(self):\n view = self._d['a']\n self._test_copy_exports_on(view)\n\n def test_set_item_via_temporary_slice(self):\n N = 6\n M = 4\n d1 = sc.Dataset()\n d1['x'] = sc.Variable(dims=['x'], values=np.arange(N).astype(np.float64))\n d1['y'] = sc.Variable(dims=['y'], values=np.arange(M).astype(np.float64))\n arr1 = np.arange(N * M).reshape(N, M).astype(np.float64) + 1\n d1['a'] = sc.Variable(dims=['x', 'y'], values=arr1)\n d1 = d1['x', 1:2]\n d1['a'].data.values.tolist() == [[5.0, 6.0, 7.0, 8.0]]\n\n def test_set_dataarrayview_slice_items(self):\n d = self._d.copy()\n d['a']['x', 0:2] += d['b']['x', 0:2]\n assert d['a'].data.values.tolist() == [0, 2, 2, 3, 4]\n d['a']['x', 4] += \\\n d['b']['x', 1]\n assert d['a'].data.values.tolist() == [0, 2, 2, 3, 5]\n\n def test_slice_and_dimensions_items_dataarray(self):\n var = sc.Variable(dims=['x', 'y'], values=np.arange(50).reshape(5, 10))\n da = sc.DataArray(var)\n assert np.allclose(da['x', 0].values, da['x', 0:1].values)\n assert np.allclose(da['x', 4].values, da['x', -1].values)\n assert np.allclose(da['y', 1].values, da['y', -9].values)\n assert ('y' in da['x', 0].dims)\n assert ('x' not in da['x', 0].dims)\n assert ('y' in da['x', 0:1].dims)\n assert ('x' in da['x', 0:1].dims)\n\n def test_slice_and_dimensions_items_dataset(self):\n da = sc.DataArray(\n 
sc.Variable(dims=['x', 'y'], values=np.arange(50).reshape(5, 10)))\n ds = sc.Dataset(data={'a': da})\n assert (np.allclose(ds['x', 0]['a'].values,\n ds['x', 0:1]['a'].values[0],\n atol=1e-9))\n assert (np.allclose(ds['x', 4]['a'].values, ds['x', -1]['a'].values))\n assert (np.allclose(ds['y', 1]['a'].values, ds['y', -9]['a'].values))\n assert ('y' in da['x', 0].dims)\n assert ('x' not in da['x', 0].dims)\n assert ('y' in da['x', 0:1].dims)\n assert ('x' in da['x', 0:1].dims)\n\n def test_slice_dataset_with_data_only(self):\n d = sc.Dataset()\n d['data'] = sc.Variable(dims=['y'], values=np.arange(10))\n sliced = d['y', :]\n assert sc.identical(d, sliced)\n sliced = d['y', 2:6]\n assert sc.identical(sc.Variable(dims=['y'], values=np.arange(2, 6)),\n sliced['data'].data)\n\n def test_slice_dataset_with_coords_only(self):\n d = sc.Dataset(\n coords={'y-coord': sc.Variable(dims=['y'], values=np.arange(10))})\n sliced = d['y', :]\n assert sc.identical(d, sliced)\n sliced = d['y', 2:6]\n assert sc.identical(sc.Variable(dims=['y'], values=np.arange(2, 6)),\n sliced.coords['y-coord'])\n\n def test_slice_with_step_1(self):\n var = sc.Variable(dims=['x'], values=np.arange(1, 4, dtype=np.int64))\n expect = sc.Dataset(data={'a': var, 'b': var}, coords={'x': var})\n assert sc.identical(self._d['x', 1:4:1], expect)\n assert sc.identical(self._d['a']['x', 1:4:1], expect['a'])\n",
"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)\n# @author Neil Vaytet\n\nfrom .figure import PlotFigure\nfrom .toolbar import PlotToolbar1d\nfrom .tools import get_line_param\nimport numpy as np\nimport copy as cp\nimport warnings\n\n\nclass PlotFigure1d(PlotFigure):\n \"\"\"\n Class for 1 dimensional plots. This is used by both the `PlotView1d` for\n normal 1d plots, and the `PlotProfile`.\n\n `PlotFigure1d` can \"keep\" the currently displayed line, or \"remove\" a\n previously saved line.\n \"\"\"\n def __init__(self,\n ax=None,\n mpl_line_params=None,\n title=None,\n norm=None,\n grid=False,\n mask_color=None,\n figsize=None,\n picker=False,\n legend=None,\n padding=None,\n xlabel=None,\n ylabel=None):\n\n super().__init__(ax=ax,\n figsize=figsize,\n title=title,\n padding=padding,\n xlabel=xlabel,\n ylabel=ylabel,\n toolbar=PlotToolbar1d,\n grid=grid)\n\n self._lines = {}\n\n if legend is None:\n legend = {\"show\": True}\n elif isinstance(legend, bool):\n legend = {\"show\": legend}\n elif \"show\" not in legend:\n legend[\"show\"] = True\n\n self._mask_color = mask_color if mask_color is not None else 'k'\n self.picker = picker\n self.norm = norm\n self.legend = legend\n if \"loc\" not in self.legend:\n self.legend[\"loc\"] = 0\n\n self._mpl_line_params = mpl_line_params # color, linewidth, ...\n\n def update_axes(self, scale, unit, legend_labels=True):\n \"\"\"\n Wipe the figure and start over when the dimension to be displayed along\n the horizontal axis is changed.\n \"\"\"\n scale = scale['x']\n self._legend_labels = legend_labels\n\n if self.own_axes:\n self._lines = {}\n title = self.ax.get_title()\n need_grid = self.ax.xaxis.get_gridlines()[0]._visible\n self.ax.clear()\n self.ax.set_title(title)\n if need_grid:\n self.ax.grid()\n\n self.ax.set_xscale(scale)\n self.ax.set_yscale(\"log\" if self.norm == \"log\" else \"linear\")\n self.ax.set_ylabel(unit if self.ylabel is None else self.ylabel)\n\n self.ax.set_xlabel(\n self._formatters['x']['label'] if self.xlabel is None else self.xlabel)\n\n self.ax.xaxis.set_major_locator(self.axlocator['x'][scale])\n self.ax.xaxis.set_major_formatter(self.axformatter['x'][scale])\n\n if self.show_legend():\n self.ax.legend(loc=self.legend[\"loc\"])\n\n self._axes_updated = True\n\n def _make_line(self, name, masks, hist):\n class Line:\n def __init__(self):\n self.data = None\n self.error = None\n self.masks = {}\n self.mpl_params = {}\n\n index = len(self._lines)\n line = Line()\n line.mpl_params = {\n key: get_line_param(key, index)\n for key in [\"color\", \"marker\", \"linestyle\", \"linewidth\"]\n }\n if self._mpl_line_params is not None:\n for key, item in self._mpl_line_params.items():\n if name in item:\n line.mpl_params[key] = item[name]\n label = None\n if self._legend_labels and len(name) > 0:\n label = name\n\n if hist:\n line.data = self.ax.step(\n [1, 2], [1, 2],\n label=label,\n zorder=10,\n picker=self.picker,\n **{key: line.mpl_params[key]\n for key in [\"color\", \"linewidth\"]})[0]\n for m in masks:\n line.masks[m] = self.ax.step([1, 2], [1, 2],\n linewidth=line.mpl_params[\"linewidth\"] *\n 3.0,\n color=self._mask_color,\n zorder=9)[0]\n # Abuse a mostly unused property `gid` of Line2D to\n # identify the line as a mask. 
We set gid to `onaxes`.\n # This is used by the profile viewer in the 2D plotter\n # to know whether to show the mask or not, depending on\n # whether the cursor is hovering over the 2D image or\n # not.\n line.masks[m].set_gid(\"onaxes\")\n else:\n line.data = self.ax.plot([1, 2], [1, 2],\n label=label,\n zorder=10,\n picker=self.picker,\n **line.mpl_params)[0]\n for m in masks:\n line.masks[m] = self.ax.plot([1, 2], [1, 2],\n zorder=11,\n mec=self._mask_color,\n mfc=\"None\",\n mew=3.0,\n linestyle=\"none\",\n marker=line.mpl_params[\"marker\"])[0]\n line.masks[m].set_gid(\"onaxes\")\n\n if self.picker:\n line.data.set_pickradius(5.0)\n line.data.set_url(name)\n\n # Add error bars\n if self.errorbars[name]:\n line.error = self.ax.errorbar([1, 2], [1, 2],\n yerr=[1, 1],\n color=line.mpl_params[\"color\"],\n zorder=10,\n fmt=\"none\")\n if self.show_legend():\n self.ax.legend(loc=self.legend[\"loc\"])\n return line\n\n def _preprocess_hist(self, name, vals):\n \"\"\"\n Convert 1d data to be plotted to internal format, e.g., padding\n histograms and duplicating info for variances.\n \"\"\"\n x = vals[\"values\"][\"x\"]\n y = vals[\"values\"][\"y\"]\n hist = len(x) != len(y)\n if hist:\n vals[\"values\"][\"y\"] = np.concatenate((y[0:1], y))\n for key, mask in vals[\"masks\"].items():\n vals[\"masks\"][key] = np.concatenate((mask[0:1], mask))\n vals[\"variances\"][\"x\"] = 0.5 * (x[1:] + x[:-1])\n else:\n vals[\"variances\"][\"x\"] = x\n vals[\"variances\"][\"y\"] = y\n return vals, hist\n\n def update_data(self, new_values):\n \"\"\"\n Update the x and y positions of the data points when a new data slice\n is received for display.\n \"\"\"\n xmin = np.Inf\n xmax = np.NINF\n for name in new_values:\n vals, hist = self._preprocess_hist(name, new_values[name])\n if name not in self._lines:\n self._lines[name] = self._make_line(name,\n masks=vals['masks'].keys(),\n hist=hist)\n line = self._lines[name]\n line.data.set_data(vals[\"values\"][\"x\"], vals[\"values\"][\"y\"])\n lab = vals[\"label\"] if len(vals[\"label\"]) > 0 else name\n line.label = f'{name}[{lab}]' # used later if line is kept\n\n for m in vals[\"masks\"]:\n line.masks[m].set_data(\n vals[\"values\"][\"x\"],\n np.where(vals[\"masks\"][m], vals[\"values\"][\"y\"],\n None).astype(np.float32))\n\n if self.errorbars[name]:\n coll = line.error.get_children()[0]\n coll.set_segments(\n self._change_segments_y(vals[\"variances\"][\"x\"],\n vals[\"variances\"][\"y\"],\n vals[\"variances\"][\"e\"]))\n coord = vals[\"values\"][\"x\"]\n low = min(coord[0], coord[-1])\n high = max(coord[0], coord[-1])\n xmin = min(xmin, low)\n xmax = max(xmax, high)\n\n deltax = 0.05 * (xmax - xmin)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n self.ax.set_xlim([xmin - deltax, xmax + deltax])\n if self._axes_updated:\n self._axes_updated = False\n self.fig.tight_layout(rect=self.padding)\n\n self.draw()\n\n def keep_line(self, color, line_id, names=None):\n \"\"\"\n Duplicate the current main line and give it an arbitrary color.\n Triggered by a `PlotPanel1d` keep button or a `keep_profile` event.\n \"\"\"\n if names is None:\n names = self._lines\n for name in names:\n # The main line\n line = self._lines[name]\n self.ax.lines.append(cp.copy(line.data))\n self.ax.lines[-1].set_label(line.label)\n self.ax.lines[-1].set_url(line_id)\n self.ax.lines[-1].set_zorder(2)\n if self.ax.lines[-1].get_marker() == \"None\":\n self.ax.lines[-1].set_color(color)\n else:\n self.ax.lines[-1].set_markerfacecolor(color)\n 
self.ax.lines[-1].set_markeredgecolor(\"None\")\n\n # The masks\n for m in self._lines[name].masks:\n self.ax.lines.append(cp.copy(self._lines[name].masks[m]))\n self.ax.lines[-1].set_url(line_id)\n self.ax.lines[-1].set_gid(m)\n self.ax.lines[-1].set_zorder(3)\n if self.ax.lines[-1].get_marker() != \"None\":\n self.ax.lines[-1].set_zorder(3)\n else:\n self.ax.lines[-1].set_zorder(1)\n\n if self.errorbars[name]:\n err = self._lines[name].error.get_children()\n self.ax.collections.append(cp.copy(err[0]))\n self.ax.collections[-1].set_color(color)\n self.ax.collections[-1].set_url(line_id)\n self.ax.collections[-1].set_zorder(2)\n\n if self.show_legend():\n self.ax.legend(loc=self.legend[\"loc\"])\n self.draw()\n\n def remove_line(self, line_id, names=None):\n \"\"\"\n Remove a previously saved line.\n Triggered by a `PlotPanel1d` remove button or a `remove_profile` event.\n \"\"\"\n if names is None:\n names = self._lines\n for name in names:\n lines = []\n for line in self.ax.lines:\n if line.get_url() != line_id:\n lines.append(line)\n collections = []\n for coll in self.ax.collections:\n if coll.get_url() != line_id:\n collections.append(coll)\n self.ax.lines = lines\n self.ax.collections = collections\n if self.show_legend():\n self.ax.legend(loc=self.legend[\"loc\"])\n self.draw()\n\n def update_line_color(self, line_id, color):\n \"\"\"\n Change the line color when the `ColorPicker` in the `PlotPanel1d` is\n being used.\n \"\"\"\n for line in self.ax.lines:\n if line.get_url() == line_id:\n if line.get_marker() == 'None':\n line.set_color(color)\n else:\n line.set_markerfacecolor(color)\n\n for coll in self.ax.collections:\n if coll.get_url() == line_id:\n coll.set_color(color)\n self.draw()\n\n def _change_segments_y(self, x, y, e):\n \"\"\"\n Update the positions of the errorbars when `update_data` is called.\n \"\"\"\n arr1 = np.repeat(x, 2)\n arr2 = np.array([y - e, y + e]).T.flatten()\n return np.array([arr1, arr2]).T.flatten().reshape(len(y), 2, 2)\n\n def toggle_mask(self, mask_group, mask_name, value):\n \"\"\"\n Show or hide a given mask.\n \"\"\"\n if mask_group in self._lines:\n msk = self._lines[mask_group].masks[mask_name]\n if msk.get_gid() == \"onaxes\":\n msk.set_visible(value)\n # Also toggle masks on additional lines created by keep button\n for line in self.ax.lines:\n if line.get_gid() == mask_name:\n line.set_visible(value)\n self.draw()\n\n def rescale_to_data(self, vmin=None, vmax=None):\n \"\"\"\n Rescale y axis to the contents of the plot.\n \"\"\"\n if (vmin is None) and (vmax is None):\n self.ax.autoscale(True)\n self.ax.relim()\n self.ax.autoscale_view()\n else:\n self.ax.set_ylim(vmin, vmax)\n self.draw()\n\n def show_legend(self):\n \"\"\"\n Only display legend if there is least 1 line in the plot.\n \"\"\"\n return self.legend[\"show\"] and len(self.ax.get_legend_handles_labels()[1]) > 0\n\n def toggle_norm(self, norm=None, vmin=None, vmax=None):\n \"\"\"\n Set yscale to either \"log\" or \"linear\", depending on norm.\n \"\"\"\n self.norm = norm\n self.ax.set_yscale(\"log\" if self.norm == \"log\" else \"linear\")\n self.draw()\n"
] | [
[
"numpy.product",
"numpy.sqrt",
"numpy.nonzero",
"numpy.arange",
"numpy.nan_to_num",
"numpy.geomspace",
"matplotlib.pyplot.close",
"matplotlib.cm.get_cmap",
"numpy.errstate",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.flip"
],
[
"numpy.true_divide",
"numpy.random.seed",
"numpy.isnan",
"numpy.floor_divide",
"numpy.arange",
"numpy.isposinf",
"numpy.isneginf",
"numpy.sign",
"numpy.remainder",
"numpy.random.uniform"
],
[
"numpy.arange",
"numpy.array",
"numpy.allclose"
],
[
"numpy.concatenate",
"numpy.repeat",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
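Several helpers in the scipp `tools.py` entry are thin numpy idioms; `vars_to_err`, for example, square-roots variances into error bars while silencing invalid-value warnings and zeroing any resulting NaNs. A standalone copy of that idiom with a tiny usage example:

```python
import numpy as np

def vars_to_err(v):
    """Convert variances to errors; invalid results (e.g. from negative input) become 0."""
    with np.errstate(invalid="ignore"):
        err = np.sqrt(v)
    return np.nan_to_num(err, copy=False)

print(vars_to_err(np.array([4.0, 0.25, -1.0, np.nan])))  # [2.  0.5 0.  0. ]
```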
alishameli/CS231n-Sample-Code-1 | [
"e47e593026c80530f7c387c4feca24f88c1618a2"
] | [
"tensorflow/predict.py"
] | [
"import argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nimport models\n\ndef predict(model_data_path, image_path):\n\n # Default input size\n height = 228\n width = 304\n channels = 3\n batch_size = 1\n \n # Read image\n img = Image.open(image_path)\n img = img.resize([width,height], Image.ANTIALIAS)\n img = np.array(img).astype('float32')\n img = np.expand_dims(np.asarray(img), axis = 0)\n \n # Create a placeholder for the input image\n input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))\n \n # Construct the network\n net = models.ResNet50UpProj({'data': input_node}, batch_size)\n \n with tf.Session() as sess:\n\n # Load the converted parameters\n print('Loading the model')\n net.load(model_data_path, sess) \n \n uninitialized_vars = []\n for var in tf.global_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError: \n uninitialized_vars.append(var)\n\n init_new_vars_op = tf.variables_initializer(uninitialized_vars)\n sess.run(init_new_vars_op)\n \n # Evalute the network for the given image\n pred = sess.run(net.get_output(), feed_dict={input_node: img})\n \n # Plot result\n fig = plt.figure()\n ii = plt.imshow(pred[0,:,:,0], interpolation='nearest')\n fig.colorbar(ii)\n plt.show()\n\n return pred\n \n \ndef main():\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('model_path', help='Converted parameters for the model')\n parser.add_argument('image_paths', help='Directory of images to predict')\n args = parser.parse_args()\n\n # Predict the image\n pred = predict(args.model_path, args.image_paths)\n \n os._exit(0)\n\nif __name__ == '__main__':\n main()\n\n \n\n\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.asarray",
"tensorflow.global_variables",
"tensorflow.variables_initializer",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
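The `predict.py` record is written against TensorFlow 1.x graph mode (`tf.placeholder`, `tf.Session`), consistent with the 1.x-only version list above. On a TensorFlow 2 install, code in this style is usually run through the `tf.compat.v1` shim; a minimal hedged sketch with a toy graph (not the ResNet model from the record):

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # restore TF1 graph/session semantics on a TF2 install

x = tf.placeholder(tf.float32, shape=(None, 3))
y = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: np.ones((2, 3), dtype=np.float32)}))  # [3. 3.]
```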
dbusbridge/spektral | [
"a95807603c2bb96c80f34d326f663273c72ca3fc"
] | [
"spektral/datasets/delaunay.py"
] | [
"from __future__ import absolute_import\n\nimport numpy as np\nfrom scipy.spatial import Delaunay\n\nfrom spektral.utils import label_to_one_hot, numpy_to_nx\n\nRETURN_TYPES = {'numpy', 'networkx'}\nMAX_K = 7 # Maximum number of nodes in a graph\n\n\ndef generate_data(return_type='networkx', classes=0, n_samples_in_class=1000,\n n_nodes=7, support_low=0., support_high=10., drift_amount=1.0,\n one_hot_labels=True, support=None, seed=None):\n \"\"\"\n Generates a dataset of Delaunay triangulations as described by\n [Zambon et al. (2017)](https://arxiv.org/abs/1706.06941).\n Note that this function is basically deprecated and will change soon.\n \n :param return_type: `'networkx'` or `'numpy'`, data format to return;\n :param classes: indices of the classes to load (integer, or list of integers\n between 0 and 20);\n :param n_samples_in_class: number of generated samples per class;\n :param n_nodes: number of nodes in a graph;\n :param support_low: lower bound of the uniform distribution from which the \n support is generated;\n :param support_high: upper bound of the uniform distribution from which the \n support is generated;\n :param drift_amount: coefficient to control the amount of change between \n classes;\n :param one_hot_labels: one-hot encode dataset labels;\n :param support: custom support to use instead of generating it randomly; \n :param seed: random numpy seed;\n :return: if `return_type='networkx'`, a list of graphs in Networkx format, \n and an array containing labels; if `return_type='numpy'`, the adjacency \n matrix, node features, and an array containing labels.\n \"\"\"\n if return_type not in RETURN_TYPES:\n raise ValueError('Possible return_type: {}'.format(RETURN_TYPES))\n\n if isinstance(classes, int):\n classes = [classes]\n\n if max(classes) > 20 or min(classes) < 0:\n raise ValueError('Class indices must be between 0 and 20')\n\n r_classes = list(reversed(classes))\n if r_classes[-1] == 0:\n r_classes.insert(0, r_classes.pop(-1))\n\n # Support points\n np.random.seed(seed)\n if support is None:\n support = np.random.uniform(support_low, support_high, (1, n_nodes, 2))\n else:\n try:\n assert support.shape == (1, n_nodes, 2)\n except AssertionError:\n print('The given support doesn\\'t have shape (1, n_nodes, 2) as'\n 'expected. Attempting to reshape.')\n support = support.reshape(1, n_nodes, 2)\n\n # Compute node features\n node_features = []\n # Other node features\n for idx, i in enumerate(r_classes):\n if i == 0:\n concept_0 = np.repeat(support, n_samples_in_class, 0)\n noise_0 = np.random.normal(0, 1, (n_samples_in_class, n_nodes, 2))\n class_0 = concept_0 + noise_0\n node_features.append(class_0)\n else:\n radius = 10. * ((2./3.) 
** (drift_amount * (i - 1)))\n phase = np.random.uniform(0, 2 * np.pi, (n_nodes, 1))\n perturb_i_x = radius * np.cos(phase)\n perturb_i_y = radius * np.sin(phase)\n perturb_i = np.concatenate((perturb_i_x, perturb_i_y), axis=-1)\n support_i = support + perturb_i\n concept_i = np.repeat(support_i, n_samples_in_class, 0)\n noise_i = np.random.normal(0, 1, (n_samples_in_class, n_nodes, 2))\n class_i = concept_i + noise_i\n node_features.append(class_i)\n node_features = np.array(node_features).reshape((-1, n_nodes, 2))\n\n # Compute adjacency matrices\n adjacency = []\n for nf in node_features:\n adj = compute_adj(nf)\n adjacency.append(adj)\n adjacency = np.array(adjacency)\n\n # Compute labels\n labels = np.repeat(classes, n_samples_in_class)\n if one_hot_labels:\n labels = label_to_one_hot(labels, labels=classes)\n\n if return_type is 'numpy':\n return adjacency, node_features, labels\n elif return_type is 'networkx':\n graphs = numpy_to_nx(adjacency, node_features=node_features, nf_name='coords')\n return graphs, labels\n else:\n raise NotImplementedError\n\n\ndef compute_adj(x):\n \"\"\"\n Computes the Delaunay triangulation of the given points\n :param x: array of shape (num_nodes, 2)\n :return: the computed adjacency matrix\n \"\"\"\n tri = Delaunay(x)\n edges_explicit = np.concatenate((tri.vertices[:, :2],\n tri.vertices[:, 1:],\n tri.vertices[:, ::2]), axis=0)\n adj = np.zeros((x.shape[0], x.shape[0]))\n adj[edges_explicit[:, 0], edges_explicit[:, 1]] = 1.\n return np.clip(adj + adj.T, 0, 1)\n"
] | [
[
"numpy.random.seed",
"numpy.clip",
"scipy.spatial.Delaunay",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
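The `compute_adj` helper in the spektral row above turns a 2-D point set into a graph adjacency matrix via a Delaunay triangulation, which is also where most of the listed `scipy.spatial.Delaunay` / `numpy.concatenate` / `numpy.clip` calls come from. A minimal, self-contained sketch of that pattern is below; it uses `Delaunay.simplices` (the current name for the deprecated `vertices` attribute used in the row) and an arbitrary seed and point count chosen only for the demo.

```python
import numpy as np
from scipy.spatial import Delaunay

def delaunay_adjacency(points):
    """Binary adjacency matrix whose edges are the sides of the Delaunay triangles."""
    tri = Delaunay(points)
    # Each simplex (triangle) contributes its three edges: (0,1), (1,2), (0,2).
    edges = np.concatenate((tri.simplices[:, :2],
                            tri.simplices[:, 1:],
                            tri.simplices[:, ::2]), axis=0)
    adj = np.zeros((points.shape[0], points.shape[0]))
    adj[edges[:, 0], edges[:, 1]] = 1.0
    # Symmetrize and clip so double-counted edges stay binary.
    return np.clip(adj + adj.T, 0, 1)

if __name__ == "__main__":
    rng = np.random.default_rng(0)           # arbitrary seed, illustration only
    pts = rng.uniform(0.0, 10.0, (7, 2))     # 7 nodes, matching the dataset row
    A = delaunay_adjacency(pts)
    print(A.shape, int(A.sum()) // 2, "undirected edges")
```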
finagle29/PypeIt | [
"418d6d24d24054ad590d2f06c0b4688ea18f492e",
"418d6d24d24054ad590d2f06c0b4688ea18f492e"
] | [
"pypeit/scripts/flux_setup.py",
"pypeit/spectrographs/magellan_fire.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport os,time\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom pypeit import msgs\nfrom pypeit.par.util import make_pypeit_file\n\n\nclass SmartFormatter(argparse.HelpFormatter):\n\n def _split_lines(self, text, width):\n if text.startswith('R|'):\n return text[2:].splitlines()\n # this is the RawTextHelpFormatter._split_lines\n return argparse.HelpFormatter._split_lines(self, text, width)\n\ndef parser(options=None):\n parser = argparse.ArgumentParser(description='Parse', formatter_class=SmartFormatter)\n parser.add_argument(\"sci_path\", type=str, help=\"Path for Science folder\")\n parser.add_argument(\"--objmodel\", type=str, default='qso', choices=['qso', 'star', 'poly'],\n help=\"R|Science object model used in the telluric fitting.\\n\"\n \"The options are:\\n\"\n \"\\n\"\n \" qso = For quasars. You might need to set redshift, bal_wv_min_mx in the tell file.\\n\"\n \"\\n\"\n \" star = For stars. You need to set star_type, star_ra, star_dec, and star_mag in the tell_file.\\n\"\n \"\\n\"\n \" poly = For other type object, You might need to set fit_wv_min_mx, \\n\"\n \" and norder in the tell_file.\"\n )\n\n if options is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(options)\n return args\n\n\ndef main(args):\n \"\"\"\n This setups PypeIt files for fluxing, coadding and telluric corrections.\n It will produce three files named as your_spectragraph.flux, your_spectragraph.coadd1d,\n and your_spectragraph.tell\n \"\"\"\n allfiles = os.listdir(args.sci_path)\n allfiles = np.sort(allfiles)\n spec1dfiles = []\n spec2dfiles = []\n spec1dinfos = []\n for ifile in allfiles:\n if ('spec1d' in ifile) and ('.fits' in ifile):\n spec1dfiles.append(ifile)\n elif ('spec2d' in ifile) and ('.fits' in ifile):\n spec2dfiles.append(ifile)\n elif ('spec1d' in ifile) and ('.txt' in ifile):\n spec1dinfos.append(ifile)\n else:\n msgs.warn('{:} is not a standard PypeIt output.'.format(ifile))\n if len(spec2dfiles) > len(spec1dfiles):\n msgs.warn('The following exposures do not have 1D extractions:')\n for ii in range(len(spec2dfiles)):\n if not os.path.exists(os.path.join(args.sci_path, spec2dfiles[ii].replace('spec2d','spec1d'))):\n msgs.info('\\t {:}'.format(spec2dfiles[ii]))\n\n if len(spec1dfiles) > 0:\n par = fits.open(os.path.join(args.sci_path, spec1dfiles[0]))\n\n ## fluxing pypeit file\n spectrograph = par[0].header['PYP_SPEC']\n pypeline = par[0].header['PYPELINE']\n flux_file = '{:}.flux'.format(spectrograph)\n cfg_lines = ['[fluxcalib]']\n cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC derived with the UVIS algorithm\\n']\n cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']\n make_pypeit_file(flux_file, spectrograph, spec1dfiles, cfg_lines=cfg_lines, setup_mode=True)\n fin = open(flux_file, \"rt\")\n data = fin.read()\n data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))\n data = data.replace('data', 'flux')\n fin.close()\n fin = open(flux_file, \"wt\")\n fin.write(data)\n fin.close()\n\n ## coadd1d pypeit file\n coadd1d_file = '{:}.coadd1d'.format(spectrograph)\n cfg_lines = ['[coadd1d]']\n cfg_lines += [' coaddfile = YOUR_OUTPUT_FILE_NAME # Please set your output file name']\n cfg_lines += [' sensfuncfile = YOUR_SENSFUNC_FILE # Please set your SENSFUNC file name']\n if pypeline == 'Echelle':\n cfg_lines += [' wave_method = velocity # creates a uniformly space grid in log10(lambda)\\n']\n else:\n cfg_lines += [' 
wave_method = linear # creates a uniformly space grid in lambda\\n']\n\n cfg_lines += ['# This file includes all extracted objects. You need to figure out which object you want to \\n'+\\\n '# coadd before running pypeit_coadd_1dspec!!!']\n spec1d_info = []\n for ii in range(len(spec1dfiles)):\n meta_tbl = Table.read(os.path.join(args.sci_path, spec1dfiles[ii]).replace('.fits', '.txt'),\n format='ascii.fixed_width')\n _, indx = np.unique(meta_tbl['name'],return_index=True)\n objects = meta_tbl[indx]\n for jj in range(len(objects)):\n spec1d_info.append(spec1dfiles[ii] + ' '+ objects['name'][jj])\n make_pypeit_file(coadd1d_file, spectrograph, spec1d_info, cfg_lines=cfg_lines, setup_mode=True)\n fin = open(coadd1d_file, \"rt\")\n data = fin.read()\n data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))\n data = data.replace('data', 'coadd1d')\n fin.close()\n fin = open(coadd1d_file, \"wt\")\n fin.write(data)\n fin.close()\n\n ## tellfit pypeit file\n tellfit_file = '{:}.tell'.format(spectrograph)\n cfg_lines = ['[tellfit]']\n if args.objmodel == 'qso':\n cfg_lines += [' objmodel = qso']\n cfg_lines += [' redshift = 0.0']\n cfg_lines += [' bal_wv_min_max = 10000.,11000.']\n elif args.objmodel == 'star':\n cfg_lines += [' objmodel = star']\n cfg_lines += [' star_type = A0']\n cfg_lines += [' star_mag = 0.0']\n elif args.objmodel == 'poly':\n cfg_lines += [' objmodel = poly']\n cfg_lines += [' polyorder = 5']\n cfg_lines += [' fit_wv_min_max = 17000.0,22000.0']\n\n with open(tellfit_file, 'w') as f:\n f.write('# Auto-generated PypeIt file\\n')\n f.write('# {0}\\n'.format(time.strftime(\"%a %d %b %Y %H:%M:%S\", time.localtime())))\n f.write(\"\\n\")\n f.write(\"# User-defined execution parameters\\n\")\n f.write(\"# This is only an example. Make sure to change the following parameters accordingly.\\n\")\n f.write('\\n'.join(cfg_lines))\n f.write('\\n')\n f.write('\\n')\n msgs.info('PypeIt file written to: {0}'.format(tellfit_file))\n\n\n",
"\"\"\"\nModule for Magellan/FIRE specific codes\n\nImportant Notes:\n\n - If you are reducing old FIRE data (before the broken happened\n in 2016), please change the ord_spat_pos array (see lines from\n ~220 to ~230)\n\n\"\"\"\nfrom pkg_resources import resource_filename\nimport numpy as np\nfrom pypeit import msgs\nfrom pypeit import telescopes\nfrom pypeit.core import framematch\nfrom pypeit.par import pypeitpar\nfrom pypeit.spectrographs import spectrograph\nfrom pypeit.images import detector_container\n\n\n\nclass MagellanFIRESpectrograph(spectrograph.Spectrograph):\n \"\"\"\n Child to handle Magellan/FIRE specific code\n\n .. note::\n For FIRE Echelle, we usually use high gain and SUTR read mode.\n The exposure time is usually around 900s. The detector\n parameters below are based on such mode. Standard star and\n calibrations are usually use Fowler 1 read mode in which case\n the read noise is ~20 electron.\n\n \"\"\"\n ndet = 1\n\n def __init__(self):\n # Get it started\n super(MagellanFIRESpectrograph, self).__init__()\n self.spectrograph = 'magellan_fire_base'\n self.telescope = telescopes.MagellanTelescopePar()\n\n @staticmethod\n def default_pypeit_par():\n \"\"\"\n Set default parameters for VLT XSHOOTER reductions.\n \"\"\"\n par = pypeitpar.PypeItPar()\n return par\n\n def init_meta(self):\n \"\"\"\n Generate the meta data dict\n Note that the children can add to this\n\n Returns:\n self.meta: dict (generated in place)\n\n \"\"\"\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=0, card='RA')\n meta['dec'] = dict(ext=0, card='DEC')\n meta['target'] = dict(ext=0, card='OBJECT')\n meta['decker'] = dict(ext=0, card=None, default='default')\n meta['dichroic'] = dict(ext=0, card=None, default='default')\n meta['binning'] = dict(ext=0, card=None, default='1,1')\n\n meta['mjd'] = dict(ext=0, card='ACQTIME')\n meta['exptime'] = dict(ext=0, card='EXPTIME')\n meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=0, card='GRISM')\n meta['idname'] = dict(ext=0, card='OBSTYPE')\n\n # Ingest\n self.meta = meta\n\n\n\nclass MagellanFIREEchelleSpectrograph(MagellanFIRESpectrograph):\n \"\"\"\n Child to handle Magellan/FIRE Echelle data\n\n .. note::\n For FIRE Echelle, we usually use high gain and SUTR read mode.\n The exposure time is usually around 900s. The detector\n parameters below are based on such mode. 
Standard star and\n calibrations are usually use Fowler 1 read mode in which case\n the read noise is ~20 electron.\n\n \"\"\"\n def __init__(self):\n # Get it started\n super(MagellanFIREEchelleSpectrograph, self).__init__()\n #TODO Rename this magallen_fire_echelle??\n self.spectrograph = 'magellan_fire'\n self.camera = 'FIRE'\n self.numhead = 1\n\n def get_detector_par(self, hdu, det):\n \"\"\"\n Return a DectectorContainer for the current image\n\n Args:\n hdu (`astropy.io.fits.HDUList`):\n HDUList of the image of interest.\n Ought to be the raw file, or else..\n det (int):\n\n Returns:\n :class:`pypeit.images.detector_container.DetectorContainer`:\n\n \"\"\"\n # Detector 1\n detector_dict = dict(\n binning = '1,1',\n det = 1,\n dataext = 0,\n specaxis = 1,\n specflip = True,\n spatflip = False,\n platescale = 0.18,\n darkcurr = 0.01,\n #saturation = 20000., # high gain is 20000 ADU, low gain is 32000 ADU\n saturation = 100000., # This is an arbitrary value.\n nonlinear = 1.0, # high gain mode, low gain is 0.875\n mincounts = -1e10,\n numamplifiers = 1,\n gain = np.atleast_1d(1.2), # high gain mode, low gain is 3.8 e-/DN\n ronoise = np.atleast_1d(5.0), # for high gain mode and SUTR read modes with exptime ~ 900s\n datasec = np.atleast_1d('[5:2044,5:2044]'),\n oscansec = np.atleast_1d('[5:2044,:5]')\n )\n return detector_container.DetectorContainer(**detector_dict)\n\n @property\n def pypeline(self):\n return 'Echelle'\n\n def default_pypeit_par(self):\n \"\"\"\n Set default parameters for Shane Kast Blue reductions.\n \"\"\"\n par = pypeitpar.PypeItPar()\n par['rdx']['spectrograph'] = 'magellan_fire'\n\n # Wavelengths\n # 1D wavelength solution with OH lines\n par['calibrations']['wavelengths']['rms_threshold'] = 1.0\n par['calibrations']['wavelengths']['sigdetect']=[5,10,10,10,10,20,30,30,30,30,30,10,30,30,60,30,30,10,20,30,10]\n par['calibrations']['wavelengths']['n_first']=2\n par['calibrations']['wavelengths']['n_final']=[3,3,3,2,4,4,4,3,4,4,4,3,4,4,4,4,4,4,6,6,4]\n par['calibrations']['wavelengths']['lamps'] = ['OH_FIRE_Echelle']\n #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']\n par['calibrations']['wavelengths']['method'] = 'reidentify'\n par['calibrations']['wavelengths']['cc_thresh'] = 0.35\n par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_echelle.fits'\n par['calibrations']['wavelengths']['match_toler']=30.0\n\n # Echelle parameters\n par['calibrations']['wavelengths']['echelle'] = True\n par['calibrations']['wavelengths']['ech_fix_format'] = True\n par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4\n par['calibrations']['wavelengths']['ech_norder_coeff'] = 6\n par['calibrations']['wavelengths']['ech_sigrej'] = 3.0\n\n # Always correct for flexure, starting with default parameters\n par['scienceframe']['process']['sigclip'] = 20.0\n par['scienceframe']['process']['satpix'] ='nothing'\n\n # Set slits and tilts parameters\n par['calibrations']['tilts']['tracethresh'] = 5\n par['calibrations']['slitedges']['edge_thresh'] = 10.\n par['calibrations']['slitedges']['trace_thresh'] = 10.\n par['calibrations']['slitedges']['fit_order'] = 5\n par['calibrations']['slitedges']['max_shift_adj'] = 0.5\n par['calibrations']['slitedges']['fit_min_spec_length'] = 0.5\n par['calibrations']['slitedges']['left_right_pca'] = True\n par['calibrations']['slitedges']['pca_order'] = 3\n\n # Model entire slit\n par['reduce']['extraction']['model_full_slit'] = True # local sky subtraction operates on 
entire slit\n\n # Processing steps\n turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False, use_darkimage=False)\n par.reset_all_processimages_par(**turn_off)\n # Do not correct for flexure\n par['flexure']['spec_method'] = 'skip'\n\n # Set the default exposure time ranges for the frame typing\n par['calibrations']['standardframe']['exprng'] = [None, 60]\n par['calibrations']['arcframe']['exprng'] = [20, None]\n par['calibrations']['darkframe']['exprng'] = [20, None]\n par['scienceframe']['exprng'] = [20, None]\n\n # Sensitivity function parameters\n # Sensitivity function parameters\n par['sensfunc']['algorithm'] = 'IR'\n par['sensfunc']['polyorder'] = 8\n # place holder for telgrid file\n par['sensfunc']['IR']['telgridfile'] = resource_filename('pypeit', '/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')\n\n\n return par\n\n\n def check_frame_type(self, ftype, fitstbl, exprng=None):\n \"\"\"\n Check for frames of the provided type.\n \"\"\"\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n if ftype in ['pinhole', 'bias']:\n # No pinhole or bias frames\n return np.zeros(len(fitstbl), dtype=bool)\n if ftype in ['pixelflat', 'trace']:\n return good_exp & (fitstbl['idname'] == 'PixFlat')\n if ftype == 'standard':\n return good_exp & (fitstbl['idname'] == 'Telluric')\n if ftype == 'science':\n return good_exp & (fitstbl['idname'] == 'Science')\n if ftype in ['arc', 'tilt']:\n return good_exp & (fitstbl['idname'] == 'Science')\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)\n\n @property\n def norders(self):\n return 21\n\n @property\n def order_spat_pos(self):\n # ToDo: We somehow need to automate this.\n ## For OLD data, i.e. before 2017\n #ord_spat_pos = np.array([0.06054688, 0.14160156, 0.17089844, 0.22753906, 0.27539062,\n # 0.32128906, 0.36474609, 0.40673828, 0.45019531, 0.48974609,\n # 0.52978516, 0.56054688, 0.59814453, 0.63378906, 0.66503906,\n # 0.70019531, 0.7421875 , 0.77978516, 0.82763672, 0.87109375,\n # 0.9296875])\n ## For NEW data\n ord_spat_pos = np.array([0.078125, 0.13769531, 0.19189453, 0.24414062, 0.29296875,\n 0.34179688, 0.38330078, 0.42724609, 0.46582031, 0.50439453,\n 0.54199219, 0.57763672, 0.61279297, 0.6484375 , 0.68457031,\n 0.71875 , 0.75439453, 0.79443359, 0.83789062, 0.88671875,\n 0.94091797])\n return ord_spat_pos\n\n @property\n def orders(self):\n return np.arange(31, 10, -1, dtype=int)\n\n @property\n def spec_min_max(self):\n spec_max = np.asarray([2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,\n 2048,2048,2048,2048,2048])\n spec_min = np.asarray([ 500, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0])\n return np.vstack((spec_min, spec_max))\n\n def order_platescale(self, order_vec, binning=None):\n \"\"\"\n FIRE has no binning\n\n Args:\n order_vec (np.ndarray):\n binning (optional):\n\n Returns:\n np.ndarray:\n\n \"\"\"\n norders = order_vec.size\n return np.full(norders, 0.15)\n\n @property\n def dloglam(self):\n # This number was determined using the resolution and sampling quoted on the FIRE website\n R = 6000.0 * 2.7\n dloglam = 1.0 / R / np.log(10.0)\n return dloglam\n\n @property\n def loglam_minmax(self):\n return np.log10(8000.0), np.log10(25700)\n\n\nclass MagellanFIRELONGSpectrograph(MagellanFIRESpectrograph):\n \"\"\"\n Child to handle Magellan/FIRE high-throughput data\n\n .. 
note::\n For FIRE longslit, science data are usually taken with SUTR readout mode with ~600s exposure\n (at least for quasar hunting people) and the readout noise is ~6 e-\n\n \"\"\"\n def __init__(self):\n # Get it started\n super(MagellanFIRELONGSpectrograph, self).__init__()\n self.spectrograph = 'magellan_fire_long'\n self.camera = 'FIRE'\n self.numhead = 1\n\n\n def get_detector_par(self, hdu, det):\n \"\"\"\n Return a DectectorContainer for the current image\n\n Args:\n hdu (`astropy.io.fits.HDUList`):\n HDUList of the image of interest.\n Ought to be the raw file, or else..\n det (int):\n\n Returns:\n :class:`pypeit.images.detector_container.DetectorContainer`:\n\n \"\"\"\n\n # Detector 1\n detector_dict = dict(\n binning = '1,1',\n det = 1,\n dataext = 0,\n specaxis = 0,\n specflip = False,\n spatflip = False,\n platescale = 0.15,\n darkcurr = 0.01,\n saturation = 320000., #32000 for low gain, I set to a higher value to keep data in K-band\n nonlinear = 0.875,\n mincounts = -1e10,\n numamplifiers = 1,\n gain = np.atleast_1d(3.8),\n ronoise = np.atleast_1d(6.0), # SUTR readout mode with exposure~600s\n datasec = np.atleast_1d('[5:2044, 900:1250]'),\n oscansec = np.atleast_1d('[:5, 900:1250]')\n )\n return detector_container.DetectorContainer(**detector_dict)\n\n def default_pypeit_par(self):\n \"\"\"\n Set default parameters.\n \"\"\"\n par = pypeitpar.PypeItPar()\n par['rdx']['spectrograph'] = 'magellan_fire_long'\n\n # Wavelengths\n # 1D wavelength solution with arc lines\n par['calibrations']['wavelengths']['rms_threshold'] = 1.0\n par['calibrations']['wavelengths']['sigdetect']=3\n par['calibrations']['wavelengths']['fwhm'] = 20\n par['calibrations']['wavelengths']['n_first']=2\n par['calibrations']['wavelengths']['n_final']=4\n par['calibrations']['wavelengths']['lamps'] = ['ArI', 'ArII', 'ThAr', 'NeI']\n #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']\n par['calibrations']['wavelengths']['method'] = 'full_template'\n par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_long.fits'\n par['calibrations']['wavelengths']['match_toler']=5.0\n\n # Set slits and tilts parameters\n par['calibrations']['tilts']['tracethresh'] = 5\n par['calibrations']['slitedges']['trace_thresh'] = 10.\n par['calibrations']['slitedges']['sync_predict'] = 'nearest'\n\n # Processing steps\n turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False, use_darkimage=False)\n par.reset_all_processimages_par(**turn_off)\n\n # Scienceimage parameters\n par['reduce']['findobj']['sig_thresh'] = 5\n #par['reduce']['maxnumber'] = 2\n par['reduce']['findobj']['find_trim_edge'] = [50,50]\n par['flexure']['spec_method'] = 'skip'\n\n par['sensfunc']['IR']['telgridfile'] = resource_filename('pypeit', '/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')\n\n # Set the default exposure time ranges for the frame typing\n par['calibrations']['standardframe']['exprng'] = [None, 60]\n par['calibrations']['arcframe']['exprng'] = [1, 50]\n par['calibrations']['darkframe']['exprng'] = [20, None]\n par['scienceframe']['exprng'] = [20, None]\n return par\n\n def check_frame_type(self, ftype, fitstbl, exprng=None):\n \"\"\"\n Check for frames of the provided type.\n \"\"\"\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n if ftype in ['pinhole', 'bias']:\n # No pinhole or bias frames\n return np.zeros(len(fitstbl), dtype=bool)\n if ftype in ['pixelflat', 'trace']:\n return good_exp & 
(fitstbl['idname'] == 'PixFlat')\n if ftype == 'standard':\n return good_exp & (fitstbl['idname'] == 'Telluric')\n if ftype == 'science':\n return good_exp & (fitstbl['idname'] == 'Science')\n if ftype in ['arc', 'tilt']:\n return good_exp & (fitstbl['idname'] == 'Arc')\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)\n\n"
] | [
[
"numpy.sort",
"numpy.unique"
],
[
"numpy.log",
"numpy.asarray",
"numpy.arange",
"numpy.full",
"numpy.atleast_1d",
"numpy.log10",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
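The only numpy calls recorded for `flux_setup.py` in the PypeIt row are `numpy.sort` and `numpy.unique`; the latter is used with `return_index=True` to keep one entry per extracted object name when the coadd1d file is assembled. A small stand-alone sketch of that deduplication step is shown here with made-up object names (no astropy table needed for the illustration).

```python
import numpy as np

# Hypothetical 'name' column of a spec1d info table; the duplicate mimics the
# same object appearing more than once in the extraction summary.
names = np.array(["SPAT0100-SLIT0000-DET01",
                  "SPAT0250-SLIT0000-DET01",
                  "SPAT0100-SLIT0000-DET01"])

# np.unique returns sorted unique values; return_index gives the index of the
# first occurrence of each value, which is then used to select rows.
_, first_idx = np.unique(names, return_index=True)
unique_names = names[first_idx]
print(unique_names)
```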
LamannaLeonardo/OLAM | [
"7a6611912ebb40d39a934dd454efec4cbb7913d3"
] | [
"Util/Latex_generator.py"
] | [
"# Copyright (c) 2022, Leonardo Lamanna\n# All rights reserved.\n# This source code is licensed under the MIT-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport pandas as pd\nimport os\n\npd.options.display.max_colwidth = 100\n\ndef generate_latex_table(data_file, labels, tab_name, caption, header):\n\n with open(tab_name + \".tex\", \"w\") as f:\n df = pd.read_excel(data_file, sheet_name=\"Summary\")\n df_restricted = df[labels]\n f.write(df_restricted.to_latex(index=False, escape=False,\n label=\"tab:{}\".format(tab_name),\n caption= caption,\n header = header))\n\n\ndef generate_comparison_latex_table():\n labels = [\"Domain\", \"Neg precision A\", \"Neg recall A\", \"Overall precision A\", \"Overall recall A\",\n \"Neg precision B\", \"Neg recall B\", \"Overall precision B\", \"Overall recall B\"]\n header = [\"Domain\", \"$P_{\\\\eff^{-}}$\", \"$R_{\\\\eff^{-}}$\", \"$P$\", \"$R$\",\n \"$P_{\\\\eff^{-}}$\", \"$R_{\\\\eff^{-}}$\", \"$P$\", \"$R$\"]\n caption = \"For each domain:statistics on final metrics of the last instance grouped by \" \\\n \"negative effects.\"\n tab_name = \"comparison_summary_uncertain\"\n file_path = os.path.join(\"comparison_summary_uncertain.xlsx\")\n\n generate_latex_table(file_path, labels, tab_name, caption, header)\n\n\ndef generate_comparison_latex_table_fama():\n labels = [\"Domain\", \"Tot time\", \"Overall precision\", \"Overall recall\", \"FAMA tot time\",\n \"FAMA precision\", \"FAMA recall\", \"Delta act\"]\n header = [\"Domain\", \"$t$\", \"$P$\", \"$R$\", \"$t$\", \"$P$\", \"$R$\", \"$\\delta_{A}$\"]\n caption = \"Comparison among OLAM and FAMA with full observability. FAMA is run with all plan traces \" \\\n \"provided in \\protect\\cite{aineto_AIJ2019}. MODEL WITH UNCERTAIN NEGATIVE EFFECTS AND STRIPS ASSUMPTION.\"\n tab_name = \"comparison_fama\"\n file_path = os.path.join(\"comparison_fama.xlsx\")\n\n generate_latex_table(file_path, labels, tab_name, caption, header)\n\n\ndef generate_summary_latex_table():\n # labels = [\"Domain\", \"Instances\", \"Precs precision\", \"Precs recall\",\"Pos precision\", \"Pos recall\",\n # \"Neg precision\", \"Neg recall\", \"Overall precision\", \"Overall recall\"]\n labels = [\"Domain\", \"Instances\", \"Precs precision\", \"Precs recall\",\"Pos precision\", \"Pos recall\",\n \"Neg precision\", \"Neg recall\", \"Average precision\", \"Average recall\"]\n header = [\"Domain\", \"$I$\", \"$P_{\\\\prec}$\", \"$R_{\\\\prec}$\", \"$P_{\\\\eff^{+}}$\", \"$R_{\\\\eff^{+}}$\", \"$P_{\\\\eff^{-}}$\",\n \"$R_{\\\\eff^{-}}$\", \"$P$\", \"$R$\"]\n caption = \"For each domain:statistics on final metrics of the last instance grouped by \" \\\n \"preconditions, positive effects and negative ones.\"\n tab_name = \"overall_summary_certain_nostripsass\"\n\n folder = \"../Analysis/IJCAI_Results/Results_certain_NOnegeff_assumption\"\n file_path = os.path.join(folder, \"overall_summary.xlsx\")\n\n generate_latex_table(file_path, labels, tab_name, caption, header)\n\n\ndef generate_domain_objects_table():\n\n header = [\"Domain\", \"Objects\"]\n caption = \"For each domain, problem objects of all problems in the generated set.\"\n tab_name = \"all_problem_objects\"\n\n df = pd.DataFrame({\n \"Domain\":[],\n \"Objects\":[]\n })\n # df.set_index('Domain', inplace=True)\n\n domain_dataframes = [name for name in os.listdir(os.path.join(\"..\", \"Analysis\", \"Results_cert\"))\n if not name.startswith(\"overall\")]\n\n for domain_dataframe in domain_dataframes:\n domain = 
domain_dataframe.split(\"_\")[0]\n df_domain = pd.read_excel(os.path.join(\"..\", \"Analysis\", \"Results_cert\", domain_dataframe),\n sheet_name=\"Objects\")\n domain_obj_types = [key.strip().lower() for key in list(df_domain) if key.strip().lower() != \"total objs\"]\n\n for i, row in df_domain.iterrows():\n problem_objs = []\n for k in domain_obj_types:\n problem_objs.append(\"{} {}\".format(k,row[\"\\t\" + k]))\n\n eval = {\n \"Domain\":domain,\n \"Objects\":\", \".join(problem_objs)\n }\n\n\n df = df.append(eval, ignore_index=True)\n\n\n\n\n with open(tab_name + \".tex\", \"w\") as f:\n f.write(df.to_latex(index=False,\n label=\"tab:{}\".format(tab_name),\n caption= caption,\n header = header))\n\n\n\nif __name__ == \"__main__\":\n\n generate_summary_latex_table()\n #\n # generate_domain_objects_table()\n"
] | [
[
"pandas.read_excel",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
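The OLAM `Latex_generator.py` row is built around the `pandas.read_excel` followed by `DataFrame.to_latex` pattern. A minimal sketch of the same flow is given below; it swaps the repository's Excel summary files for an in-memory DataFrame so the example has no file dependencies, and the numbers are placeholders, not results from the paper.

```python
import pandas as pd

# Stand-in for the 'Summary' sheet normally loaded with pd.read_excel(path, sheet_name="Summary").
df = pd.DataFrame({
    "Domain": ["blocksworld", "gripper"],
    "Overall precision": [0.97, 0.95],   # illustrative values only
    "Overall recall": [0.92, 0.94],
})

# caption/label keyword support assumes pandas >= 1.0.
latex = df.to_latex(index=False,
                    label="tab:overall_summary",
                    caption="Illustrative summary table.",
                    header=["Domain", "$P$", "$R$"])
print(latex)
```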
RichardoLuo/ColossalAI | [
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba"
] | [
"tests/components_to_test/repeated_computed_layer.py",
"tests/test_tensor/test_tensor.py",
"colossalai/kernel/jit/bias_gelu.py",
"tests/test_layers/test_3d/test_3d.py",
"colossalai/cli/benchmark/utils.py",
"colossalai/nn/optimizer/cpu_adam.py",
"tests/test_moe/test_moe_zero_optim.py"
] | [
"#!/usr/bin/env python\n\nimport torch\nimport torch.nn as nn\nfrom colossalai.nn import CheckpointModule\nfrom .utils.dummy_data_generator import DummyDataGenerator\nfrom .registry import non_distributed_component_funcs\n\n\nclass NetWithRepeatedlyComputedLayers(CheckpointModule):\n \"\"\"\n This model is to test with layers which go through forward pass multiple times.\n In this model, the fc1 and fc2 call forward twice\n \"\"\"\n\n def __init__(self, checkpoint=False) -> None:\n super().__init__(checkpoint=checkpoint)\n self.fc1 = nn.Linear(5, 5)\n self.fc2 = nn.Linear(5, 5)\n self.fc3 = nn.Linear(5, 2)\n self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]\n\n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n return x\n\n\nclass DummyDataLoader(DummyDataGenerator):\n\n def generate(self):\n data = torch.rand(16, 5)\n label = torch.randint(low=0, high=2, size=(16,))\n return data, label\n\n\n@non_distributed_component_funcs.register(name='repeated_computed_layers')\ndef get_training_components():\n\n def model_builder(checkpoint=True):\n return NetWithRepeatedlyComputedLayers(checkpoint)\n\n trainloader = DummyDataLoader()\n testloader = DummyDataLoader()\n\n criterion = torch.nn.CrossEntropyLoss()\n return model_builder, trainloader, testloader, torch.optim.Adam, criterion\n",
"import torch\nfrom colossalai.tensor import ColoTensor\nfrom numpy import allclose\n\n\ndef test_tensor_indexing():\n torch_t = torch.randn(2, 3)\n colo_t = ColoTensor.init_from_torch_tensor(torch_t)\n assert allclose(torch_t[:, 1], colo_t[:, 1].torch_tensor())\n\n\ndef test_lazy_init_tensor():\n lazy_t = ColoTensor(2, 3, dtype=torch.float32, requires_grad=True)\n assert lazy_t._torch_tensor.numel() == 0\n assert lazy_t.numel() == 6 == lazy_t.torch_tensor().numel()\n\n\ndef test_wrapped_tensor_func():\n t_ref = torch.randn(4, 5)\n t = ColoTensor.init_from_torch_tensor(t_ref.clone())\n\n # non-func attr\n assert t.is_cuda == t_ref.is_cuda\n\n # TODO I don't find out a tensor function which returns None.\n\n # return 1 torch.Tensor\n t_abs = t.abs()\n assert isinstance(t_abs, ColoTensor) and torch.equal(t_abs.torch_tensor(), t_ref.abs())\n\n # return 1 non-torch.Tensor\n assert t.dim() == t_ref.dim()\n\n # return >1 torch.Tensor\n t_split1, t_split2 = t.split(2)\n assert isinstance(t_split1, ColoTensor) and isinstance(t_split2, ColoTensor)\n\n\ndef test_operand():\n t_ref = torch.randn(4, 5)\n t = ColoTensor.init_from_torch_tensor(t_ref.clone())\n\n t_ref_res = t_ref + t_ref\n t_res = t + t\n assert torch.allclose(t_ref_res, t_res)\n",
"import torch\n\n\n###### BIAS GELU FUSION/ NO AUTOGRAD ################\n# 1/sqrt(2*pi)-> 0.3989423\n# 1/sqrt(2) -> 0.70710678\n# sqrt(2/pi) -> 0.79788456\n# this function is tanh approximation of gelu\n# actual gelu is:\n# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))\n\[email protected]\ndef bias_gelu(bias, y):\n x = bias + y\n return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))\n\n# gradient of tanh approximation of gelu\n# gradient of actual gelu is:\n# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)\[email protected]\ndef bias_gelu_back(g, bias, y):\n x = bias + y\n tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243\n ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)\n return ff*g\n\nclass GeLUFunction(torch.autograd.Function):\n @staticmethod\n # bias is an optional argument\n def forward(ctx, input, bias):\n ctx.save_for_backward(input, bias)\n return bias_gelu(bias, input)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, bias = ctx.saved_tensors\n tmp = bias_gelu_back(grad_output, bias, input)\n return tmp, tmp\n\nbias_gelu_impl = GeLUFunction.apply",
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom functools import partial\n\nimport pytest\nimport torch\nimport torch.multiprocessing as mp\nfrom colossalai.core import global_context as gpc\nfrom colossalai.initialize import launch\nfrom colossalai.logging import disable_existing_loggers\nfrom colossalai.utils import free_port\nfrom colossalai.testing import rerun_if_address_is_in_use\nfrom checks_3d.check_layer_3d import (check_classifier_given_embed_weight, check_classifier_no_given_weight,\n check_embed, check_layernorm, check_linear, check_loss, check_patch_embed,\n check_vocab_parallel_classifier_given_embed_weight,\n check_vocab_parallel_classifier_no_given_weight, check_vocab_parallel_embed,\n check_vocab_parallel_loss)\n\nCONFIG = dict(\n parallel=dict(\n pipeline=1,\n tensor=dict(mode='3d', size=8),\n ),\n seed=42,\n)\n\n\ndef check_layer():\n check_linear()\n check_layernorm()\n check_classifier_no_given_weight()\n check_vocab_parallel_classifier_no_given_weight()\n check_classifier_given_embed_weight()\n check_vocab_parallel_classifier_given_embed_weight()\n check_embed()\n check_patch_embed()\n check_vocab_parallel_embed()\n check_loss()\n check_vocab_parallel_loss()\n\n\ndef check_layer_and_operation(rank, world_size, port):\n disable_existing_loggers()\n launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')\n torch.backends.cuda.matmul.allow_tf32 = False\n torch.backends.cudnn.allow_tf32 = False\n torch.backends.cudnn.deterministic = True\n check_layer()\n gpc.destroy()\n torch.cuda.empty_cache()\n\n\[email protected]\n@rerun_if_address_is_in_use()\ndef test_3d():\n world_size = 8\n run_func = partial(check_layer_and_operation, world_size=world_size, port=free_port())\n mp.spawn(run_func, nprocs=world_size)\n\n\nif __name__ == '__main__':\n test_3d()\n",
"import math\nimport time\nfrom grpc import Call\nimport torch\n\nfrom colossalai.utils import MultiTimer\nfrom colossalai.core import global_context as gpc\nfrom colossalai.context import ParallelMode, Config\nfrom typing import List, Dict, Tuple, Callable\n\n\ndef get_time_stamp() -> int:\n \"\"\"\n Return the time stamp for profiling.\n\n Returns:\n time_stamp (int): the time given by time.time()\n \"\"\"\n\n torch.cuda.synchronize()\n time_stamp = time.time()\n return time_stamp\n\n\ndef get_memory_states() -> Tuple[float]:\n \"\"\"\n Return the memory statistics.\n\n Returns:\n max_allocated (float): the allocated CUDA memory \n max_cached (float): the cached CUDA memory \n \"\"\"\n\n max_allocated = torch.cuda.max_memory_allocated() / (1024**3)\n max_cached = torch.cuda.max_memory_reserved() / (1024**3)\n torch.cuda.reset_peak_memory_stats()\n torch.cuda.empty_cache()\n return max_allocated, max_cached\n\n\ndef find_all_configs(device_cnt: int) -> List[Dict]:\n \"\"\"\n Find all possible configurations for tensor parallelism\n\n Args:\n device_cnt (int): the number of devices\n\n Returns:\n config_list (List[Dict]): a list of configurations\n \"\"\"\n\n def _is_square(num):\n return math.floor(math.sqrt(num))**2 == num\n\n def _is_cube(num):\n return math.floor(num**(1. / 3.))**3 == num\n\n config_list = []\n\n # add non-parallel config\n config = dict(parallel=dict(tensor=dict(size=device_cnt, mode=None)))\n config_list.append(config)\n\n # add 1D config\n config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='1d')))\n config_list.append(config)\n\n # add 1D config only if device_cnt is a square\n if _is_square(device_cnt):\n config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='2d')))\n config_list.append(config)\n\n # check for 2.5D\n # iterate over depth\n for depth in range(1, device_cnt):\n if device_cnt % depth == 0 and _is_square(device_cnt // depth):\n config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='2.5d', depth=depth)))\n config_list.append(config)\n\n # check for 3D if device_cnt is a cube\n if _is_cube(device_cnt):\n config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='3d')))\n config_list.append(config)\n\n config_list = [Config(cfg) for cfg in config_list]\n return config_list\n\n\ndef profile_model(model: torch.nn.Module, warmup_steps: int, profile_steps: int, data_func: Callable,\n timer: MultiTimer) -> Tuple[float]:\n \"\"\"\n Profile the forward and backward of a model\n\n Args:\n model (torch.nn.Module): a PyTorch model\n warmup_steps (int): the number of steps for warmup\n profile_steps (int): the number of steps for profiling\n data_func (Callable): a function to generate random data\n timer (colossalai.utils.Multitimer): a timer instance for time recording\n \n Returns:\n fwd_time (float): the average forward time taken by forward pass in second\n bwd_time (float): the average backward time taken by forward pass in second\n max_allocated (float): the maximum GPU memory allocated in GB\n max_cached (float): the maximum GPU memory cached in GB\n \"\"\"\n\n def _run_step(data):\n timer.start('forward')\n out = model(data)\n timer.stop('forward', keep_in_history=True)\n timer.start('backward')\n out.mean().backward()\n timer.stop('backward', keep_in_history=True)\n\n data_list = [data_func() for _ in range(warmup_steps)]\n for data in data_list:\n _run_step(data)\n timer.reset('forward')\n timer.reset('backward')\n\n for _ in range(profile_steps):\n data = data_func()\n _run_step(data)\n\n max_allocated, max_cached = 
get_memory_states()\n fwd_time = timer.get_timer('forward').get_history_mean()\n bwd_time = timer.get_timer('backward').get_history_mean()\n return fwd_time, bwd_time, max_allocated, max_cached\n\n\ndef get_batch_data(dim: int, batch_size: int, seq_length: int, mode: ParallelMode) -> torch.Tensor:\n \"\"\"\n Return a random data of shape (batch_size, seq_length, dim) for profiling.\n\n Args:\n dim (int): hidden size\n batch_size (int): the number of data samples\n seq_length (int): the number of tokens\n mode (ParallelMode): Colossal-AI ParallelMode enum\n\n Returns:\n data (torch.Tensor): random data\n \"\"\"\n\n if mode in ['2d', '2.5d']:\n batch_size = batch_size // 2\n dim = dim // 2\n elif mode == '3d':\n batch_size = batch_size // 4\n dim = dim // 2\n\n data = torch.rand(batch_size, seq_length, dim).cuda()\n return data\n",
"import math\nimport torch\n\nfrom colossalai.registry import OPTIMIZERS\nfrom colossalai.nn.optimizer import CPU_ADAM_CNT\n\n\[email protected]_module\nclass CPUAdam(torch.optim.Optimizer):\n \"\"\"Implements Adam algorithm.\n\n Supports parameters updating on both GPU and CPU, depanding on the device of paramters.\n But the parameters and gradients should on the same device: \n * Parameters on CPU and gradients on CPU is allowed.\n * Parameters on GPU and gradients on GPU is allowed.\n * Parameters on GPU and gradients on CPU is **not** allowed.\n\n Requires ColossalAI to be installed via ``pip install .``.\n\n This version of CPU Adam accelates parameters updating on CPU with SIMD.\n Support of AVX2 or AVX512 is required.\n\n The GPU part is implemented in an naive way.\n\n CPU Adam also supports the hybrid precision calculation, eg. fp32 parameters and fp16 gradients.\n\n :class:`colossalai.nn.optimizer.CPUAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,\n or ``torch.optim.Adam`` with ``adamw_mode=False``\n\n Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n model_params (iterable): iterable of parameters of dicts defining\n parameter groups.\n lr (float, optional): learning rate. (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square. (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability. (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False) NOT SUPPORTED yet in CPUAdam!\n adamw_mode (boolean, optional): Apply L2 regularization or weight decay\n True for decoupled weight decay(also known as AdamW) (default: True)\n simd_log (boolean, optional): whether to show if you are using SIMD to \n accelerate. (default: False)\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. 
_On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n # Number of fp32 shards for per parameter\n # Param weight, grad, momentum and variance\n num_fp32_shards_per_param = 4\n\n def __init__(self,\n model_params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n adamw_mode=True,\n simd_log=False):\n\n default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)\n super(CPUAdam, self).__init__(model_params, default_args)\n self.opt_id = CPU_ADAM_CNT()\n self.adamw_mode = adamw_mode\n try:\n import cpu_adam\n except ImportError:\n raise ImportError('Please install colossalai from source code to use CPUAdam')\n self.cpu_adam_op = cpu_adam\n self.cpu_adam_op.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode, simd_log)\n\n def __del__(self):\n if self.cpu_adam_op:\n self.cpu_adam_op.destroy_adam(self.opt_id)\n\n def torch_adam_update(self,\n data,\n grad,\n exp_avg,\n exp_avg_sq,\n lr,\n beta1,\n beta2,\n eps,\n weight_decay,\n bias_correction1,\n bias_correction2,\n use_adamw=False):\n # FIXME(ver217): remove the below line when replace torch adam with fused adam\n grad = grad.float()\n\n if weight_decay != 0:\n if use_adamw:\n data.mul_(1 - lr * weight_decay)\n else:\n grad = grad.add(data, alpha=weight_decay)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # TODO(jiaruifang) dose not support amsgrad\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n\n step_size = lr / bias_correction1\n\n data.addcdiv_(exp_avg, denom, value=-step_size)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for _, group in enumerate(self.param_groups):\n for _, p in enumerate(group['params']):\n\n if p.grad is None:\n continue\n\n state = self.state[p]\n\n target_device = p.device\n if len(state) == 0:\n state['step'] = 0\n\n # gradient momentums\n state['exp_avg'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)\n # gradient variances\n state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)\n\n state['step'] += 1\n beta1, beta2 = group['betas']\n\n if target_device.type == 'cpu':\n assert p.data.numel() == p.grad.data.numel(), \"parameter and gradient should have the same size\"\n assert state['exp_avg'].device.type == 'cpu', \"exp_avg should stay on cpu\"\n assert state['exp_avg_sq'].device.type == 'cpu', \"exp_avg should stay on cpu\"\n self.cpu_adam_op.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],\n group['weight_decay'], group['bias_correction'], p.data, p.grad.data,\n state['exp_avg'], state['exp_avg_sq'], -1)\n elif target_device.type == 'cuda':\n assert state['exp_avg'].device.type == 'cuda', \"exp_avg should stay on cuda\"\n assert state['exp_avg_sq'].device.type == 'cuda', \"exp_avg should stay on cuda\"\n\n bias_correction1 = 1 - beta1**state['step']\n bias_correction2 = 1 - beta2**state['step']\n\n # adam on cuda\n self.torch_adam_update(p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'], group['lr'],\n beta1, beta2, group['eps'], group['weight_decay'], bias_correction1,\n bias_correction2, self.adamw_mode)\n else:\n raise RuntimeError\n return loss\n",
"from functools import partial\r\n\r\nimport colossalai\r\nimport pytest\r\nimport torch\r\nimport torch.multiprocessing as mp\r\nfrom colossalai.amp import convert_to_apex_amp\r\nfrom colossalai.nn.optimizer import CPUAdam\r\nfrom colossalai.testing import parameterize, rerun_if_address_is_in_use\r\nfrom colossalai.utils import free_port\r\nfrom colossalai.zero.init_ctx import ZeroInitContext\r\nfrom colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)\r\nfrom colossalai.zero.sharded_model import ShardedModelV2\r\nfrom colossalai.zero.sharded_model.utils import col_model_deepcopy\r\nfrom colossalai.zero.sharded_optim import ShardedOptimizerV2\r\nfrom colossalai.zero.sharded_optim._utils import has_inf_or_nan\r\nfrom colossalai.utils import get_current_device\r\nfrom tests.components_to_test.registry import non_distributed_component_funcs\r\nfrom colossalai.engine.gradient_handler import MoeGradientHandler\r\nfrom colossalai.context import MOE_CONTEXT\r\nfrom colossalai.testing import assert_equal_in_group\r\n\r\nfrom tests.test_zero.common import CONFIG, check_sharded_model_params\r\nfrom tests.test_moe.test_moe_zero_init import MoeModel\r\n\r\n\r\ndef _run_step(model, optimizer, data, label, criterion, grad_handler):\r\n model.train()\r\n optimizer.zero_grad()\r\n\r\n if criterion:\r\n y = model(data)\r\n loss = criterion(y, label)\r\n else:\r\n loss = model(data, label)\r\n\r\n loss = loss.float()\r\n if isinstance(model, ShardedModelV2):\r\n optimizer.backward(loss)\r\n else:\r\n loss.backward()\r\n\r\n if grad_handler is not None:\r\n grad_handler.handle_gradient()\r\n\r\n optimizer.step()\r\n\r\n\r\n@parameterize(\"cpu_offload\", [True])\r\n@parameterize(\"use_cpuadam\", [True]) # We do not use Hybrid Adam right now, since it has a little bug\r\n@parameterize(\"reuse_fp16_shard\", [True, False])\r\n@parameterize(\"shard_strategy_class\", [TensorShardStrategy, BucketTensorShardStrategy])\r\ndef _run_test_sharded_optim_v2(cpu_offload,\r\n shard_strategy_class,\r\n use_cpuadam,\r\n reuse_fp16_shard,\r\n gpu_margin_mem_ratio=0.0):\r\n shard_strategy = shard_strategy_class()\r\n if use_cpuadam and cpu_offload is False:\r\n return\r\n MOE_CONTEXT.reset_loss()\r\n get_components_func = non_distributed_component_funcs.get_callable('no_leaf_module')\r\n _, train_dataloader, _, optimizer_class, criterion = get_components_func()\r\n\r\n with ZeroInitContext(target_device=torch.device('cpu') if cpu_offload else get_current_device(),\r\n shard_strategy=shard_strategy,\r\n shard_param=True):\r\n zero_model = MoeModel(checkpoint=True)\r\n\r\n zero_model = ShardedModelV2(zero_model,\r\n shard_strategy,\r\n tensor_placement_policy='cpu' if cpu_offload else 'cuda',\r\n reuse_fp16_shard=reuse_fp16_shard)\r\n\r\n # check whether parameters are identical in ddp\r\n for name, p in zero_model.named_parameters():\r\n if not p.colo_attr.param_is_sharded and p.colo_attr.is_replicated:\r\n assert_equal_in_group(p.colo_attr.data_payload.to(get_current_device()))\r\n\r\n model = MoeModel(checkpoint=True).half()\r\n col_model_deepcopy(zero_model, model)\r\n model = model.cuda().float()\r\n\r\n if use_cpuadam:\r\n optimizer_class = CPUAdam\r\n optim = optimizer_class(model.parameters(), lr=1e-3)\r\n sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)\r\n sharded_optim = ShardedOptimizerV2(zero_model,\r\n sharded_optim,\r\n initial_scale=2**5,\r\n gpu_margin_mem_ratio=gpu_margin_mem_ratio)\r\n\r\n amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False)\r\n 
apex_model, apex_optimizer = convert_to_apex_amp(model, optim, amp_config)\r\n apex_grad_handler = MoeGradientHandler(model)\r\n\r\n # Since MOE is not compatible with apex_amp now, we need to convert gate weight to fp32\r\n for (n, p), zp in zip(apex_model.named_parameters(), zero_model.parameters()):\r\n if 'gate' in n:\r\n p.data = p.float()\r\n p.data.copy_(zp.colo_attr.data_payload)\r\n\r\n for i, (data, label) in enumerate(train_dataloader):\r\n if i > 5:\r\n break\r\n data, label = data.cuda(), label.cuda()\r\n _run_step(apex_model, apex_optimizer, data, label, criterion, apex_grad_handler)\r\n _run_step(zero_model, sharded_optim, data, label, criterion, None)\r\n check_sharded_model_params(model, zero_model, loose=True, reuse_fp16_shard=use_cpuadam)\r\n for param in model.parameters():\r\n assert not has_inf_or_nan(param)\r\n\r\n\r\ndef _run_dist(rank, world_size, port):\r\n colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')\r\n MOE_CONTEXT.setup(seed=42)\r\n _run_test_sharded_optim_v2()\r\n\r\n\r\n# use_cpuadam = True can be used with cpu_offload = False\r\[email protected]\r\[email protected](\"world_size\", [2])\r\n@rerun_if_address_is_in_use()\r\ndef test_moe_zero_optim(world_size):\r\n run_func = partial(_run_dist, world_size=world_size, port=free_port())\r\n mp.spawn(run_func, nprocs=world_size)\r\n\r\n\r\nif __name__ == '__main__':\r\n test_moe_zero_optim(world_size=4)\r\n"
] | [
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.randint",
"torch.rand"
],
[
"torch.randn",
"torch.allclose"
],
[
"torch.tanh"
],
[
"torch.cuda.empty_cache",
"torch.multiprocessing.spawn"
],
[
"torch.cuda.synchronize",
"torch.cuda.empty_cache",
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_peak_memory_stats",
"torch.rand",
"torch.cuda.max_memory_reserved"
],
[
"torch.zeros_like",
"torch.no_grad",
"torch.enable_grad"
],
[
"torch.device",
"torch.multiprocessing.spawn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
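The `bias_gelu` kernel in the ColossalAI row is the tanh approximation of GELU applied to `bias + y`, wrapped in a custom autograd `Function` so a fused backward can be reused. The sketch below restates the forward formula and checks it against PyTorch's built-in tanh-approximate GELU; the `approximate="tanh"` argument assumes a reasonably recent torch (1.12 or later).

```python
import torch

def bias_gelu_ref(bias, y):
    # tanh approximation of GELU on (bias + y), as in the fused kernel above:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * x * (1 + 0.044715 * x^2)))
    x = bias + y
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))

bias = torch.randn(8)
y = torch.randn(4, 8)       # bias broadcasts over the batch dimension

out = bias_gelu_ref(bias, y)
ref = torch.nn.functional.gelu(bias + y, approximate="tanh")
print(torch.allclose(out, ref, atol=1e-6))  # expected: True
```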
wlm2019/Neural-Arithmetic-Units | [
"f9de9d004bb2dc2ee28577cd1760d0a00c185836",
"f9de9d004bb2dc2ee28577cd1760d0a00c185836"
] | [
"stable_nalu/layer/hard_softmax_nac.py",
"stable_nalu/functional/nac_weight_test.py"
] | [
"\nimport math\nimport torch\n\nfrom ..abstract import ExtendedTorchModule\nfrom ..functional import sparsity_error\nfrom ._abstract_recurrent_cell import AbstractRecurrentCell\n\nclass HardSoftmaxNACLayer(ExtendedTorchModule):\n \"\"\"Implements the NAC (Neural Accumulator)\n\n Arguments:\n in_features: number of ingoing features\n out_features: number of outgoing features\n \"\"\"\n\n def __init__(self, in_features, out_features, **kwargs):\n super().__init__('nac', **kwargs)\n self.in_features = in_features\n self.out_features = out_features\n\n # Define the target weights. Also, put 0 last such that p1 = p2 = 0\n # corresponds to p3 = 1 => w = 0.\n self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))\n\n # Initialize a tensor, that will be the placeholder for the hard samples\n self.register_buffer('sample', torch.LongTensor(out_features, in_features))\n\n # We will only two parameters per weight, this is to prevent the redundancy\n # there would otherwise exist. This also makes it much more comparable with\n # NAC.\n self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features, 2))\n self.register_buffer('W_hat_k', torch.Tensor(out_features, in_features, 1))\n\n self.register_parameter('bias', None)\n\n def reset_parameters(self):\n # Use a gain of sqrt(0.5). Lets assume that softmax'(0) ~ 1, because this\n # holds for sigmoid. Then:\n # Var[W] = 1 * Var[S_1] - 1 * Var[S_2] + 0 * Var[S_3] = 2 / (fan[in] + fan[out])\n # Var[W] = 2 * Var[S_i] = 2 / (fan[in] + fan[out])\n # Var[S_i] = 1/2 * 2 / (fan[in] + fan[out])\n # sqrt(Var[S_i]) = sqrt(1/2) * sqrt(2 / (fan[in] + fan[out]))\n # This is not exactly true, because S_1, S_2, and S_3 are not enterily uncorrelated.\n torch.nn.init.xavier_uniform_(self.W_hat, gain=math.sqrt(0.5))\n torch.nn.init.constant_(self.W_hat_k, 0)\n\n def forward(self, input, reuse=False):\n # Concat trainable and non-trainable weights\n W_hat_full = torch.cat((self.W_hat, self.W_hat_k), dim=-1) # size = [out, in, 3]\n\n # Compute W_soft\n pi = torch.nn.functional.softmax(W_hat_full, dim=-1)\n W_soft = pi @ self.target_weights\n\n # Compute W_hard\n if not reuse:\n torch.multinomial(pi.view(-1, 3), 1, True, out=self.sample.view(-1))\n W_hard = self.target_weights[self.sample]\n\n # Use W_hard in the forward pass, but use W_soft for the gradients.\n # This implementation trick comes from torch.nn.functional.gumble_softmax(hard=True)\n W = W_hard - W_soft.detach() + W_soft\n\n # Compute the linear multiplication as usual\n self.writer.add_histogram('W', W)\n self.writer.add_tensor('W', W)\n self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)\n\n return torch.nn.functional.linear(input, W, self.bias)\n\n def extra_repr(self):\n return 'in_features={}, out_features={}'.format(\n self.in_features, self.out_features\n )\n\nclass HardSoftmaxNACCell(AbstractRecurrentCell):\n \"\"\"Implements the Gumbel NAC (Gumbel Neural Accumulator) as a recurrent cell\n\n Arguments:\n input_size: number of ingoing features\n hidden_size: number of outgoing features\n \"\"\"\n def __init__(self, input_size, hidden_size, **kwargs):\n super().__init__(HardSoftmaxNACLayer, input_size, hidden_size, **kwargs)\n",
"\nimport numpy as np\nimport torch\n\nfrom stable_nalu.functional import nac_weight\n\ndef test_nac_weight_calculates_backward_correctly():\n w_hat = torch.randn(100, 2, requires_grad=True, dtype=torch.float64)\n m_hat = torch.randn(100, 2, requires_grad=True, dtype=torch.float64)\n\n torch.autograd.gradcheck(\n lambda w_hat, m_hat: torch.sum((2 * nac_weight(w_hat * 2, m_hat * 2) - 0)**2),\n [w_hat, m_hat]\n )\n"
] | [
[
"torch.nn.functional.softmax",
"torch.LongTensor",
"torch.Tensor",
"torch.cat",
"torch.nn.init.constant_",
"torch.tensor",
"torch.nn.functional.linear"
],
[
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
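The `HardSoftmaxNACLayer` in the row above relies on the straight-through trick `W_hard - W_soft.detach() + W_soft`: the sampled hard weights from {1, -1, 0} are used in the forward pass, while gradients flow only through the softmax-weighted soft weights. A minimal sketch of just that trick, outside the layer, is shown here; the logits shape is arbitrary.

```python
import torch

logits = torch.randn(5, 3, requires_grad=True)
target_values = torch.tensor([1.0, -1.0, 0.0])    # same weight targets as the layer

pi = torch.softmax(logits, dim=-1)
w_soft = pi @ target_values                        # differentiable "expected" weight
sample = torch.multinomial(pi, 1).squeeze(-1)      # hard categorical sample per row
w_hard = target_values[sample]                     # non-differentiable hard weight

# Forward value equals w_hard; the backward graph only sees w_soft.
w = w_hard - w_soft.detach() + w_soft
w.sum().backward()
print(w, logits.grad is not None)
```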
ZhaoJ9014/Multi-Human-Parsing-MHP- | [
"a24eae67e9b4e730c75bcd8aec3e2ed06cb4b046"
] | [
"Nested_Adversarial_Networks/NAN_rework/modeleag.py"
] | [
"# Rework of model.py\n# https://github.com/ddddwee1/sul\n# This wrap-up is targeted for better touching low-level implementations \nimport layers2 as L \nimport tensorflow as tf \nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\ntf.enable_eager_execution(config=config)\nimport numpy as np \nimport os \nimport random\nimport time\n\nPARAM_RELU = 0\nPARAM_LRELU = 1\nPARAM_ELU = 2\nPARAM_TANH = 3\nPARAM_MFM = 4\nPARAM_MFM_FC = 5\nPARAM_SIGMOID = 6\n\n######## util functions ###########\ndef accuracy(pred,y,name='acc', one_hot=True):\n\twith tf.variable_scope(name):\n\t\tif one_hot:\n\t\t\tcorrect = tf.equal(tf.cast(tf.argmax(pred,-1),tf.int64),tf.cast(tf.argmax(y,-1),tf.int64))\n\t\telse:\n\t\t\tcorrect = tf.equal(tf.cast(tf.argmax(pred,-1),tf.int64),tf.cast(y,tf.int64))\n\t\tacc = tf.reduce_mean(tf.cast(correct,tf.float32))\n\treturn acc\n\n##########################\n# ETA class. I want to see the ETA. It's too boring to wait here.\nclass ETA():\n\tdef __init__(self,max_value):\n\t\tself.start_time = time.time()\n\t\tself.max_value = max_value\n\t\tself.current = 0\n\n\tdef start(self):\n\t\tself.start_time = time.time()\n\t\tself.current = 0\n\n\tdef sec2hms(self,sec):\n\t\thm = sec//60\n\t\ts = sec%60\n\t\th = hm//60\n\t\tm = hm%60\n\t\treturn h,m,s\n\n\tdef get_ETA(self,current,is_string=True):\n\t\tself.current = current\n\t\ttime_div = time.time() - self.start_time\n\t\ttime_remain = time_div * float(self.max_value - self.current) / float(self.current + 1)\n\t\th,m,s = self.sec2hms(int(time_remain))\n\t\tif is_string:\n\t\t\treturn '%d:%d:%d'%(h,m,s)\n\t\telse:\n\t\t\treturn h,m,s\n\n########### universal model class ##########\nclass Model(tf.contrib.checkpoint.Checkpointable):\n\tdef __init__(self,*args,**kwargs):\n\t\tself.initialized = False\n\t\tself.variables = []\n\t\tself.initialize(*args,**kwargs)\n\n\tdef initialize(self,*args,**kwargs):\n\t\tpass\n\n\tdef _gather_variables(self):\n\t\tself.variables = []\n\t\tatrs = dir(self)\n\t\tfor i in atrs:\n\t\t\tif i[0] == '_':\n\t\t\t\tcontinue\n\t\t\tobj = getattr(self, i)\n\t\t\tself.variables += self._gather_variables_recursive(obj)\n\n\tdef _gather_variables_recursive(self, obj):\n\t\tresult = []\n\t\tif isinstance(obj, list) or isinstance(obj, tuple):\n\t\t\tfor sub_obj in obj:\n\t\t\t\tresult += self._gather_variables_recursive(sub_obj)\n\t\telif isinstance(obj, Model) or isinstance(obj, L.Layer):\n\t\t\tresult += obj.variables\n\t\treturn result\n\n\tdef get_variables(self, layers=None):\n\t\tif layers is None:\n\t\t\treturn self.variables\n\t\telse:\n\t\t\tres = []\n\t\t\tfor l in layers:\n\t\t\t\tres += l.variables\n\t\t\treturn res \n\n\tdef set_bn_training(self, is_training):\n\t\tatrs = dir(self)\n\t\t# print(atrs)\n\t\tfor i in atrs:\n\t\t\tif i[0] == '_':\n\t\t\t\tcontinue\n\t\t\tobj = getattr(self, i)\n\t\t\tself._set_bn_training_recursive(obj, is_training)\n\n\tdef _set_bn_training_recursive(self, obj, is_training):\n\t\tif isinstance(obj, list):\n\t\t\tfor sub_obj in obj:\n\t\t\t\tself._set_bn_training_recursive(sub_obj, is_training)\n\t\tif isinstance(obj, Model) and obj!=self:\n\t\t\tobj.set_bn_training(is_training)\n\t\tif isinstance(obj, L.batch_norm):\n\t\t\tobj.is_training = is_training\n\n\tdef set_bn_epsilon(self, epsilon):\n\t\tatrs = dir(self)\n\t\t# print(atrs)\n\t\tfor i in atrs:\n\t\t\tif i[0] == '_':\n\t\t\t\tcontinue\n\t\t\tobj = getattr(self, i)\n\t\t\tself._set_bn_epsilon_recursive(obj, epsilon)\n\n\tdef _set_bn_epsilon_recursive(self, obj, epsilon):\n\t\tif isinstance(obj, 
list):\n\t\t\tfor sub_obj in obj:\n\t\t\t\tself._set_bn_training_recursive(sub_obj, epsilon)\n\t\tif isinstance(obj, Model) and obj!=self:\n\t\t\tobj.set_bn_training(epsilon)\n\t\tif isinstance(obj, L.batch_norm):\n\t\t\tobj.epsilon = epsilon\n\n\tdef __call__(self, x, *args, **kwargs):\n\t\tx = tf.convert_to_tensor(x, preferred_dtype=tf.float32)\n\t\tres = self.forward(x, *args, **kwargs)\n\t\tif not self.initialized:\n\t\t\tself._gather_variables()\n\t\t\tself.initialized = True\n\t\treturn res \n\n########### universal layer classes ##########\nclass ConvLayer(Model):\n\tdef initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):\n\t\tself.conv = L.conv2D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass ConvLayer1D(Model):\n\tdef initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):\n\t\tself.conv = L.conv1D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass ConvLayer3D(Model):\n\tdef initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):\n\t\tself.conv = L.conv3D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass DeconvLayer(Model):\n\tdef initialize(self, size, outchn, activation=-1, stride=1, usebias=True, pad='SAME', batch_norm=False):\n\t\tself.deconv = L.deconv2D(size,outchn,stride=stride,usebias=usebias,pad=pad, name=None)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self,x):\n\t\tx = self.deconv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass DeconvLayer3D(Model):\n\tdef initialize(self, size, outchn, activation=-1, stride=1, usebias=True, pad='SAME', batch_norm=False):\n\t\tself.deconv = 
L.deconv3D(size,outchn,stride=stride,usebias=usebias,pad=pad, name=None)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self,x):\n\t\tx = self.deconv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass Dense(Model):\n\tdef initialize(self, outsize, usebias=True, batch_norm=False, activation=-1):\n\t\tself.fclayer = L.fcLayer(outsize,usebias=usebias)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self,x):\n\t\tx = self.fclayer(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass GraphConvLayer(Model):\n\tdef initialize(self, outsize, adj_mtx=None, adj_fn=None, usebias=True, activation=-1, batch_norm=False):\n\t\tself.GCL = L.graphConvLayer(outsize, adj_mtx=adj_mtx, adj_fn=adj_fn, usebias=usebias)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self, x):\n\t\tx = self.GCL(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\n\nflatten = L.flatten()\nmaxPool = L.maxpoolLayer\navgPool = L.avgpoolLayer\n\n########### higher wrapped block ##########\n\nclass ResBlock(Model):\n\tdef initialize(self, outchn, stride=1, ratio=4, activation=PARAM_RELU):\n\t\tself.outchn = outchn\n\t\t# self.stride = stride\n\t\tself.activ = L.activation(activation)\n\t\tself.bn = L.batch_norm()\n\t\tself.l1 = ConvLayer(1, outchn//ratio, activation=PARAM_RELU, batch_norm=True)\n\t\tself.l2 = ConvLayer(3, outchn//ratio, activation=PARAM_RELU, batch_norm=True, stride=stride)\n\t\tself.l3 = ConvLayer(1, outchn)\n\t\tself.shortcut_conv = ConvLayer(1, outchn, activation=PARAM_RELU, stride=stride)\n\t\tself.shortcut_pool = L.maxpoolLayer(stride)\n\n\tdef forward(self, x):\n\t\tinshape = x.get_shape().as_list()[-1]\n\t\tif inshape==self.outchn:\n\t\t\tshort = self.shortcut_pool(x)\n\t\telse:\n\t\t\tshort = self.shortcut_conv(x)\n\n\t\tbranch = self.bn(x)\n\t\tbranch = self.activ(branch)\n\t\tbranch = self.l1(branch)\n\t\tbranch = self.l2(branch)\n\t\tbranch = self.l3(branch)\n\n\t\treturn branch + short\n\nclass Sequential(Model):\n\tdef initialize(self, modules):\n\t\tself.modules = modules\n\n\tdef forward(self, x):\n\t\tfor m in self.modules:\n\t\t\tx = m(x)\n\t\treturn x\n\n########### saver ##########\nclass Saver():\n\tdef __init__(self, model, optim=None):\n\t\tself.mod = model\n\n\t\tself.obj = tf.contrib.checkpoint.Checkpointable()\n\t\tself.obj.m = self.mod\n\t\tself.optim = optim \n\t\tif optim is None:\n\t\t\tself.ckpt = tf.train.Checkpoint(model=self.obj, optimizer_step=tf.train.get_or_create_global_step())\n\t\telse:\n\t\t\tself.ckpt = tf.train.Checkpoint(optimizer=optim, model=self.obj, optimizer_step=tf.train.get_or_create_global_step())\n\t\n\tdef save(self, path):\n\t\tprint('Saving model to path:',path)\n\t\thead, tail = os.path.split(path)\n\t\tif not os.path.exists(head):\n\t\t\tos.makedirs(head)\n\t\tself.ckpt.save(path)\n\t\tprint('Model saved to path:',path)\n\n\tdef restore(self, 
path, ptype='folder'):\n\t\tprint('Load from:', path)\n\t\ttry:\n\t\t\tif ptype=='folder':\n\t\t\t\tlast_ckpt = tf.train.latest_checkpoint(path)\n\t\t\t\tprint('Checkpoint:', last_ckpt)\n\t\t\t\tif last_ckpt is None:\n\t\t\t\t\tprint('No model found in checkpoint.')\n\t\t\t\t\tprint('Model will auto-initialize after first iteration.')\n\t\t\t\tself.ckpt.restore(last_ckpt)\n\t\t\telse:\n\t\t\t\tself.ckpt.restore(path)\n\t\t\tprint('Finish loading.')\n\t\texcept Exception as e:\n\t\t\tprint('Model restore failed, Exception:',e)\n\t\t\tprint('Model will auto-initialize after first iteration.')\n\n######### Gradient accumulator #########\nclass GradAccumulator():\n\tdef __init__(self):\n\t\tself.steps = 0\n\t\tself.grads = []\n\n\tdef accumulate(self, grads):\n\t\tif len(grads) == 0:\n\t\t\tself.grads = grads\n\t\telse:\n\t\t\tfor old_g, new_g in zip(self.grads, grads):\n\t\t\t\told_g.assign_add(new_g)\n\t\tself.steps += 1\n\n\tdef get_gradient(self):\n\t\tres = [i/self.steps for i in self.grads]\n\t\tself.grads = []\n\t\tself.steps = 0\n\t\treturn res\n\n\tdef get_step(self):\n\t\treturn self.steps\n\n######### Data Reader Template (serial) ##########\nclass DataReaderSerial():\n\tdef __init__(self, one_hot=None):\n\t\tself.data_pos = 0\n\t\tself.val_pos = 0\n\t\tself.data = []\n\t\tself.val = []\n\t\tself.one_hot = False\n\t\tif one_hot is not None:\n\t\t\tself.one_hot = True\n\t\t\tself.eye = np.eye(one_hot)\n\t\tself.load_data()\n\t\t\n\tdef get_next_batch(self,BSIZE):\n\t\tif self.data_pos + BSIZE > len(self.data):\n\t\t\trandom.shuffle(self.data)\n\t\t\tself.data_pos = 0\n\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = self.eye[np.array(y)]\n\t\tself.data_pos += BSIZE\n\t\treturn x,y\n\n\tdef get_val_next_batch(self, BSIZE):\n\t\tif self.val_pos + BSIZE >= len(self.val):\n\t\t\tbatch = self.val[self.val_pos:]\n\t\t\trandom.shuffle(self.val)\n\t\t\tself.val_pos = 0\n\t\t\tis_end = True\n\t\telse:\n\t\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\t\tis_end = False\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = self.eye[np.array(y)]\n\t\tself.val_pos += BSIZE\n\t\treturn x,y, is_end\n\n\tdef get_train_iter(self, BSIZE):\n\t\treturn len(self.data)//BSIZE\n\n\tdef get_val_iter(self, BSIZE):\n\t\treturn len(self.val)//BSIZE + 1\n\nclass ListReader():\n\tdef __init__(self, one_hot=None):\n\t\tself.data_pos = 0\n\t\tself.val_pos = 0\n\t\tself.data = []\n\t\tself.val = []\n\t\tself.one_hot = False\n\t\tif one_hot is not None:\n\t\t\tself.one_hot = True\n\t\t\tself.eye = np.eye(one_hot)\n\t\tself.load_data()\n\t\t\n\tdef get_next_batch(self,BSIZE):\n\t\tif self.data_pos + BSIZE > len(self.data):\n\t\t\trandom.shuffle(self.data)\n\t\t\tself.data_pos = 0\n\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = self.eye[np.array(y)]\n\t\tself.data_pos += BSIZE\n\n\t\tx = [self.process_img(i) for i in x]\n\t\treturn x,y\n\n\tdef get_val_next_batch(self, BSIZE):\n\t\tif self.val_pos + BSIZE >= len(self.val):\n\t\t\tbatch = self.val[self.val_pos:]\n\t\t\trandom.shuffle(self.val)\n\t\t\tself.val_pos = 0\n\t\t\tis_end = True\n\t\telse:\n\t\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\t\tis_end = False\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = 
self.eye[np.array(y)]\n\t\tself.val_pos += BSIZE\n\t\tx = [self.process_img(i) for i in x]\n\t\treturn x,y, is_end\n\n\tdef get_train_iter(self, BSIZE):\n\t\treturn len(self.data)//BSIZE\n\n\tdef get_val_iter(self, BSIZE):\n\t\treturn len(self.val)//BSIZE + 1\n\n######### Data Reader Template (parallel) ##########\n# multi-process to read data\nclass DataReader():\n\tdef __init__(self, data, fn, batch_size, shuffle=False, random_sample=False, processes=2, post_fn=None):\n\t\tfrom multiprocessing import Pool\n\t\tself.pool = Pool(processes)\n\t\tprint('Starting parallel data loader...')\n\t\tself.process_fn = fn\n\t\tself.data = data\n\t\tself.batch_size = batch_size\n\t\tself.position = batch_size\n\t\tself.post_fn = post_fn\n\t\tself.random_sample = random_sample\n\t\tself.shuffle = shuffle\n\t\tif shuffle:\n\t\t\trandom.shuffle(self.data)\n\t\tself._start_p(self.data[:batch_size])\n\n\tdef _start_p(self, data):\n\t\tself.ps = []\n\t\tfor i in data:\n\t\t\tself.ps.append(self.pool.apply_async(self.process_fn, [i]))\n\n\tdef get_next_batch(self):\n\t\t# print('call')\n\t\t# fetch data\n\t\tres = [i.get() for i in self.ps]\n\n\t\t# start new pre-fetch\n\t\tif self.random_sample:\n\t\t\tbatch = random.sample(self.data, self.batch_size)\n\t\telse:\n\t\t\tif self.position + self.batch_size > len(self.data):\n\t\t\t\tself.position = 0\n\t\t\t\tif self.shuffle:\n\t\t\t\t\trandom.shuffle(self.data)\t\n\t\t\tbatch = self.data[self.position:self.position+self.batch_size]\n\t\t\tself.position += self.batch_size\n\t\t\n\t\tself._start_p(batch)\n\n\t\t# post_process the data\n\t\tif self.post_fn is not None:\n\t\t\tres = self.post_fn(res)\n\t\treturn res \n\n\n######### short-cut functions #########\n\ngradient_reverse = L.gradient_reverse\n\ndef pad(x, pad):\n\tif isinstance(pad, list):\n\t\tx = tf.pad(x, [[0,0],[pad[0],pad[1]], [pad[2],pad[3]], [0,0]])\n\telse:\n\t\tx = tf.pad(x, [[0,0],[pad,pad],[pad,pad],[0,0]])\n\treturn x \n\ndef pad3D(x, pad):\n\tif isinstance(pad, list):\n\t\tx = tf.pad(x, [[0,0],[pad[0],pad[1]], [pad[2],pad[3]], [pad[4], pad[5]], [0,0]])\n\telse:\n\t\tx = tf.pad(x, [[0,0],[pad,pad],[pad,pad],[pad,pad],[0,0]])\n\treturn x \n\ndef image_transform(x, H, out_shape=None, interpolation='NEAREST'):\n\t# Will produce error if not specify 'output_shape' in eager mode\n\tshape = x.get_shape().as_list()\n\tif out_shape is None:\n\t\tif len(shape)==4:\n\t\t\tout_shape = shape[1:3]\n\t\telse:\n\t\t\tout_shape = shape[:2]\n\treturn tf.contrib.image.transform(x, H, interpolation=interpolation, output_shape=out_shape)\n \ndef zip_grad(grads, vars):\n\tassert len(grads)==len(vars)\n\tgrads_1 = []\n\tvars_1 = []\n\tfor i in range(len(grads)):\n\t\tif not grads[i] is None:\n\t\t\tgrads_1.append(grads[i])\n\t\t\tvars_1.append(vars[i])\n\tassert len(grads_1)!=0\n\treturn zip(grads_1, vars_1)\n\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.enable_eager_execution",
"tensorflow.train.latest_checkpoint",
"numpy.eye",
"tensorflow.cast",
"tensorflow.train.get_or_create_global_step",
"tensorflow.ConfigProto",
"tensorflow.contrib.checkpoint.Checkpointable",
"tensorflow.contrib.image.transform",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.argmax",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
}
] |
cuis15/xorder | [
"6dde5a18552ffa07f29100038464a38c49495527"
] | [
"data/utils.py"
] | [
"import numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom numba import jit\n\n\ndef array2str(tmp_array, sep = \" \"):\n str_list = [\"{:.3f}\".format(tmp_item) for tmp_item in tmp_array]\n return sep.join(str_list)\n\n\ndef generate_sorted_groups(pred, y, a):\n a_idx = np.where(a == 0)\n b_idx = np.where(a == 1)\n b_score = pred[b_idx].reshape(-1)\n b_index = np.argsort(-b_score)\n b_score_sort = b_score[b_index]\n b_label = y[b_idx]\n b_label_sort = b_label[b_index]\n\n a_score = pred[a_idx].reshape(-1)\n a_index = np.argsort(-a_score)\n a_score_sort = a_score[a_index]\n a_label = y[a_idx]\n a_label_sort = a_label[a_index]\n\n return a_score_sort,b_score_sort,a_label_sort,b_label_sort\n\n\ndef cal_fairness_metric_by_groups(a_score, b_score, a_label, b_label, metric = \"xauc\"):\n if metric == \"xauc\":\n metric_ab, metric_ba, _ = xAUC_fast(a_score, b_score, a_label, b_label)\n else:\n metric_ab, metric_ba = pairwise_fast(a_score, b_score, a_label, b_label)\n return abs(metric_ab - metric_ba),metric_ab,metric_ba\n\n\ndef cal_fairness_metric(pred, y, a, metric = \"xauc\"):\n a_idx, b_idx = np.where(a == 0), np.where(a == 1)\n a_score, b_score = pred[a_idx].reshape(-1), pred[b_idx].reshape(-1)\n a_label, b_label = y[a_idx].reshape(-1), y[b_idx].reshape(-1)\n if metric == \"xauc\":\n metric_ab, metric_ba, _ = xAUC_fast(a_score, b_score, a_label, b_label)\n else:\n metric_ab, metric_ba = pairwise_fast(a_score, b_score, a_label, b_label)\n return abs(metric_ab - metric_ba),metric_ab,metric_ba\n\n\ndef AUC(score, label):\n ###[from big to small]\n sum_ = 0\n num = len(label)\n for i in range(num):\n for j in range(num):\n if label[i]==1 and label[j]==0:\n if score[i]>score[j]: \n sum_ += 1\n\n return sum_/(np.sum(label)*(num-np.sum(label))), sum_\n\n\ndef xAUC(a_score, b_score, a_label, b_label):\n sum_ab = 0\n sum_ba = 0\n numa = len(a_label)\n numb = len(b_label)\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n for i in range(numa):\n for j in range(numb):\n if a_label[i] ==1 and b_label[j] ==0:\n if a_score[i]>b_score[j]:\n sum_ab+=1\n elif a_label[i]==0 and b_label[j]==1:\n if b_score[j]>a_score[i]:\n sum_ba+=1\n return sum_ab/(a_num1*b_num0), sum_ba/(b_num1*a_num0), sum_ab+sum_ba \n\n\ndef xAUC_fast(a_score, b_score, a_label, b_label):\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n\n a_score1,a_score0 = a_score[a_label == 1],a_score[a_label == 0]\n b_score1,b_score0 = b_score[b_label == 1],b_score[b_label == 0]\n\n ab_label = np.concatenate((np.ones(int(a_num1)),np.zeros(int(b_num0))))\n ab_score = np.concatenate((a_score1,b_score0))\n xauc_ab = roc_auc_score(ab_label,ab_score)\n\n ba_label = np.concatenate((np.ones(int(b_num1)),np.zeros(int(a_num0))))\n ba_score = np.concatenate((b_score1,a_score0))\n xauc_ba = roc_auc_score(ba_label,ba_score)\n\n return xauc_ab, xauc_ba, xauc_ab * a_num1 * b_num0 + xauc_ba * b_num1 * a_num0\n\n\ndef post_score(train_score, train_score_post, test_score):\n tep_id = 0\n bins = [[] for i in range(len(train_score)+1)]\n for i in range(len(test_score)):\n s = test_score[i]\n if s>train_score[0]:\n bins[0].append(s)\n elif s<=train_score[-1]:\n bins[-1].append(s)\n else:\n for j in range(tep_id,len(train_score)):\n if train_score[j-1]>=s and train_score[j]<s:\n bins[j].append(s)\n tep_id = j\n break\n changed_b_score = []\n for bin_ in range(len(bins)):\n for item in range(len(bins[bin_])):\n num = 
(len(bins[bin_]))\n if bin_==0:\n changed_b_score.append((item)*train_score_post[bin_]/num+(num-item)/num)\n elif bin_==len(train_score_post):\n changed_b_score.append((num -item)*train_score_post[bin_-1]/num)\n else:\n changed_b_score.append((item)*train_score_post[bin_]/num + (num-item)*train_score_post[bin_-1]/num)\n \n return np.array(changed_b_score)\n\n\n@jit(nopython=True)\ndef maxAUC(a_label, b_label):\n\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n path = np.zeros((M+1, N+1,2,2))\n\n cost = np.zeros((M+1, N+1))\n for i in range(1,M+1):\n if a_label[i]==1:\n cost[i,0] = N-b_1 + cost[i-1, 0]\n else:\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost[0, i] = cost[0,i-1]+ M - a_1\n else:\n cost[0, i] = cost[0,i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n\n for i in range(2, M+1+N+1):\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if i-j+1>N or a_label[j]==0:\n tep_b = 0 \n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n\n if j+1>M or b_label[i-j]==0:\n tep_a = 0\n else:\n tep_a = M - j -np.sum(a_label[j+1:])\n\n if cost[j-1, i-j] + tep_b > cost[j, i-j-1] + tep_a:\n cost[j, i-j] = cost[j-1, i-j] + tep_b\n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost[j, i-j] = cost[j, i-j-1] + tep_a\n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n return cost[M,N], path\n\n\n@jit(nopython=True)\ndef xAUC_post(a_label, b_label, lamb):\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n\n a_1_b_0 = a_1*(N-b_1)\n b_1_a_0 = b_1*(M - a_1)\n\n path = np.zeros((M+1, N+1,2,2))\n cost_unfair = np.zeros((M+1, N+1))\n cost = np.zeros((M+1, N+1))\n for i in range(1,M+1):\n if a_label[i]==1:\n cost_unfair[i, 0] = (N-b_1)/a_1_b_0*lamb + cost_unfair[i-1,0]\n cost[i,0] = N-b_1 + cost[i-1, 0] \n else:\n cost_unfair[i, 0] = cost_unfair[i-1,0]\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost_unfair[0,i] = -(M-a_1)/b_1_a_0*lamb + cost_unfair[0, i-1]\n cost[0, i] = cost[0,i-1] + M - a_1\n else:\n cost[0, i] = cost[0,i-1]\n cost_unfair[0, i] = cost_unfair[0,i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n for i in range(2, M+1+N+1):\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if i-j+1>N or a_label[j]==0:\n tep_b = 0 \n tep_unfair_b = 0\n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n tep_unfair_b = tep_b/a_1_b_0*lamb \n\n if j+1>M or b_label[i-j]==0:\n tep_a = 0\n tep_unfair_a = 0\n else:\n tep_a = M - j -np.sum(a_label[j+1:])\n tep_unfair_a = -tep_a/b_1_a_0*lamb\n\n if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):\n cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]\n cost[j, i-j] = cost[j-1, i-j] + tep_b \n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]\n cost[j, i-j] = cost[j, i-j-1] + tep_a \n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n\n return cost, path, cost_unfair\n\n@jit(nopython=True)\ndef xAUC_post_(a_label, b_label, lamb):\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n\n a_1_b_0 = a_1*(N-b_1)\n b_1_a_0 = b_1*(M - a_1)\n\n path = np.zeros((M+1, N+1,2,2))\n cost_unfair = np.zeros((M+1, N+1))\n cost = np.zeros((M+1, N+1))\n for i in range(1,M+1):\n if 
a_label[i]==1:\n cost_unfair[i, 0] = (N-b_1)/a_1_b_0 * lamb + cost_unfair[i-1,0]\n cost[i,0] = N-b_1 + cost[i-1, 0] \n else:\n cost_unfair[i, 0] = cost_unfair[i-1,0]\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost_unfair[0,i] = -(M - a_1) / b_1_a_0 * lamb + cost_unfair[0, i-1]\n cost[0, i] = cost[0,i-1] + M - a_1\n else:\n cost[0, i] = cost[0,i-1]\n cost_unfair[0, i] = cost_unfair[0,i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n for i in range(2, M+1+N+1):\n # print(i)\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if a_label[j]==0:\n tep_b = 0 \n tep_unfair_b = 0\n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n tep_unfair_b = tep_b/a_1_b_0*lamb \n\n if b_label[i-j]==0:\n tep_a = 0\n tep_unfair_a = 0\n else:\n tep_a = M - j -np.sum(a_label[j+1:])\n tep_unfair_a = -tep_a/b_1_a_0*lamb\n\n if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):\n cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]\n cost[j, i-j] = cost[j-1, i-j] + tep_b \n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]\n cost[j, i-j] = cost[j, i-j-1] + tep_a \n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n\n return cost, path, cost_unfair\n\n\n@jit(nopython=True)\ndef pairwise_post(a_label, b_label, lamb):\n###a, b has been sorted decreasing sort.\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n\n a_1_0 = a_1*((N-b_1)+(M - a_1))\n b_1_0 = b_1*((M - a_1)+(N-b_1))\n\n path = np.zeros((M+1, N+1,2,2))\n cost_unfair = np.zeros((M+1, N+1))\n cost = np.zeros((M+1, N+1))\n\n zeros_mat = np.zeros((M+1, N+1))\n zeros_mat[0,0] = ((N-b_1)+(M - a_1))\n\n for i in range(1,N+1):\n if b_label[i]==1:\n zeros_mat[0,i] = zeros_mat[0,i-1]\n else:\n zeros_mat[0,i] = zeros_mat[0,i-1]-1 \n\n for i in range(1,M+1):\n if a_label[i]==0:\n zeros_mat[i,0] = zeros_mat[i-1,0]-1\n else:\n zeros_mat[i,0] = zeros_mat[i-1,0]\n for j in range(1,N+1):\n if b_label[j]==0:\n zeros_mat[i,j] = zeros_mat[i,j-1]-1\n else:\n zeros_mat[i,j] = zeros_mat[i,j-1]\n for i in range(1,M+1):\n if a_label[i]==1:\n cost_unfair[i, 0] = zeros_mat[i,0]/a_1_0*lamb + cost_unfair[i-1,0]\n cost[i,0] = N-b_1 + cost[i-1, 0] \n else:\n cost_unfair[i, 0] = cost_unfair[i-1,0]\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost_unfair[0,i] = -zeros_mat[0,i]/b_1_0*lamb + cost_unfair[0, i-1]\n cost[0, i] = cost[0,i-1] + M - a_1\n else:\n\n cost[0, i] = cost[0,i-1]\n cost_unfair[0, i] = cost_unfair[0, i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n for i in range(2, M+1+N+1):\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if a_label[j]==0:\n tep_b = 0 \n tep_unfair_b = 0\n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n tep_unfair_b = zeros_mat[j,i-j]/a_1_0*lamb \n\n\n if b_label[i-j]==0:\n tep_a = 0\n tep_unfair_a = 0\n else: \n tep_a = M - j -np.sum(a_label[j+1:])\n tep_unfair_a = -zeros_mat[j,i-j]/b_1_0*lamb\n\n if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):\n\n cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]\n cost[j, i-j] = cost[j-1, i-j] + tep_b \n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, 
i-j-1]\n cost[j, i-j] = cost[j, i-j-1] + tep_a \n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n return cost, path, cost_unfair\n\n\ndef post_b_score(a_score, b_score, a_label, b_label, lamb = 0, _type=\"xauc\"): ## score has to be decreasing.\n M = len(a_score)\n N = len(b_score)\n if _type == \"xauc\":\n cost, path_ , cost_unfair = xAUC_post(a_label, b_label, lamb = lamb)\n elif _type==\"AUC\":\n cost, path_ = maxAUC(a_label, b_label)\n elif _type==\"prf\":\n cost, path_ , cost_unfair = pairwise_post(a_label, b_label, lamb = lamb)\n else:\n print(\"Unknown type\")\n exit()\n\n @jit(nopython=True)\n def pathTrace(path):\n\n trace = []\n tep = path[M,N,:,:]\n trace.append(tep[-1,:])\n trace.append(tep[0,:])\n for i in range(M+N-1):\n\n tep = path[int(tep[0][0]), int(tep[0][1]), :,:]\n trace.append(tep[0,:])\n trace.reverse()\n return trace\n\n path = pathTrace(path_)\n gap_a = [[] for i in range(M+1)]\n\n for i in range(1,len(path)):\n if int(path[i][0])==int(path[i-1][0]):\n gap_a[int(path[i][0])].append(int(path[i][1]))\n\n changed_b_score = []\n for bin_ in range(len(gap_a)):\n for item in range(len(gap_a[bin_])):\n num = (len(gap_a[bin_])+1)\n if bin_==0:\n changed_b_score.append((item+1)*a_score[bin_]/num+(num-item-1)/num)\n elif bin_==len(a_score):\n changed_b_score.append((num -item-1)*a_score[bin_-1]/num)\n else:\n changed_b_score.append((item+1)*a_score[bin_]/num + (num-item-1)*a_score[bin_-1]/num)\n if _type==\"AUC\":\n return np.array(changed_b_score), 0\n else:\n return np.array(changed_b_score), cost_unfair[-1, -1]\n\n\ndef pairwise(a_score, b_score, a_label, b_label):\n sum_ab = 0\n sum_ba = 0\n numa = len(a_label)\n numb = len(b_label)\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n\n i_AUCa = roc_auc_score(a_label, a_score)\n i_AUCb = roc_auc_score(b_label, b_score)\n\n for i in range(numa):\n for j in range(numb):\n if a_label[i] ==1 and b_label[j] ==0:\n if a_score[i]>b_score[j]:\n sum_ab+=1\n elif a_label[i]==0 and b_label[j]==1:\n if b_score[j]>a_score[i]:\n sum_ba+=1\n return (sum_ab+i_AUCa*a_num0*a_num1)/(a_num1*(b_num0+a_num0)), (sum_ba+i_AUCb*b_num0*b_num1)/(b_num1*(a_num0+b_num0))\n\n\ndef pairwise_fast(a_score, b_score, a_label, b_label):\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n\n a_score1,a_score0 = a_score[a_label == 1],a_score[a_label == 0]\n b_score1,b_score0 = b_score[b_label == 1],b_score[b_label == 0]\n\n ab_label = np.concatenate((np.ones(int(a_num1)),np.zeros(int(b_num0+a_num0))))\n ab_score = np.concatenate((a_score1,a_score0,b_score0))\n pair_ab = roc_auc_score(ab_label,ab_score) #[a=1, 0]\n\n ba_label = np.concatenate((np.ones(int(b_num1)),np.zeros(int(a_num0+b_num0))))\n ba_score = np.concatenate((b_score1,b_score0, a_score0))\n pair_ba = roc_auc_score(ba_label,ba_score) #[b=1, 0]\n\n return pair_ab, pair_ba \n\n\ndef zeros_mat(a, b):\n a_label = [0] + a\n b_label = [0] + b\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a)\n b_1 = np.sum(b)\n zeros_mat = np.zeros((M+1, N+1))\n zeros_mat[0,0] = ((N-b_1)+(M - a_1))\n\n for i in range(1,N+1):\n if b_label[i]==1:\n zeros_mat[0,i] = zeros_mat[0,i-1]\n else:\n zeros_mat[0,i] = zeros_mat[0,i-1]-1 \n\n for i in range(1,M+1):\n if a_label[i]==0:\n zeros_mat[i,0] = zeros_mat[i-1,0]-1\n else:\n zeros_mat[i,0] = zeros_mat[i-1,0]\n for j in range(1,N+1):\n if b_label[j]==0:\n zeros_mat[i,j] = zeros_mat[i,j-1]-1\n else:\n zeros_mat[i,j] = 
zeros_mat[i,j-1]\n return zeros_mat\n\n\n\n\n\n\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.concatenate",
"numpy.argsort",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mathuvu/nevergrad | [
"8e116190a8a29c238e655d728fc4816f7b4e0415",
"8e116190a8a29c238e655d728fc4816f7b4e0415",
"8e116190a8a29c238e655d728fc4816f7b4e0415",
"8e116190a8a29c238e655d728fc4816f7b4e0415",
"8e116190a8a29c238e655d728fc4816f7b4e0415"
] | [
"nevergrad/optimization/recastlib.py",
"nevergrad/functions/unitcommitment/test_core.py",
"nevergrad/optimization/multiobjective/test_hypervolume.py",
"nevergrad/parametrization/mutation.py",
"nevergrad/benchmark/frozenexperiments.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport functools\nimport math\nimport warnings\nimport weakref\nimport numpy as np\nfrom scipy import optimize as scipyoptimize\nimport nevergrad.common.typing as tp\nfrom nevergrad.parametrization import parameter as p\nfrom nevergrad.common import errors\nfrom . import base\nfrom .base import IntOrParameter\nfrom . import recaster\n\n\nclass _NonObjectMinimizeBase(recaster.SequentialRecastOptimizer):\n def __init__(\n self,\n parametrization: IntOrParameter,\n budget: tp.Optional[int] = None,\n num_workers: int = 1,\n *,\n method: str = \"Nelder-Mead\",\n random_restart: bool = False,\n ) -> None:\n super().__init__(parametrization, budget=budget, num_workers=num_workers)\n self.multirun = 1 # work in progress\n self._normalizer: tp.Any = None\n self.initial_guess: tp.Optional[tp.ArrayLike] = None\n # configuration\n assert (\n method\n in [\n \"CmaFmin2\",\n \"Nelder-Mead\",\n \"COBYLA\",\n \"SLSQP\",\n \"Powell\",\n ]\n or \"NLOPT\" in method\n ), f\"Unknown method '{method}'\"\n self.method = method\n self.random_restart = random_restart\n # The following line rescales to [0, 1] if fully bounded.\n\n if method == \"CmaFmin2\" or \"NLOPT\" in method:\n normalizer = p.helpers.Normalizer(self.parametrization)\n if normalizer.fully_bounded:\n self._normalizer = normalizer\n\n def _internal_tell_not_asked(self, candidate: p.Parameter, loss: tp.Loss) -> None:\n \"\"\"Called whenever calling \"tell\" on a candidate that was not \"asked\".\n Defaults to the standard tell pipeline.\n \"\"\" # We do not do anything; this just updates the current best.\n\n def get_optimization_function(self) -> tp.Callable[[tp.Callable[[tp.ArrayLike], float]], tp.ArrayLike]:\n return functools.partial(self._optimization_function, weakref.proxy(self))\n\n @staticmethod\n def _optimization_function(\n weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]\n ) -> tp.ArrayLike:\n # pylint:disable=unused-argument\n budget = np.inf if weakself.budget is None else weakself.budget\n best_res = np.inf\n best_x: np.ndarray = weakself.current_bests[\"average\"].x # np.zeros(self.dimension)\n if weakself.initial_guess is not None:\n best_x = np.array(weakself.initial_guess, copy=True) # copy, just to make sure it is not modified\n remaining: float = budget - weakself._num_ask\n while remaining > 0: # try to restart if budget is not elapsed\n options: tp.Dict[str, tp.Any] = {} if weakself.budget is None else {\"maxiter\": remaining}\n # options: tp.Dict[str, tp.Any] = {} if self.budget is None else {\"maxiter\": remaining}\n if weakself.method[:5] == \"NLOPT\":\n # This is NLOPT, used as in the PCSE simulator notebook.\n # ( https://github.com/ajwdewit/pcse_notebooks ).\n import nlopt\n\n def nlopt_objective_function(*args):\n data = np.asarray([arg for arg in args])[0]\n assert len(data) == weakself.dimension, (\n str(data) + \" does not have length \" + str(weakself.dimension)\n )\n if weakself._normalizer is not None:\n data = weakself._normalizer.backward(np.asarray(data, dtype=np.float32))\n return objective_function(data)\n\n # Sbplx (based on Subplex) is used by default.\n nlopt_param = (\n getattr(nlopt, weakself.method[6:]) if len(weakself.method) > 5 else nlopt.LN_SBPLX\n )\n opt = nlopt.opt(nlopt_param, weakself.dimension)\n # Assign the objective function calculator\n 
opt.set_min_objective(nlopt_objective_function)\n # Set the bounds.\n opt.set_lower_bounds(np.zeros(weakself.dimension))\n opt.set_upper_bounds(np.ones(weakself.dimension))\n # opt.set_initial_step([0.05, 0.05])\n opt.set_maxeval(budget)\n\n # Start the optimization with the first guess\n firstguess = 0.5 * np.ones(weakself.dimension)\n best_x = opt.optimize(firstguess)\n # print(\"\\noptimum at TDWI: %s, SPAN: %s\" % (x[0], x[1]))\n # print(\"minimum value = \", opt.last_optimum_value())\n # print(\"result code = \", opt.last_optimize_result())\n # print(\"With %i function calls\" % objfunc_calculator.n_calls)\n if weakself._normalizer is not None:\n best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32))\n\n elif weakself.method == \"CmaFmin2\":\n import cma # import inline in order to avoid matplotlib initialization warning\n\n def cma_objective_function(data):\n # Hopefully the line below does nothing if unbounded and rescales from [0, 1] if bounded.\n if weakself._normalizer is not None:\n data = weakself._normalizer.backward(np.asarray(data, dtype=np.float32))\n return objective_function(data)\n\n # cma.fmin2(objective_function, [0.0] * self.dimension, [1.0] * self.dimension, remaining)\n x0 = 0.5 * np.ones(weakself.dimension)\n num_calls = 0\n while budget - num_calls > 0:\n options = {\"maxfevals\": budget - num_calls, \"verbose\": -9}\n if weakself._normalizer is not None:\n # Tell CMA to work in [0, 1].\n options[\"bounds\"] = [0.0, 1.0]\n res = cma.fmin(\n cma_objective_function,\n x0=x0,\n sigma0=0.2,\n options=options,\n restarts=9,\n )\n x0 = 0.5 + np.random.uniform() * np.random.uniform(\n low=-0.5, high=0.5, size=weakself.dimension\n )\n if res[1] < best_res:\n best_res = res[1]\n best_x = res[0]\n if weakself._normalizer is not None:\n best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32))\n num_calls += res[2]\n else:\n res = scipyoptimize.minimize(\n objective_function,\n best_x\n if not weakself.random_restart\n else weakself._rng.normal(0.0, 1.0, weakself.dimension),\n method=weakself.method,\n options=options,\n tol=0,\n )\n if res.fun < best_res:\n best_res = res.fun\n best_x = res.x\n remaining = budget - weakself._num_ask\n return best_x\n\n\nclass NonObjectOptimizer(base.ConfiguredOptimizer):\n \"\"\"Wrapper over Scipy optimizer implementations, in standard ask and tell format.\n This is actually an import from scipy-optimize, including Sequential Quadratic Programming,\n\n Parameters\n ----------\n method: str\n Name of the method to use among:\n\n - Nelder-Mead\n - COBYLA\n - SQP (or SLSQP): very powerful e.g. in continuous noisy optimization. 
It is based on\n approximating the objective function by quadratic models.\n - Powell\n - NLOPT* (https://nlopt.readthedocs.io/en/latest/; by default, uses Sbplx, based on Subplex);\n can be NLOPT,\n NLOPT_LN_SBPLX,\n NLOPT_LN_PRAXIS,\n NLOPT_GN_DIRECT,\n NLOPT_GN_DIRECT_L,\n NLOPT_GN_CRS2_LM,\n NLOPT_GN_AGS,\n NLOPT_GN_ISRES,\n NLOPT_GN_ESCH,\n NLOPT_LN_COBYLA,\n NLOPT_LN_BOBYQA,\n NLOPT_LN_NEWUOA_BOUND,\n NLOPT_LN_NELDERMEAD.\n random_restart: bool\n whether to restart at a random point if the optimizer converged but the budget is not entirely\n spent yet (otherwise, restarts from best point)\n\n Note\n ----\n These optimizers do not support asking several candidates in a row\n \"\"\"\n\n recast = True\n no_parallelization = True\n\n # pylint: disable=unused-argument\n def __init__(self, *, method: str = \"Nelder-Mead\", random_restart: bool = False) -> None:\n super().__init__(_NonObjectMinimizeBase, locals())\n\n\nNelderMead = NonObjectOptimizer(method=\"Nelder-Mead\").set_name(\"NelderMead\", register=True)\nCmaFmin2 = NonObjectOptimizer(method=\"CmaFmin2\").set_name(\"CmaFmin2\", register=True)\nNLOPT = NonObjectOptimizer(method=\"NLOPT\").set_name(\"NLOPT\", register=True)\nPowell = NonObjectOptimizer(method=\"Powell\").set_name(\"Powell\", register=True)\nRPowell = NonObjectOptimizer(method=\"Powell\", random_restart=True).set_name(\"RPowell\", register=True)\nCobyla = NonObjectOptimizer(method=\"COBYLA\").set_name(\"Cobyla\", register=True)\nRCobyla = NonObjectOptimizer(method=\"COBYLA\", random_restart=True).set_name(\"RCobyla\", register=True)\nSQP = NonObjectOptimizer(method=\"SLSQP\").set_name(\"SQP\", register=True)\nSLSQP = SQP # Just so that people who are familiar with SLSQP naming are not lost.\nRSQP = NonObjectOptimizer(method=\"SLSQP\", random_restart=True).set_name(\"RSQP\", register=True)\nRSLSQP = RSQP # Just so that people who are familiar with SLSQP naming are not lost.\n\n\nclass _PymooMinimizeBase(recaster.SequentialRecastOptimizer):\n def __init__(\n self,\n parametrization: IntOrParameter,\n budget: tp.Optional[int] = None,\n num_workers: int = 1,\n *,\n algorithm: str,\n ) -> None:\n super().__init__(parametrization, budget=budget, num_workers=num_workers)\n # configuration\n self.algorithm = algorithm\n self._no_hypervolume = True\n self._initial_seed = -1\n\n def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], tp.Optional[tp.ArrayLike]]:\n if self._initial_seed == -1:\n self._initial_seed = self._rng.randint(2**30)\n return functools.partial(self._optimization_function, weakref.proxy(self))\n # pylint:disable=useless-return\n\n @staticmethod\n def _optimization_function(\n weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]\n ) -> tp.Optional[tp.ArrayLike]:\n # pylint:disable=unused-argument, import-outside-toplevel\n from pymoo import optimize as pymoooptimize\n\n from pymoo.factory import get_algorithm as get_pymoo_algorithm\n\n # from pymoo.factory import get_reference_directions\n\n # reference direction code for when we want to use the other MOO optimizers in Pymoo\n # if self.algorithm in [\n # \"rnsga2\",\n # \"nsga3\",\n # \"unsga3\",\n # \"rnsga3\",\n # \"moead\",\n # \"ctaea\",\n # ]: # algorithms that require reference points or reference directions\n # the appropriate n_partitions must be looked into\n # ref_dirs = get_reference_directions(\"das-dennis\", self.num_objectives, n_partitions=12)\n # algorithm = get_pymoo_algorithm(self.algorithm, ref_dirs)\n # else:\n algorithm = 
get_pymoo_algorithm(weakself.algorithm)\n problem = _create_pymoo_problem(weakself, objective_function)\n pymoooptimize.minimize(problem, algorithm, seed=weakself._initial_seed)\n return None\n\n def _internal_ask_candidate(self) -> p.Parameter:\n \"\"\"\n Special version to make sure that num_objectives has been set before\n the proper _internal_ask_candidate, in our parent class, is called.\n \"\"\"\n if self.num_objectives == 0:\n # dummy ask i.e. not activating pymoo until num_objectives is set\n warnings.warn(\n \"with this optimizer, it is more efficient to set num_objectives before the optimization begins\",\n errors.NevergradRuntimeWarning,\n )\n # We need to get a datapoint that is a random point in parameter space,\n # and waste an evaluation on it.\n return self.parametrization.spawn_child()\n return super()._internal_ask_candidate()\n\n def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:\n \"\"\"\n Special version to make sure that we the extra initial evaluation which\n we may have done in order to get num_objectives, is discarded.\n Note that this discarding means that the extra point will not make it into\n replay_archive_tell. Correspondingly, because num_objectives will make it into\n the pickle, __setstate__ will never need a dummy ask.\n \"\"\"\n if self._messaging_thread is None:\n return # dummy tell i.e. not activating pymoo until num_objectives is set\n super()._internal_tell_candidate(candidate, loss)\n\n def _post_loss(self, candidate: p.Parameter, loss: float) -> tp.Loss:\n # pylint: disable=unused-argument\n \"\"\"\n Multi-Objective override for this function.\n \"\"\"\n return candidate.losses\n\n\nclass Pymoo(base.ConfiguredOptimizer):\n \"\"\"Wrapper over Pymoo optimizer implementations, in standard ask and tell format.\n This is actually an import from Pymoo Optimize.\n\n Parameters\n ----------\n algorithm: str\n\n Use \"algorithm-name\" with following names to access algorithm classes:\n Single-Objective\n -\"de\"\n -'ga'\n -\"brkga\"\n -\"nelder-mead\"\n -\"pattern-search\"\n -\"cmaes\"\n Multi-Objective\n -\"nsga2\"\n Multi-Objective requiring reference directions, points or lines\n -\"rnsga2\"\n -\"nsga3\"\n -\"unsga3\"\n -\"rnsga3\"\n -\"moead\"\n -\"ctaea\"\n\n Note\n ----\n These optimizers do not support asking several candidates in a row\n \"\"\"\n\n recast = True\n no_parallelization = True\n\n # pylint: disable=unused-argument\n def __init__(self, *, algorithm: str) -> None:\n super().__init__(_PymooMinimizeBase, locals())\n\n\nclass _PymooBatchMinimizeBase(recaster.BatchRecastOptimizer):\n\n # pylint: disable=abstract-method\n\n def __init__(\n self,\n parametrization: IntOrParameter,\n budget: tp.Optional[int] = None,\n num_workers: int = 1,\n *,\n algorithm: str,\n ) -> None:\n super().__init__(parametrization, budget=budget, num_workers=num_workers)\n # configuration\n self.algorithm = algorithm\n self._no_hypervolume = True\n self._initial_seed = -1\n\n def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], tp.Optional[tp.ArrayLike]]:\n if self._initial_seed == -1:\n self._initial_seed = self._rng.randint(2**30)\n return functools.partial(self._optimization_function, weakref.proxy(self))\n # pylint:disable=useless-return\n\n @staticmethod\n def _optimization_function(\n weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]\n ) -> tp.Optional[tp.ArrayLike]:\n # pylint:disable=unused-argument, import-outside-toplevel\n from pymoo import optimize as pymoooptimize\n\n from 
pymoo.factory import get_algorithm as get_pymoo_algorithm\n\n # from pymoo.factory import get_reference_directions\n\n # reference direction code for when we want to use the other MOO optimizers in Pymoo\n # if self.algorithm in [\n # \"rnsga2\",\n # \"nsga3\",\n # \"unsga3\",\n # \"rnsga3\",\n # \"moead\",\n # \"ctaea\",\n # ]: # algorithms that require reference points or reference directions\n # the appropriate n_partitions must be looked into\n # ref_dirs = get_reference_directions(\"das-dennis\", self.num_objectives, n_partitions=12)\n # algorithm = get_pymoo_algorithm(self.algorithm, ref_dirs)\n # else:\n algorithm = get_pymoo_algorithm(weakself.algorithm)\n problem = _create_pymoo_problem(weakself, objective_function, False)\n pymoooptimize.minimize(problem, algorithm, seed=weakself._initial_seed)\n return None\n\n def _internal_ask_candidate(self) -> p.Parameter:\n \"\"\"Reads messages from the thread in which the underlying optimization function is running\n New messages are sent as \"ask\".\n \"\"\"\n # get a datapoint that is a random point in parameter space\n if self.num_objectives == 0: # dummy ask i.e. not activating pymoo until num_objectives is set\n warnings.warn(\n \"with this optimizer, it is more efficient to set num_objectives before the optimization begins\",\n errors.NevergradRuntimeWarning,\n )\n return self.parametrization.spawn_child()\n return super()._internal_ask_candidate()\n\n def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:\n \"\"\"Returns value for a point which was \"asked\"\n (none asked point cannot be \"tell\")\n \"\"\"\n if self._messaging_thread is None:\n return # dummy tell i.e. not activating pymoo until num_objectives is set\n super()._internal_tell_candidate(candidate, loss)\n\n def _post_loss(self, candidate: p.Parameter, loss: float) -> tp.Loss:\n # pylint: disable=unused-argument\n \"\"\"\n Multi-Objective override for this function.\n \"\"\"\n return candidate.losses\n\n\nclass PymooBatch(base.ConfiguredOptimizer):\n \"\"\"Wrapper over Pymoo optimizer implementations, in standard ask and tell format.\n This is actually an import from Pymoo Optimize.\n\n Parameters\n ----------\n algorithm: str\n\n Use \"algorithm-name\" with following names to access algorithm classes:\n Single-Objective\n -\"de\"\n -'ga'\n -\"brkga\"\n -\"nelder-mead\"\n -\"pattern-search\"\n -\"cmaes\"\n Multi-Objective\n -\"nsga2\"\n Multi-Objective requiring reference directions, points or lines\n -\"rnsga2\"\n -\"nsga3\"\n -\"unsga3\"\n -\"rnsga3\"\n -\"moead\"\n -\"ctaea\"\n\n Note\n ----\n These optimizers do not support asking several candidates in a row\n \"\"\"\n\n recast = True\n\n # pylint: disable=unused-argument\n def __init__(self, *, algorithm: str) -> None:\n super().__init__(_PymooBatchMinimizeBase, locals())\n\n\ndef _create_pymoo_problem(\n optimizer: base.Optimizer,\n objective_function: tp.Callable[[tp.ArrayLike], float],\n elementwise: bool = True,\n):\n kwargs = {}\n try:\n # pylint:disable=import-outside-toplevel\n from pymoo.core.problem import ElementwiseProblem, Problem # type: ignore\n\n Base = ElementwiseProblem if elementwise else Problem\n except ImportError:\n # Used if pymoo < 0.5.0\n # pylint:disable=import-outside-toplevel\n from pymoo.model.problem import Problem as Base # type: ignore\n\n kwargs = {\"elementwise_evaluation\": elementwise}\n\n class _PymooProblem(Base): # type: ignore\n def __init__(self, optimizer, objective_function):\n self.objective_function = objective_function\n super().__init__(\n 
n_var=optimizer.dimension,\n n_obj=optimizer.num_objectives,\n n_constr=0, # constraints handled already by nevergrad\n xl=-math.pi * 0.5,\n xu=math.pi * 0.5,\n **kwargs,\n )\n\n def _evaluate(self, X, out, *args, **kwargs):\n # pylint:disable=unused-argument\n # pymoo is supplying us with bounded parameters in [-pi/2,pi/2]. Nevergrad wants unbounded reals from us.\n out[\"F\"] = self.objective_function(np.tan(X))\n\n return _PymooProblem(optimizer, objective_function)\n\n\nPymooNSGA2 = Pymoo(algorithm=\"nsga2\").set_name(\"PymooNSGA2\", register=True)\nPymooBatchNSGA2 = PymooBatch(algorithm=\"nsga2\").set_name(\"PymooBatchNSGA2\", register=False)\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nfrom . import core\n\n\ndef test_unit_commitment_p1() -> None:\n np.random.seed(0)\n T = 10\n N = 5\n func = core.UnitCommitmentProblem(problem_name=\"semi-continuous\", num_timepoints=T, num_generators=N)\n op_out = np.ones((N, T))\n op_states = np.ones((N, T))\n value = func.function(operational_output=op_out, operational_states=op_states)\n assert isinstance(value, float)\n assert np.allclose([value], [38721960.61493097], rtol=1e-04, atol=1e-05)\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# (C) Copyright 2020 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This work is implemented under the Formulations and Computational Engineering (FORCE) project within Horizon 2020\n# (`NMBP-23-2016/721027 <https://www.the-force-project.eu>`_).\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\n\nfrom .hypervolume import (\n VectorNode,\n VectorLinkedList,\n HypervolumeIndicator,\n)\n\n\ndef test_initialize_empty_node() -> None:\n dim = 4\n node = VectorNode(dim)\n\n assert isinstance(node.coordinates, np.ndarray)\n for entry in node.next:\n assert entry is node\n for entry in node.prev:\n assert entry is node\n\n assert list(node.area) == [0.0] * dim\n assert list(node.volume) == [0.0] * dim\n assert str(node) == \"None\"\n\n\ndef test_initialize_node() -> None:\n dim = 4\n coordinates = [1.0, 2.0, 3.0]\n node = VectorNode(dim, coordinates=coordinates)\n\n assert isinstance(node.coordinates, np.ndarray)\n assert list(node.coordinates) == coordinates\n for entry in node.next:\n assert entry is node\n for entry in node.prev:\n assert entry is node\n\n assert list(node.area) == [0.0] * dim\n assert list(node.volume) == [0.0] * dim\n assert str(node) == \"[1. 2. 3.]\"\n\n node.configure_area(0)\n assert node.area[0] == 1.0\n assert node.area[1] == 0.0\n assert node.area[2] == 0.0\n\n node.configure_area(1)\n assert node.area[0] == 1.0\n assert node.area[1] == -1.0\n assert node.area[2] == 0.0\n\n node.configure_area(2)\n assert node.area[0] == 1.0\n assert node.area[1] == -1.0\n assert node.area[2] == 2.0\n\n\ndef test_initialize_linked_list() -> None:\n dim = 4\n multilist = VectorLinkedList(dimension=dim)\n\n assert dim == multilist.dimension\n assert isinstance(multilist.sentinel, VectorNode)\n assert len(multilist.sentinel.prev) == 4\n assert len(multilist.sentinel.next) == 4\n assert len(multilist) == 4\n\n for d in range(dim):\n assert multilist.sentinel is multilist.sentinel.next[d]\n assert multilist.sentinel is multilist.sentinel.prev[d]\n\n assert len(multilist.sentinel.next) == len(multilist.sentinel.prev)\n assert len(multilist.sentinel.next) == len(multilist.sentinel.next[0].next)\n\n assert str(multilist) == \"\\n\".join([str([])] * dim)\n\n\ndef test_append() -> None:\n dim = 4\n multilist = VectorLinkedList(dimension=dim)\n\n new_node = VectorNode(dim)\n multilist.append(new_node, 0)\n\n for i in range(1, dim):\n assert new_node.next[i] is new_node\n assert new_node.prev[i] is new_node\n assert multilist.sentinel.next[i] is multilist.sentinel\n assert multilist.sentinel.prev[i] is multilist.sentinel\n\n assert new_node.next[0] is multilist.sentinel\n assert new_node.prev[0] is multilist.sentinel\n assert multilist.sentinel.next[0] is new_node\n assert multilist.sentinel.prev[0] is new_node\n\n another_node = VectorNode(dim)\n multilist.append(another_node, 0)\n for i in range(1, dim):\n assert new_node.next[i] is new_node\n assert new_node.prev[i] is new_node\n assert multilist.sentinel.next[i] is multilist.sentinel\n assert multilist.sentinel.prev[i] is multilist.sentinel\n\n assert new_node.next[0] is another_node\n assert new_node.prev[0] is multilist.sentinel\n assert multilist.sentinel.next[0] is new_node\n assert multilist.sentinel.prev[0] is another_node\n\n\ndef test_extend() -> None:\n dim = 1\n multilist = VectorLinkedList(dimension=dim)\n another_multilist = VectorLinkedList(dimension=dim)\n\n 
new_node = VectorNode(dim)\n another_node = VectorNode(dim)\n\n multilist.append(new_node, 0)\n multilist.append(another_node, 0)\n\n another_multilist.extend([new_node, another_node], 0)\n assert another_multilist.chain_length(0) == 2\n assert another_multilist.sentinel.next[0] is multilist.sentinel.next[0]\n assert another_multilist.sentinel.next[0].next[0] is multilist.sentinel.next[0].next[0]\n\n\ndef test_chain_length() -> None:\n dim = 3\n multilist = VectorLinkedList(dimension=dim)\n\n new_node = VectorNode(dim)\n multilist.append(new_node, 0)\n assert multilist.chain_length(0) == 1\n assert multilist.chain_length(1) == 0\n assert multilist.chain_length(2) == 0\n\n another_node = VectorNode(dim)\n multilist.append(another_node, 0)\n assert multilist.chain_length(0) == 2\n assert multilist.chain_length(1) == 0\n assert multilist.chain_length(2) == 0\n\n multilist.append(another_node, 1)\n assert multilist.chain_length(0) == 2\n assert multilist.chain_length(1) == 1\n assert multilist.chain_length(2) == 0\n\n multilist.append(new_node, 2)\n assert multilist.chain_length(0) == 2\n assert multilist.chain_length(1) == 1\n assert multilist.chain_length(2) == 1\n\n\ndef test_pop() -> None:\n dim = 4\n multilist = VectorLinkedList(dimension=dim)\n\n new_node = VectorNode(dim)\n multilist.append(new_node, 0)\n\n popped_node = multilist.pop(new_node, 0 + 1)\n assert popped_node is new_node\n assert new_node.next[0] is multilist.sentinel\n assert new_node.prev[0] is multilist.sentinel\n for i in range(dim):\n assert multilist.sentinel.next[i] is multilist.sentinel\n assert multilist.sentinel.prev[i] is multilist.sentinel\n\n\ndef test_reinsert() -> None:\n dim = 2\n multilist = VectorLinkedList(dimension=dim)\n\n new_node = VectorNode(dim)\n another_node = VectorNode(dim)\n\n multilist.append(new_node, 0)\n multilist.append(another_node, 0)\n\n multilist.append(another_node, 1)\n multilist.append(new_node, 1)\n\n popped_node = multilist.pop(new_node, 1 + 1)\n\n multilist.reinsert(new_node, 0 + 1)\n assert multilist.chain_length(0) == 2\n assert multilist.chain_length(1) == 1\n assert new_node.next[0] is another_node\n assert new_node.prev[0] is multilist.sentinel\n assert another_node.prev[0] is new_node\n assert another_node.next[0] is multilist.sentinel\n assert another_node.prev[1] is multilist.sentinel\n assert another_node.next[1] is multilist.sentinel\n\n multilist.reinsert(popped_node, 1 + 1)\n assert multilist.chain_length(0) == 2\n assert multilist.chain_length(1) == 2\n assert another_node.prev[1] is multilist.sentinel\n assert another_node.next[1] is new_node\n assert new_node.prev[1] is another_node\n assert new_node.next[1] is multilist.sentinel\n\n\ndef test_iterate() -> None:\n dim = 1\n multilist = VectorLinkedList(dimension=dim)\n\n new_node = VectorNode(dim)\n another_node = VectorNode(dim)\n\n multilist.append(new_node, 0)\n multilist.append(another_node, 0)\n gen = multilist.iterate(0)\n assert next(gen) is new_node\n assert next(gen) is another_node\n\n yet_another_node = VectorNode(dim)\n multilist.append(yet_another_node, 0)\n gen = multilist.iterate(0, start=another_node)\n assert next(gen) is another_node\n assert next(gen) is yet_another_node\n\n\ndef test_reverse_iterate() -> None:\n dim = 1\n multilist = VectorLinkedList(dimension=dim)\n\n new_node = VectorNode(dim)\n another_node = VectorNode(dim)\n yet_another_node = VectorNode(dim)\n\n multilist.append(new_node, 0)\n multilist.append(another_node, 0)\n multilist.append(yet_another_node, 0)\n\n gen = 
multilist.reverse_iterate(0)\n assert next(gen) is yet_another_node\n assert next(gen) is another_node\n assert next(gen) is new_node\n\n gen = multilist.reverse_iterate(0, start=another_node)\n assert next(gen) is another_node\n assert next(gen) is new_node\n\n\ndef test_update_coordinate_bounds() -> None:\n bounds = np.array([-1.0, -1.0, -1.0])\n node = VectorNode(3, coordinates=[1.0, -2.0, -1.0])\n bounds = VectorLinkedList.update_coordinate_bounds(bounds, node, 0 + 1)\n assert list(bounds) == [-1, -1, -1]\n bounds = VectorLinkedList.update_coordinate_bounds(bounds, node, 1 + 1)\n assert list(bounds) == [-1, -2, -1]\n bounds = VectorLinkedList.update_coordinate_bounds(bounds, node, 2 + 1)\n assert list(bounds) == [-1, -2, -1]\n\n\ndef test_sort_by_index() -> None:\n nodes = [VectorNode(3, [1, 2, 3]), VectorNode(3, [2, 3, 1]), VectorNode(3, [3, 1, 2])]\n new_nodes = VectorLinkedList.sort_by_index(nodes, 0)\n assert new_nodes == nodes\n\n new_nodes = VectorLinkedList.sort_by_index(nodes, 1)\n assert new_nodes == [nodes[2], nodes[0], nodes[1]]\n\n new_nodes = VectorLinkedList.sort_by_index(nodes, 2)\n assert new_nodes == [nodes[1], nodes[2], nodes[0]]\n\n\ndef test_create_sorted() -> None:\n dimension = 3\n coordinates = [[1, 2, 3], [2, 3, 1], [3, 1, 2]]\n linked_list = VectorLinkedList.create_sorted(dimension, coordinates)\n assert isinstance(linked_list, VectorLinkedList)\n assert list(linked_list.sentinel.next[0].coordinates) == [1, 2, 3]\n assert list(linked_list.sentinel.next[1].coordinates) == [3, 1, 2]\n assert list(linked_list.sentinel.next[2].coordinates) == [2, 3, 1]\n\n assert list(linked_list.sentinel.next[0].next[0].coordinates) == [2, 3, 1]\n assert list(linked_list.sentinel.next[1].next[1].coordinates) == [1, 2, 3]\n assert list(linked_list.sentinel.next[2].next[2].coordinates) == [3, 1, 2]\n\n\ndef test_version_consistency() -> None:\n reference = np.array([79, 89, 99])\n hv = HypervolumeIndicator(reference)\n front = np.array(\n [(110, 110, 100), (110, 90, 87), (80, 80, 36), (50, 50, 55), (105, 30, 43), (110, 110, 100)]\n )\n volume = hv.compute(front)\n assert volume == 11113.0\n\n\ndef test_reference_no_pointy() -> None:\n reference = np.array([10, 10])\n hv = HypervolumeIndicator(reference)\n front = np.array(\n [\n (11, 9),\n (9, 11),\n ]\n )\n volume = hv.compute(front)\n assert volume == -3 # not sure this is expected\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Experimental mutation patterns for structured data\n\"\"\"\n\nimport typing as tp\nimport numpy as np\nfrom . import core\nfrom . import transforms\nfrom .data import Data, Scalar\nfrom .choice import Choice\nfrom . import _layering\n\n\nD = tp.TypeVar(\"D\", bound=Data)\nP = tp.TypeVar(\"P\", bound=core.Parameter)\n\n\nclass Mutation(_layering.Layered):\n \"\"\"Custom mutation or recombination operation\n This is an experimental API\n\n Call on a Parameter to create a new Parameter with the\n provided mutation/recombination.\n \"\"\"\n\n _TYPE = core.Parameter\n\n def root(self) -> core.Parameter:\n param = self._layers[0]\n self._check_type(param)\n return param # type: ignore\n\n def _check_type(self, param: core.Layered) -> None:\n if not isinstance(param, self._TYPE):\n raise RuntimeError(\n f\"{self.__class__.__name__} must be applied to {self._TYPE} parameters, got: {type(param)}\"\n )\n\n def __call__(self, parameter: P, inplace: bool = False) -> P:\n self._check_type(parameter)\n new = parameter if inplace else parameter.copy()\n new.add_layer(self.copy())\n return new\n\n\nclass DataMutation(Mutation):\n _TYPE = Data\n\n def __init__(self, **parameters: tp.Any) -> None:\n self._parameters = parameters\n super().__init__()\n\n def root(self) -> Data: # pylint: disable=useless-super-delegation\n # simpler for typing\n return super().root() # type: ignore\n\n def _on_layer_added(self) -> None:\n params = self.root().parameters\n for name, obj in self._parameters.items():\n if name not in params:\n params[name] = core.as_parameter(obj)\n self._parameters = {}\n\n\nclass MutationChoice(DataMutation):\n \"\"\"Selects one of the provided mutation based on a Choice subparameter\n Caution: there may be subparameter collisions\n \"\"\"\n\n def __init__(self, mutations: tp.Sequence[Mutation], with_default: bool = True) -> None:\n self.mutations = list(mutations)\n self.with_default = with_default\n options = [lay.uid for lay in self.mutations] + ([\"#ROOT#\"] if with_default else [])\n super().__init__(mutation_choice=Choice(options))\n\n def _on_layer_added(self) -> None:\n root = self.root()\n for mut in self.mutations:\n root.add_layer(mut)\n self.mutations = []\n super()._on_layer_added()\n\n def _select(self) -> core.Layered:\n root = self.root()\n layers = {lay.uid: lay for lay in self._layers}\n layers[\"#ROOT#\"] = root\n return layers[root.parameters[\"mutation_choice\"].value]\n\n def _layered_recombine(self, *others: core.Layered) -> None:\n self._select()._layered_recombine(*others)\n\n def _layered_mutate(self) -> None:\n self._select()._layered_mutate()\n\n\nclass Cauchy(Mutation):\n def _layered_mutate(self) -> None:\n root = self.root()\n root.set_standardized_data(root.random_state.standard_cauchy(size=root.dimension))\n\n\nclass Crossover(DataMutation):\n \"\"\"Operator for merging part of an array into another one\n\n Parameters\n ----------\n axis: None or int or tuple of ints\n the axis (or axes) on which the merge will happen. This axis will be split into 3 parts: the first and last one will take\n value from the first array, the middle one from the second array.\n max_size: None or int\n maximum size of the part taken from the second array. 
By default, this is at most around half the number of total elements of the\n array to the power of 1/number of axis.\n\n\n Notes\n -----\n - this is experimental, the API may evolve\n - when using several axis, the size of the second array part is the same on each axis (aka a square in 2D, a cube in 3D, ...)\n\n Examples:\n ---------\n - 2-dimensional array, with crossover on dimension 1:\n 0 1 0 0\n 0 1 0 0\n 0 1 0 0\n - 2-dimensional array, with crossover on dimensions 0 and 1:\n 0 0 0 0\n 0 1 1 0\n 0 1 1 0\n \"\"\"\n\n def __init__(\n self,\n axis: tp.Any = None,\n max_size: tp.Union[int, Scalar, None] = None,\n fft: bool = False,\n ) -> None:\n if not isinstance(axis, core.Parameter):\n axis = (axis,) if isinstance(axis, int) else tuple(axis) if axis is not None else None\n super().__init__(max_size=max_size, axis=axis, fft=fft)\n\n @property\n def axis(self) -> tp.Optional[tp.Tuple[int, ...]]:\n return self.root().parameters[\"axis\"].value # type: ignore\n\n def _layered_recombine(self, *arrays: Data) -> None: # type: ignore\n root = self.root()\n new_value = self._apply_array([root.value] + [a.value for a in arrays])\n bounds = root.bounds\n if root.parameters[\"fft\"].value and any(x is not None for x in bounds):\n new_value = transforms.Clipping(a_min=bounds[0], a_max=bounds[1]).forward(new_value)\n root.value = new_value\n\n def _apply_array(self, arrays: tp.Sequence[np.ndarray]) -> np.ndarray:\n root = self.root()\n # checks\n if len(arrays) != 2:\n raise Exception(f\"Crossover can only be applied between 2 individuals, got {len(arrays)}\")\n transf = (\n transforms.Fourrier(range(arrays[0].ndim) if self.axis is None else self.axis)\n if root.parameters[\"fft\"].value\n else None\n )\n if transf is not None:\n arrays = [transf.forward(a) for a in arrays]\n shape = arrays[0].shape\n assert shape == arrays[1].shape, \"Individuals should have the same shape\"\n # settings\n axis = tuple(range(len(shape))) if self.axis is None else self.axis\n max_size = root.parameters[\"max_size\"].value\n max_size = int(((arrays[0].size + 1) / 2) ** (1 / len(axis))) if max_size is None else max_size\n max_size = min(max_size, *(shape[a] - 1 for a in axis))\n size = 1 if max_size == 1 else self.random_state.randint(1, max_size)\n # slices\n slices = _make_slices(shape, axis, size, self.random_state)\n result = np.array(arrays[0], copy=True)\n result[tuple(slices)] = arrays[1][tuple(slices)]\n if transf is not None:\n result = transf.backward(result)\n return result\n\n\nclass RavelCrossover(Crossover): # TODO: can be made for all parameters instead of just arrays\n \"\"\"Operator for merging part of an array into another one, after raveling\n\n Parameters\n ----------\n max_size: None or int\n maximum size of the part taken from the second array. 
By default, this is at most around half the number of total elements of the\n array to the power of 1/number of axis.\n \"\"\"\n\n def __init__(\n self,\n max_size: tp.Union[int, Scalar, None] = None,\n ) -> None:\n super().__init__(axis=0, max_size=max_size)\n\n def _apply_array(self, arrays: tp.Sequence[np.ndarray]) -> np.ndarray:\n shape = arrays[0].shape\n out = super()._apply_array([a.ravel() for a in arrays])\n return out.reshape(shape)\n\n\ndef _make_slices(\n shape: tp.Tuple[int, ...], axes: tp.Tuple[int, ...], size: int, rng: np.random.RandomState\n) -> tp.List[slice]:\n slices = []\n for a, s in enumerate(shape):\n if a in axes:\n if s <= 1:\n raise ValueError(\"Cannot crossover on axis with size 1\")\n start = rng.randint(s - size)\n slices.append(slice(start, start + size))\n else:\n slices.append(slice(None))\n return slices\n\n\nclass Translation(DataMutation):\n def __init__(self, axis: tp.Optional[tp.Union[int, tp.Iterable[int]]] = None):\n if not isinstance(axis, core.Parameter):\n axes = (axis,) if isinstance(axis, int) else tuple(axis) if axis is not None else None\n super().__init__(axes=axes)\n\n @property\n def axes(self) -> tp.Optional[tp.Tuple[int, ...]]:\n return self.root().parameters[\"axes\"].value # type: ignore\n\n def _layered_mutate(self) -> None:\n root = self.root()\n root._value = self._apply_array([root._value])\n\n def _apply_array(self, arrays: tp.Sequence[np.ndarray]) -> np.ndarray:\n assert len(arrays) == 1\n data = arrays[0]\n axes = tuple(range(data.ndim)) if self.axes is None else self.axes\n shifts = [self.random_state.randint(data.shape[a]) for a in axes]\n return np.roll(data, shifts, axis=axes) # type: ignore\n\n\nclass AxisSlicedArray:\n def __init__(self, array: np.ndarray, axis: int):\n self.array = array\n self.axis = axis\n\n def __getitem__(self, slice_: slice) -> np.ndarray:\n assert isinstance(slice_, slice)\n slices = tuple(slice_ if a == self.axis else slice(None) for a in range(self.array.ndim))\n return self.array[slices] # type: ignore\n\n\nclass Jumping(DataMutation):\n \"\"\"Move a chunk for a position to another in an array\"\"\"\n\n def __init__(self, axis: int, size: int):\n super().__init__(axis=axis, size=size)\n\n @property\n def axis(self) -> int:\n return self.root().parameters[\"axis\"].value # type: ignore\n\n @property\n def size(self) -> int:\n return self.root().parameters[\"size\"].value # type: ignore\n\n def _layered_mutate(self) -> None:\n root = self.root()\n root._value = self._apply_array([root._value])\n\n def _apply_array(self, arrays: tp.Sequence[np.ndarray]) -> np.ndarray:\n assert len(arrays) == 1\n data = arrays[0]\n L = data.shape[self.axis]\n size = self.random_state.randint(1, self.size)\n asdata = AxisSlicedArray(data, self.axis)\n init = self.random_state.randint(L)\n chunck = asdata[init : init + size]\n remain: np.ndarray = np.concatenate([asdata[:init], asdata[init + size :]], axis=self.axis)\n # pylint: disable=unsubscriptable-object\n newpos = self.random_state.randint(remain.shape[self.axis])\n asremain = AxisSlicedArray(remain, self.axis)\n return np.concatenate([asremain[:newpos], chunck, asremain[newpos:]], axis=self.axis) # type: ignore\n\n\nclass LocalGaussian(DataMutation):\n def __init__(\n self, size: tp.Union[int, core.Parameter], axes: tp.Optional[tp.Union[int, tp.Iterable[int]]] = None\n ):\n if not isinstance(axes, core.Parameter):\n axes = (axes,) if isinstance(axes, int) else tuple(axes) if axes is not None else None\n super().__init__(axes=axes, size=size)\n\n @property\n def 
axes(self) -> tp.Optional[tp.Tuple[int, ...]]:\n return self.root().parameters[\"axes\"].value # type: ignore\n\n def _layered_mutate(self) -> None:\n root = self.root()\n data = np.zeros(root.value.shape)\n # settings\n axis = tuple(range(len(data.shape))) if self.axes is None else self.axes\n size = self.root().parameters[\"size\"].value\n # slices\n slices = _make_slices(data.shape, axis, size, self.random_state)\n shape = data[tuple(slices)].shape\n data[tuple(slices)] += self.random_state.normal(0, 1, size=shape)\n root._internal_set_standardized_data(data.ravel(), reference=root)\n\n\ndef rolling_mean(vector: np.ndarray, window: int) -> np.ndarray:\n if window >= len(vector):\n return np.sum(vector) * np.ones((len(vector),)) # type: ignore\n if window <= 1:\n return vector\n cumsum: np.ndarray = np.cumsum(np.concatenate(([0], vector, vector[: window - 1])))\n return cumsum[window:] - cumsum[:-window] # type: ignore\n\n\n# class TunedTranslation(Mutation):\n# def __init__(self, axis: int, shape: tp.Sequence[int]):\n# assert isinstance(axis, int)\n# self.shape = tuple(shape)\n# super().__init__(shift=Choice(range(1, shape[axis])))\n# self.axis = axis\n#\n# @property\n# def shift(self) -> Choice:\n# return self.root().parameters[\"shift\"] # type: ignore\n#\n# def _apply_array(self, arrays: tp.Sequence[np.ndarray]) -> np.ndarray:\n# assert len(arrays) == 1\n# data = arrays[0]\n# assert data.shape == self.shape\n# shift = self.shift.value\n# # update shift arrray\n# shifts = self.shift.indices._value\n# self.shift.indices._value = np.roll(shifts, shift) # update probas\n# return np.roll(data, shift, axis=self.axis) # type: ignore\n\n\n# class ProbaLocalGaussian(Mutation):\n# def __init__(self, axis: int, shape: tp.Sequence[int]):\n# assert isinstance(axis, int)\n# self.shape = tuple(shape)\n# self.axis = axis\n# super().__init__(\n# positions=Array(shape=(shape[axis],)),\n# ratio=Scalar(init=1, lower=0, upper=1).set_mutation(sigma=0.05),\n# )\n#\n# def axes(self) -> tp.Optional[tp.Tuple[int, ...]]:\n# return self.root().parameters[\"axes\"].value # type: ignore\n#\n# def apply(self, arrays: tp.Sequence[Data]) -> None:\n# arrays = list(arrays)\n# assert len(arrays) == 1\n# data = np.zeros(arrays[0].value.shape)\n# # settings\n# length = self.shape[self.axis]\n# size = int(max(1, np.round(length * self.root().parameters[\"ratio\"].value)))\n# # slices\n# e_weights = np.exp(rolling_mean(self.root().parameters[\"positions\"].value, size))\n# probas = e_weights / np.sum(e_weights)\n# index = self.random_state.choice(range(length), p=probas)\n# # update (inefficient)\n# shape = tuple(size if a == self.axis else s for a, s in enumerate(arrays[0].value.shape))\n# data[tuple(slice(s) for s in shape)] += self.random_state.normal(0, 1, size=shape)\n# data = np.roll(data, shift=index, axis=self.axis)\n# arrays[0]._internal_set_standardized_data(data.ravel(), reference=arrays[0])\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport typing as tp\nimport numpy as np\nfrom nevergrad import optimizers\nfrom nevergrad.optimization.base import ConfiguredOptimizer\nfrom nevergrad.optimization import experimentalvariants # pylint: disable=unused-import\nfrom nevergrad.functions import ArtificialFunction\nfrom .xpbase import registry\nfrom .xpbase import create_seed_generator\nfrom .xpbase import Experiment\n\n# pylint: disable=stop-iteration-return, too-many-nested-blocks\n\n\[email protected]\ndef basic(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n \"\"\"Test settings\"\"\"\n seedg = create_seed_generator(seed)\n function = ArtificialFunction(name=\"sphere\", block_dimension=2, noise_level=1)\n np.random.seed(seed) # seed before initializing the function!\n # initialization uses randomness\n function.transform_var._initialize()\n return iter([Experiment(function, optimizer=\"OnePlusOne\", num_workers=2, budget=4, seed=next(seedg))])\n\n\[email protected]\ndef repeated_basic(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n \"\"\"Test settings\"\"\"\n seedg = create_seed_generator(seed)\n function = ArtificialFunction(name=\"sphere\", block_dimension=2, noise_level=1)\n optims: tp.List[tp.Union[str, ConfiguredOptimizer]] = [\"OnePlusOne\", optimizers.DifferentialEvolution()]\n for _ in range(5):\n for optim in optims:\n yield Experiment(function, optimizer=optim, num_workers=2, budget=4, seed=next(seedg))\n\n\[email protected]\ndef illcond(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n \"\"\"All optimizers on ill cond problems\"\"\"\n seedg = create_seed_generator(seed)\n for budget in [500, 1000, 2000, 4000]:\n for optim in [\"SQP\", \"DE\", \"CMA\", \"PSO\", \"RotationInvariantDE\", \"NelderMead\"]:\n for rotation in [True, False]:\n for name in [\"ellipsoid\", \"cigar\"]:\n function = ArtificialFunction(name=name, rotation=rotation, block_dimension=100)\n yield Experiment(function, optim, budget=budget, seed=next(seedg))\n\n\[email protected]\ndef compabasedillcond(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n \"\"\"All optimizers on ill cond problems\"\"\"\n seedg = create_seed_generator(seed)\n for budget in [500, 1000, 2000, 4000, 8000]:\n for optim in [\n \"DE\",\n \"CMA\",\n \"PSO\",\n \"BPRotationInvariantDE\",\n \"RotationInvariantDE\",\n \"AlmostRotationInvariantDE\",\n \"AlmostRotationInvariantDEAndBigPop\",\n ]:\n for rotation in [True, False]:\n for name in [\"ellipsoid\", \"cigar\"]:\n function = ArtificialFunction(name=name, rotation=rotation, block_dimension=30)\n yield Experiment(function, optim, budget=budget, seed=next(seedg))\n\n\[email protected]\ndef noise(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n \"\"\"All optimizers on ill cond problems\"\"\"\n seedg = create_seed_generator(seed)\n optims = sorted(\n x\n for x, y in optimizers.registry.items()\n if (\"TBPSA\" in x or \"ois\" in x or \"CMA\" in x or \"epea\" in x) and \"iscr\" not in x\n )\n for budget in [500, 1000, 2000, 4000, 8000, 16000, 32000, 64000, 128000]:\n for optim in optims:\n for rotation in [True, False]:\n for name in [\"sphere\", \"cigar\", \"sphere4\"]:\n function = ArtificialFunction(\n name=name, rotation=rotation, block_dimension=20, noise_level=10\n )\n yield Experiment(function, optim, budget=budget, seed=next(seedg))\n\n\[email protected]\ndef dim10_smallbudget(seed: 
tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n # prepare list of parameters to sweep for independent variables\n seedg = create_seed_generator(seed)\n names = [\"sphere\"]\n optims = sorted(\n x for x, y in optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [\n ArtificialFunction(\n name, block_dimension=bd, num_blocks=n_blocks, useless_variables=bd * uv_factor * n_blocks\n )\n for name in names\n for bd in [10]\n for uv_factor in [0]\n for n_blocks in [1]\n ]\n for func in functions:\n for optim in optims:\n for budget in [4, 8, 16, 32]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\n# 2 variables matter - Scrambled Hammersley rules.\ndef dim10_select_two_features(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n # prepare list of parameters to sweep for independent variables\n seedg = create_seed_generator(seed)\n names = [\"sphere\"]\n optims = sorted(\n x for x, y in optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [\n ArtificialFunction(\n name, block_dimension=bd, num_blocks=n_blocks, useless_variables=bd * uv_factor * n_blocks\n )\n for name in names\n for bd in [2]\n for uv_factor in [5]\n for n_blocks in [1]\n ]\n for func in functions:\n for optim in optims:\n for budget in [4, 8, 16, 32]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\ndef dim10_select_one_feature(\n seed: tp.Optional[int] = None,\n) -> tp.Iterator[Experiment]: # One and only one variable matters - LHS wins.\n # prepare list of parameters to sweep for independent variables\n seedg = create_seed_generator(seed)\n names = [\"sphere\"]\n optims = sorted(\n x for x, y in optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [\n ArtificialFunction(\n name, block_dimension=bd, num_blocks=n_blocks, useless_variables=bd * uv_factor * n_blocks\n )\n for name in names\n for bd in [1]\n for uv_factor in [10]\n for n_blocks in [1]\n ]\n for func in functions:\n for optim in optims:\n for budget in [8, 10, 12, 14, 16, 18, 20]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\ndef doe_dim4(\n seed: tp.Optional[int] = None,\n) -> tp.Iterator[Experiment]: # Here, QR performs best, then Random, then LHS, then Cauchy.\n # prepare list of parameters to sweep for independent variables\n seedg = create_seed_generator(seed)\n names = [\"sphere\"] # n for n in ArtificialFunction.list_sorted_function_names() if \"sphere\" in n]\n optims = sorted(\n x for x, y in optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [\n ArtificialFunction(\n name, block_dimension=bd, num_blocks=n_blocks, useless_variables=bd * uv_factor * n_blocks\n )\n for name in names\n for bd in [4]\n for uv_factor in [0]\n for n_blocks in [1]\n ]\n for func in functions:\n for optim in optims:\n for budget in [30, 100, 3000, 10000]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\ndef oneshot4(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n # General experiment comparing one-shot optimizers, excluding those with \"large\" or \"small\"\n # in the name.\n seedg = create_seed_generator(seed)\n names = [\"sphere\", \"cigar\", \"ellipsoid\", \"rosenbrock\", \"rastrigin\"]\n optims = sorted(\n x for x, y in 
optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [\n ArtificialFunction(\n name, block_dimension=bd, num_blocks=n_blocks, useless_variables=bd * uv_factor * n_blocks\n )\n for name in names\n for bd in [1, 4, 20]\n for uv_factor in [0, 10]\n for n_blocks in [1]\n ]\n for func in functions:\n for optim in optims:\n for budget in [30, 100, 3000]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\ndef oneshot3(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n # General experiment comparing one-shot optimizers, excluding those with \"large\" or \"small\"\n # in the name.\n seedg = create_seed_generator(seed)\n names = [\"sphere\", \"altcigar\", \"cigar\", \"ellipsoid\", \"rosenbrock\", \"rastrigin\", \"altellipsoid\"]\n optims = sorted(\n x for x, y in optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [ArtificialFunction(name, block_dimension=bd) for name in names for bd in [4, 20]]\n for func in functions:\n for optim in optims:\n for budget in [30, 60, 100]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\ndef oneshot2(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n # Experiment comparing one-shot optimizers in the context of useless vars vs critical vars.\n seedg = create_seed_generator(seed)\n names = [\"sphere\", \"altcigar\", \"cigar\", \"ellipsoid\", \"rosenbrock\", \"rastrigin\", \"altellipsoid\"]\n optims = sorted(\n x for x, y in optimizers.registry.items() if y.one_shot and \"arg\" not in x and \"mal\" not in x\n )\n functions = [\n ArtificialFunction(name, block_dimension=2, num_blocks=1, useless_variables=20) for name in names\n ]\n for func in functions:\n for optim in optims:\n for budget in [30, 60, 100]:\n yield Experiment(func, optim, budget=budget, num_workers=1, seed=next(seedg))\n\n\[email protected]\ndef oneshot1(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n \"\"\"Comparing one-shot optimizers as initializers for Bayesian Optimization.\"\"\"\n seedg = create_seed_generator(seed)\n for budget in [25, 31, 37, 43, 50, 60]: # , 4000, 8000, 16000, 32000]:\n for optim in sorted(x for x, y in optimizers.registry.items() if \"BO\" in x):\n for rotation in [False]:\n for d in [20]:\n for name in [\"sphere\", \"cigar\", \"hm\", \"ellipsoid\"]: # , \"hm\"]:\n for u in [0]:\n function = ArtificialFunction(\n name=name,\n rotation=rotation,\n block_dimension=d,\n useless_variables=d * u,\n translation_factor=1.0,\n )\n yield Experiment(function, optim, budget=budget, seed=next(seedg))\n\n\[email protected]\ndef metanoise(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:\n seedg = create_seed_generator(seed)\n optims = [\"NoisyBandit\", \"TBPSA\", \"NaiveTBPSA\"]\n for budget in [15, 31, 62, 125, 250, 500, 1000, 2000, 4000, 8000]:\n for optim in optims:\n for noise_dissymmetry in [False, True]:\n function = ArtificialFunction(\n name=\"sphere\",\n rotation=True,\n block_dimension=1,\n noise_level=10,\n noise_dissymmetry=noise_dissymmetry,\n translation_factor=10.0,\n )\n yield Experiment(function, optim, budget=budget, seed=next(seedg))\n"
] | [
[
"numpy.asarray",
"numpy.ones",
"numpy.tan",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.random.seed",
"numpy.ones"
],
[
"numpy.array"
],
[
"numpy.concatenate",
"numpy.roll",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
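The Crossover layer in the first code entry above merges two parent arrays by copying a randomly placed block from the second parent into the first (see _make_slices and Crossover._apply_array). A minimal standalone sketch of that idea, assuming square blocks and parents with at least two elements per axis; slice_crossover is an illustrative name, not part of the library:

import numpy as np

def slice_crossover(a, b, rng):
    # Copy a random hyper-rectangle from parent b into a copy of parent a,
    # mirroring the slice-based merge performed by Crossover._apply_array.
    assert a.shape == b.shape
    size = rng.randint(1, min(a.shape))                       # block side length
    slices = tuple(slice(start, start + size)
                   for start in (rng.randint(s - size + 1) for s in a.shape))
    child = np.array(a, copy=True)
    child[slices] = b[slices]                                 # block taken from the second parent
    return child

rng = np.random.RandomState(0)
print(slice_crossover(np.zeros((4, 4)), np.ones((4, 4)), rng))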
pplonski/automlbenchmark | [
"f49ddfa2583643173296ed8ab45a8c14c62a6987"
] | [
"reports/report/visualizations/linplot.py"
] | [
"import matplotlib as mp\nimport pandas as pd\nimport seaborn as sb\n\nimport report.config as config\nfrom ..util import create_file, sort_dataframe\nfrom .util import savefig, set_scales, set_labels, task_labels\n\n\ndef draw_parallel_coord(df, class_column,\n x_labels=True, yscale='linear',\n title=None, xlabel=None, ylabel=None,\n legend_loc='best', legend_title=None, colormap=None):\n colormap = config.colormap if colormap is None else colormap\n with sb.axes_style('ticks', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):\n # print(sb.axes_style())\n parallel_fig = mp.pyplot.figure(dpi=120, figsize=(10, df.shape[0]))\n # select the first colors from the colormap to ensure we use the same colors as in the stripplot later\n colors = mp.cm.get_cmap(colormap).colors[:len(df[class_column].unique())]\n axes = pd.plotting.parallel_coordinates(df,\n class_column=class_column,\n color=colors,\n axvlines=False,\n )\n set_scales(axes, yscale=yscale)\n handles, labels = axes.get_legend_handles_labels()\n axes.legend(handles, labels, loc=legend_loc, title=legend_title)\n set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel, x_labels=x_labels,\n x_tick_params=dict(labelrotation=90))\n return parallel_fig\n\n\ndef draw_score_parallel_coord(col, results, type_filter='all', metadata=None,\n x_sort_by='name', ylabel=None, filename=None,\n **kwargs):\n res_group = results.groupby(['type', 'task', 'framework'])\n df = res_group[col].mean().unstack(['type', 'task'])\n df = df if type_filter == 'all' \\\n else df.iloc[:, df.columns.get_loc(type_filter)]\n if metadata:\n sort_by = lambda cols: getattr(metadata[cols[1]], x_sort_by)\n df = sort_dataframe(df, by=sort_by, axis=1)\n df.reset_index(inplace=True)\n fig = draw_parallel_coord(df,\n 'framework',\n x_labels=task_labels(df.columns.drop('framework')),\n # xlabel=\"Task\",\n ylabel=ylabel or \"Score\",\n legend_title=\"Framework\",\n **kwargs)\n if filename:\n savefig(fig, create_file(\"graphics\", config.results_group, filename))\n return fig\n"
] | [
[
"matplotlib.cm.get_cmap",
"pandas.plotting.parallel_coordinates",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
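The linplot.py entry above centers on pandas.plotting.parallel_coordinates; a minimal sketch of that call with an illustrative two-framework score table (the column names here are stand-ins, not the benchmark's real columns):

import matplotlib.pyplot as plt
import pandas as pd

# Small stand-in for the per-task score table built in draw_score_parallel_coord.
df = pd.DataFrame({
    "framework": ["A", "B", "A", "B"],
    "task1": [0.91, 0.85, 0.88, 0.80],
    "task2": [0.75, 0.79, 0.72, 0.81],
})
ax = pd.plotting.parallel_coordinates(df, class_column="framework", axvlines=False)
ax.set_ylabel("Score")
plt.show()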
semeniuta/pdata | [
"5eb6ece8e2fb1856bc87ed76290240cd901f7654"
] | [
"pdata/dirstructure.py"
] | [
"import os\nfrom glob import glob\nimport pandas as pd\n\n\ndef get_list_of_full_child_dirs(d):\n \"\"\"\n For a directory d (full path), \n return a list of its subdirectories \n in a full path form.\n \"\"\"\n\n children = (os.path.join(d, child) for child in os.listdir(d))\n dirs = filter(os.path.isdir, children)\n\n return list(dirs)\n\n\ndef split_full_path(full_path, base_dir):\n \"\"\"\n Given a full path, return:\n \n - relative_dir: the part of the path that does not \n include the base directory and the basename\n - basename\n \"\"\"\n\n fname = os.path.basename(full_path)\n\n relative_path = full_path.split(base_dir)[-1]\n relative_dir = relative_path.split(fname)[0]\n relative_dir = relative_dir[1:-1] # clip slashes\n\n return relative_dir, fname\n\n\ndef gather_files(base_dir, file_mask):\n \"\"\"\n Walk the directory base_dir using os.walk\n and gather files that match file_mask (e.g. '*.jpg'). \n Return the result as a Pandas dataframe with columns \n 'relative_dir' and 'basename'.\n \"\"\"\n\n res_tuples = []\n\n for dir_name, subdirs, files in os.walk(base_dir):\n\n dir_has_files = len(files) > 0\n\n if dir_has_files:\n\n full_mask = os.path.join(dir_name, file_mask)\n mask_matches = glob(full_mask)\n\n res_tuples += [split_full_path(f, base_dir) for f in mask_matches]\n\n return pd.DataFrame(res_tuples, columns=['relative_dir', 'basename'])\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
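A short usage sketch for the gather_files helper defined in pdata/dirstructure.py above; the base directory and file mask are placeholders:

from pdata.dirstructure import gather_files

# Walk a (hypothetical) image tree and collect all JPEGs into a DataFrame
# with 'relative_dir' and 'basename' columns, as described in the docstring.
df = gather_files("/data/images", "*.jpg")
print(df.head())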
wjmaddox/pytorch_ess | [
"8e189666ce7381cf760666464384c634abbc4be2"
] | [
"pytorch_ess/mean_elliptical_slice.py"
] | [
"import torch\n\nfrom .elliptical_slice import EllipticalSliceSampler\n\n\nclass MeanEllipticalSliceSampler(EllipticalSliceSampler):\n def __init__(self, f_init, dist, lnpdf, nsamples, pdf_params=()):\n \"\"\"\n Implementation of elliptical slice sampling (Murray, Adams, & Mckay, 2010).\n f_init: initial value of `f`\n dist: multivariate normal to sample from to sample from\n lnpdf: likelihood function\n n_samples: number of samples\n pdf_params: callable arguments for lnpdf\n \"\"\"\n mean_vector = dist.mean\n\n demeaned_lnpdf = lambda g: lnpdf(g + mean_vector, *pdf_params)\n\n demeaned_init = f_init - mean_vector\n\n samples = dist.sample(sample_shape = torch.Size((nsamples,))).transpose(-1, -2)\n demeaned_samples = samples - mean_vector.unsqueeze(1)\n\n super(MeanEllipticalSliceSampler, self).__init__(demeaned_init, demeaned_samples, demeaned_lnpdf, nsamples, pdf_params=())\n\n self.mean_vector = mean_vector\n\n def run(self):\n self.f_sampled, self.ell = super().run()\n\n #add means back into f_sampled\n self.f_sampled = self.f_sampled + self.mean_vector.unsqueeze(1)\n\n return self.f_sampled, self.ell"
] | [
[
"torch.Size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
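The MeanEllipticalSliceSampler entry above reduces the non-zero-mean case to the zero-mean one by shifting the initial point, the prior samples, and the log-likelihood. A sketch of just that de-meaning step with a placeholder likelihood (the base EllipticalSliceSampler is not shown here, so only the transformation is illustrated):

import torch

dist = torch.distributions.MultivariateNormal(torch.ones(3), torch.eye(3))
f_init = torch.zeros(3)
lnpdf = lambda f: -0.5 * (f ** 2).sum()                 # placeholder likelihood

demeaned_lnpdf = lambda g: lnpdf(g + dist.mean)         # evaluate at g + mean
demeaned_init = f_init - dist.mean                      # sample around zero instead
samples = dist.sample(sample_shape=torch.Size((10,))).transpose(-1, -2)
demeaned_samples = samples - dist.mean.unsqueeze(1)
print(demeaned_samples.shape)                           # torch.Size([3, 10])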
tbcole/majoranaJJ | [
"dcf31f7786fa0a4874a940b7d8dcdd55f3921a46",
"dcf31f7786fa0a4874a940b7d8dcdd55f3921a46",
"dcf31f7786fa0a4874a940b7d8dcdd55f3921a46",
"dcf31f7786fa0a4874a940b7d8dcdd55f3921a46"
] | [
"demos/sparse_op/wfuncs/H0/donut.py",
"lattice/nbrs.py",
"demos/dense_op/bands/HBDG/square.py",
"nodular_JJ/finite_sc/phase_diagrams/fxd_gam_gap.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse.linalg as spLA\n\nimport majoranaJJ.operators.sparse.qmsops as spop #sparse operators\nimport majoranaJJ.lattice.nbrs as nb #neighbor arrays\nimport majoranaJJ.lattice.shapes as shps #lattice shapes\nimport majoranaJJ.modules.plots as plots #plotting functions\n\nR = 50\nr = 15\nax = 10 #[A]\nay = 10 #[A]\n\ncoor = shps.donut(R, r)\nNN = nb.NN_Arr(coor)\nprint(\"lattice size\", coor.shape[0])\n\nalpha = 0 #Spin-Orbit Coupling constant: [eV*A]\ngammaz = 0 #Zeeman field energy contribution: [T]\ndelta = 0 #Superconducting Gap: [eV]\nV0 = 0.0 #Amplitude of potential : [eV]\nmu = 0 #Chemical Potential: [eV]\n\nH = spop.H0(coor, ax, ay, NN)\nprint(\"H shape: \", H.shape)\n\nnum = 75 # This is the number of eigenvalues and eigenvectors you want\nsigma = 0 # This is the eigenvalue we search around\nwhich = 'LM'\neigs, vecs = spLA.eigsh(H, k = num, sigma = sigma, which = which)\n\nplots.state_cmap(coor, eigs, vecs, n = 0, title = 'SPARSE Free Particle Ground State')\nn = 39\nplots.state_cmap(coor, eigs, vecs, n = n, title = 'SPARSE: Excited State # {}'.format(n))\n",
"from numpy import ones\n\n\"\"\" Neighbor Arrays:\nThese neighbor arrays are implemented in such a way as to avoid double looping. This saves a significant ammount of time in large unit cells, as can be tested in the majoranaJJ/time_tsts/[bound_arr, nbr_arr]\n\nDefining nearest neighbor array\nNN_arr is Nx4, the columns store the index of the 4 nearest neighbors for each\nlattice site\nLeft: NN[n,0] = n-1\nAbove: NN[n,1] = n+Nx\nRight: NN[n, 2] = n+1\nDown NN[n, 3] = n-Nx\nif there is no lattice site in nearest neighbor spot, value is -1\n\"\"\"\ndef NN_Arr(coor):\n N = coor.shape[0]\n NN = -1*ones((N,4), dtype = 'int')\n xmax = max(coor[:, 0])\n ymax = max(coor[:, 1])\n Lx = int(xmax + 1)\n Ly = int(ymax + 1)\n\n for i in range(N):\n xi = coor[i, 0]\n yi = coor[i, 1]\n\n if (i-1) >= 0:\n if (xi - coor[i-1, 0]) == 1 and (yi - coor[i-1, 1]) == 0:\n NN[i, 0] = i-1\n if (i+1) < N:\n if (xi - coor[i+1, 0]) == -1 and (yi - coor[i+1, 1]) == 0:\n NN[i, 2] = i+1\n for j in range(0, Lx+1):\n if (i+j) < N:\n if (yi - coor[i+j, 1]) == -1 and (xi - coor[i+j, 0]) == 0:\n NN[i, 1] = i+j\n if (i-j) >= 0:\n if (yi - coor[i-j, 1]) == 1 and (xi - coor[i-j, 0]) == 0:\n NN[i, 3]= i-j\n return NN\n\ndef NN_sqr(coor):\n N = coor.shape[0]\n NN = -1*ones((N,4), dtype = 'int')\n xmax = max(coor[:, 0])\n ymax = max(coor[:, 1])\n Lx = int(xmax + 1)\n Ly = int(ymax + 1)\n\n for i in range(N):\n xi = coor[i, 0]\n yi = coor[i, 1]\n\n if (i-1) >= 0 and (xi - coor[i-1, 0]) == 1:\n NN[i, 0] = i-1\n if (i+Lx) < N and (yi - coor[i+Lx, 1]) == -1:\n NN[i, 1] = i+Lx\n if (i+1) < N and (xi - coor[i+1, 0]) == -1:\n NN[i, 2] = i+1\n if (i-Lx) >= 0 and (yi - coor[i-Lx, 1]) == 1:\n NN[i, 3] = i-Lx\n\n return NN\n\n\"\"\" Periodic Boundary conditions\nif statements:\nif the x-coordinate of the ith lattice site is the minimum value, it must be on the edge of the unit cell and therefore has a nearest neighbor in the neighboring unit cell to the left which is equivalent to the right most site of the same y-value.\nEx: To find the lattice site that corresponds to the neighbor to the left in the neighboring unit cell, we know it will be at most the (i + xmax)th site. If we are given a perfect square, it is the (i+ xmax)th site. In the case of the donut, this is not the case, so we until we find the site that is at the same height as the ith site, and has an x-coordinate that is the maximum value. The other statements follow similar logic for other neighbors.\n\"\"\"\ndef Bound_Arr(coor):\n xmin = int(min(coor[:, 0]))\n ymin = int(min(coor[:, 1]))\n xmax = int(max(coor[:, 0]))\n ymax = int(max(coor[:, 1]))\n\n N = coor.shape[0]\n NNb = -1*ones((N,4), dtype = 'int') #stores the values of the coordinates of each periodic neighbor, -1 means no neighbor\n\n for i in range(N):\n x_index = coor[i, 0]\n y_index = coor[i, 1]\n if x_index == xmin:\n for j in range(i, N):\n y = coor[j, 1]\n x = coor[j, 0]\n if y == y_index and x == xmax:\n NNb[i, 0] = j\n break\n if y_index == ymax:\n for j in range(0, int(coor[i, 0]) + 1):\n x = coor[j, 0]\n y = coor[j, 1]\n if x == x_index and y == ymin:\n NNb[i, 1] = j\n break\n if x_index == xmax:\n for j in range(i, -1, -1):\n x = coor[j, 0]\n y = coor[j, 1]\n if y == y_index and x == xmin:\n NNb[i, 2] = j\n break\n if y_index == ymin:\n for j in range(N-1, int(coor[i, 0]), -1):\n x = coor[j, 0]\n y = coor[j, 1]\n if x == x_index and y == ymax:\n NNb[i, 3] = j\n break\n return NNb\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg as LA\n\nimport modules.constants as const\nimport modules.lattice as lat\nimport modules.operators as op\nimport modules.alt_mod.altoperators as aop\n\nax = 2 #atomic spacing along x-direction in [A]\nay = 2 #atomic spacing along y-direction in [A]\n\nNx = 3 #number of lattice sites in x direction\nNy = 3 #number of lattice sites in y direction\nN = Ny*Nx #Total number of lattice sites\n\ncoor = lat.square(Nx, Ny) #square coordinate array\nNN = lat.NN_Arr(coor) #nearest neighbor array of square lattice\nNNb = lat.NN_Bound(NN, coor) #periodic NN array\n\nLx = (max(coor[:, 0]) - min(coor[:, 0]) + 1)*ax #Unit cell size in x-direction\nLy = (max(coor[:, 1]) - min(coor[:, 1]) + 1)*ay #Unit cell size in y-direction\n\nH_SizeTest = op.HBDG(coor, ax, ay, Wsc, Wj)\n\nprint(\"Number of Lattice Sites = \", N)\nprint('Size of BDG Hamiltonian = {}'.format(np.shape(H_SizeTest)))\nprint(\"Unit cell size in x-direction = {} [A] = {} [m]\".format(Lx, Lx*1e-10))\nprint(\"Unit cell size in y-direction = {} [A] = {} [m]\".format(Ly, Ly*1e-10))\n\n#Method paramters\n\"\"\" HBDG(coor, ax, ay,\n potential = 0,\n gammax = 0, gammay = 0, gammaz = 0,\n alpha = 0, qx = 0, qy = 0,\n periodic = 'yes'\n ):\n\"\"\"\n\"\"\"V_periodic(V0, Nx, Ny, coor)\"\"\"\n\na = 0.2 #[eV*A]\ngamma = 0.3 #[T]\ndelta = 0.1\nV0 = 0.0\nmu = 0.0\n\nsteps = 50\nnbands = 5\nqx = np.linspace(-np.pi/Lx, np.pi/Lx, steps)\nqy = np.linspace(-np.pi/Ly, np.pi/Ly, steps)\nV = op.V_periodic(V0, coor)\neigarr = np.zeros((steps, 2*nbands))\n\nfor i in range(steps):\n eigarr[i, :] = np.sort( LA.eigh(op.HBDG(coor, ax, ay, mu = mu, delta = delta, alpha = a, gammaz = gamma,\n potential = V, qx = qx[i]))[0] )[2*N - nbands: 2*N + nbands]\n\nop.bands(eigarr, qx, Lx, Ly, title = 'Superconducting Spectrum'.format(a, gamma, V0))\n",
"import sys\nimport time\nimport os\nimport gc\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy.signal import argrelextrema\nimport scipy.linalg as LA\nimport scipy.sparse.linalg as spLA\n\nimport majoranaJJ.operators.sparse_operators as spop #sparse operators\nfrom majoranaJJ.operators.potentials import Vjj #potential JJ\nimport majoranaJJ.lattice.nbrs as nb #neighbor arrays\nimport majoranaJJ.lattice.shapes as shps #lattice shapes\nimport majoranaJJ.modules.plots as plots #plotting functions\nimport majoranaJJ.modules.gamfinder as gamfinder\nfrom majoranaJJ.modules.checkers import boundary_check as bc\nimport majoranaJJ.modules.checkers as check\n###################################################\n#Defining System\nNx = 3 #Number of lattice sites along x-direction\nNy = 360 #Number of lattice sites along y-direction\nax = 50 #lattice spacing in x-direction: [A]\nay = 50 #lattice spacing in y-direction: [A]\nWj = 10 #Junction region\ncutx = 0 #width of nodule\ncuty = 0 #height of nodule\nNx, Ny, cutx, cuty, Wj = check.junction_geometry_check(Nx, Ny, cutx, cuty, Wj)\nprint(\"Nx = {}, Ny = {}, cutx = {}, cuty = {}, Wj = {}\".format(Nx, Ny, cutx, cuty, Wj))\n\nJunc_width = Wj*ay*.10 #nm\nSC_width = ((Ny - Wj)*ay*.10)/2 #nm\nNod_widthx = cutx*ax*.1 #nm\nNod_widthy = cuty*ay*.1 #nm\nprint(\"Nodule Width in x-direction = \", Nod_widthx, \"(nm)\")\nprint(\"Nodule Width in y-direction = \", Nod_widthy, \"(nm)\")\nprint(\"Junction Width = \", Junc_width, \"(nm)\")\nprint(\"Supercondicting Lead Width = \", SC_width, \"(nm)\")\n###################################################\ncoor = shps.square(Nx, Ny) #square lattice\nNN = nb.NN_sqr(coor)\nNNb = nb.Bound_Arr(coor)\nlat_size = coor.shape[0]\nprint(\"Lattice Size: \", lat_size)\n\nLx = (max(coor[:, 0]) - min(coor[:, 0]) + 1)*ax #Unit cell size in x-direction\nLy = (max(coor[:, 1]) - min(coor[:, 1]) + 1)*ay #Unit cell size in y-direction\n###################################################\n#Defining Hamiltonian parameters\ngamx = 5\nalpha = 300 #Spin-Orbit Coupling constant: [meV*A]\nphi = np.pi #SC phase difference\ndelta = 1 #Superconducting Gap: [meV]\nVsc = 0 #SC potential: [meV]\nVj = 0 #Junction potential: [meV]\nV = Vjj(coor, Wj = Wj, Vsc = Vsc, Vj = Vj, cutx = cutx, cuty = cuty)\n\nmu_i = 0\nmu_f = 50\nres = 1\nmu_steps = int((mu_f-mu_i)/res)\nmu = np.linspace(mu_i, mu_f, mu_steps)\n\nq_steps = 500\nqx = np.linspace(0, np.pi/Lx, q_steps) #kx in the first Brillouin zone\n\nk = 4\nLE_Bands = np.zeros((qx.shape[0], mu.shape[0]))\n###################################################\ndirS = 'gap_data'\nif not os.path.exists(dirS):\n os.makedirs(dirS)\ntry:\n PLOT = str(sys.argv[1])\nexcept:\n PLOT = 'F'\nif PLOT != 'P':\n for i in range(q_steps):\n for j in range(mu.shape[0]):\n print(q_steps-i, mu.shape[0]-j)\n H = spop.HBDG(coor, ax, ay, NN, NNb=NNb, Wj=Wj, cutx=cutx, cuty=cuty, V=V, mu=mu[j], alpha=alpha, delta=delta, phi=phi, gamx=gamx, qx=qx[i]) #gives low energy basis\n eigs, vecs = spLA.eigsh(H, k=k, sigma=0, which='LM')\n idx_sort = np.argsort(eigs)\n eigs = eigs[idx_sort]\n LE_Bands[i, j] = eigs[int(k/2)]\n\n gap = np.zeros((mu.shape[0]))\n q_minima = []\n for i in range(LE_Bands.shape[1]):\n eig_min_idx = np.array(argrelextrema(LE_Bands[:, i], np.less)[0])\n q_minima.append(qx[eig_min_idx])\n gap[i] = min(LE_Bands[:, i])\n\n q_minima = np.array(q_minima)\n print(gap)\n np.save(\"%s/gap Lx = %.1f Ly = %.1f Wsc = %.1f Wj = %.1f nodx = %.1f nody = %.1f Vj = %.1f Vsc = %.1f alpha = %.1f 
delta = %.2f phi = %.3f.npy\" % (dirS, Lx*.1, Ly*.1, SC_width, Junc_width, Nod_widthx, Nod_widthy, Vj, Vsc, alpha, delta, phi), gap)\n gc.collect()\n\n sys.exit()\nelse:\n gap = np.load(\"%s/gap Lx = %.1f Ly = %.1f Wsc = %.1f Wj = %.1f nodx = %.1f nody = %.1f Vj = %.1f Vsc = %.1f alpha = %.1f delta = %.2f phi = %.3f.npy\" % (dirS, Lx*.1, Ly*.1, SC_width, Junc_width, Nod_widthx, Nod_widthy, Vj, Vsc, alpha, delta, phi))\n #q_minima = np.load(\"%s/q_minima Lx = %.1f Ly = %.1f Wsc = %.1f Wj = %.1f nodx = %.1f nody = %.1f Vj = %.1f Vsc = %.1f alpha = %.1f delta = %.2f phi = %.3f.npy\" % (dirS, Lx*.1, Ly*.1, SC_width, Junc_width, Nod_widthx, Nod_widthy, Vj, Vsc, alpha, delta, phi))\n\n gap = gap/delta\n\n plt.plot(mu, gap)\n\n plt.xlabel(r'$\\mu$ (meV)')\n plt.ylabel(r'$E_{gap}/\\Delta$ (meV)')\n plt.xlim(mu_i, mu_f)\n title = r\"$\\Gamma$ = %.1f $L_x$ = %.1f nm, $L_y$ = %.1f nm, $W_{sc}$ = %.1f nm, $W_j$ = %.1f nm, $nodule_x$ = %.1f nm, $nodule_y$ = %.1f nm, $V_j$ = %.1f meV, $V_{SC}$ = %.1f meV, $\\phi$ = %.2f \" % (gamx, Lx*.1, Ly*.1, SC_width, Junc_width, Nod_widthx, Nod_widthy, Vj, Vsc, phi)\n #title = r\"$L_x =$ {} nm, $L_y =$ {} nm, SC width = {} nm, $W_j =$ {} nm, $nodule_x = ${} nm, $nodule_y = ${} nm, $\\alpha = $ {} meV*A, $\\phi =$ {} \".format(Lx*.1, Ly*.1, SC_width, Junc_width, Nod_widthx, Nod_widthy, alpha, phi)\n plt.title(title, loc = 'center', wrap = True)\n plt.subplots_adjust(top=0.85)\n plt.savefig('gap juncwidth = {} SCwidth = {} nodwidthx = {} nodwidthy = {} phi = {} Vj = {} Vsc = {}.png'.format(Junc_width, SC_width, Nod_widthx, Nod_widthy, delta, alpha, phi, Vj, Vsc))\n plt.show()\n\n sys.exit()\n"
] | [
[
"scipy.sparse.linalg.eigsh"
],
[
"numpy.ones"
],
[
"numpy.shape",
"numpy.zeros",
"numpy.linspace"
],
[
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.save",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"scipy.signal.argrelextrema",
"matplotlib.pyplot.subplots_adjust",
"numpy.argsort",
"numpy.load",
"numpy.array",
"numpy.zeros",
"scipy.sparse.linalg.eigsh",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
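The majoranaJJ scripts above repeatedly call scipy.sparse.linalg.eigsh with sigma=0 to extract the eigenvalues closest to zero energy via shift-invert. A minimal sketch of that call pattern on a stand-in sparse matrix (not the actual BdG Hamiltonian built by spop.HBDG):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spLA

# Placeholder Hermitian operator standing in for the Hamiltonian.
H = sp.diags(np.linspace(-1.0, 1.0, 200)).tocsc()
eigs, vecs = spLA.eigsh(H, k=4, sigma=0, which='LM')   # 4 eigenvalues nearest 0
print(np.sort(eigs))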
Grusinator/BirdClassification | [
"c78ca3dbf70c2509c79ca4641102a2d725084d2a"
] | [
"lib/utils/SegDataGenerator.py"
] | [
"from keras.preprocessing.image import *\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras import backend as K\nfrom PIL import Image\nimport numpy as np\nimport os\n#import cv2\n\n\ndef center_crop(x, center_crop_size, data_format, **kwargs):\n if data_format == 'channels_first':\n centerh, centerw = x.shape[1] // 2, x.shape[2] // 2\n elif data_format == 'channels_last':\n centerh, centerw = x.shape[0] // 2, x.shape[1] // 2\n lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2\n rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw\n\n h_start, h_end = centerh - lh, centerh + rh\n w_start, w_end = centerw - lw, centerw + rw\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :]\n\n\ndef pair_center_crop(x, y, center_crop_size, data_format, **kwargs):\n if data_format == 'channels_first':\n centerh, centerw = x.shape[1] // 2, x.shape[2] // 2\n elif data_format == 'channels_last':\n centerh, centerw = x.shape[0] // 2, x.shape[1] // 2\n lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2\n rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw\n\n h_start, h_end = centerh - lh, centerh + rh\n w_start, w_end = centerw - lw, centerw + rw\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end], \\\n y[:, h_start:h_end, w_start:w_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :], \\\n y[h_start:h_end, w_start:w_end, :]\n\n\ndef random_crop(x, random_crop_size, data_format, sync_seed=None, **kwargs):\n np.random.seed(sync_seed)\n if data_format == 'channels_first':\n h, w = x.shape[1], x.shape[2]\n elif data_format == 'channels_last':\n h, w = x.shape[0], x.shape[1]\n rangeh = (h - random_crop_size[0]) // 2\n rangew = (w - random_crop_size[1]) // 2\n offseth = 0 if rangeh == 0 else np.random.randint(rangeh)\n offsetw = 0 if rangew == 0 else np.random.randint(rangew)\n\n h_start, h_end = offseth, offseth + random_crop_size[0]\n w_start, w_end = offsetw, offsetw + random_crop_size[1]\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :]\n\n\ndef pair_random_crop(x, y, random_crop_size, data_format, sync_seed=None, **kwargs):\n np.random.seed(sync_seed)\n if data_format == 'channels_first':\n h, w = x.shape[1], x.shape[2]\n elif data_format == 'channels_last':\n h, w = x.shape[0], x.shape[1]\n rangeh = (h - random_crop_size[0]) // 2\n rangew = (w - random_crop_size[1]) // 2\n offseth = 0 if rangeh == 0 else np.random.randint(rangeh)\n offsetw = 0 if rangew == 0 else np.random.randint(rangew)\n\n h_start, h_end = offseth, offseth + random_crop_size[0]\n w_start, w_end = offsetw, offsetw + random_crop_size[1]\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end], y[:, h_start:h_end, h_start:h_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :], y[h_start:h_end, w_start:w_end, :]\n\n\nclass SegDirectoryIterator(Iterator):\n '''\n Users need to ensure that all files exist.\n Label images should be png images where pixel values represents class number.\n\n find images -name *.jpg > images.txt\n find labels -name *.png > labels.txt\n\n for a file name 2011_002920.jpg, each row should contain 2011_002920\n\n file_path: location of train.txt, or val.txt in PASCAL VOC2012 format,\n listing image file path 
components without extension\n data_dir: location of image files referred to by file in file_path\n label_dir: location of label files\n data_suffix: image file extension, such as `.jpg` or `.png`\n label_suffix: label file suffix, such as `.png`, or `.npy`\n loss_shape: shape to use when applying loss function to the label data\n '''\n\n def __init__(self, file_path, seg_data_generator,\n data_dir, data_suffix,\n label_dir, label_suffix, classes, ignore_label=255,\n crop_mode='none', label_cval=255, pad_size=None,\n target_size=None, color_mode='rgb',\n data_format='default', class_mode='sparse',\n batch_size=1, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='jpeg',\n loss_shape=None):\n if data_format == 'default':\n data_format = K.image_data_format()\n self.file_path = file_path\n self.data_dir = data_dir\n self.data_suffix = data_suffix\n self.label_suffix = label_suffix\n self.label_dir = label_dir\n self.classes = classes\n self.seg_data_generator = seg_data_generator\n self.target_size = tuple(target_size)\n self.ignore_label = ignore_label\n self.crop_mode = crop_mode\n self.label_cval = label_cval\n self.pad_size = pad_size\n if color_mode not in {'rgb', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\" or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n self.nb_label_ch = 1\n self.loss_shape = loss_shape\n\n if (self.label_suffix == '.npy') or (self.label_suffix == 'npy'):\n self.label_file_format = 'npy'\n else:\n self.label_file_format = 'img'\n if target_size:\n if self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n if self.data_format == 'channels_last':\n self.label_shape = self.target_size + (self.nb_label_ch,)\n else:\n self.label_shape = (self.nb_label_ch,) + self.target_size\n elif batch_size != 1:\n raise ValueError(\n 'Batch size must be 1 when target image size is undetermined')\n else:\n self.image_shape = None\n self.label_shape = None\n if class_mode not in {'sparse', None}:\n raise ValueError('Invalid class_mode:', class_mode,\n '; expected one of '\n '\"sparse\", or None.')\n self.class_mode = class_mode\n if save_to_dir:\n self.palette = None\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n\n white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'npy'}\n\n # build lists for data files and label files\n self.data_files = []\n self.label_files = []\n fp = open(file_path)\n lines = fp.readlines()\n fp.close()\n self.nb_sample = len(lines)\n for line in lines:\n line = line.strip('\\n')\n self.data_files.append(line + data_suffix)\n self.label_files.append(line + label_suffix)\n super(SegDirectoryIterator, self).__init__(\n self.nb_sample, batch_size, shuffle, seed)\n\n def next(self):\n with self.lock:\n index_array, current_index, current_batch_size = next(\n self.index_generator)\n\n # The transformation of images is not under thread lock so it can be\n # done in parallel\n if self.target_size:\n # TODO(ahundt) make dtype properly configurable\n batch_x = np.zeros((current_batch_size,) + self.image_shape)\n if self.loss_shape is None and self.label_file_format is 'img':\n batch_y = np.zeros((current_batch_size,) + self.label_shape,\n dtype=int)\n elif 
self.loss_shape is None:\n batch_y = np.zeros((current_batch_size,) + self.label_shape)\n else:\n batch_y = np.zeros((current_batch_size,) + self.loss_shape,\n dtype=np.uint8)\n grayscale = self.color_mode == 'grayscale'\n # build batch of image data and labels\n for i, j in enumerate(index_array):\n data_file = self.data_files[j]\n label_file = self.label_files[j]\n img_file_format = 'img'\n img = load_img(os.path.join(self.data_dir, data_file),\n grayscale=grayscale, target_size=None)\n label_filepath = os.path.join(self.label_dir, label_file)\n\n if self.label_file_format == 'npy':\n y = np.load(label_filepath)\n else:\n label = Image.open(label_filepath)\n if self.save_to_dir and self.palette is None:\n self.palette = label.palette\n\n # do padding\n if self.target_size:\n if self.crop_mode != 'none':\n x = img_to_array(img, data_format=self.data_format)\n if self.label_file_format is not 'npy':\n y = img_to_array(\n label, data_format=self.data_format).astype(int)\n img_w, img_h = img.size\n if self.pad_size:\n pad_w = max(self.pad_size[1] - img_w, 0)\n pad_h = max(self.pad_size[0] - img_h, 0)\n else:\n pad_w = max(self.target_size[1] - img_w, 0)\n pad_h = max(self.target_size[0] - img_h, 0)\n if self.data_format == 'channels_first':\n x = np.lib.pad(x, ((0, 0), (pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2)), 'constant', constant_values=0.)\n y = np.lib.pad(y, ((0, 0), (pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2)),\n 'constant', constant_values=self.label_cval)\n elif self.data_format == 'channels_last':\n x = np.lib.pad(x, ((pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2), (0, 0)), 'constant', constant_values=0.)\n y = np.lib.pad(y, ((pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2), (0, 0)), 'constant', constant_values=self.label_cval)\n else:\n x = img_to_array(img.resize((self.target_size[1], self.target_size[0]),\n Image.BILINEAR),\n data_format=self.data_format)\n if self.label_file_format is not 'npy':\n y = img_to_array(label.resize((self.target_size[1], self.target_size[\n 0]), Image.NEAREST), data_format=self.data_format).astype(int)\n else:\n print('ERROR: resize not implemented for label npy file')\n\n if self.target_size is None:\n batch_x = np.zeros((current_batch_size,) + x.shape)\n if self.loss_shape is not None:\n batch_y = np.zeros((current_batch_size,) + self.loss_shape)\n else:\n batch_y = np.zeros((current_batch_size,) + y.shape)\n\n x, y = self.seg_data_generator.random_transform(x, y)\n x = self.seg_data_generator.standardize(x)\n\n if self.ignore_label:\n y[np.where(y == self.ignore_label)] = self.classes\n\n if self.loss_shape is not None:\n y = np.reshape(y, self.loss_shape)\n\n batch_x[i] = x\n batch_y[i] = y\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i in range(current_batch_size):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n label = batch_y[i][:, :, 0].astype('uint8')\n label[np.where(label == self.classes)] = self.ignore_label\n label = Image.fromarray(label, mode='P')\n label.palette = self.palette\n fname = '{prefix}_{index}_{hash}'.format(prefix=self.save_prefix,\n index=current_index + i,\n hash=np.random.randint(1e4))\n img.save(os.path.join(self.save_to_dir, 'img_' +\n fname + '.{format}'.format(format=self.save_format)))\n label.save(os.path.join(self.save_to_dir,\n 'label_' + fname + '.png'))\n # return\n batch_x = preprocess_input(batch_x)\n if self.class_mode == 'sparse':\n return batch_x, batch_y\n else:\n 
return batch_x\n\n\nclass SegDataGenerator(object):\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n channelwise_center=False,\n rotation_range=0.,\n width_shift_range=0.,\n height_shift_range=0.,\n shear_range=0.,\n zoom_range=0.,\n zoom_maintain_shape=True,\n channel_shift_range=0.,\n fill_mode='constant',\n cval=0.,\n label_cval=255,\n crop_mode='none',\n crop_size=(0, 0),\n pad_size=None,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n data_format='default'):\n if data_format == 'default':\n data_format = K.image_data_format()\n self.__dict__.update(locals())\n self.mean = None\n self.ch_mean = None\n self.std = None\n self.principal_components = None\n self.rescale = rescale\n\n if data_format not in {'channels_last', 'channels_first'}:\n raise Exception('data_format should be channels_last (channel after row and '\n 'column) or channels_first (channel before row and column). '\n 'Received arg: ', data_format)\n if crop_mode not in {'none', 'random', 'center'}:\n raise Exception('crop_mode should be \"none\" or \"random\" or \"center\" '\n 'Received arg: ', crop_mode)\n self.data_format = data_format\n if data_format == 'channels_first':\n self.channel_index = 1\n self.row_index = 2\n self.col_index = 3\n if data_format == 'channels_last':\n self.channel_index = 3\n self.row_index = 1\n self.col_index = 2\n\n if np.isscalar(zoom_range):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif len(zoom_range) == 2:\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n else:\n raise Exception('zoom_range should be a float or '\n 'a tuple or list of two floats. '\n 'Received arg: ', zoom_range)\n\n def flow_from_directory(self, file_path, data_dir, data_suffix,\n label_dir, label_suffix, classes,\n ignore_label=255,\n target_size=None, color_mode='rgb',\n class_mode='sparse',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='jpeg',\n loss_shape=None):\n if self.crop_mode == 'random' or self.crop_mode == 'center':\n target_size = self.crop_size\n return SegDirectoryIterator(\n file_path, self,\n data_dir=data_dir, data_suffix=data_suffix,\n label_dir=label_dir, label_suffix=label_suffix,\n classes=classes, ignore_label=ignore_label,\n crop_mode=self.crop_mode, label_cval=self.label_cval,\n pad_size=self.pad_size,\n target_size=target_size, color_mode=color_mode,\n data_format=self.data_format, class_mode=class_mode,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n save_to_dir=save_to_dir, save_prefix=save_prefix,\n save_format=save_format,\n loss_shape=loss_shape)\n\n def standardize(self, x):\n if self.rescale:\n x *= self.rescale\n # x is a single image, so it doesn't have image number at index 0\n img_channel_index = self.channel_index - 1\n if self.samplewise_center:\n x -= np.mean(x, axis=img_channel_index, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)\n\n if self.featurewise_center:\n x -= self.mean\n if self.featurewise_std_normalization:\n x /= (self.std + 1e-7)\n\n if self.channelwise_center:\n x -= self.ch_mean\n return x\n\n def random_transform(self, x, y):\n # x is a single image, so it doesn't have image number at index 0\n img_row_index = self.row_index - 1\n img_col_index = self.col_index - 1\n img_channel_index = self.channel_index - 1\n if self.crop_mode == 'none':\n crop_size = (x.shape[img_row_index], 
x.shape[img_col_index])\n else:\n crop_size = self.crop_size\n\n assert x.shape[img_row_index] == y.shape[img_row_index] and x.shape[img_col_index] == y.shape[\n img_col_index], 'DATA ERROR: Different shape of data and label!\\ndata shape: %s, label shape: %s' % (str(x.shape), str(y.shape))\n\n # use composition of homographies to generate final transform that\n # needs to be applied\n if self.rotation_range:\n theta = np.pi / 180 * \\\n np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n if self.height_shift_range:\n # * x.shape[img_row_index]\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range) * crop_size[0]\n else:\n tx = 0\n\n if self.width_shift_range:\n # * x.shape[img_col_index]\n ty = np.random.uniform(-self.width_shift_range,\n self.width_shift_range) * crop_size[1]\n else:\n ty = 0\n\n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(\n self.zoom_range[0], self.zoom_range[1], 2)\n if self.zoom_maintain_shape:\n zy = zx\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n\n transform_matrix = np.dot(\n np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)\n\n h, w = x.shape[img_row_index], x.shape[img_col_index]\n transform_matrix = transform_matrix_offset_center(\n transform_matrix, h, w)\n\n x = apply_transform(x, transform_matrix, img_channel_index,\n fill_mode=self.fill_mode, cval=self.cval)\n y = apply_transform(y, transform_matrix, img_channel_index,\n fill_mode='constant', cval=self.label_cval)\n\n if self.channel_shift_range != 0:\n x = random_channel_shift(\n x, self.channel_shift_range, img_channel_index)\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_col_index)\n y = flip_axis(y, img_col_index)\n\n if self.vertical_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_row_index)\n y = flip_axis(y, img_row_index)\n\n if self.crop_mode == 'center':\n x, y = pair_center_crop(x, y, self.crop_size, self.data_format)\n elif self.crop_mode == 'random':\n x, y = pair_random_crop(x, y, self.crop_size, self.data_format)\n\n # TODO:\n # channel-wise normalization\n # barrel/fisheye\n return x, y\n\n def fit(self, X,\n augment=False,\n rounds=1,\n seed=None):\n '''Required for featurewise_center and featurewise_std_normalization\n\n # Arguments\n X: Numpy array, the data to fit on.\n augment: whether to fit on randomly augmented samples\n rounds: if `augment`,\n how many augmentation passes to do over the data\n seed: random seed.\n '''\n X = np.copy(X)\n if augment:\n aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))\n for r in range(rounds):\n for i in range(X.shape[0]):\n aX[i + r * X.shape[0]] = self.random_transform(X[i])\n X = aX\n\n if self.featurewise_center:\n self.mean = np.mean(X, axis=0)\n X -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(X, axis=0)\n X /= (self.std + 1e-7)\n\n def set_ch_mean(self, ch_mean):\n self.ch_mean = ch_mean\n"
] | [
[
"numpy.lib.pad",
"numpy.dot",
"numpy.random.random",
"numpy.random.seed",
"numpy.reshape",
"numpy.load",
"numpy.cos",
"numpy.sin",
"numpy.copy",
"numpy.std",
"numpy.mean",
"numpy.isscalar",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AyishaR/deepC | [
"1dc9707ef5ca9000fc13c3da7f1129685a83b494",
"1dc9707ef5ca9000fc13c3da7f1129685a83b494",
"1dc9707ef5ca9000fc13c3da7f1129685a83b494"
] | [
"test/swig/Less.py",
"test/swig/LogSoftmax.py",
"test/swig/IsInf.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n#\n# This file is part of DNN compiler maintained at\n# https://github.com/ai-techsystems/dnnCompiler\n\nimport common\n\nimport deepC.dnnc as dc\nimport numpy as np\nimport unittest\n\nclass LessTest(unittest.TestCase):\n def setUp(self):\n self.len = 24\n self.np_a = np.random.randn(self.len).astype(np.float32)\n self.np_b = np.random.randn(self.len).astype(np.float32)\n self.dc_a = dc.array(list(self.np_a));\n self.dc_b = dc.array(list(self.np_b));\n\n def test_Less1D (self):\n npr = np.less(self.np_a, self.np_b)\n dcr = dc.less(self.dc_a, self.dc_b)\n np.testing.assert_allclose(npr, np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n\n def test_Less2D (self):\n np_a = np.reshape(self.np_a, (6,4))\n np_b = np.reshape(self.np_b, (6,4))\n dc_a = dc.reshape(self.dc_a, (6,4));\n dc_b = dc.reshape(self.dc_b, (6,4));\n npr = np.less(np_a, np_b);\n dcr = dc.less(dc_a, dc_b);\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n\n def test_Less3D (self):\n np_a = np.reshape(self.np_a, (2,4,3))\n np_b = np.reshape(self.np_b, (2,4,3))\n dc_a = dc.reshape(self.dc_a, (2,4,3));\n dc_b = dc.reshape(self.dc_b, (2,4,3));\n\n npr = np.less(np_a, np_b);\n dcr = dc.less(dc_a, dc_b);\n\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n\n def test_Equal4D (self):\n np_a = np.reshape(self.np_a, (2,2,2,3))\n np_b = np.reshape(self.np_b, (2,2,2,3))\n dc_a = dc.reshape(self.dc_a, (2,2,2,3))\n dc_b = dc.reshape(self.dc_b, (2,2,2,3))\n\n npr = np.less(np_a, np_b)\n dcr = dc.less(dc_a, dc_b)\n\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n \n def tearDown(self):\n return \"test finished\"\n \n\nif __name__ == '__main__':\n unittest.main()\n\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\") you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n#\n# This file is part of DNN compiler maintained at\n# https://github.com/ai-techsystems/dnnCompiler\n\nimport common\n\nimport deepC.dnnc as dc\nimport numpy as np\nimport unittest\n\ndef logsoftmax_2d(x):\n max_x = np.max(x, axis=1).reshape((-1, 1))\n exp_x = np.exp(x - max_x)\n return x - max_x - np.log(np.sum(exp_x, axis=1).reshape((-1, 1)))\n\nclass LogSoftmaxTest(unittest.TestCase):\n def setUp(self):\n self.len = 24\n self.np_a = np.random.randn(self.len).astype(np.float32)\n self.dc_a = dc.array(list(self.np_a))\n self.axis = 1\n self.axis1 = 1\n self.axis2 = 1\n def coerce(self,a):\n for i in range(self.axis):\n self.axis1 *= a.shape[i]\n self.axis2 = a.size // self.axis1;\n\n def test_LogSoftmax1D (self):\n npr = self.np_a - np.log(np.sum(np.exp(self.np_a), axis=0))\n dcr = dc.logsoftmax(self.dc_a,0)\n np.testing.assert_allclose(npr, np.array(dcr.data()).astype(np.float32),\n rtol=1e-3, atol=1e-3)\n\n \n \n def test_LogSoftmax2D (self):\n np_a = np.reshape(self.np_a, (6,4))\n dc_a = dc.reshape(self.dc_a, (6,4))\n self.coerce(np_a)\n np_a = np.reshape(np_a, (self.axis1,self.axis2))\n npr = logsoftmax_2d(np_a)\n \n dcr = dc.logsoftmax(dc_a,self.axis)\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),\n rtol=1e-3, atol=1e-3)\n\n def test_LogSoftmax3D (self):\n np_a = np.reshape(self.np_a, (2,4,3))\n dc_a = dc.reshape(self.dc_a, (2,4,3))\n self.coerce(np_a)\n np_a = np.reshape(np_a, (self.axis1,self.axis2))\n npr = logsoftmax_2d(np_a)\n dcr = dc.logsoftmax(dc_a,self.axis)\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),\n rtol=1e-3, atol=1e-3)\n\n def test_LogSoftmax4D (self):\n np_a = np.reshape(self.np_a, (2,2,2,3))\n dc_a = dc.reshape(self.dc_a, (2,2,2,3))\n self.coerce(np_a)\n np_a = np.reshape(np_a, (self.axis1,self.axis2))\n npr = logsoftmax_2d(np_a)\n dcr = dc.logsoftmax(dc_a,self.axis)\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),\n rtol=1e-3, atol=1e-3)\n\n def tearDown(self):\n return \"test finished\"\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for divitional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\") you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n#\n# This file is part of DNN compiler maintained at\n# https://github.com/ai-techsystems/dnnCompiler\n\nimport common\n\nimport deepC.dnnc as dc\nimport numpy as np\nimport unittest\n\ndef Isinf(np_a,detect_positive,detect_negative):\n if (detect_positive and not(detect_negative) ):\n np_a[np_a<0] = 0\n return np.isinf(np_a)\n elif (detect_negative and not(detect_positive) ):\n np_a[np_a>0] = 0\n return np.isinf(np_a)\n elif ( not(detect_positive) and not(detect_negative) ):\n return np.zeros_like(np_a)\n else:\n return np.isinf(np_a)\n\nclass IsInfTest(unittest.TestCase):\n def setUp(self):\n self.len = 24\n self.np_a = np.random.randn(self.len)\n self.np_a.ravel()[np.random.choice(self.np_a.size, 5, replace=False)] = np.inf\n self.np_a.ravel()[np.random.choice(self.np_a.size, 5, replace=False)] = -np.inf\n self.dc_a = dc.array(list(self.np_a))\n self.detect_positive = 0\n self.detect_negative = 1\n\n def test_IsInf1D (self):\n npr = Isinf(self.np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(self.dc_a,self.detect_positive,self.detect_negative)\n np.testing.assert_array_equal(npr, np.array(dcr.data()))\n\n def test_IsInf2D_1 (self):\n np_a = np.reshape(self.np_a, (6,4))\n dc_a = dc.reshape(self.dc_a, (6,4))\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf2D_2 (self):\n np_a = np.reshape(self.np_a, (3,8))\n dc_a = dc.reshape(self.dc_a, (3,8))\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf2D_3 (self):\n np_a = np.reshape(self.np_a, (12,2))\n dc_a = dc.reshape(self.dc_a, (12,2))\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf3D_1 (self):\n np_a = np.reshape(self.np_a, (2,4,3))\n dc_a = dc.reshape(self.dc_a, (2,4,3))\n\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf3D_2 (self):\n np_a = np.reshape(self.np_a, (2,2,6))\n dc_a = dc.reshape(self.dc_a, (2,2,6))\n\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf3D_3 (self):\n 
np_a = np.reshape(self.np_a, (4,2,3))\n dc_a = dc.reshape(self.dc_a, (4,2,3))\n\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf3D_4 (self):\n np_a = np.reshape(self.np_a, (4,2,3))\n dc_a = dc.reshape(self.dc_a, (4,2,3))\n self.detect_positive = 1\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf3D_5 (self):\n np_a = np.reshape(self.np_a, (4,2,3))\n dc_a = dc.reshape(self.dc_a, (4,2,3))\n self.detect_positive = 1\n self.detect_negative = 0\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf4D_1 (self):\n np_a = np.reshape(self.np_a, (2,2,2,3))\n dc_a = dc.reshape(self.dc_a, (2,2,2,3))\n\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf4D_2 (self):\n np_a = np.reshape(self.np_a, (2,2,1,6))\n dc_a = dc.reshape(self.dc_a, (2,2,1,6))\n\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def test_IsInf4D_3 (self):\n np_a = np.reshape(self.np_a, (2,2,2,3))\n dc_a = dc.reshape(self.dc_a, (2,2,2,3))\n self.detect_positive = 1\n self.detect_negative = 0\n npr = Isinf(np_a,self.detect_positive,self.detect_negative)\n dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)\n\n np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))\n\n def tearDown(self):\n return \"test finished\"\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.reshape",
"numpy.less",
"numpy.random.randn"
],
[
"numpy.reshape",
"numpy.max",
"numpy.random.randn",
"numpy.exp",
"numpy.sum"
],
[
"numpy.random.choice",
"numpy.reshape",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ksboy/superglue | [
"12b5bf6d729ba5b95b8a29682f6bfa584131ae9c"
] | [
"run_classifier.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport random\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nfrom scipy.special import softmax\n# from sklearn.utils.extmath import softmax\n\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom tensorboardX import SummaryWriter\n\nfrom pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.modeling import BertForSequenceClassification\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule\n\nfrom run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics\n\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n \"bert-base-multilingual-cased, bert-base-chinese.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--loss_weight\",\n default=None,\n type=str,\n help=\"The Loss Weight.\")\n parser.add_argument(\"--pop_classifier_layer\",\n action='store_true',\n help=\"pop classifier layer\")\n parser.add_argument(\"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\",\n action='store_true',\n help=\"Whether to run predict on the test set.\") \n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--predict_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for predict.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--overwrite_output_dir',\n action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n args = parser.parse_args()\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n args.device = device\n\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval and not args.do_predict:\n raise ValueError(\"At least one of `do_train`, `do_eval` or `do_predict` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n output_mode = output_modes[task_name]\n\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n print(\"pop_classifier_layer\", args.pop_classifier_layer)\n model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels, pop_classifier_layer=args.pop_classifier_layer)\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n \n print(\"loss_weight\", args.loss_weight)\n\n 
global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n\n if args.do_train:\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n # Prepare data loader\n train_examples = processor.get_train_examples(args.data_dir)\n cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(\n list(filter(None, args.bert_model.split('/'))).pop(),\n str(args.max_seq_length),\n str(task_name)))\n try:\n with open(cached_train_features_file, \"rb\") as reader:\n train_features = pickle.load(reader)\n except:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n logger.info(\" Saving train features into cached file %s\", cached_train_features_file)\n with open(cached_train_features_file, \"wb\") as writer:\n pickle.dump(train_features, writer)\n\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)\n\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, 
desc=\"Iteration\", disable=args.local_rank not in [-1, 0])):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n\n # define a new function to compute loss values for both output_modes\n logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n # print(input_ids)\n # print(logits)\n # print(label_ids)\n\n if output_mode == \"classification\":\n if args.loss_weight == None:\n loss_fct = CrossEntropyLoss()\n else:\n loss_weight= [int(_) for _ in args.loss_weight.split(\",\")]\n loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())\n loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), label_ids.view(-1))\n\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n if args.local_rank in [-1, 0]:\n tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', loss.item(), global_step)\n\n ### Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n ### Example:\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Save a trained model, configuration and tokenizer\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)\n tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n\n # Good practice: save your training arguments together with the trained model\n output_args_file = os.path.join(args.output_dir, 'training_args.bin')\n torch.save(args, output_args_file)\n else:\n model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)\n\n model.to(device)\n\n ### Evaluation\n if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n eval_examples = processor.get_dev_examples(args.data_dir)\n cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(\n list(filter(None, args.bert_model.split('/'))).pop(),\n str(args.max_seq_length),\n str(task_name)))\n try:\n with open(cached_eval_features_file, \"rb\") as reader:\n eval_features = pickle.load(reader)\n except:\n eval_features = 
convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n logger.info(\" Saving eval features into cached file %s\", cached_eval_features_file)\n with open(cached_eval_features_file, \"wb\") as writer:\n pickle.dump(eval_features, writer)\n\n\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)\n\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n if args.local_rank == -1:\n eval_sampler = SequentialSampler(eval_data)\n else:\n eval_sampler = DistributedSampler(eval_data) # Note that this sampler samples randomly\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n out_label_ids = None\n for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n \n with torch.no_grad():\n logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n \n print(logits )\n print(label_ids)\n print(logits.view(-1, num_labels), label_ids.view(-1))\n # create eval loss and other metric required by the task\n if output_mode == \"classification\":\n if args.loss_weight == None:\n loss_fct = CrossEntropyLoss()\n else:\n loss_weight= [int(_) for _ in args.loss_weight.split(\",\")]\n loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())\n tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))\n \n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if len(preds) == 0:\n preds.append(logits.detach().cpu().numpy())\n out_label_ids = label_ids.detach().cpu().numpy()\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(\n out_label_ids, label_ids.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / nb_eval_steps\n preds = preds[0]\n print(preds)\n\n def swap_value(a):\n temp=a[0];a[0]=a[1];a[1]=temp\n if task_name == \"copa\":\n preds = softmax(preds,axis=1)\n print(preds)\n for i in range(int(len(preds)/2)):\n if preds[2*i][0]>=preds[2*i+1][0]:\n if preds[2*i][0]<preds[2*i][1]:\n # print(preds[2*i][0], preds[2*i][1])\n swap_value(preds[2*i])\n # print(preds[2*i][0], preds[2*i][1])\n if preds[2*i+1][0]>preds[2*i+1][1]:\n swap_value(preds[2*i+1])\n else:\n if preds[2*i][0]>preds[2*i][1]:\n swap_value(preds[2*i])\n if preds[2*i+1][0]<preds[2*i+1][1]:\n swap_value(preds[2*i+1])\n print(preds)\n if output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif 
output_mode == \"regression\":\n preds = np.squeeze(preds)\n\n print(preds,out_label_ids)\n result = compute_metrics(task_name, preds, out_label_ids)\n\n loss = tr_loss/global_step if args.do_train else None\n\n result['eval_loss'] = eval_loss\n result['global_step'] = global_step\n result['loss'] = loss\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n \n ### Prediction\n if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n predict_examples = processor.get_test_examples(args.data_dir)\n cached_predict_features_file = os.path.join(args.data_dir, 'predict_{0}_{1}_{2}'.format(\n list(filter(None, args.bert_model.split('/'))).pop(),\n str(args.max_seq_length),\n str(task_name)))\n try:\n with open(cached_predict_features_file, \"rb\") as reader:\n predict_features = pickle.load(reader)\n except:\n predict_features = convert_examples_to_features(\n predict_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n logger.info(\" Saving predict features into cached file %s\", cached_predict_features_file)\n with open(cached_predict_features_file, \"wb\") as writer:\n pickle.dump(predict_features, writer)\n\n\n logger.info(\"***** Running prediction *****\")\n logger.info(\" Num examples = %d\", len(predict_examples))\n logger.info(\" Batch size = %d\", args.predict_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in predict_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in predict_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in predict_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.float)\n\n predict_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n if args.local_rank == -1:\n predict_sampler = SequentialSampler(predict_data)\n else:\n predict_sampler = DistributedSampler(predict_data) # Note that this sampler samples randomly\n predict_dataloader = DataLoader(predict_data, sampler=predict_sampler, batch_size=args.predict_batch_size)\n\n model.eval()\n # predict_loss = 0\n # nb_predict_steps = 0\n preds = []\n out_label_ids = None\n for input_ids, input_mask, segment_ids, label_ids in tqdm(predict_dataloader, desc=\"predicting\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n \n with torch.no_grad():\n logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n \n print(logits )\n print(label_ids)\n # create eval loss and other metric required by the task\n # if output_mode == \"classification\":\n # loss_fct = CrossEntropyLoss()\n # tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n # elif output_mode == \"regression\":\n # loss_fct = MSELoss()\n # tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))\n # \n # eval_loss += tmp_eval_loss.mean().item()\n # nb_predict_steps += 1\n if len(preds) 
== 0:\n preds.append(logits.detach().cpu().numpy())\n # out_label_ids = label_ids.detach().cpu().numpy()\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n # out_label_ids = np.append(\n # out_label_ids, label_ids.detach().cpu().numpy(), axis=0)\n # \n # eval_loss = eval_loss / nb_eval_steps\n\n preds = preds[0]\n print(preds)\n\n if task_name == \"copa\":\n preds = softmax(preds,axis=1)\n print(preds)\n results=[]\n for i in range(int(len(preds)/2)):\n if preds[2*i][0]>=preds[2*i+1][0]:\n results.append(0)\n else:\n results.append(1)\n preds= results\n label_map = {i : i for i in range(2)}\n else:\n if output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif output_mode == \"regression\":\n preds = np.squeeze(preds)\n label_map = {i : label for i, label in enumerate(label_list)}\n\n print(preds)\n\n # result = compute_metrics(task_name, preds, out_label_ids)\n\n # loss = tr_loss/global_step if args.do_train else None\n\n # result['eval_loss'] = eval_loss\n # result['global_step'] = global_step\n # result['loss'] = loss\n\n output_predict_file = os.path.join(args.output_dir, \"predict_results.txt\")\n with open(output_predict_file, \"w\") as writer:\n logger.info(\"***** Predict results *****\")\n for i in range(len(preds)):\n label_i = label_map[preds[i]]\n # json_i= \"\\\"idx: %d, \\\"label\\\": \\\"label_i\\\"\"\n writer.write(\"{\\\"idx\\\": %d, \\\"label\\\": \\\"%s\\\"}\\n\"%(i,label_i))\n # for key in sorted(result.keys()):\n # logger.info(\" %s = %s\", key, str(result[key]))\n # writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n \n \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.squeeze",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"scipy.special.softmax",
"torch.save",
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"torch.distributed.barrier",
"torch.tensor",
"numpy.argmax",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
AghaSaad04/mlops-v2 | [
"d312ae108c93bacfb3541968bb913874af060ab2"
] | [
"sales_forecast/scoring/score.py"
] | [
"import numpy\r\nimport os\r\nimport math\r\nfrom azureml.core.model import Model\r\nfrom azureml.core.dataset import Dataset\r\nfrom inference_schema.schema_decorators \\\r\n import input_schema, output_schema\r\nfrom inference_schema.parameter_types.numpy_parameter_type \\\r\n import NumpyParameterType\r\nimport keras\r\nfrom keras.models import load_model\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom azureml.core.run import Run\r\nfrom azureml.core import Dataset, Datastore, Workspace\r\nimport argparse\r\nimport json\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom azureml.core.authentication import ServicePrincipalAuthentication\r\n# from azureml.core.authentication import InteractiveLoginAuthentication\r\n\r\ndef tts(data):\r\n data['date'] = pd.to_datetime(data['date'])\r\n data['date'] = (data['date'] - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n (train, test) = data[0:-2000].values, data[-2000:].values\r\n return (train, test)\r\n\r\ndef scale_data(train_set, test_set):\r\n # apply Min Max Scaler\r\n scaler = MinMaxScaler(feature_range=(-1, 1))\r\n scaler = scaler.fit(train_set[:, :4])\r\n\r\n # reshape training set\r\n train_set = train_set.reshape(train_set.shape[0], train_set.shape[1])\r\n train_set_scaled = scaler.transform(train_set[:, :4])\r\n\r\n # reshape test set\r\n test_set = test_set.reshape(test_set.shape[0], test_set.shape[1])\r\n test_set_scaled = scaler.transform(test_set[:, :4])\r\n\r\n X_train, y_train = train_set[:, :4], train_set[:, 4:].ravel()\r\n X_test, y_test = test_set[:, :4], test_set[:, 4:].ravel()\r\n\r\n return X_train, y_train, X_test, y_test, scaler\r\n\r\ndef init():\r\n # load the model from file into a global object\r\n global model\r\n\r\n model_path = Model.get_model_path(\r\n os.getenv(\"AZUREML_MODEL_DIR\").split('/')[-2])\r\n\r\n print (\"model path\", model_path)\r\n\r\n # try:\r\n # print (\"try\")\r\n # dataset = pd.read_csv('/var/azureml-app/train.csv')\r\n # original_df = dataset.to_pandas_dataframe()\r\n # except:\r\n # print (\"except\")\r\n # train_dataset = original_df.to_csv('train.csv', index=False) \r\n \r\n # interactive_auth = InteractiveLoginAuthentication(tenant_id=\"def44f5f-0783-4b05-8f2f-dd615c5dfec4\")\r\n # ws = Workspace(subscription_id=\"6542067a-127a-43ff-b7f2-007fe21a37f0\",\r\n # resource_group=\"sales-mlops-rg\",\r\n # workspace_name=\"sales-mlops-ws\",\r\n # auth=interactive_auth)\r\n # ws.get_details()\r\n\r\n \r\n \r\n # print(original_df)\r\n\r\n model = keras.models.load_model(model_path)\r\n print(\"Current directory:\", os.getcwd())\r\n print(\"Model is loaded\")\r\n\r\n# date = '6/25/2020'\r\n# store = 3\r\n# item = 105\r\n# price = 990\r\n# date = pd.to_datetime(date)\r\n# date = (date - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n\r\n# input_sample = numpy.array([[date, store, item, price]])\r\n# output_sample = numpy.array([4])\r\n\r\ninput_sample = numpy.array([[1591833600,34,759,690]])\r\noutput_sample = numpy.array([10])\r\n\r\n@input_schema('data', NumpyParameterType(input_sample))\r\n@output_schema(NumpyParameterType(output_sample))\r\n\r\n\r\ndef run(data, request_headers):\r\n global original_df\r\n sp = ServicePrincipalAuthentication(tenant_id=\"def44f5f-0783-4b05-8f2f-dd615c5dfec4\", service_principal_id=\"add8f304-2d88-45e3-94fa-ac6cf335d5df\", service_principal_password=\"If2-.7Wlno57NW6v9~nE~xNIj~naD-DL5f\") \r\n ws = Workspace.get(name=\"sales-mlops-ws\", auth = sp, subscription_id=\"6542067a-127a-43ff-b7f2-007fe21a37f0\")\r\n ws.get_details()\r\n 
dataset = ws.datasets['salesforecast_ds'] \r\n original_df = dataset.to_pandas_dataframe()\r\n # date = '6/25/2020'\r\n # store = 34\r\n # item = 759\r\n # price = 690\r\n # date = pd.to_datetime(date)\r\n # date = (date - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n date = data[0][0]\r\n prev_sales = []\r\n (train, test) = tts(original_df)\r\n X_train, y_train, X_test, y_test, scaler_object = scale_data(train, test)\r\n first_date = original_df[\"date\"][0]\r\n for x in original_df.index:\r\n last_date = original_df[\"date\"][x]\r\n\r\n print(\"last date\", last_date)\r\n\r\n days_diff = (int(date) - int(last_date)) / (60 * 60 * 24)\r\n total_data_days = (int(last_date) - int(first_date)) / (60 * 60 * 24)\r\n\r\n print(\"days:\", days_diff)\r\n print(\"total_data_days:\", total_data_days)\r\n\r\n for i in original_df.index:\r\n if (original_df[\"item\"][i] == data[0][2] and original_df[\"store\"][i] == data[0][1]):\r\n prev_sales.append(original_df[\"sales\"][i])\r\n \r\n prev_sales_avg = 0\r\n prev_sales_avg = (sum(prev_sales)) / total_data_days\r\n\r\n forecast_result_array = []\r\n test_set = data\r\n test_set_scaled = scaler_object.transform(test_set)\r\n X_test = test_set_scaled[:, :4]\r\n X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])\r\n\r\n y_pred = model.predict(X_test)\r\n print(\"y_pred:\",y_pred)\r\n result = y_pred[0][0][0]\r\n result = round(result)\r\n print(\"result:\",result)\r\n prev_sales_avg = round (prev_sales_avg)\r\n next_day_prediction = math.ceil(result + prev_sales_avg)\r\n prev_sales.append(next_day_prediction)\r\n forecast_result_array.append(next_day_prediction)\r\n\r\n if days_diff > 1:\r\n for day in range(round(days_diff)):\r\n total_data_days += 1\r\n prev_sales_avg = sum(prev_sales) / total_data_days \r\n prev_sales_avg = round(prev_sales_avg)\r\n prev_sales.append(prev_sales_avg)\r\n forecast_result_array.append(prev_sales_avg)\r\n\r\n\r\n\r\n end_result = sum(forecast_result_array)\r\n print(\"end result: \", end_result)\r\n\r\n print(('{{\"RequestId\":\"{0}\", '\r\n '\"TraceParent\":\"{1}\", '\r\n '\"NumberOfPredictions\":{2}}}'\r\n ).format(\r\n request_headers.get(\"X-Ms-Request-Id\", \"\"),\r\n request_headers.get(\"Traceparent\", \"\"),\r\n end_result\r\n ))\r\n\r\n return {\"result\": end_result}\r\n\r\nif __name__ == \"__main__\":\r\n init()\r\n # date ='6/25/2020'\r\n # store = 34\r\n # item = 759\r\n # price = 690\r\n # date = pd.to_datetime(date)\r\n # date = (date - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n test = numpy.array([[date, store, item, price]])\r\n #print(\"test:\",test)\r\n #test =numpy.array([[1591833600,34,759,690]])\r\n prediction = run(test, {}) \r\n print(\"Test result: \", prediction)\r\n"
] | [
[
"pandas.to_datetime",
"pandas.Timestamp",
"pandas.Timedelta",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
victorchen276/CarND-Advanced-Lane-Lines | [
"436d81150107c181e3f328adfd3f1c31d6a5cb15"
] | [
"source/Project.py"
] | [
"\nfrom source.camera import camera\nfrom source.LaneDetect import LaneDetect\n\nfrom moviepy.editor import VideoFileClip\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n#\n# def process_video(input_video_file):\n# clip1 = VideoFileClip(input_video_file);\n# outputclip = clip1.fl_image(process_vid)\n# outputclip.write_videofile('output_'+input_video_file, audio=False);\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\n\n\nif __name__ == \"__main__\":\n print('main')\n\n # images = glob.glob('../camera_cal/calibration*.jpg')\n # print(images)\n\n camera = camera()\n # camera.calibration(images, x_cor=9, y_cor=6, outputfilename='./camera_calibration_data_1')\n camera.load_calibration_data('./camera_calibration_data.p')\n\n\n # # images = sorted(images, key=lambda x: float(re.findall(\"(\\d+)\", x)[0]))\n #\n # print('Correction images (successfully detected corners):')\n # plt.figure(figsize=(11.5, 9))\n # gridspec.GridSpec(6, 3)\n # # Step through the list and search for chessboard corners\n # for i, image in enumerate(camera_calibrate.calibration_images_success):\n # plt.subplot2grid((6, 3), (i // 3, i % 3), colspan=1, rowspan=1)\n # plt.imshow(image)\n # plt.axis('off')\n # plt.show()\n #\n # plt.figure(figsize=(12, 4))\n # plt.figtext(.5, .8, 'Images in which cv2 failed to find desired corners', fontsize=22, ha='center')\n # for i, p in enumerate(camera_calibrate.calibration_images_fail):\n # plt.subplot(1, 3, i + 1)\n # plt.imshow(mpimg.imread(p)) # draw the first image of each class\n # plt.title(p)\n # plt.axis('off')\n # plt.tight_layout(pad=0, h_pad=0, w_pad=0)\n # plt.show()\n # plt.savefig('fail.png')\n\n # camera_calibrate.load_calibration_data('./camera_calibration_data.p')\n\n # orig_img = mpimg.imread('../test_images/test1.jpg')\n # undist_img = camera_calibrate.undistort(orig_img)\n # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))\n # ax1.imshow(orig_img)\n # ax1.set_title('Original', fontsize=20)\n # ax2.imshow(undist_img)\n # ax2.set_title('Undistorted', fontsize=20)\n # # plt.show()\n # plt.savefig('undistort2.png')\n\n # Perspective transform\n # for image in glob.glob('../test_images/*.jpg'):\n # orig_img = cv2.imread(image)\n # birdeye_img, _ = camera.birds_eye(orig_img)\n # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))\n # f.tight_layout()\n # ax1.imshow(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))\n # ax1.set_title('Original', fontsize=20)\n # ax2.imshow(cv2.cvtColor(birdeye_img, cv2.COLOR_BGR2RGB))\n # ax2.set_title('Undistorted and Warped Image', fontsize=20)\n # plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n # plt.show()\n # # plt.savefig('../output_images/warp_' + str(i) + '.png')\n\n\n\n # # edege\n # image = mpimg.imread('../test_images/test6.jpg')\n # lane_detecter = LaneDetect()\n # result = lane_detecter.get_edges(image)\n #\n # # Plot the result\n # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\n # # f.tight_layout()\n # ax1.axis('off')\n # ax1.imshow(image)\n # ax1.set_title('Original', fontsize=18)\n # ax2.axis('off')\n # ax2.set_title('Edge', fontsize=18)\n #\n #\n # ax2.imshow(result, cmap='gray')\n # plt.show()\n # plt.savefig('edge.png')\n\n # # Detect Lane line\n # for image_name in glob.glob('../test_images/*.jpg'):\n # orig_img = mpimg.imread(image_name)\n #\n # lane_detecter = LaneDetect()\n # lane_detecter.initcamera()\n # lane_detecter.initlines(orig_img)\n # output_img = 
lane_detecter.process_pipeline(orig_img)\n # f, (ax1) = plt.subplots(1, 1, figsize=(9, 6))\n # ax1.imshow(output_img)\n # ax1.set_title('output_img', fontsize=20)\n # plt.axis('off')\n # plt.show()\n # break\n\n # Applying pipeline to video\n clip1 = VideoFileClip('../project_video.mp4')\n lane_detecter = LaneDetect()\n lane_detecter.initcamera()\n lane_detecter.initlines(clip1.get_frame(0))\n outputclip = clip1.fl_image(lane_detecter.process_pipeline)\n outputclip.write_videofile('../output_videos/output_project_video.mp4', audio=False)\n #\n # clip1 = VideoFileClip('../harder_challenge_video.mp4');\n # lane_detecter = LaneDetect(clip1.get_frame(0))\n # outputclip = clip1.fl_image(lane_detecter.process_pipeline)\n # outputclip.write_videofile('../output_harder_challenge_video.mp4', audio=False)\n #\n # clip1 = VideoFileClip('../challenge_video.mp4')\n # lane_detecter = LaneDetect(clip1.get_frame(0))\n # outputclip = clip1.fl_image(lane_detecter.process_pipeline)\n # outputclip.write_videofile('../output_challenge_video.mp4', audio=False)"
] | [
[
"numpy.dot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lee-Ft/RHA | [
"8a832a9afebc9204148bbd340c31e26c83138024"
] | [
"model/stage.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport pprint\nfrom collections import defaultdict\nfrom .context_query_attention import StructuredAttention\nfrom .encoder import StackedEncoder\nfrom .cnn import DepthwiseSeparableConv\nfrom .model_utils import save_pickle, mask_logits, flat_list_of_lists, \\\n find_max_triples, get_high_iou_sapns, expand_span\n\n\nclass LinearWrapper(nn.Module):\n \"\"\"1D conv layer\"\"\"\n def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):\n super(LinearWrapper, self).__init__()\n self.relu = relu\n layers = [nn.LayerNorm(in_hsz)] if layer_norm else []\n layers += [\n nn.Dropout(dropout),\n nn.Linear(in_hsz, out_hsz)\n ]\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"(N, L, D)\"\"\"\n if self.relu:\n return F.relu(self.conv(x), inplace=True) # (N, L, D)\n else:\n return self.conv(x) # (N, L, D)\n\n\nclass ConvLinear(nn.Module):\n \"\"\"1D conv layer\"\"\"\n def __init__(self, in_hsz, out_hsz, kernel_size=3, layer_norm=True, dropout=0.1, relu=True):\n super(ConvLinear, self).__init__()\n layers = [nn.LayerNorm(in_hsz)] if layer_norm else []\n layers += [\n nn.Dropout(dropout),\n DepthwiseSeparableConv(in_ch=in_hsz,\n out_ch=out_hsz,\n k=kernel_size,\n dim=1,\n relu=relu)\n ]\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"(N, L, D)\"\"\"\n return self.conv(x) # (N, L, D)\n\n\nclass STAGE(nn.Module):\n def __init__(self, opt):\n super(STAGE, self).__init__()\n self.opt = opt\n self.inference_mode = False\n self.sub_flag = opt.sub_flag\n self.vfeat_flag = opt.vfeat_flag\n self.vfeat_size = opt.vfeat_size\n self.t_iter = opt.t_iter\n self.extra_span_length = opt.extra_span_length\n self.add_local = opt.add_local\n self.use_sup_att = opt.use_sup_att\n self.num_negatives = opt.num_negatives\n self.negative_pool_size = opt.negative_pool_size\n self.num_hard = opt.num_hard\n self.drop_topk = opt.drop_topk\n self.margin = opt.margin\n self.att_loss_type = opt.att_loss_type\n self.scale = opt.scale\n self.alpha = opt.alpha\n self.dropout = opt.dropout\n self.hsz = opt.hsz\n self.bsz = None\n self.num_seg = None\n self.num_a = 5\n self.flag_cnt = self.sub_flag + self.vfeat_flag\n\n self.wd_size = opt.embedding_size\n self.bridge_hsz = 300\n\n self.bert_word_encoding_fc = nn.Sequential(\n nn.LayerNorm(self.wd_size),\n nn.Dropout(self.dropout),\n nn.Linear(self.wd_size, self.bridge_hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.bridge_hsz),\n )\n\n if self.sub_flag:\n print(\"Activate sub branch\")\n\n if self.vfeat_flag:\n print(\"Activate vid branch\")\n self.vid_fc = nn.Sequential(\n nn.LayerNorm(self.vfeat_size),\n nn.Dropout(self.dropout),\n nn.Linear(self.vfeat_size, self.bridge_hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.bridge_hsz)\n )\n\n if self.flag_cnt == 2:\n self.concat_fc = nn.Sequential(\n nn.LayerNorm(3 * self.hsz),\n nn.Dropout(self.dropout),\n nn.Linear(3 * self.hsz, self.hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.hsz),\n )\n\n self.input_embedding = nn.Sequential(\n nn.Dropout(self.dropout),\n nn.Linear(self.bridge_hsz, self.hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.hsz),\n )\n\n self.input_encoder = StackedEncoder(n_blocks=opt.input_encoder_n_blocks,\n n_conv=opt.input_encoder_n_conv,\n kernel_size=opt.input_encoder_kernel_size,\n num_heads=opt.input_encoder_n_heads,\n hidden_size=self.hsz,\n dropout=self.dropout)\n\n self.str_attn = StructuredAttention(dropout=self.dropout,\n scale=opt.scale,\n add_void=opt.add_non_visual) # no 
parameters inside\n\n self.c2q_down_projection = nn.Sequential(\n nn.LayerNorm(3 * self.hsz),\n nn.Dropout(self.dropout),\n nn.Linear(3*self.hsz, self.hsz),\n nn.ReLU(True),\n )\n\n self.cls_encoder = StackedEncoder(n_blocks=opt.cls_encoder_n_blocks,\n n_conv=opt.cls_encoder_n_conv,\n kernel_size=opt.cls_encoder_kernel_size,\n num_heads=opt.cls_encoder_n_heads,\n hidden_size=self.hsz,\n dropout=self.dropout)\n\n self.cls_projection_layers = nn.ModuleList(\n [\n LinearWrapper(in_hsz=self.hsz,\n out_hsz=self.hsz,\n layer_norm=True,\n dropout=self.dropout,\n relu=True)\n ] +\n [\n ConvLinear(in_hsz=self.hsz,\n out_hsz=self.hsz,\n kernel_size=3,\n layer_norm=True,\n dropout=self.dropout,\n relu=True)\n for _ in range(self.t_iter)])\n\n self.temporal_scoring_st_layers = nn.ModuleList([\n LinearWrapper(in_hsz=self.hsz,\n out_hsz=1,\n layer_norm=True,\n dropout=self.dropout,\n relu=False)\n for _ in range(self.t_iter+1)])\n\n self.temporal_scoring_ed_layers = nn.ModuleList([\n LinearWrapper(in_hsz=self.hsz,\n out_hsz=1,\n layer_norm=True,\n dropout=self.dropout,\n relu=False)\n for _ in range(self.t_iter+1)])\n\n self.temporal_criterion = nn.CrossEntropyLoss(reduction=\"sum\")\n\n self.classifier = LinearWrapper(in_hsz=self.hsz * 2 if self.add_local else self.hsz,\n out_hsz=1,\n layer_norm=True,\n dropout=self.dropout,\n relu=False)\n\n def load_word_embedding(self, pretrained_embedding, requires_grad=False):\n self.word_embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))\n self.word_embedding.weight.requires_grad = requires_grad\n\n def forward(self, batch):\n if self.inference_mode:\n return self.forward_main(batch)\n else:\n out, att_loss, att_predictions, temporal_loss, temporal_predictions, other_outputs = self.forward_main(batch)\n return out, att_loss, att_predictions, temporal_loss, temporal_predictions\n\n def forward_main(self, batch):\n \"\"\"\n Args:\n batch: edict, keys = qas, qas_mask, qa_noun_masks, sub, sub_mask, vcpt, vcpt_mask, vid, vid_mask,\n att_labels, att_labels_mask, qid, target, vid_name, ts_label\n qas, qas_mask, qa_noun_masks: (N, 5, Lqa)\n sub, sub_mask: (N, #imgs, Ls)\n vcpt, vcpt_mask: (N, #imgs, #regions)\n vid, vid_mask: (N, #imgs, #regions, D), (N, #imgs, #regions)\n att_labels, att_labels_mask: A list of N (#imgs, #qa-words, #regions)\n qid: list(int)\n vid_name: list(str)\n target: torch.LongTensor\n use_hard_negatives: bool, true to sample hard negatives\n q_l: int, length of the tokenized question\n anno_st_idx (list of int): each element is an index (at 0.5fps) of the first image\n with spatial annotation.\n ts_label: {\"st\": (N, ), \"ed\": (N, )} for 'st_ed'. 
(N, L) for 'frm'\n ts_label_mask: (N, L) for both 'st_ed' and 'frm'\n Returns:\n \"\"\"\n self.bsz = len(batch.qid)\n bsz = self.bsz\n num_a = self.num_a\n hsz = self.hsz\n\n a_embed = self.base_encoder(batch.qas_bert.view(bsz*num_a, -1, self.wd_size), # (N*5, L, D)\n batch.qas_mask.view(bsz * num_a, -1), # (N*5, L)\n self.bert_word_encoding_fc,\n self.input_embedding,\n self.input_encoder) # (N*5, L, D)\n a_embed = a_embed.view(bsz, num_a, 1, -1, hsz) # (N, 5, 1, L, D)\n a_mask = batch.qas_mask.view(bsz, num_a, 1, -1) # (N, 5, 1, L)\n\n attended_sub, attended_vid, attended_vid_mask, attended_sub_mask = (None, ) * 4\n other_outputs = {} # {\"pos_noun_mask\": batch.qa_noun_masks} # used to visualization and compute att acc\n if self.sub_flag:\n num_imgs, num_words = batch.sub_bert.shape[1:3]\n sub_embed = self.base_encoder(batch.sub_bert.view(bsz*num_imgs, num_words, -1), # (N*Li, Lw)\n batch.sub_mask.view(bsz * num_imgs, num_words), # (N*Li, Lw)\n self.bert_word_encoding_fc,\n self.input_embedding,\n self.input_encoder) # (N*Li, Lw, D)\n\n sub_embed = sub_embed.contiguous().view(bsz, 1, num_imgs, num_words, -1) # (N, Li, Lw, D)\n sub_mask = batch.sub_mask.view(bsz, 1, num_imgs, num_words) # (N, 1, Li, Lw)\n\n attended_sub, attended_sub_mask, sub_raw_s, sub_normalized_s = \\\n self.qa_ctx_attention(a_embed, sub_embed, a_mask, sub_mask,\n noun_mask=None,\n non_visual_vectors=None)\n\n other_outputs[\"sub_normalized_s\"] = sub_normalized_s\n other_outputs[\"sub_raw_s\"] = sub_raw_s\n\n if self.vfeat_flag:\n num_imgs, num_regions = batch.vid.shape[1:3]\n vid_embed = F.normalize(batch.vid, p=2, dim=-1) # (N, Li, Lr, D)\n\n vid_embed = self.base_encoder(vid_embed.view(bsz*num_imgs, num_regions, -1), # (N*Li, Lw)\n batch.vid_mask.view(bsz * num_imgs, num_regions), # (N*Li, Lr)\n self.vid_fc,\n self.input_embedding,\n self.input_encoder) # (N*Li, L, D)\n\n vid_embed = vid_embed.contiguous().view(bsz, 1, num_imgs, num_regions, -1) # (N, 1, Li, Lr, D)\n vid_mask = batch.vid_mask.view(bsz, 1, num_imgs, num_regions) # (N, 1, Li, Lr)\n\n attended_vid, attended_vid_mask, vid_raw_s, vid_normalized_s = \\\n self.qa_ctx_attention(a_embed, vid_embed, a_mask, vid_mask,\n noun_mask=None,\n non_visual_vectors=None)\n\n other_outputs[\"vid_normalized_s\"] = vid_normalized_s\n other_outputs[\"vid_raw_s\"] = vid_raw_s\n\n if self.flag_cnt == 2:\n visual_text_embedding = torch.cat([attended_sub,\n attended_vid,\n attended_sub * attended_vid], dim=-1) # (N, 5, Li, Lqa, 3D)\n visual_text_embedding = self.concat_fc(visual_text_embedding) # (N, 5, Li, Lqa, D)\n out, target, t_scores = self.classfier_head_multi_proposal(\n visual_text_embedding, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,\n extra_span_length=self.extra_span_length)\n elif self.sub_flag:\n out, target, t_scores = self.classfier_head_multi_proposal(\n attended_sub, attended_sub_mask, batch.target, batch.ts_label, batch.ts_label_mask,\n extra_span_length=self.extra_span_length)\n elif self.vfeat_flag:\n out, target, t_scores = self.classfier_head_multi_proposal(\n attended_vid, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,\n extra_span_length=self.extra_span_length)\n else:\n raise NotImplementedError\n assert len(out) == len(target)\n\n other_outputs[\"temporal_scores\"] = t_scores # (N, 5, Li) or (N, 5, Li, 2)\n\n if self.inference_mode:\n inference_outputs = {\n \"answer\": out, # (N, 5)\n \"t_scores\": F.softmax(t_scores, dim=2),\n \"att_predictions\": self.get_att_prediction(\n 
scores=other_outputs[\"vid_raw_s\"],\n object_vocab=batch.eval_object_word_ids,\n words=batch.qas,\n vid_names=batch.vid_name,\n qids=batch.qid,\n img_indices=batch.image_indices,\n boxes=batch.boxes,\n start_indices=batch.anno_st_idx,\n ) if self.vfeat_flag else None,\n }\n return inference_outputs\n\n att_loss = 0\n att_predictions = None\n # if (self.use_sup_att or not self.training) and self.vfeat_flag:\n if self.use_sup_att and self.training and self.vfeat_flag:\n start_indices = batch.anno_st_idx\n try:\n cur_att_loss, cur_att_predictions = \\\n self.get_att_loss(other_outputs[\"vid_raw_s\"], batch.att_labels, batch.target, batch.qas,\n qids=batch.qid,\n q_lens=batch.q_l,\n vid_names=batch.vid_name,\n img_indices=batch.image_indices,\n boxes=batch.boxes,\n start_indices=start_indices,\n num_negatives=self.num_negatives,\n use_hard_negatives=batch.use_hard_negatives,\n drop_topk=self.drop_topk)\n except AssertionError as e:\n save_pickle(\n {\"batch\": batch, \"start_indices\": start_indices, \"vid_raw_s\": other_outputs[\"vid_raw_s\"]},\n \"err_dict.pickle\"\n )\n import sys\n sys.exit(1)\n att_loss += cur_att_loss\n att_predictions = cur_att_predictions\n\n temporal_loss = self.get_ts_loss(temporal_scores=t_scores,\n ts_labels=batch.ts_label,\n answer_indices=batch.target)\n\n if self.training:\n return [out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs\n else:\n return out, att_loss, att_predictions, temporal_loss, F.softmax(t_scores, dim=2), other_outputs\n\n @classmethod\n def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):\n \"\"\" Raw data --> higher-level embedding\n Args:\n data: (N, L) for text, (N, L, D) for video\n data_mask: (N, L)\n init_encoder: word_embedding layer for text, MLP (downsize) for video\n downsize_encoder: MLP, down project to hsz\n input_encoder: multiple layer of encoder block, with residual connection, CNN, layernorm, etc\n Returns:\n encoded_data: (N, L, D)\n \"\"\"\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data, data_mask)\n\n def qa_ctx_attention(self, qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask, non_visual_vectors):\n \"\"\" Align image regions with QA words\n Args:\n qa_embed: (N, 5, 1, Lqa, D)\n qa_mask: (N, 5, 1, Lqa)\n ctx_embed: (N, 1, Li, Lr, D)\n ctx_mask: (N, 1, Li, Lr)\n noun_mask: (N, 5, Lqa)\n non_visual_vectors: (m, D), m is a tunable parameter\n Returns:\n \"\"\"\n num_img, num_region = ctx_mask.shape[2:]\n\n u_a, raw_s, s_mask, s_normalized = self.str_attn(\n qa_embed, ctx_embed, qa_mask, ctx_mask,\n noun_mask=noun_mask, void_vector=non_visual_vectors) # (N, 5, Li, Lqa, D), (N, 5, Li, Lqa, lr) x2\n qa_embed = qa_embed.repeat(1, 1, num_img, 1, 1)\n mixed = torch.cat([qa_embed,\n u_a,\n qa_embed*u_a], dim=-1) # (N, 5, Li, Lqa, D)\n mixed = self.c2q_down_projection(mixed) # (N, 5, Li, Lqa, D)\n mixed_mask = (s_mask.sum(-1) != 0).float() # (N, 5, Li, Lqa)\n return mixed, mixed_mask, raw_s, s_normalized\n\n def get_proposals(self, max_statement, max_statement_mask, temporal_scores,\n targets, ts_labels, max_num_proposal=1, iou_thd=0.5, ce_prob_thd=0.01,\n extra_span_length=3):\n \"\"\"\n Args:\n max_statement: (N, 5, Li, D)\n max_statement_mask: (N, 5, Li, 1)\n temporal_scores: (N, 5, Li, 2)\n targets: (N, )\n ts_labels: (N, Li) for frm or N * (st, ed) for st_ed\n max_num_proposal:\n iou_thd:\n ce_prob_thd:\n extra_span_length:\n Returns:\n\n \"\"\"\n bsz, num_a, num_img, _ = max_statement_mask.shape\n if self.training:\n 
ca_temporal_scores_st_ed = \\\n temporal_scores[torch.arange(bsz, dtype=torch.long), targets].data # (N, Li, 2)\n ca_temporal_scores_st_ed = F.softmax(ca_temporal_scores_st_ed, dim=1) # (N, Li, 2)\n ca_pred_spans = find_max_triples(ca_temporal_scores_st_ed[:, :, 0],\n ca_temporal_scores_st_ed[:, :, 1],\n topN=max_num_proposal,\n prob_thd=ce_prob_thd) # N * [(st_idx, ed_idx, confidence), ...]\n # +1 for ed index before forward into get_high_iou_spans func.\n ca_pred_spans = [[[sub_e[0], sub_e[1] + 1, sub_e[2]] for sub_e in e] for e in ca_pred_spans]\n spans = get_high_iou_sapns(zip(ts_labels[\"st\"].tolist(), (ts_labels[\"ed\"] + 1).tolist()),\n ca_pred_spans, iou_thd=iou_thd, add_gt=True) # N * [(st, ed), ...]\n local_max_max_statement_list = [] # N_new * (5, D)\n global_max_max_statement_list = [] # N_new * (5, D)\n span_targets = [] # N_new * (1,)\n for idx, (t, span_sublist) in enumerate(zip(targets, spans)):\n span_targets.extend([t] * len(span_sublist))\n cur_global_max_max_statement = \\\n torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 1)[0]\n global_max_max_statement_list.extend([cur_global_max_max_statement] * len(span_sublist))\n for span in span_sublist:\n span = expand_span(span, expand_length=extra_span_length)\n cur_span_max_statement = mask_logits(\n max_statement[idx, :, span[0]:span[1]],\n max_statement_mask[idx, :, span[0]:span[1]]) # (5, Li[st:ed], D)\n local_max_max_statement_list.append(torch.max(cur_span_max_statement, 1)[0]) # (5, D)\n local_max_max_statement = torch.stack(local_max_max_statement_list) # (N_new, 5, D)\n global_max_max_statement = torch.stack(global_max_max_statement_list) # (N_new, 5, D)\n max_max_statement = torch.cat([\n local_max_max_statement,\n global_max_max_statement], dim=-1) # (N_new, 5, 2D)\n return max_max_statement, targets.new_tensor(span_targets) # (N_new, 5, 2D), (N_new, )\n else: # testing\n temporal_scores_st_ed = F.softmax(temporal_scores, dim=2) # (N, 5, Li, 2)\n temporal_scores_st_ed_reshaped = temporal_scores_st_ed.view(bsz * num_a, -1, 2) # (N*5, Li, 2)\n pred_spans = find_max_triples(temporal_scores_st_ed_reshaped[:, :, 0],\n temporal_scores_st_ed_reshaped[:, :, 1],\n topN=1, prob_thd=None) # (N*5) * [(st, ed, confidence), ]\n pred_spans = flat_list_of_lists(pred_spans) # (N*5) * (st, ed, confidence)\n pred_spans = torch.FloatTensor(pred_spans).to(temporal_scores_st_ed_reshaped.device) # (N*5, 3)\n pred_spans, pred_scores = pred_spans[:, :2].long(), pred_spans[:, 2] # (N*5, 2), (N*5, )\n pred_spans = [[e[0], e[1] + 1] for e in pred_spans]\n max_statement = max_statement.view(bsz * num_a, num_img, -1) # (N*5, Li, D)\n max_statement_mask = max_statement_mask.view(bsz * num_a, num_img, -1) # (N*5, Li, 1)\n local_max_max_statement_list = [] # N*5 * (D, )\n global_max_max_statement_list = [] # N*5 * (D, )\n for idx, span in enumerate(pred_spans):\n span = expand_span(span, expand_length=extra_span_length)\n cur_global_max_max_statement = \\\n torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 0)[0]\n global_max_max_statement_list.append(cur_global_max_max_statement)\n cur_span_max_statement = mask_logits(\n max_statement[idx, span[0]:span[1]],\n max_statement_mask[idx, span[0]:span[1]]) # (Li[st:ed], D), words for span[0] == span[1]\n local_max_max_statement_list.append(torch.max(cur_span_max_statement, 0)[0]) # (D, )\n local_max_max_statement = torch.stack(local_max_max_statement_list) # (N*5, D)\n global_max_max_statement = torch.stack(global_max_max_statement_list) # (N*5, D)\n 
max_max_statement = torch.cat([\n local_max_max_statement,\n global_max_max_statement], dim=-1) # (N_new, 5, 2D)\n return max_max_statement.view(bsz, num_a, -1), targets # (N, 5, 2D), (N, )\n\n def residual_temporal_predictor(self, layer_idx, input_tensor):\n \"\"\"\n Args:\n layer_idx (int):\n input_tensor: (N, L, D)\n\n Returns:\n temporal_score\n \"\"\"\n input_tensor = input_tensor + self.cls_projection_layers[layer_idx](input_tensor) # (N, L, D)\n t_score_st = self.temporal_scoring_st_layers[layer_idx](input_tensor) # (N, L, 1)\n t_score_ed = self.temporal_scoring_ed_layers[layer_idx](input_tensor) # (N, L, 1)\n t_score = torch.cat([t_score_st, t_score_ed], dim=2) # (N, L, 2)\n return input_tensor, t_score\n\n def classfier_head_multi_proposal(self, statement, statement_mask, targets, ts_labels, ts_labels_mask,\n max_num_proposal=1, ce_prob_thd=0.01, iou_thd=0.5, extra_span_length=3):\n \"\"\"Predict the probabilities of each statements being true. Statements = QA + Context.\n Args:\n statement: (N, 5, Li, Lqa, D)\n statement_mask: (N, 5, Li, Lqa)\n targets: (N, )\n ts_labels: (N, Li) for frm or N * (st, ed) for st_ed\n ts_labels_mask: (N, Li)\n max_num_proposal (int):\n ce_prob_thd (float): threshold for p1*p2 (st, ed)\n iou_thd (float): threshold for temporal iou\n extra_span_length (int): expand the localized span to give a little bit extra context\n Returns:\n \"\"\"\n bsz, num_a, num_img, num_words = statement_mask.shape\n statement = statement.view(bsz*num_a*num_img, num_words, -1) # (N*5*Li, Lqa, D)\n statement_mask = statement_mask.view(bsz*num_a*num_img, num_words) # (N*5*Li, Lqa)\n statement = self.cls_encoder(statement, statement_mask) # (N*5*Li, Lqa, D)\n max_statement = torch.max(mask_logits(statement, statement_mask.unsqueeze(2)), 1)[0] # (N*5*Li, D)\n max_statement_mask = (statement_mask.sum(1) != 0).float().view(bsz, num_a, num_img, 1) # (N, 5, Li, 1)\n max_statement = max_statement.view(bsz*num_a, num_img, -1) # (N, 5, Li, D)\n\n t_score_container = []\n encoded_max_statement_container = []\n encoded_max_statement = max_statement # (N*5, Li, D)\n for layer_idx in range(self.t_iter+1):\n encoded_max_statement, prev_t_score = \\\n self.residual_temporal_predictor(layer_idx, encoded_max_statement)\n t_score_container.append(prev_t_score.view(bsz, num_a, num_img, 2)) # (N, 5, Li, 2)\n encoded_max_statement_container.append(encoded_max_statement) # (N*5, Li, D)\n if self.t_iter > 0:\n temporal_scores_st_ed = 0.5 * (t_score_container[0] + torch.stack(t_score_container[:1]).mean(0))\n else:\n temporal_scores_st_ed = t_score_container[0] # (N, 5, Li, 2)\n\n # mask before softmax\n temporal_scores_st_ed = mask_logits(temporal_scores_st_ed, ts_labels_mask.view(bsz, 1, num_img, 1))\n\n # when predict answer, only consider 1st level representation !!!\n # since the others are all generated from the 1st level\n stacked_max_statement = encoded_max_statement_container[0].view(bsz, num_a, num_img, -1) # (N, 5, Li, D)\n if self.add_local:\n max_max_statement, targets = self.get_proposals(\n stacked_max_statement, max_statement_mask, temporal_scores_st_ed,\n targets, ts_labels, max_num_proposal=max_num_proposal, iou_thd=iou_thd,\n ce_prob_thd=ce_prob_thd, extra_span_length=extra_span_length) # (N, 5, D)\n else:\n max_max_statement = \\\n torch.max(mask_logits(stacked_max_statement, max_statement_mask), 2)[0] # (N, 5, D)\n # targets = targets\n\n answer_scores = self.classifier(max_max_statement).squeeze(2) # (N, 5)\n return answer_scores, targets, temporal_scores_st_ed # (N_new, 5), 
(N_new, ) (N, 5, Li, 2)\n\n def get_ts_loss(self, temporal_scores, ts_labels, answer_indices):\n \"\"\"\n Args:\n temporal_scores: (N, 5, Li, 2)\n ts_labels: dict(st=(N, ), ed=(N, ))\n answer_indices: (N, )\n\n Returns:\n\n \"\"\"\n bsz = len(answer_indices)\n # compute loss\n ca_temporal_scores_st_ed = \\\n temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices] # (N, Li, 2)\n loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels[\"st\"])\n loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels[\"ed\"])\n return (loss_st + loss_ed) / 2.\n\n @classmethod\n def sample_negatives(cls, pred_score, pos_indices, neg_indices, num_negatives=2,\n use_hard_negatives=False, negative_pool_size=0, num_hard=2, drop_topk=0):\n \"\"\" Sample negatives from a set of indices. Several sampling strategies are supported:\n 1, random; 2, hard negatives; 3, drop_topk hard negatives; 4, mix easy and hard negatives\n 5, sampling within a pool of hard negatives; 6, sample across images of the same video.\n Args:\n pred_score: (num_img, num_words, num_region)\n pos_indices: (N_pos, 3) all positive region indices for the same word, not necessaryily the same image.\n neg_indices: (N_neg, 3) ...\n num_negatives (int):\n use_hard_negatives (bool):\n negative_pool_size (int):\n num_hard (int):\n drop_topk (int):\n Returns:\n\n \"\"\"\n num_unique_pos = len(pos_indices)\n sampled_pos_indices = torch.cat([pos_indices] * num_negatives, dim=0)\n if use_hard_negatives:\n # print(\"using use_hard_negatives\")\n neg_scores = pred_score[neg_indices[:, 0], neg_indices[:, 1], neg_indices[:, 2]] # TODO\n max_indices = torch.sort(neg_scores, descending=True)[1].tolist()\n if negative_pool_size > num_negatives: # sample from a pool of hard negatives\n hard_pool = max_indices[drop_topk:drop_topk + negative_pool_size]\n hard_pool_indices = neg_indices[hard_pool]\n num_hard_negs = num_negatives\n sampled_easy_neg_indices = []\n if num_hard < num_negatives:\n easy_pool = max_indices[drop_topk + negative_pool_size:]\n easy_pool_indices = neg_indices[easy_pool]\n num_hard_negs = num_hard\n num_easy_negs = num_negatives - num_hard_negs\n sampled_easy_neg_indices = easy_pool_indices[\n torch.randint(low=0, high=len(easy_pool_indices),\n size=(num_easy_negs * num_unique_pos, ), dtype=torch.long)\n ]\n sampled_hard_neg_indices = hard_pool_indices[\n torch.randint(low=0, high=len(hard_pool_indices),\n size=(num_hard_negs * num_unique_pos, ), dtype=torch.long)\n ]\n\n if len(sampled_easy_neg_indices) != 0:\n sampled_neg_indices = torch.cat([sampled_hard_neg_indices, sampled_easy_neg_indices], dim=0)\n else:\n sampled_neg_indices = sampled_hard_neg_indices\n\n else: # directly take the top negatives\n sampled_neg_indices = neg_indices[max_indices[drop_topk:drop_topk+len(sampled_pos_indices)]]\n else:\n sampled_neg_indices = neg_indices[\n torch.randint(low=0, high=len(neg_indices), size=(len(sampled_pos_indices),), dtype=torch.long)\n ]\n return sampled_pos_indices, sampled_neg_indices\n\n def get_att_loss(self, scores, att_labels, target, words, vid_names, qids, q_lens, img_indices, boxes,\n start_indices, num_negatives=2, use_hard_negatives=False, drop_topk=0):\n \"\"\" compute ranking loss, use for loop to find the indices,\n use advanced indexing to perform the real calculation\n Build a list contains a quaduple\n\n Args:\n scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]\n att_labels: list(tensor), each has dimension (#num_imgs, #num_words, #regions), not 
batched\n target: 1D tensor (N, )\n words: LongTensor (N, 5, Lqa)\n vid_names: list(str) (N,)\n qids: list(int), (N, )\n q_lens: list(int), (N, )\n img_indices: list(list(int)), (N, Li), or None\n boxes: list(list(box)) of length N, each sublist represent an image,\n each box contains the coordinates of xyxy, or None\n num_negatives: number of negatives for each positive region\n use_hard_negatives: use hard negatives, uselect negatives with high scores\n drop_topk: drop topk highest negatives (since the top negatives might be correct, they are just not labeled)\n start_indices (list of int): each element is an index (at 0.5fps) of the first image\n with spatial annotation. If with_ts, set to zero\n Returns:\n att_loss: loss value for the batch\n att_predictions: (list) [{\"gt\": gt_scores, \"pred\": pred_scores}, ], used to calculate att. accuracy\n \"\"\"\n pos_container = [] # contains tuples of 5 elements, which are (batch_i, ca_i, img_i, word_i, region_i)\n neg_container = []\n for batch_idx in range(len(target)): # batch\n ca_idx = target[batch_idx].cpu().item()\n gt_score = att_labels[batch_idx] # num_img * (num_words, num_region)\n start_idx = start_indices[batch_idx] # int\n num_img = len(gt_score)\n sen_l, _ = gt_score[0].shape\n pred_score = scores[batch_idx, ca_idx, :num_img, :sen_l] # (num_img, num_words, num_region)\n\n # find positive and negative indices\n batch_pos_indices = []\n batch_neg_indices = []\n for img_idx, img_gt_score in enumerate(gt_score):\n img_idx = start_idx + img_idx\n img_pos_indices = torch.nonzero(img_gt_score) # (N_pos, 2) ==> (#words, #regions)\n if len(img_pos_indices) == 0: # skip if no positive indices\n continue\n img_pos_indices = torch.cat([img_pos_indices.new_full([len(img_pos_indices), 1], img_idx),\n img_pos_indices], dim=1) # (N_pos, 3) ==> (#img, #words, #regions)\n\n img_neg_indices = torch.nonzero(img_gt_score == 0) # (N_neg, 2)\n img_neg_indices = torch.cat([img_neg_indices.new_full([len(img_neg_indices), 1], img_idx),\n img_neg_indices], dim=1) # (N_neg, 3)\n\n batch_pos_indices.append(img_pos_indices)\n batch_neg_indices.append(img_neg_indices)\n\n if len(batch_pos_indices) == 0: # skip if empty ==> no gt label for the video\n continue\n batch_pos_indices = torch.cat(batch_pos_indices, dim=0) # (N_pos, 3) -->\n batch_neg_indices = torch.cat(batch_neg_indices, dim=0) # (N_neg, 3)\n\n # sample positives and negatives\n available_img_indices = batch_pos_indices[:, 0].unique().tolist()\n for img_idx in available_img_indices:\n # pos_indices for a certrain img\n img_idx_pos_indices = batch_pos_indices[batch_pos_indices[:, 0] == img_idx]\n img_idx_neg_indices = batch_neg_indices[batch_neg_indices[:, 0] == img_idx]\n available_word_indices = img_idx_pos_indices[:, 1].unique().tolist()\n for word_idx in available_word_indices:\n # positives and negatives for a given image-word pair, specified by img_idx-word_idx\n img_idx_word_idx_pos_indices = img_idx_pos_indices[img_idx_pos_indices[:, 1] == word_idx]\n img_idx_word_idx_neg_indices = img_idx_neg_indices[img_idx_neg_indices[:, 1] == word_idx]\n # actually all the positives, not sampled pos\n sampled_pos_indices, sampled_neg_indices = \\\n self.sample_negatives(pred_score,\n img_idx_word_idx_pos_indices, img_idx_word_idx_neg_indices,\n num_negatives=num_negatives, use_hard_negatives=use_hard_negatives,\n negative_pool_size=self.negative_pool_size,\n num_hard=self.num_hard, drop_topk=drop_topk)\n\n base_indices = torch.LongTensor([[batch_idx, ca_idx]] * len(sampled_pos_indices)).\\\n 
to(sampled_pos_indices.device)\n pos_container.append(torch.cat([base_indices, sampled_pos_indices], dim=1))\n neg_container.append(torch.cat([base_indices, sampled_neg_indices], dim=1))\n\n pos_container = torch.cat(pos_container, dim=0)\n neg_container = torch.cat(neg_container, dim=0)\n\n # contain all the predictions and gt labels in this batch, only consider the ones with gt labels\n # also only consider the positive answer.\n att_predictions = None\n if not self.training and self.vfeat_flag:\n att_predictions = dict(det_q=[],\n det_ca=[])\n unique_pos_container = np.unique(pos_container.cpu().numpy(), axis=0) # unique rows in the array\n for row in unique_pos_container:\n batch_idx, ca_idx, img_idx, word_idx, region_idx = row\n start_idx = start_indices[batch_idx] # int\n cur_q_len = q_lens[batch_idx]\n num_region = att_labels[batch_idx][img_idx-start_idx].shape[1] # num_img * (num_words, num_region)\n if len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()) != \\\n len(boxes[batch_idx][img_idx-start_idx]):\n print(\"scores[batch_idx, ca_idx, img_idx, word_idx].data.cpu()\",\n len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()))\n print(\"len(boxes[batch_idx][img_idx-start_idx])\", len(boxes[batch_idx][img_idx-start_idx]))\n print(\"boxes, batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx\",\n batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx)\n print(row)\n raise AssertionError\n cur_det_data = {\n \"pred\": scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu(),\n \"word\": words[batch_idx, ca_idx, word_idx],\n \"qid\": qids[batch_idx],\n \"vid_name\": vid_names[batch_idx],\n \"img_idx\": img_indices[batch_idx][img_idx], # full indices\n \"boxes\": boxes[batch_idx][img_idx-start_idx] # located boxes\n }\n if word_idx < cur_q_len:\n att_predictions[\"det_q\"].append(cur_det_data)\n else:\n att_predictions[\"det_ca\"].append(cur_det_data)\n\n pos_scores = scores[pos_container[:, 0], pos_container[:, 1], pos_container[:, 2],\n pos_container[:, 3], pos_container[:, 4]]\n neg_scores = scores[neg_container[:, 0], neg_container[:, 1], neg_container[:, 2],\n neg_container[:, 3], neg_container[:, 4]]\n\n if self.att_loss_type == \"hinge\":\n # max(0, m + S_pos - S_neg)\n att_loss = torch.clamp(self.margin + neg_scores - pos_scores, min=0).sum()\n elif self.att_loss_type == \"lse\":\n # log[1 + exp(scale * (S_pos - S_neg))]\n att_loss = torch.log1p(torch.exp(self.alpha * (neg_scores - pos_scores))).sum()\n else:\n raise NotImplementedError(\"Only support hinge and lse\")\n return att_loss, att_predictions\n\n def get_att_prediction(self, scores, object_vocab, words, vid_names, qids, img_indices, boxes,\n start_indices, score_thd=0.2):\n \"\"\" compute ranking loss, use for loop to find the indices,\n use advanced indexing to perform the real calculation\n Build a list contains a quaduple\n\n Args:\n scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]\n object_vocab: list, object word ids in the vocabulary\n words: LongTensor (N, 5, Lqa)\n vid_names: list(str) (N,)\n qids: list(int), (N, )\n img_indices: list(list(int)), (N, Li), or None\n boxes: list(list(box)) of length N, each sublist represent an image,\n each box contains the coordinates of xyxy, or None\n start_indices (list of int): each element is an index (at 0.5fps) of the first image\n with spatial annotation. 
If with_ts, set to zero\n score_thd: only keep boxes with score higher than this value\n Returns:\n att_loss: loss value for the batch\n att_predictions: (list) [{\"gt\": gt_scores, \"pred\": pred_scores}, ], used to calculate att. accuracy\n \"\"\"\n # contain all the predictions and gt labels in this batch, only consider the ones with gt labels\n # also only consider the positive answer.\n att_predictions = None\n if self.vfeat_flag:\n att_predictions = []\n for batch_idx in range(len(scores)):\n start_idx = start_indices[batch_idx] # int\n q_att_predictions = dict() # predictions associated with this question\n for ans_idx in range(5):\n q_att_predictions[ans_idx] = []\n for img_idx_local in range(len(boxes[batch_idx])):\n # img_idx_local: for the imgs with box anno\n # img_idx_global: for all the imgs, including ones without box anno\n img_idx_global = img_idx_local + start_idx\n cur_img_scores = scores[batch_idx, ans_idx, img_idx_global] # (Lqa, Lr)\n cur_words = words[batch_idx, ans_idx].tolist() # (Lqa, )\n cur_img_boxes = boxes[batch_idx][img_idx_local]\n for word_idx, w in enumerate(cur_words):\n if w in object_vocab:\n cur_word_region_scores = cur_img_scores[word_idx].data.cpu().numpy() # (Lr, )\n accepted_region_ids = np.nonzero(cur_word_region_scores >= score_thd)[0].tolist()\n accepted_region_scores = [float(cur_word_region_scores[i]) for i in accepted_region_ids]\n accepted_region_boxes = [cur_img_boxes[i] for i in accepted_region_ids]\n sorted_indices = np.argsort(accepted_region_scores)\n accepted_region_scores = [accepted_region_scores[i] for i in sorted_indices]\n accepted_region_boxes = [accepted_region_boxes[i] for i in sorted_indices]\n cur_det_data = {\n \"pred\": accepted_region_scores,\n \"bbox\": accepted_region_boxes,\n \"word\": int(words[batch_idx, ans_idx, word_idx]),\n \"qid\": int(qids[batch_idx]),\n \"vid_name\": vid_names[batch_idx],\n \"img_idx\": img_indices[batch_idx][img_idx_global], # image file name id\n }\n q_att_predictions[ans_idx].append(cur_det_data)\n att_predictions.append(q_att_predictions)\n return att_predictions\n"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.FloatTensor",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.sort",
"torch.nonzero",
"torch.arange",
"torch.nn.Sequential",
"numpy.nonzero",
"torch.nn.ReLU",
"torch.exp",
"torch.nn.Linear",
"torch.stack",
"numpy.argsort",
"torch.nn.functional.normalize",
"torch.nn.LayerNorm",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
janewen134/catsdogs | [
"051dc0d4bf695ca2db03df6fc3cf758331df4aaa"
] | [
"cats_and_dogs_classification.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Cats and Dogs Classification\n\n# Data Loading and Exploring\n\n# In[1]:\n\n\nimport os\nbase_dir = './cats_and_dogs_filtered'\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\n\n# cat training pictures\ntrain_cats_dir = os.path.join(train_dir, 'cats')\n\n# dog training pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\n\n# cat validation pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\n\n# dog validation pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\n\n\n# In[2]:\n\n\n# view file names\ntrain_cat_fnames = os.listdir(train_cats_dir)\nprint(train_cat_fnames[:10])\n\ntrain_dog_fnames = os.listdir(train_dogs_dir)\ntrain_dog_fnames.sort()\nprint(train_dog_fnames[:10])\n\n\n# In[3]:\n\n\n# preview images to know what the dataset is like\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Parameters for our graph; we'll output images in a 4*4 configuration\nnrows = 4\nncols = 4\n\n# Index for iterating over images\npic_index = 0\n\n# Set up matplotlib fig, and size it to fit 4*4 pics\nfig = plt.gcf()\nfig.set_size_inches(ncols*4, nrows*4)\n\n# 8 images for cats and dogs separately\npic_index += 8\n\nnext_cat_pix = [os.path.join(train_cats_dir, fname) for fname in train_cat_fnames[pic_index-8:pic_index]]\nnext_dog_pix = [os.path.join(train_dogs_dir, fname) for fname in train_dog_fnames[pic_index-8:pic_index]]\n\nfor i, img_path in enumerate(next_cat_pix + next_dog_pix):\n # Set up subplot; subplot indices starts at 1\n sp = plt.subplot(nrows, ncols, i+1)\n sp.axis('Off')\n \n img = mpimg.imread(img_path)\n plt.imshow(img)\nplt.show() \n\n\n# build a small convnet from scratch to get to 72% accuracy\n\n# In[4]:\n\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\n\n# Our input feature map is 150*150*3: 150*150 for the image pixels, \n# and 3 for the three color channels: R, G and B\nimg_input = layers.Input(shape=(150,150,3))\n\n# First convolution extracts 16 filters that are 3*3\n# Convolution is followed by max-pooling layer with a 2*2 window\nx = layers.Conv2D(16,3,activation='relu')(img_input)\nx = layers.MaxPooling2D(2)(x)\n\n# Second convolution extracts 32 filters that are 3*3\n# Convolution is followed by max-pooling layer with a 2*2 window\nx = layers.Conv2D(32,3,activation='relu')(x)\nx = layers.MaxPooling2D(2)(x)\n\n# Third convolution extracts 64 filters that are 3*3\n# Convolution is followed by max-pooling layer with a 2*2 window\nx = layers.Conv2D(64,3, activation='relu')(x)\nx = layers.MaxPooling2D(2)(x)\n\n\n# fully-connected layers: because we are facing a binary classification problem, we will end our network with a sigmoid activation, so that the output of our network will be a single scalar between 0 and 1.\n\n# In[5]:\n\n\n# Flatten feature map to a 1-dim tensor so we can add fully connected layers\nx = layers.Flatten()(x)\n# Generate a fully connected layer with ReLU activation and 512 hidden units\nx = layers.Dense(512,activation='relu')(x)\n\n# Create output layer with a single node and sigmoid activation\noutput = layers.Dense(1, activation='sigmoid')(x)\n\n# Create Model\n# input = input feature map\n# output = output feature map\n# connected layer + sigmoid output layer \nmodel = Model(img_input,output)\n\n\n# Let's summarize the model architecture\n\n# In[6]:\n\n\nmodel.summary()\n\n\n# In[7]:\n\n\n# use RMSprop instead of stochastic gradient \nfrom 
tensorflow.keras.optimizers import RMSprop\n\nmodel.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])\n\n\n# Data Preprocessing\n\n# In[8]:\n\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# All images will be rescaled by 1./255\ntrain_datagen = ImageDataGenerator(rescale=1./255)\nval_datagen = ImageDataGenerator(rescale=1./255)\n\n# Flow training images in batches of 20 using train_datagen generator\ntrain_generator = train_datagen.flow_from_directory(\n train_dir, # This is the source directory for training images\n target_size=(150,150),\n batch_size=20,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary'\n)\n\n# Flow validation images in batches of 20 using val_datagen generator\nvalidation_generator = val_datagen.flow_from_directory(\n validation_dir,\n target_size=(150,150),\n batch_size=20,\n class_mode='binary'\n)\n\n\n# Training\n# <br>train on 2000 images, for 15 epochs and validate on 1000 images\n\n# In[ ]:\n\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch=100, # 2000 images = batch_size * steps\n epochs=15,\n validation_data=validation_generator,\n validation_steps=50, # 1000 images = batch_size * steps\n verbose=1\n)\n\n# Visualizing Intermediate Representations\n# Visualize how an input gets transformed as it goes through the convnet\n\n# In[ ]:\n\nimport numpy as np\nimport random\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\n\n# define a new Model that takes an img as input and will output\n# intermediate representations for all layers in the previous model after\n# the first\nsuccessive_outputs = [layers.output for layer in model.layers[1:]]\nvisualization_model = Model(img_input, successive_outputs)\n\n# prepare a random input img of a cat or dog from the training set\ncat_img_files = [os.path.join(train_cats_dir, f) for f in train_cat_fnames]\ndog_img_files = [os.path.join(train_dogs_dir, f) for f in train_dog_fnames]\nimg_path = random.choice(cat_img_files + dog_img_files)\n\nimg = load_img(img_path, target_size=(150, 150)) # this is a PIL img\nx = img_to_array(img) # Numpy array with shape (150, 150, 3)\nx = x.reshape((1,) + x.shape)\n\n# Rescale by 1/255\nx /= 255\n\n# Let's run our image through our network, thus obtaining all\n# intermediate representations for this img.\nsuccessive_feature_maps = visualization_model.predict(x)\n\n# These are names of the layers\nlayer_names = [layer.name for layer in model.layers]\n\n# Now let's display our representations\nfor layer_name, feature_map in zip(layer_names, successive_feature_maps):\n if len(feature_map.shape) == 4:\n # Just do this for the conv/ maxpool layers, not the fully-connected layers\n n_features = feature_map.shape[-1] # number of features in feature map\n\n # retrieve a list of lists results on training and validattion data\n # sets for each training epoch\n loss = history.history['val_loss']\n\n # Get number of epochs\n epochs = range(len(acc))\n\n # Plot training and validation accuracy per epoch\n plt.plot(epochs, acc)\n plt.plot(epochs, val_acc)\n plt.title('Train and validation accuracy')\n\n plt.figure()\n\n # plot training and validation loss per epoch\n plt.plot(epochs, loss)\n plt.plot(epochs, val_loss)\n plt.title('Training and validation loss')\n\n\n# Evaluating Accuracy and Loss for the Model\n# plot the training / validation accuracy and loss as collected during training\n# In[ ]:\n\n# Retrieve a list of accuracy results on training and validation 
data\n# sets for each training epoch\nacc = history.history['acc']\nval_acc = history.history['val_acc']\n\n# Retrieve a list of list results on training and validation data\n# sets for each training epoch\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\n# Get number of epochs\nepochs = range(len(acc))\n\n# Plot training and validation accuracy per epoch\nplt.plot(epochs, acc)\nplt.plot(epochs, val_acc)\nplt.title('Training and validation accuracy')\n\nplt.figure()\n\n# Plot training and validation loss per epoch\nplt.plot(epochs, loss)\nplt.plot(epochs, val_loss)\nplt.title('Training and validation loss')\n\n# Clean Up\n# In[ ]:\n\nimport os, signal\nos.kill(os.getpid(), signal.SIGKILL)\n"
] | [
[
"matplotlib.pyplot.imshow",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.title",
"tensorflow.keras.preprocessing.image.load_img",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.Model",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"matplotlib.image.imread",
"tensorflow.keras.layers.MaxPooling2D",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
}
] |