repo_name (string, length 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
dependencyInversion/UdemyML | [
"9e1a0a01688a82c61ef006a592a58c12fb186552"
] | [
"Chapter2_Python/NumpyIntro.py"
] | [
"import numpy as np\n\nlist1 = np.array([-2, 1, 2, -10, 22, -10])\nlist2 = np.array([-20, 123, 112, -10, 22, -120])\n\nprint(\n f\"values: {list1}\",\n f\"min: {np.min(list1)}\",\n f\"max: {np.max(list1)}\",\n f\"mean: {np.mean(list1)}\",\n f\"median: {np.median(list2)}\",\n sep=\"\\n\",\n end=\"\\n\\n\"\n)\n\nprint(\n f\"values: {list2}\",\n f\"min: {np.min(list2)}\",\n f\"max: {np.max(list2)}\",\n f\"mean: {np.mean(list2)}\",\n f\"median: {np.median(list2)}\",\n sep=\"\\n\",\n end=\"\\n\\n\"\n)\n"
] | [
[
"numpy.min",
"numpy.median",
"numpy.max",
"numpy.mean",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RobZelluf/RL_pong | [
"55c8feeb9c43c1c11d6fd8924660e8038138cf7e"
] | [
"DQN_SAA/DQN_SAA.py"
] | [
"from wimblepong import Wimblepong\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nfrom utils import Transition, ReplayMemory\n\n\nclass Q_CNN(nn.Module):\n def __init__(self, state_space, action_space, size, fc1_size=64):\n super(Q_CNN, self).__init__()\n self.state_space = state_space\n self.action_space = action_space\n self.linear_size = 13 * 13 * 32\n\n self.conv1 = nn.Conv2d(1, 8, 4, 4)\n self.conv2 = nn.Conv2d(8, 16, 3, 2)\n self.conv3 = nn.Conv2d(16, 32, 2, 1)\n self.fc1 = torch.nn.Linear(self.linear_size, fc1_size)\n self.fc2 = torch.nn.Linear(fc1_size, action_space)\n\n def forward(self, x):\n # Computes the activation of the first convolution\n # Size changes from (3, 32, 32) to (18, 32, 32)\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n print(x.shape)\n\n # Reshape data to input to the input layer of the neural net\n # Size changes from (18, 16, 16) to (1, 4608)\n # Recall that the -1 infers this dimension from the other given dimension\n x = x.view(-1, self.fc1.in_features)\n\n # Computes the activation of the first fully connected layer\n # Size changes from (1, 4608) to (1, 64)\n x = F.relu(self.fc1(x))\n\n # Computes the second fully connected layer (activation applied later)\n # Size changes from (1, 64) to (1, 10)\n x = self.fc2(x)\n return x\n\n\nclass DQN_SAA(object):\n def __init__(self, env, player_id=1, size=200, model_info=None, fc1_size=64):\n if type(env) is not Wimblepong:\n raise TypeError(\"I'm not a very smart AI. All I can play is Wimblepong.\")\n\n self.env = env\n self.player_id = player_id\n self.name = \"SAA\"\n self.gamma = 0.98\n self.size = size\n self.fc1_size = fc1_size\n\n if torch.cuda.is_available():\n print(\"Using GPU!\")\n torch.cuda.set_device(0)\n\n self.state_space = env.observation_space\n self.action_space = env.action_space.n\n self.memory = ReplayMemory()\n self.batch_size = 256 * 2\n self.chosen_actions = np.zeros(self.action_space)\n\n if model_info is not None:\n self.policy_net = torch.load(\"DQN_SAA/\" + model_info[\"model_name\"] + \"/policy_net.pth\")\n self.size = model_info[\"size\"]\n print(\"Policy loaded!\")\n else:\n self.policy_net = Q_CNN(self.state_space, self.action_space, size, fc1_size=fc1_size)\n\n self.target_net = self.policy_net\n self.optimizer = optim.Adam(self.policy_net.parameters(), lr=1e-4)\n\n def update_network(self, updates=1):\n for _ in range(updates):\n self._do_network_update()\n\n def _do_network_update(self):\n if len(self.memory) < self.batch_size:\n return\n\n transitions = self.memory.sample(self.batch_size)\n # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for\n # detailed explanation). 
This converts batch-array of Transitions\n # to Transition of batch-arrays.\n batch = Transition(*zip(*transitions))\n\n # Compute a mask of non-final states and concatenate the batch elements\n # (a final state would've been the one after which simulation ended)\n non_final_mask = 1 - torch.tensor(batch.done, dtype=torch.uint8)\n non_final_next_states = [s for nonfinal, s in zip(non_final_mask,\n batch.next_state) if nonfinal > 0]\n non_final_next_states = torch.stack(non_final_next_states)\n state_batch = torch.stack(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.cat(batch.reward)\n\n state_action_values = self.policy_net(state_batch).gather(1, action_batch)\n\n next_state_values = torch.zeros(self.batch_size)\n next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0].detach()\n\n expected_state_action_values = reward_batch + self.gamma * next_state_values\n\n loss = F.mse_loss(state_action_values.squeeze(),\n expected_state_action_values)\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n for param in self.policy_net.parameters():\n param.grad.data.clamp_(-1e-1, 1e-1)\n self.optimizer.step()\n\n def get_name(self):\n return self.name\n\n def get_action(self, state, epsilon=0.05):\n sample = random.random()\n if sample > epsilon:\n with torch.no_grad():\n state = state.reshape(1, 1, self.size, self.size)\n state = torch.from_numpy(state).float()\n q_values = self.policy_net(state)\n action = torch.argmax(q_values).item()\n self.chosen_actions[action] += 1\n\n return action\n else:\n return random.randrange(self.action_space)\n\n def update_target_network(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n def store_transition(self, state, action, next_state, reward, done):\n action = torch.Tensor([[action]]).long()\n reward = torch.tensor([reward], dtype=torch.float32)\n next_state = torch.from_numpy(next_state).float()\n state = torch.from_numpy(state).float()\n self.memory.push(state, action, next_state, reward, done)"
] | [
[
"torch.cuda.set_device",
"torch.cat",
"torch.zeros",
"torch.load",
"torch.Tensor",
"torch.nn.Conv2d",
"torch.from_numpy",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available",
"torch.stack",
"numpy.zeros",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adellanno/MetaXcan | [
"cfc9e369bbf5630e0c9488993cd877f231c5d02e",
"cfc9e369bbf5630e0c9488993cd877f231c5d02e",
"cfc9e369bbf5630e0c9488993cd877f231c5d02e"
] | [
"software/metax/misc/KeyedDataSource.py",
"software/metax/predixcan/Simulations.py",
"software/tests/test_feature_matrix.py"
] | [
"import gzip\nimport io\nimport pandas\n\nfrom .. import Utilities\n\ndef try_parse(string, fail=None):\n try:\n return float(string)\n except Exception:\n return fail;\n\ndef skip_na(key, value):\n skip = (not value or value == \"NA\")\n return skip\n\ndef skip_non_rsid_value(key, value):\n return not \"rs\" in value\n\ndef dot_to_na(value):\n return \"NA\" if value == \".\" else value\n\ndef load_data(path, key_name, value_name, white_list=None, value_white_list=None, numeric=False, should_skip=None, value_conversion=None, key_filter=None):\n data = {}\n c_key=None\n c_value=None\n for i, line in enumerate(Utilities.generate_from_any_plain_file(path)):\n if i==0:\n header = line.strip().split()\n c_key = header.index(key_name)\n c_value = header.index(value_name)\n continue\n\n comps = line.strip().split()\n key = comps[c_key]\n if white_list and not key in white_list:\n continue\n if key_filter and key_filter(key):\n continue\n\n value = comps[c_value]\n if value_white_list and not value in value_white_list:\n continue\n\n if should_skip and should_skip(key, value):\n continue\n\n if value_conversion:\n value = value_conversion(value)\n elif numeric:\n value = try_parse(value)\n\n if value:\n data[key] = value\n\n return data\n\ndef load_data_column(path, column_name):\n def _ogz(p):\n return io.TextIOWrapper(gzip.open(p, \"r\"), newline=\"\")\n _o = _ogz if \".gz\" in path else open\n data = []\n c_key=None\n with _o(path) as file:\n for i, line in enumerate(file):\n if i==0:\n header = line.strip().split()\n c_key = header.index(column_name)\n continue\n\n comps = line.strip().split()\n value = comps[c_key]\n data.append(value)\n return data\n\ndef to_data_frame(data, keys, key_column, value_column, to_numeric=None):\n ids = [x for x in keys if x in data]\n data = [(x, data[x]) for x in ids]\n if len(data) == 0:\n return pandas.DataFrame({key_column: [], value_column: []})\n data = Utilities.to_dataframe(data, [key_column, value_column], to_numeric)\n return data",
"__author__ = \"alvaro barbeira\"\n\nimport numpy\nimport pandas\nimport copy\nimport math\n\nfrom . import Utilities\nfrom . import MultiPrediXcanAssociation, PrediXcanAssociation\nfrom ..expression import HDF5Expression, Expression\n\n########################################################################################################################\ndef mp_callback(gene, model, result, vt_projection, variance, model_keys, coefs, save):\n save[\"coefs\"] = coefs\n\nclass Context(object):\n def __init__(self, expression_manager, phenotype_generator, filter, do_predixcan=False, only_truth=False):\n self.expression_manager = expression_manager\n self.phenotype_generator = phenotype_generator\n self.filter = filter\n self.do_predixcan = do_predixcan\n self.only_truth = only_truth\n\n def do_predixcan(self):\n return self.do_predixcan\n\n def get_genes(self):\n return self.expression_manager.get_genes()\n\n def __enter__(self):\n self.expression_manager.enter()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.expression_manager.exit()\n\n def get_mp_simulation(self, gene):\n if not gene:\n return Utilities.DumbMTPContext(None, None, None, self.filter), None, None\n\n expression = self.expression_manager.expression_for_gene(gene)\n phenotype, description = self.phenotype_generator.get(expression, gene)\n if phenotype is None:\n return None, None, None\n\n _cp = None\n if self.do_predixcan:\n _cp = {}\n for t in description.itertuples():\n if \"covariate\" in t.variable: continue\n _cp[t.variable] = Utilities.DumbPContext(expression[t.variable], phenotype, gene, self.filter)\n\n if self.only_truth:\n expression = {x.variable:expression[x.variable] for x in description.itertuples() if x.variable in expression}\n\n return Utilities.DumbMTPContext(expression, phenotype, gene, self.filter), _cp, description\n\n########################################################################################################################\n\nclass PhenotypeGenerator(object):\n def __init__(self): raise RuntimeError(\"Not implemented\")\n\nclass RandomPhenotypeGenerator(PhenotypeGenerator):\n def __init__(self):\n pass\n\n def get(self, expression, gene):\n k = list(expression.keys())[0]\n e = expression[k]\n n = len(e)\n pheno = numpy.random.uniform(size=n)\n description = pandas.DataFrame({ \"variable\":[\"covariate\"], \"param\": [1.0]})\n return pheno, description\n\nclass LinearCombinationPhenotypeGenerator(PhenotypeGenerator):\n def __init__(self, combination, covariate_sd, use_all=None):\n self.combination = combination\n self.covariate_sd = covariate_sd\n self.use_all = use_all\n\n def get(self, expression, gene):\n combination = copy.copy(self.combination)\n if self.use_all:\n if type(self.use_all) == float:\n c = self.use_all\n elif self.use_all == \"ONE_VAR\":\n c = math.sqrt(1.0 / len(expression))\n elif self.use_all == \"FIX_VAR\":\n c = 1.0\n combination[\"covariate\"] = math.sqrt(len(expression)*99)\n else:\n raise RuntimeError(\"Unsupported option\")\n for e in list(expression.keys()):\n combination[e] = c\n\n return _pheno_from_combination(expression, combination, self.covariate_sd)\n\nclass CombinationOfCorrelatedPhenotypeGenerator(PhenotypeGenerator):\n def __init__(self, covariate_coefficient, covariate_sd, threshold):\n self.covariate_coefficient = covariate_coefficient\n self.threshold = threshold\n self.covariate_sd = covariate_sd\n\n def get(self, expression, gene):\n # Get the tissue with the most correlated siblings;\n # then average them build a 
phenotype\n values = list(expression.values())\n if len(values) == 1:\n return None, None\n e = values\n c = numpy.corrcoef(e)\n d = len(expression)\n f = 0\n r = 0\n for i in range(0, d):\n f_ = numpy.sum(c[i] > self.threshold)\n if f_ > f:\n r = i\n f = f_\n\n if f<2:\n return None, None\n\n which = c[r] > self.threshold\n keys = list(expression.keys())\n combination = {keys[i]:math.sqrt(1.0/f) for i in range(0, d) if which[i]}\n #for i in xrange(0,d):\n # combination[\"covariate_{}\".format(i)] = 10.0/f\n combination[\"covariate\"] = self.covariate_coefficient\n\n return _pheno_from_combination(expression, combination, self.covariate_sd)\n\ndef _pheno_from_combination(expression, combination, covariate_sd):\n ok = True\n _k = list(expression.keys())[0]\n _e = expression[_k]\n n = len(_e)\n e = numpy.zeros(n)\n used = set()\n for k, v in combination.items():\n if k in expression:\n e += expression[k] * v\n used.add(k)\n elif \"covariate\" in k:\n e += numpy.random.normal(scale=covariate_sd, size=n) * v\n used.add(k)\n else:\n # If we couldn't build a model with the desired combination, abort\n ok = False\n break\n\n if not ok:\n return None, None\n\n _c = {x: v for x, v in combination.items() if x in used}\n pheno = e\n description = pandas.DataFrame({\"variable\": list(_c.keys()), \"param\": list(_c.values())})\n return pheno, description\n\n########################################################################################################################\n\nclass SExpressionManager(Expression.ExpressionManager):\n def __init__(self, em):\n self.em = em\n self.which = None\n\n def expression_for_gene(self, gene):\n e = self.em.expression_for_gene(gene)\n\n if self.which is None:\n n = len(e[list(e.keys())[0]])\n s = 10000\n #self.which = numpy.random.choice([True, False], size=n, p=[s*1.0/n, 1 - s*1.0/n])\n self.which = list(numpy.random.choice(range(0,n), size=s, replace=False))\n\n e = {k:v[self.which] for k,v in e.items()}\n return e\n\n def get_genes(self): return self.em.get_genes()\n def enter(self): return self.em.enter()\n def exit(self): self.em.exit()\n\n\ndef context_from_args(args):\n #expression_ = HDF5Expression.ExpressionManager(args.hdf5_expression_folder, args.expression_pattern, code_999=args.code_999, standardise= args.standardize_expression)\n #expression = SExpressionManager(expression_)\n expression = HDF5Expression.ExpressionManager(args.expression_folder, args.expression_pattern,\n code_999=args.code_999, standardise=args.standardize_expression)\n\n def _argumentize(x, t, default=1.0):\n return t(x) if x is not None else default\n\n p = {x[0]: x[1] for x in args.simulation_parameters} if args.simulation_parameters else {}\n covariate_coefficient = _argumentize(p.get(\"covariate_coefficient\"), float)\n covariate_sd = _argumentize(p.get(\"covariate_sd\"), float)\n if args.simulation_type == \"random\":\n phenotype = RandomPhenotypeGenerator()\n elif args.simulation_type == \"combination\":\n use_all = None\n if \"model_spec\" in p:\n _c = p.get(\"model_spec\")\n if not _c:\n _c = {}\n else:\n _c = _c.split()\n _c = {_c[i*2]:float(_c[i*2+1]) for i in range(0,len(_c)/2)}\n elif \"use_tissues\" in p:\n _c = p.get(\"use_tissues\").strip().split()\n _c = {x:math.sqrt(1.0/len(_c)) for x in _c}\n elif \"use_all\" in p:\n _c = {}\n if p[\"use_all\"] == \"ONE_VAR\" or p[\"use_all\"] == \"FIX_VAR\":\n use_all = p[\"use_all\"]\n else:\n use_all = float(p[\"use_all\"])\n _c[\"covariate\"] = covariate_coefficient\n phenotype = LinearCombinationPhenotypeGenerator(_c, 
covariate_sd=covariate_sd, use_all=use_all)\n elif args.simulation_type == \"combination_from_correlated\":\n threshold = _argumentize(p.get(\"threshold\"), float, 0.9)\n phenotype = CombinationOfCorrelatedPhenotypeGenerator(covariate_coefficient=covariate_coefficient, covariate_sd=covariate_sd, threshold=threshold)\n else:\n raise RuntimeError(\"Wrong phenotype simulation spec\")\n filter = Utilities._filter_from_args(args)\n context = Context(expression, phenotype, filter, args.do_predixcan, args.only_truth)\n return context\n\n\n########################################################################################################################\n\ndef simulate(gene, context):\n save_results = {}\n _cb = lambda gene, model, result, vt_projection, variance, model_keys, coefs: mp_callback(gene, model, result, vt_projection, variance, model_keys, coefs, save_results)\n\n _context_mt, _context_p, _description = context.get_mp_simulation(gene)\n if _context_mt is None:\n return None, None, None\n\n p = None\n if _context_p:\n p = pandas.DataFrame()\n for model,_c in _context_p.items():\n p_ = PrediXcanAssociation.predixcan_association(gene, _c)\n p_ = PrediXcanAssociation.dataframe_from_results([p_])\n p_[\"model\"] = model\n p = pandas.concat([p,p_])\n\n\n r = MultiPrediXcanAssociation.multi_predixcan_association(gene, _context_mt, [_cb])\n description = _description.assign(gene=gene, type=\"truth\")\n coefs = save_results[\"coefs\"].assign(gene=gene, type=\"result\")\n description = pandas.concat([description, coefs])\n\n return r, description, p",
"import numpy\nimport numpy.testing\n\nimport unittest\nfrom metax import misc\nfrom metax.misc import FeatureMatrix\n\nfrom . import SampleData\n\nEXPECTED_A = \\\n[[ 0.1 , 0.01, 0.07],\n [ 0.01, 0.02, 0.02],\n[ 0.07, 0.02, 0.06]]\n\nEXPECTED_B = \\\n[[ 3.5 , 3.84 , 3.67 ],\n[ 3.84 , 4.44 , 4.14 ],\n[ 3.67 , 4.14 , 3.905]]\n\nEXPECTED_D = \\\n[[ 1.74, 2.44],\n[ 2.44, 3.47]]\n\nclass TestFeatureMatrix(unittest.TestCase):\n\n def test_load(self):\n data = SampleData.set_of_feature_sets()\n manager = FeatureMatrix.FeatureMatrixManager(data, standardize=False)\n\n a = manager.data[\"a\"]\n self.assertEqual(len(a), 3)\n numpy.testing.assert_almost_equal(a[\"1\"], [0.0, 0.1, 0.3, 0.0])\n numpy.testing.assert_almost_equal(a[\"2\"], [0.0, 0.1, 0.0, 0.1])\n numpy.testing.assert_almost_equal(a[\"3\"], [0.0, 0.1, 0.2, 0.1])\n\n a_m, a_labels = manager.get_feature_product(\"a\")\n numpy.testing.assert_almost_equal(a_m, EXPECTED_A)\n self.assertEqual(a_labels, [\"1\", \"2\", \"3\"])\n\n a_m, a_labels = manager.get_feature_product(\"a\", center=True)\n numpy.testing.assert_almost_equal(a_m, numpy.cov([a[\"1\"], a[\"2\"], a[\"3\"]]))\n self.assertEqual(a_labels, [\"1\", \"2\", \"3\"])\n\n b = manager.data[\"b\"]\n self.assertEqual(len(b), 3)\n numpy.testing.assert_almost_equal(b[\"1\"], [1.0, 0.5, 1.2, 0.9])\n numpy.testing.assert_almost_equal(b[\"2\"], [1.0, 1.0, 1.2, 1.0])\n numpy.testing.assert_almost_equal(b[\"3\"], [1.0, 0.75, 1.2, 0.95])\n\n b_m, b_labels = manager.get_feature_product(\"b\")\n numpy.testing.assert_almost_equal(b_m, EXPECTED_B)\n self.assertEqual(b_labels, [\"1\", \"2\", \"3\"])\n\n d = manager.data[\"d\"]\n self.assertEqual(len(d), 2)\n numpy.testing.assert_almost_equal(d[\"2\"], [0.5, 0.7, 0.6, 0.8])\n numpy.testing.assert_almost_equal(d[\"3\"], [0.9, 0.9, 0.8, 1.1])\n\n d_m, d_labels = manager.get_feature_product(\"d\")\n numpy.testing.assert_almost_equal(d_m, EXPECTED_D)\n self.assertEqual(d_labels, [\"2\", \"3\"])\n\n e_m, e_labels = manager.get_feature_product(\"e\")\n numpy.testing.assert_almost_equal(e_m, [[ 0.3726]])\n self.assertEqual(e_labels, [\"2\"])\n\n"
] | [
[
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.corrcoef",
"numpy.random.uniform",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.testing.assert_almost_equal",
"numpy.cov"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simonfqy/DTI_prediction | [
"e01c592cc06c4de04b3ed6db35da5af5ff7f863f",
"e01c592cc06c4de04b3ed6db35da5af5ff7f863f",
"e01c592cc06c4de04b3ed6db35da5af5ff7f863f"
] | [
"davis_data/preprocess.py",
"dcCustom/data/data_loader.py",
"metz_data/preprocess.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport os\nimport sys\nimport pdb\nimport csv\n\n\ndef generate_data(input_csv, binarize=False, head_only=False, head_row_num=15000, \n limit_rows=False, limit_row_num=2400, prefix=\"davis_\", input_prot=True, output_csv=None):\n df = pd.read_csv(input_csv, header = 2, index_col=0, usecols=range(3, 76))\n if head_only:\n df = df.head(head_row_num)\n molList = list(df)\n #print(len(molList))\n protList = list(df.index)\n interactions = [] \n for row in df.itertuples():\n intxn = list(row)[1:]\n interactions.append(intxn) \n #print(interactions)\n interactions = np.array(interactions)\n interactions[np.isnan(interactions)] = 10000\n interactions = 9 - np.log10(interactions)\n if binarize:\n interaction_bin = (interactions >= 7.0) * 1\n if limit_rows:\n counter = 0\n with open(output_csv, 'w', newline='') as csvfile:\n fieldnames = ['smiles']\n if input_prot:\n fieldnames = ['davis'] + fieldnames + ['proteinName', 'protein_dataset']\n if binarize:\n fieldnames = ['davis_bin'] + fieldnames\n else:\n tasks = [prefix + prot for prot in protList]\n fieldnames = tasks + fieldnames\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n\n if input_prot:\n for i, protein in enumerate(protList): \n output_dict = {'proteinName': protein, 'protein_dataset': 'davis'}\n for j, compound in enumerate(molList):\n # will start writing rows.\n intxn_value = interactions[i][j]\n output_dict.update({'davis': intxn_value, 'smiles': compound})\n if binarize:\n intxn_bin = interaction_bin[i][j]\n output_dict['davis_bin'] = intxn_bin \n writer.writerow(output_dict)\n if not limit_rows:\n continue\n counter += 1\n if (counter > limit_row_num):\n break\n if not limit_rows:\n continue\n if (counter > limit_row_num):\n break\n else:\n for j, compound in enumerate(molList):\n output_dict = {'smiles': compound}\n for i, _ in enumerate(protList):\n task_name = fieldnames[i]\n output_dict[task_name] = interactions[i][j]\n writer.writerow(output_dict)\n\nif __name__ == '__main__':\n generate_data('Bio_results.csv', input_prot=True, limit_rows=True, limit_row_num=2400, \n output_csv='restructured_toy.csv')\n \n",
"\"\"\"\nProcess an input dataset into a format suitable for machine learning.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\nimport os\nimport gzip\nimport pandas as pd\nimport numpy as np\nimport csv\nimport numbers\nimport tempfile\nfrom rdkit.Chem import rdmolfiles\nfrom rdkit.Chem import rdmolops\nfrom rdkit import Chem\nimport time\nimport sys\nimport pdb\nfrom deepchem.utils.save import log\nfrom deepchem.utils.save import load_csv_files\n#from deepchem.utils.save import load_sdf_files\n#from deepchem.utils.save import encode_fasta_sequence\nfrom deepchem.feat import UserDefinedFeaturizer\nfrom dcCustom.data import DiskDataset\nfrom dcCustom.feat import Protein\n\n\ndef convert_df_to_numpy(df, tasks, verbose=False):\n \"\"\"Transforms a dataframe containing deepchem input into numpy arrays\"\"\"\n n_samples = df.shape[0]\n n_tasks = len(tasks)\n\n time1 = time.time()\n y = np.hstack(\n [np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])\n time2 = time.time()\n\n w = np.ones((n_samples, n_tasks))\n missing = np.zeros_like(y).astype(int)\n feature_shape = None\n\n for ind in range(n_samples):\n for task in range(n_tasks):\n if y[ind, task] == \"\":\n missing[ind, task] = 1\n\n # ids = df[id_field].values\n # Set missing data to have weight zero\n for ind in range(n_samples):\n for task in range(n_tasks):\n if missing[ind, task]:\n y[ind, task] = 0.\n w[ind, task] = 0.\n\n return y.astype(float), w.astype(float)\n \ndef featurize_protein(df, field, source_field, prot_seq_dict, log_every_N=500, verbose=True):\n '''This is supposed to match the format of functions for featurizing molecules.\n It is not really featurizing, but only constructs the protein objects from their names.'''\n elems = df[field].tolist()\n sources = df[source_field].tolist()\n proteins = []\n for ind, prot in enumerate(elems):\n source = sources[ind]\n pair = (source, prot)\n sequence = prot_seq_dict[pair]\n proteins.append([Protein(prot, source = source, sequence = sequence)]) \n #return np.squeeze(np.array(proteins), axis=1), valid_inds\n return np.array(proteins)\n \ndef featurize_smiles_df(df, featurizer, field, log_every_N=1000, verbose=True):\n \"\"\"Featurize individual compounds in dataframe.\n\n Given a featurizer that operates on individual chemical compounds \n or macromolecules, compute & add features for that compound to the \n features dataframe\n \"\"\"\n sample_elems = df[field].tolist()\n\n features = []\n stderr_fileno = sys.stderr.fileno()\n stderr_save = os.dup(stderr_fileno)\n stderr_fd = open('./logs/error.log', 'a')\n os.dup2(stderr_fd.fileno(), stderr_fileno)\n for ind, elem in enumerate(sample_elems):\n\n mol = Chem.MolFromSmiles(elem) \n # TODO (ytz) this is a bandage solution to reorder the atoms so\n # that they're always in the same canonical order. 
Presumably this\n # should be correctly implemented in the future for graph mols.\n if mol:\n new_order = rdmolfiles.CanonicalRankAtoms(mol)\n mol = rdmolops.RenumberAtoms(mol, new_order)\n if ind % log_every_N == 0:\n log(\"Featurizing sample %d\" % ind, verbose)\n features.append(featurizer.featurize([mol], smiles=elem))\n\n stderr_fd.close()\n os.dup2(stderr_save, stderr_fileno)\n \n valid_inds = np.array(\n [1 if elt.size > 0 else 0 for elt in features], dtype=bool)\n features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]\n \n #return np.squeeze(np.array(features), axis=1), valid_inds\n return np.array(features), valid_inds\n\n\ndef featurize_smiles_np(arr, featurizer, log_every_N=1000, verbose=True):\n \"\"\"Featurize individual compounds in a numpy array.\n\n Given a featurizer that operates on individual chemical compounds\n or macromolecules, compute & add features for that compound to the\n features array\n \"\"\"\n features = []\n for ind, elem in enumerate(arr.tolist()):\n mol = Chem.MolFromSmiles(elem)\n if mol:\n new_order = rdmolfiles.CanonicalRankAtoms(mol)\n mol = rdmolops.RenumberAtoms(mol, new_order)\n if ind % log_every_N == 0:\n log(\"Featurizing sample %d\" % ind, verbose)\n features.append(featurizer.featurize([mol]))\n\n valid_inds = np.array(\n [1 if elt.size > 0 else 0 for elt in features], dtype=bool)\n features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]\n features = np.squeeze(np.array(features))\n return features.reshape(-1,)\n\n\ndef get_user_specified_features(df, featurizer, verbose=True):\n \"\"\"Extract and merge user specified features. \n\n Merge features included in dataset provided by user\n into final features dataframe\n\n Three types of featurization here:\n\n 1) Molecule featurization\n -) Smiles string featurization\n -) Rdkit MOL featurization\n 2) Complex featurization\n -) PDB files for interacting molecules.\n 3) User specified featurizations.\n\n \"\"\"\n time1 = time.time()\n df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(\n pd.to_numeric)\n X_shard = df.as_matrix(columns=featurizer.feature_fields)\n time2 = time.time()\n log(\"TIMING: user specified processing took %0.3f s\" % (time2 - time1),\n verbose)\n return X_shard\n\n\ndef featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):\n \"\"\"Featurize individual compounds in dataframe.\n\n Featurizes .sdf files, so the 3-D structure should be preserved\n so we use the rdkit \"mol\" object created from .sdf instead of smiles\n string. Some featurizers such as CoulombMatrix also require a 3-D\n structure. Featurizing from .sdf is currently the only way to\n perform CM feautization.\n \"\"\"\n sample_elems = df[field].tolist()\n\n features = []\n for ind, mol in enumerate(sample_elems):\n if ind % log_every_N == 0:\n log(\"Featurizing sample %d\" % ind, verbose)\n features.append(featurizer.featurize([mol]))\n valid_inds = np.array(\n [1 if elt.size > 0 else 0 for elt in features], dtype=bool)\n features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]\n return np.squeeze(np.array(features)), valid_inds\n\n\nclass DataLoader(object):\n \"\"\"\n Handles loading/featurizing of chemical samples (datapoints).\n\n Currently knows how to load csv-files/pandas-dataframes/SDF-files. 
Writes a\n dataframe object to disk as output.\n \"\"\"\n\n def __init__(self,\n tasks,\n smiles_field=None,\n id_field=None,\n mol_field=None,\n featurizer=None,\n protein_field=None,\n source_field=None, \n verbose=True,\n prot_seq_dict=None,\n log_every_n=1000,\n input_protein=True):\n \"\"\"Extracts data from input as Pandas data frame\"\"\"\n if not isinstance(tasks, list):\n raise ValueError(\"tasks must be a list.\")\n self.verbose = verbose\n self.tasks = tasks\n self.smiles_field = smiles_field\n if id_field is None:\n self.id_field = smiles_field\n else:\n self.id_field = id_field\n self.mol_field = mol_field\n self.protein_field = protein_field\n self.source_field = source_field\n self.prot_seq_dict = prot_seq_dict\n self.user_specified_features = None\n if isinstance(featurizer, UserDefinedFeaturizer):\n self.user_specified_features = featurizer.feature_fields\n self.featurizer = featurizer\n self.log_every_n = log_every_n\n self.input_protein = input_protein\n\n def featurize(self, input_files, data_dir=None, shard_size=8192):\n \"\"\"Featurize provided files and write to specified location.\n \n For large datasets, automatically shards into smaller chunks\n for convenience.\n\n Parameters\n ----------\n input_files: list\n List of input filenames.\n data_dir: str\n (Optional) Directory to store featurized dataset.\n shard_size: int\n (Optional) Number of examples stored in each shard.\n \"\"\"\n log(\"Loading raw samples now.\", self.verbose)\n log(\"shard_size: %d\" % shard_size, self.verbose)\n\n if not isinstance(input_files, list):\n input_files = [input_files]\n\n def shard_generator():\n for shard_num, shard in enumerate(\n self.get_shards(input_files, shard_size)):\n time1 = time.time()\n X, valid_inds = self.featurize_shard(shard)\n ids = shard[self.id_field].values\n ids = ids[valid_inds]\n if len(self.tasks) > 0:\n # Featurize task results iff they exist.\n y, w = convert_df_to_numpy(shard, self.tasks, self.id_field)\n # Filter out examples where featurization failed.\n y, w = (y[valid_inds], w[valid_inds])\n assert len(X) == len(ids) == len(y) == len(w)\n else:\n # For prospective data where results are unknown, it makes\n # no sense to have y values or weights.\n y, w = (None, None)\n assert len(X) == len(ids)\n\n time2 = time.time()\n log(\"TIMING: featurizing shard %d took %0.3f s\" %\n (shard_num, time2 - time1), self.verbose)\n yield X, y, w, ids\n\n return DiskDataset.create_dataset(\n shard_generator(), data_dir, self.tasks, verbose=self.verbose)\n\n def get_shards(self, input_files, shard_size):\n \"\"\"Stub for children classes.\"\"\"\n raise NotImplementedError\n\n def featurize_shard(self, shard):\n \"\"\"Featurizes a shard of an input dataframe.\"\"\"\n raise NotImplementedError\n\n\nclass CSVLoader(DataLoader):\n \"\"\"\n Handles loading of CSV files.\n \"\"\"\n\n def get_shards(self, input_files, shard_size, verbose=True):\n \"\"\"Defines a generator which returns data for each shard\"\"\"\n return load_csv_files(input_files, shard_size, verbose=verbose)\n\n def featurize_shard(self, shard):\n \"\"\"Featurizes a shard of an input dataframe.\"\"\"\n mol_features, valid_inds = featurize_smiles_df(shard, self.featurizer, field=self.smiles_field)\n if len(mol_features.shape) > 2:\n mol_features = np.squeeze(mol_features)\n if self.input_protein:\n proteins = featurize_protein(shard, field=self.protein_field, source_field=self.source_field,\n prot_seq_dict=self.prot_seq_dict)\n # Note: for ECFP with 1024 entries, mol_features is a (8192, 1024) sized array. 
\n return np.concatenate((mol_features, proteins), axis=1), valid_inds\n else:\n return mol_features, valid_inds\n\n\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport argparse\nimport os\nimport sys\nimport pwd\nimport pdb\nimport csv\nimport re\nimport math\nfrom collections import OrderedDict\n\n\ndef generate_data(input_csv, binarize=False, head_only=False, head_row_num=15000, \n limit_rows=False, limit_row_num=2400, prefix=\"metz_\", input_prot=True, output_csv=None):\n\n df = pd.read_csv(input_csv, header = 0, index_col=0, usecols=range(2, 186))\n if head_only:\n df = df.head(head_row_num)\n protList = list(df)[11:]\n molList = list(df.index)\n molList = [molName for molName in molList if molName == molName]\n prot_dict = {i: prot for (i, prot) in enumerate(protList)}\n pair_dict = {}\n smiles_to_indices = {}\n invalid_mols = set()\n duplicate_mols = set()\n #print(len(molList))\n interactions = []\n row_ind = 0\n for row in df.itertuples(): \n values = list(row)\n if values[0] != values[0]:\n continue\n smiles = values[0]\n values = values[12:]\n intxn = []\n if smiles not in smiles_to_indices:\n smiles_to_indices[smiles] = [row_ind]\n else:\n smiles_to_indices[smiles].append(row_ind)\n #pdb.set_trace() \n for i, element in enumerate(values):\n if element == element: #Not a NAN value\n matchObj = re.match('\\d', element)\n if not matchObj:\n value = np.nan\n else:\n value = float(element)\n prot = prot_dict[i] \n pair = (smiles, prot)\n if pair not in pair_dict:\n pair_dict[pair] = value\n else:\n duplicate_mols.add(smiles)\n if pair_dict[pair] != value: \n invalid_mols.add(smiles) \n else:\n value = np.nan\n intxn.append(value) \n interactions.append(intxn)\n row_ind += 1\n\n if binarize:\n interactions = np.array(interactions)\n interaction_bin = (interactions >= 7.6) * 1\n\n counter = 0\n dup_indices = {smiles: inds for (smiles, inds) in smiles_to_indices.items()\n if smiles in (duplicate_mols - invalid_mols)}\n\n # Stores the duplicate molecules which have been processed.\n processed_duplicate_mols = set()\n\n with open(output_csv, 'w', newline='') as csvfile:\n fieldnames = ['smiles']\n if input_prot:\n fieldnames = ['metz'] + fieldnames + ['proteinName', 'protein_dataset']\n if binarize:\n fieldnames = ['metz_bin'] + fieldnames\n else:\n tasks = [prefix + prot for prot in protList]\n fieldnames = tasks + fieldnames\n\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n \n for i, compound in enumerate(molList):\n if compound in invalid_mols:\n continue\n if compound in processed_duplicate_mols:\n continue\n output_dict = {'smiles': compound}\n mol_inds = [i]\n pair_set = set()\n if compound in duplicate_mols:\n mol_inds = dup_indices[compound]\n processed_duplicate_mols.add(compound)\n\n if input_prot: \n for j in mol_inds: \n for k, protein in prot_dict.items(): \n intxn_value = interactions[j][k]\n \n if intxn_value != intxn_value:\n continue\n if len(mol_inds) > 1:\n pair = (compound, protein)\n if pair in pair_set:\n counter += 1\n continue\n pair_set.add(pair) \n output_dict.update({'metz': intxn_value, 'proteinName': protein, 'protein_dataset': 'metz'}) \n writer.writerow(output_dict)\n\n else:\n for j in mol_inds: \n for k, protein in prot_dict.items():\n intxn_value = interactions[j][k]\n if intxn_value != intxn_value:\n intxn_value = '' \n task_name = fieldnames[k]\n if len(mol_inds) > 1:\n pair = (compound, protein)\n if pair in pair_set:\n counter += 1 \n if intxn_value == 
'':\n continue \n if output_dict[task_name] != '':\n assert output_dict[task_name] == intxn_value \n \n pair_set.add(pair)\n output_dict[task_name] = intxn_value\n\n writer.writerow(output_dict) \n print(\"counter: \", str(counter)) \n\n\ndef filter_data(input_file, filter_threshold=1, output_csv=None):\n df = pd.read_csv(input_file, header = 0, index_col=False)\n headers = list(df) \n finished = False\n while not finished:\n # Each row of the df corresponds to a molecule, each column corresponds to a protein.\n is_not_null_df = df.notnull()\n # Sum the columns first.\n col_sum_nonnull_entries = is_not_null_df.sum()\n deleted_column_names = []\n if any(col_sum_nonnull_entries <= filter_threshold):\n col_name_to_nonnull_num = OrderedDict(col_sum_nonnull_entries)\n for col_name, nonnull_num in col_name_to_nonnull_num.items():\n if nonnull_num > filter_threshold:\n continue\n deleted_column_names.append(col_name)\n df = df.drop(deleted_column_names, axis=1)\n is_not_null_df = is_not_null_df.drop(deleted_column_names, axis=1)\n print(\"deleted column number: \", len(deleted_column_names))\n\n # Then sum the rows.\n row_sum_nonnull_entries = is_not_null_df.sum(axis=1)\n deleted_row_inds = []\n if any(row_sum_nonnull_entries <= filter_threshold + 1):\n row_ind_to_nonnull_num = OrderedDict(row_sum_nonnull_entries)\n for row_ind, nonnull_num in row_ind_to_nonnull_num.items():\n if nonnull_num > filter_threshold + 1:\n continue\n deleted_row_inds.append(row_ind)\n df = df.drop(deleted_row_inds)\n is_not_null_df = is_not_null_df.drop(deleted_row_inds)\n print(\"deleted row number: \", len(deleted_row_inds))\n\n col_sum_nonnull_entries = is_not_null_df.sum()\n if all(col_sum_nonnull_entries > filter_threshold):\n finished = True\n\n # Output.\n df.to_csv(output_csv, index=False)\n \n\nif __name__ == '__main__':\n # generate_data('Metz_interaction.csv', input_prot=False, output_csv='restructured_no_prot_unfiltered.csv')\n filter_data('restructured_no_prot_unfiltered.csv', filter_threshold=1, output_csv='restructured_no_prot.csv')"
] | [
[
"numpy.isnan",
"numpy.log10",
"numpy.array"
],
[
"numpy.squeeze",
"numpy.ones",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.array"
],
[
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch | [
"f1d3ad0711ca6b606f05dfee3ed223edd0ea699f",
"f1d3ad0711ca6b606f05dfee3ed223edd0ea699f"
] | [
"faster_rcnn/datasets/imdb.py",
"faster_rcnn/datasets/pascal_voc2.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport os\nimport os.path as osp\nimport PIL\nimport numpy as np\nimport scipy.sparse\n\nfrom ..utils.cython_bbox import bbox_overlaps\nfrom PIL import Image\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom ..fast_rcnn.config import cfg\n\n# <<<< obsolete\n\nROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')\nMATLAB = 'matlab_r2013b'\n\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name):\n self._name = name\n self._num_classes = 0\n self._classes = []\n self._image_index = []\n self._obj_proposer = 'selective_search'\n self._roidb = None\n print (self.default_roidb)\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes,\n 'gt_overlaps': self.roidb[i]['gt_overlaps'],\n 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n\n if 'gt_ishard' in self.roidb[i] and 'dontcare_areas' in self.roidb[i]:\n entry['gt_ishard'] = self.roidb[i]['gt_ishard'].copy()\n dontcare_areas = self.roidb[i]['dontcare_areas'].copy()\n oldx1 = dontcare_areas[:, 0].copy()\n oldx2 = dontcare_areas[:, 2].copy()\n dontcare_areas[:, 0] = widths[i] - oldx2 - 1\n dontcare_areas[:, 2] = widths[i] - oldx1 - 1\n 
entry['dontcare_areas'] = dontcare_areas\n\n self.roidb.append(entry)\n\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n \"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 'thresholds': vector of IoU overlap thresholds\n 'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [[0 ** 2, 1e5 ** 2], # all\n [0 ** 2, 32 ** 2], # small\n [32 ** 2, 96 ** 2], # medium\n [96 ** 2, 1e5 ** 2], # large\n [96 ** 2, 128 ** 2], # 96-128\n [128 ** 2, 256 ** 2], # 128-256\n [256 ** 2, 512 ** 2], # 256-512\n [512 ** 2, 1e5 ** 2], # 512-inf\n ]\n assert areas.has_key(area), 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in range(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in range(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert (gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert (_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in range(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)\n maxes = gt_overlaps.max(axis=1)\n I = np.where(maxes > 0)[0]\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes': boxes,\n 'gt_classes': np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in range(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n",
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport xml.dom.minidom as minidom\n\nimport os\nimport PIL\nimport numpy as np\nimport scipy.sparse\nimport subprocess\nimport _pickle as cPickle\nimport math\nimport glob\nimport uuid\nimport scipy.io as sio\nimport xml.etree.ElementTree as ET\n\nfrom .imdb import imdb\nfrom .imdb import ROOT_DIR\nfrom .imdb import MATLAB\n\nfrom ..utils.cython_bbox import bbox_overlaps\nfrom ..utils.boxes_grid import get_boxes_grid\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom ..fast_rcnn.config import cfg\nfrom ..rpn_msr.generate_anchors import generate_anchors\n# <<<< obsolete\n\nclass pascal_voc(imdb):\n def __init__(self, image_set, year, pascal_path=None):\n imdb.__init__(self, 'voc_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._pascal_path = self._get_default_path() if pascal_path is None \\\n else pascal_path\n self._data_path = os.path.join(self._pascal_path, 'VOCdevkit' + self._year, 'VOC' + self._year)\n self._classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.jpg'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n if cfg.IS_RPN:\n self._roidb_handler = self.gt_roidb\n else:\n self._roidb_handler = self.region_proposal_roidb\n\n # num of subclasses\n self._num_subclasses = 240 + 1\n\n # load the mapping for subcalss to class\n filename = os.path.join(self._pascal_path, 'subcategory_exemplars', 'mapping.txt')\n assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)\n \n mapping = np.zeros(self._num_subclasses, dtype=np.int)\n with open(filename) as f:\n for line in f:\n words = line.split()\n subcls = int(words[0])\n mapping[subcls] = self._class_to_ind[words[1]]\n self._subclass_mapping = mapping\n\n # PASCAL specific config options\n self.config = {'cleanup' : True,\n 'use_salt' : True,\n 'top_k' : 2000}\n\n # statistics for computing recall\n self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)\n self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)\n self._num_boxes_proposal = 0\n\n assert os.path.exists(self._pascal_path), \\\n 'PASCAL path does not exist: {}'.format(self._pascal_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._pascal_path + 
/VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join(ROOT_DIR, 'data', 'PASCAL')\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print( '{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_pascal_subcategory_exemplar_annotation(index)\n for index in self.image_index]\n\n if cfg.IS_RPN:\n # print( out recall\n for i in range(1, self.num_classes):\n print( '{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))\n print( '{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))\n print( '{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i])))\n\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print( 'wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n # print( 'Loading: {}'.format(filename)\n def get_data_from_tag(node, tag):\n return node.getElementsByTagName(tag)[0].childNodes[0].data\n\n with open(filename) as f:\n data = minidom.parseString(f.read())\n\n objs = data.getElementsByTagName('object')\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n # Make pixel indexes 0-based\n x1 = float(get_data_from_tag(obj, 'xmin')) - 1\n y1 = float(get_data_from_tag(obj, 'ymin')) - 1\n x2 = float(get_data_from_tag(obj, 'xmax')) - 1\n y2 = float(get_data_from_tag(obj, 'ymax')) - 1\n cls = self._class_to_ind[\n str(get_data_from_tag(obj, \"name\")).lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n gt_subclasses = np.zeros((num_objs), dtype=np.int32)\n gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)\n subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)\n subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)\n\n if cfg.IS_RPN:\n if cfg.IS_MULTISCALE:\n # compute overlaps between grid boxes and gt boxes in multi-scales\n # rescale the gt boxes\n boxes_all = np.zeros((0, 4), dtype=np.float32)\n for scale in cfg.TRAIN.SCALES:\n boxes_all = np.vstack((boxes_all, boxes * scale))\n gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))\n\n # compute grid boxes\n s = PIL.Image.open(self.image_path_from_index(index)).size\n image_height = s[1]\n image_width = s[0]\n 
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)\n\n # compute overlap\n overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))\n \n # check how many gt boxes are covered by grids\n if num_objs != 0:\n index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))\n max_overlaps = overlaps_grid.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n index_covered = np.unique(index[fg_inds])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])\n else:\n assert len(cfg.TRAIN.SCALES_BASE) == 1\n scale = cfg.TRAIN.SCALES_BASE[0]\n feat_stride = 16\n # faster rcnn region proposal\n anchors = generate_anchors()\n num_anchors = anchors.shape[0]\n\n # image size\n s = PIL.Image.open(self.image_path_from_index(index)).size\n image_height = s[1]\n image_width = s[0]\n\n # height and width of the heatmap\n height = np.round((image_height * scale - 1) / 4.0 + 1)\n height = np.floor((height - 1) / 2 + 1 + 0.5)\n height = np.floor((height - 1) / 2 + 1 + 0.5)\n\n width = np.round((image_width * scale - 1) / 4.0 + 1)\n width = np.floor((width - 1) / 2.0 + 1 + 0.5)\n width = np.floor((width - 1) / 2.0 + 1 + 0.5)\n\n # gt boxes\n gt_boxes = boxes * scale\n\n # 1. Generate proposals from bbox deltas and shifted anchors\n shift_x = np.arange(0, width) * feat_stride\n shift_y = np.arange(0, height) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose()\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = num_anchors\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n all_anchors = all_anchors.reshape((K * A, 4))\n\n # compute overlap\n overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))\n \n # check how many gt boxes are covered by anchors\n if num_objs != 0:\n max_overlaps = overlaps_grid.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_subclasses': gt_subclasses,\n 'gt_subclasses_flipped': gt_subclasses_flipped,\n 'gt_overlaps' : overlaps,\n 'gt_subindexes': subindexes,\n 'gt_subindexes_flipped': subindexes_flipped,\n 'flipped' : False}\n\n\n def _load_pascal_subcategory_exemplar_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from txt file in the pascal subcategory exemplar format.\n \"\"\"\n if self._image_set == 'test':\n return self._load_pascal_annotation(index)\n\n filename = os.path.join(self._pascal_path, 'subcategory_exemplars', index + '.txt')\n assert os.path.exists(filename), \\\n 'Path does not exist: {}'.format(filename)\n\n # the annotation file contains flipped objects \n lines = []\n lines_flipped = []\n with open(filename) as f:\n for line in f:\n words = line.split()\n subcls = int(words[1])\n is_flip = int(words[2])\n if subcls != -1:\n if is_flip == 
0:\n lines.append(line)\n else:\n lines_flipped.append(line)\n \n num_objs = len(lines)\n\n # store information of flipped objects\n assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'\n gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)\n \n for ix, line in enumerate(lines_flipped):\n words = line.split()\n subcls = int(words[1])\n gt_subclasses_flipped[ix] = subcls\n\n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n gt_subclasses = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)\n subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)\n\n for ix, line in enumerate(lines):\n words = line.split()\n cls = self._class_to_ind[words[0]]\n subcls = int(words[1])\n # Make pixel indexes 0-based\n boxes[ix, :] = [float(n)-1 for n in words[3:7]]\n gt_classes[ix] = cls\n gt_subclasses[ix] = subcls\n overlaps[ix, cls] = 1.0\n subindexes[ix, cls] = subcls\n subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n if cfg.IS_RPN:\n if cfg.IS_MULTISCALE:\n # compute overlaps between grid boxes and gt boxes in multi-scales\n # rescale the gt boxes\n boxes_all = np.zeros((0, 4), dtype=np.float32)\n for scale in cfg.TRAIN.SCALES:\n boxes_all = np.vstack((boxes_all, boxes * scale))\n gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))\n\n # compute grid boxes\n s = PIL.Image.open(self.image_path_from_index(index)).size\n image_height = s[1]\n image_width = s[0]\n boxes_grid, _, _ = get_boxes_grid(image_height, image_width)\n\n # compute overlap\n overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))\n \n # check how many gt boxes are covered by grids\n if num_objs != 0:\n index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))\n max_overlaps = overlaps_grid.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n index_covered = np.unique(index[fg_inds])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])\n else:\n assert len(cfg.TRAIN.SCALES_BASE) == 1\n scale = cfg.TRAIN.SCALES_BASE[0]\n feat_stride = 16\n # faster rcnn region proposal\n base_size = 16\n ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]\n scales = 2**np.arange(1, 6, 0.5)\n anchors = generate_anchors(base_size, ratios, scales)\n num_anchors = anchors.shape[0]\n\n # image size\n s = PIL.Image.open(self.image_path_from_index(index)).size\n image_height = s[1]\n image_width = s[0]\n\n # height and width of the heatmap\n height = np.round((image_height * scale - 1) / 4.0 + 1)\n height = np.floor((height - 1) / 2 + 1 + 0.5)\n height = np.floor((height - 1) / 2 + 1 + 0.5)\n\n width = np.round((image_width * scale - 1) / 4.0 + 1)\n width = np.floor((width - 1) / 2.0 + 1 + 0.5)\n width = np.floor((width - 1) / 2.0 + 1 + 0.5)\n\n # gt boxes\n gt_boxes = boxes * scale\n\n # 1. 
Generate proposals from bbox deltas and shifted anchors\n shift_x = np.arange(0, width) * feat_stride\n shift_y = np.arange(0, height) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose()\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = num_anchors\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n all_anchors = all_anchors.reshape((K * A, 4))\n\n # compute overlap\n overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))\n \n # check how many gt boxes are covered by anchors\n if num_objs != 0:\n max_overlaps = overlaps_grid.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_subclasses': gt_subclasses,\n 'gt_subclasses_flipped': gt_subclasses_flipped,\n 'gt_overlaps': overlaps,\n 'gt_subindexes': subindexes, \n 'gt_subindexes_flipped': subindexes_flipped, \n 'flipped' : False}\n\n def region_proposal_roidb(self):\n \"\"\"\n Return the database of regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print( '{} roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n\n print(( 'Loading region proposal network boxes...'))\n model = cfg.REGION_PROPOSAL\n rpn_roidb = self._load_rpn_roidb(gt_roidb, model)\n print( 'Region proposal network boxes loaded')\n roidb = imdb.merge_roidbs(rpn_roidb, gt_roidb)\n else:\n print( 'Loading region proposal network boxes...')\n model = cfg.REGION_PROPOSAL\n roidb = self._load_rpn_roidb(None, model)\n print( 'Region proposal network boxes loaded')\n\n print( '{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index)))\n\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print( 'wrote roidb to {}'.format(cache_file))\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb, model):\n # set the prefix\n if self._image_set == 'test':\n prefix = model + '/testing'\n else:\n prefix = model + '/training'\n\n box_list = []\n for index in self.image_index:\n filename = os.path.join(self._pascal_path, 'region_proposals', prefix, index + '.txt')\n assert os.path.exists(filename), \\\n 'RPN data not found at: {}'.format(filename)\n raw_data = np.loadtxt(filename, dtype=float)\n if len(raw_data.shape) == 1:\n if raw_data.size == 0:\n raw_data = raw_data.reshape((0, 5))\n else:\n raw_data = raw_data.reshape((1, 5))\n\n x1 = raw_data[:, 0]\n y1 = raw_data[:, 1]\n x2 = raw_data[:, 2]\n y2 = raw_data[:, 3]\n score = raw_data[:, 4]\n inds = np.where((x2 > x1) & (y2 > y1))[0]\n raw_data = raw_data[inds,:4]\n self._num_boxes_proposal += raw_data.shape[0]\n box_list.append(raw_data)\n\n 
return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print( '{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print( 'wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def _load_selective_search_roidb(self, gt_roidb):\n filename = os.path.abspath(os.path.join(self.cache_path, '..',\n 'selective_search_data',\n self.name + '.mat'))\n assert os.path.exists(filename), \\\n 'Selective search data not found at: {}'.format(filename)\n raw_data = sio.loadmat(filename)['boxes'].ravel()\n\n box_list = []\n for i in range(raw_data.shape[0]):\n box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def selective_search_IJCV_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.\n format(self.name, self.config['top_k']))\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print( '{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print( 'wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def _load_selective_search_IJCV_roidb(self, gt_roidb):\n IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',\n 'selective_search_IJCV_data',\n 'voc_' + self._year))\n assert os.path.exists(IJCV_path), \\\n 'Selective search IJCV data not found at: {}'.format(IJCV_path)\n\n top_k = self.config['top_k']\n box_list = []\n for i in range(self.num_images):\n filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')\n raw_data = sio.loadmat(filename)\n box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n\n def _write_voc_results_file(self, all_boxes):\n use_salt = self.config['use_salt']\n comp_id = 'comp4'\n if use_salt:\n comp_id += '-{}'.format(os.getpid())\n\n # VOCdevkit/results/VOC2007/Main/comp4-44503_det_test_aeroplane.txt\n path = os.path.join(self._pascal_path, 'VOCdevkit' + self._year, 'results', 'VOC' + self._year,\n 'Main', comp_id + '_')\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print( 'Writing {} VOC results file'.format(cls))\n filename = path + 'det_' + self._image_set + '_' + cls + '.txt'\n print( filename)\n with open(filename, 'wt') 
as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, 4],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n return comp_id\n\n def _do_matlab_eval(self, comp_id, output_dir='output'):\n rm_results = self.config['cleanup']\n\n path = os.path.join(os.path.dirname(__file__),\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',{:d}); quit;\"' \\\n .format(self._pascal_path + '/VOCdevkit' + self._year, comp_id,\n self._image_set, output_dir, int(rm_results))\n print(('Running:\\n{}'.format(cmd)))\n status = subprocess.call(cmd, shell=True)\n\n # evaluate detection results\n def evaluate_detections(self, all_boxes, output_dir):\n comp_id = self._write_voc_results_file(all_boxes)\n self._do_matlab_eval(comp_id, output_dir)\n\n def evaluate_proposals(self, all_boxes, output_dir):\n # for each image\n for im_ind, index in enumerate(self.image_index):\n filename = os.path.join(output_dir, index + '.txt')\n print( 'Writing PASCAL results to file ' + filename)\n with open(filename, 'wt') as f:\n # for each class\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n for k in range(dets.shape[0]):\n f.write('{:f} {:f} {:f} {:f} {:.32f}\\n'.format(\\\n dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))\n\n def evaluate_proposals_msr(self, all_boxes, output_dir):\n # for each image\n for im_ind, index in enumerate(self.image_index):\n filename = os.path.join(output_dir, index + '.txt')\n print( 'Writing PASCAL results to file ' + filename)\n with open(filename, 'wt') as f:\n dets = all_boxes[im_ind]\n if dets == []:\n continue\n for k in range(dets.shape[0]):\n f.write('{:f} {:f} {:f} {:f} {:.32f}\\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))\n\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\nif __name__ == '__main__':\n d = pascal_voc('trainval', '2007')\n res = d.roidb\n from IPython import embed; embed()\n"
] | [
[
"numpy.hstack",
"numpy.arange",
"numpy.sort",
"numpy.zeros_like",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
],
[
"numpy.unique",
"numpy.arange",
"numpy.vstack",
"scipy.io.loadmat",
"numpy.round",
"numpy.floor",
"numpy.meshgrid",
"numpy.zeros",
"numpy.where",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
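Editorial note: the pascal_voc loader in the record above enumerates region-proposal anchors by broadcasting a small set of base anchors over per-cell feature-map shifts. A minimal NumPy sketch of that enumeration follows; base_anchors, feat_stride and the heatmap size here are made-up example values, not taken from the dataset.

# Sketch only: enumerate shifted anchors the way the loader above does.
import numpy as np

base_anchors = np.array([[-8., -8., 8., 8.],
                         [-16., -8., 16., 8.]])   # A = 2 hypothetical anchors
feat_stride = 16
height, width = 3, 4                              # hypothetical heatmap size

shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()   # (K, 4)

A = base_anchors.shape[0]
K = shifts.shape[0]
# broadcast (1, A, 4) + (K, 1, 4) -> (K, A, 4), then flatten to (K*A, 4)
all_anchors = (base_anchors.reshape((1, A, 4)) +
               shifts.reshape((1, K, 4)).transpose((1, 0, 2))).reshape((K * A, 4))
print(all_anchors.shape)   # (24, 4)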
vberthiaume/vblandr | [
"dbd139e7b6172b9dbc97707ff4874bc398de7aaa"
] | [
"mnist/mainMnist.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Trains and Evaluates the MNIST network using a feed dictionary.\"\"\"\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport math\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\nimport gzip\nimport os\nimport tempfile\n\nimport numpy\n\n# Basic model parameters as external flags.\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_float ('learning_rate', 0.01, 'Initial learning rate.')\nflags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')\nflags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')\nflags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')\nflags.DEFINE_integer('batch_size', 100, 'Batch size. Must divide evenly into the dataset sizes.')\nflags.DEFINE_string ('train_dir', 'data', 'Directory to put the training data.')\nflags.DEFINE_boolean('fake_data', False, 'If true, uses fake data for unit testing.')\n\n# The MNIST dataset has 10 classes, representing the digits 0 through 9.\nNUM_CLASSES = 10\n\n# The MNIST images are always 28x28 pixels.\nIMAGE_SIZE = 28\nIMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE\n\n\ndef run_training():\n \"\"\"Train MNIST for a number of steps.\"\"\"\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n data_sets = read_data_sets(FLAGS.train_dir, FLAGS.fake_data)\n\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)\n # Build a Graph that computes predictions from the inference model.\n logits = inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)\n # Add to the Graph the Ops for loss calculation.\n loss = loss_funct(logits, labels_placeholder)\n # Add to the Graph the Ops that calculate and apply gradients.\n train_op = training(loss, FLAGS.learning_rate)\n # Add the Op to compare the logits to the labels during evaluation.\n eval_correct = evaluation(logits, labels_placeholder)\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n # Create a session for running Ops on the Graph.\n sess = tf.Session()\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)\n # Run the Op to initialize the variables.\n 
sess.run(init)\n # training loop.\n for step in xrange(FLAGS.max_steps):\n start_time = time.time()\n # Fill a feed dictionary with the actual set of images and labels\n # for this particular training step.\n feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)\n # Run one step of the model. The return values are the activations\n # from the `train_op` (which is discarded) and the `loss` Op. To\n # inspect the values of your Ops or variables, you may include them\n # in the list passed to sess.run() and the value tensors will be\n # returned in the tuple from the call.\n _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)\n duration = time.time() - start_time\n # Write the summaries and print an overview fairly often.\n if step % 100 == 0:\n # Print status to stdout.\n print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))\n # Update the events file.\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, step)\n summary_writer.flush()\n\n # Save a checkpoint and evaluate the model periodically.\n if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:\n saver.save(sess, FLAGS.train_dir, global_step=step)\n # Evaluate against the training set.\n print('Training Data Eval:')\n do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)\n # Evaluate against the validation set.\n print('Validation Data Eval:')\n do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)\n # Evaluate against the test set.\n print('Test Data Eval:')\n do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)\n\ndef placeholder_inputs(batch_size):\n \"\"\"Generate placeholder variables to represent the input tensors.\n\n These placeholders are used as inputs by the rest of the model building\n code and will be fed from the downloaded data in the .run() loop, below.\n\n Args:\n batch_size: The batch size will be baked into both placeholders.\n\n Returns:\n images_placeholder: Images placeholder.\n labels_placeholder: Labels placeholder.\n \"\"\"\n # Note that the shapes of the placeholders match the shapes of the full\n # image and label tensors, except the first dimension is now batch_size\n # rather than the full size of the train or test data sets.\n images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, IMAGE_PIXELS))\n labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))\n return images_placeholder, labels_placeholder\n\ndef inference(images, hidden1_units, hidden2_units):\n #Build the MNIST model up to where it may be used for inference.\n\n # Hidden 1\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.truncated_normal([IMAGE_PIXELS, hidden1_units], stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))), name='weights')\n biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')\n hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)\n # Hidden 2\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.truncated_normal([hidden1_units, hidden2_units], stddev=1.0 / math.sqrt(float(hidden1_units))), name='weights')\n biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')\n hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)\n # Linear\n with tf.name_scope('softmax_linear'):\n weights = tf.Variable(tf.truncated_normal([hidden2_units, NUM_CLASSES], stddev=1.0 / math.sqrt(float(hidden2_units))), name='weights')\n biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')\n 
logits = tf.matmul(hidden2, weights) + biases\n return logits\n\ndef loss_funct(logits, labels):\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name='xentropy')\n loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n return loss\n\ndef fill_feed_dict(data_set, images_pl, labels_pl):\n \"\"\"Fills the feed_dict for training the given step.\n\n A feed_dict takes the form of:\n feed_dict = {\n <placeholder>: <tensor of values to be passed for placeholder>,\n ....\n }\n\n Args:\n data_set: The set of images and labels, from input_data.read_data_sets()\n images_pl: The images placeholder, from placeholder_inputs().\n labels_pl: The labels placeholder, from placeholder_inputs().\n\n Returns:\n feed_dict: The feed dictionary mapping from placeholders to values.\n \"\"\"\n # Create the feed_dict for the placeholders filled with the next `batch size ` examples.\n images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)\n feed_dict = { images_pl: images_feed, labels_pl: labels_feed}\n return feed_dict\n\ndef do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set):\n \"\"\"Runs one evaluation against the full epoch of data.\n\n Args:\n sess: The session in which the model has been trained.\n eval_correct: The Tensor that returns the number of correct predictions.\n images_placeholder: The images placeholder.\n labels_placeholder: The labels placeholder.\n data_set: The set of images and labels to evaluate, from\n input_data.read_data_sets().\n \"\"\"\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = data_set.num_examples // FLAGS.batch_size\n num_examples = steps_per_epoch * FLAGS.batch_size\n for step in xrange(steps_per_epoch):\n feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder)\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = true_count / num_examples\n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision))\n\ndef training(loss, learning_rate):\n \"\"\"Sets up the training Ops. Creates a summarizer to track the loss over time in TensorBoard. Creates an optimizer and applies the gradients \n to all trainable variables. 
The Op returned by this function is what must be passed to the`sess.run()` call to cause the model to train.\n Args:\n loss: Loss tensor, from loss().\n learning_rate: The learning rate to use for gradient descent.\n Returns:\n train_op: The Op for training.\n \"\"\"\n # Add a scalar summary for the snapshot loss.\n tf.scalar_summary(loss.op.name, loss)\n # Create the gradient descent optimizer with the given learning rate.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # Create a variable to track the global step.\n global_step = tf.Variable(0, name='global_step', trainable=False)\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss, global_step=global_step)\n return train_op\n\ndef evaluation(logits, labels):\n \"\"\"Evaluate the quality of the logits at predicting the label.\n Args:\n logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n labels: Labels tensor, int32 - [batch_size], with values in the range [0, NUM_CLASSES).\n Returns:\n A scalar int32 tensor with the number of examples (out of batch_size)\n that were predicted correctly.\n \"\"\"\n # For a classifier model, we can use the in_top_k Op. It returns a bool tensor with shape [batch_size] that is true for\n # the examples where the label is in the top k (here k=1) of all logits for that example.\n correct = tf.nn.in_top_k(logits, labels, 1)\n # Return the number of true entries.\n return tf.reduce_sum(tf.cast(correct, tf.int32))\n\ndef main(_):\n run_training()\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.zeros",
"tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets",
"tensorflow.cast",
"tensorflow.to_int64",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.merge_all_summaries",
"tensorflow.initialize_all_variables",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.nn.in_top_k",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensorflow.scalar_summary",
"tensorflow.train.SummaryWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
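Editorial note: the MNIST record above evaluates with tf.nn.in_top_k and reports true_count / num_examples as "Precision @ 1". A framework-free sketch of that computation, using randomly generated logits and labels purely for illustration (for k=1, in_top_k is essentially an argmax comparison, ignoring ties).

# Sketch only: top-1 precision as computed by do_eval above.
import numpy as np

rng = np.random.default_rng(0)
logits = rng.normal(size=(100, 10))        # hypothetical batch of class scores
labels = rng.integers(0, 10, size=100)     # hypothetical integer labels

correct = np.argmax(logits, axis=1) == labels
true_count = int(correct.sum())
precision = true_count / labels.shape[0]
print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f'
      % (labels.shape[0], true_count, precision))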
mawanda-jun/NoLabels | [
"6861867ad5ab49fc7ae6f562977f60195f9ff216"
] | [
"Dataset/generate_hamming_set.py"
] | [
"\"\"\"\nThis file generates a set of most distant permutations from each other. This is only a support dataset: this will be useful\nwhen we will crop each image and reorder them in those distances.\nThis is a kind of mathematical distribution, no data are loaded here.\n\"\"\"\n\nimport numpy as np\nimport itertools\nimport os\nfrom scipy.spatial.distance import cdist\nimport h5py\nfrom config import conf\n\nNUM_CROPS = conf.numCrops\nNUM_PERMUTATIONS = conf.hammingSetSize\nSELECTION = conf.selectionMethod\nFILENAME = conf.hammingFileName\nFOLDER = conf.resources\n\n\ndef hamming_set(num_crops=NUM_CROPS, num_permutations=NUM_PERMUTATIONS, selection=SELECTION, filename=FILENAME):\n \"\"\"\n generate and save the hamming set\n :param num_crops: number of tiles from each image\n :param num_permutations: Number of permutations to select (i.e. number of classes for the pretext task)\n :param selection: Sample selected per iteration based on hamming distance: [max] highest; [mean] average\n :param filename: name of file\n :return a list of different permutations: [[perm1], [perm2], ...]. Each permutation is in form (10_elements)\n \"\"\"\n # create different permutation for num_crops (i.e: num_crops=9, P_hat[0]=(0, 1, 2, 4, 5, 3, 7, 8, 6, 9)\n P_hat = np.array(list(itertools.permutations(list(range(num_crops)), num_crops)))\n n = P_hat.shape[0] # number of total permutations (i.e num_crops=9 -> shape[0]=3628800\n\n j = np.random.randint(n)\n P = np.array(P_hat[j]).reshape((1, -1)) # reshape j array into [[1, 2, ...]]\n\n for _ in range(num_permutations)[1:]:\n # select the <num_permutations> max distant from each other of permutations\n P = np.concatenate([P, P_hat[j].reshape([1, -1])], axis=0) # concatenate as [[el1], [el2], [...]]\n P_hat = np.delete(P_hat, j, axis=0)\n # Takes the distance between the combination that are already present in P and those who are in P_hat.\n # Note that in P_hat there are no combinations of P.\n D = cdist(P, P_hat, metric='hamming').mean(axis=0).flatten()\n\n if selection == 'max':\n # select max distances between\n j = D.argmax()\n elif selection == 'mean':\n m = int(D.shape[0] / 2)\n S = D.argsort()\n j = S[np.random.randint(m - 10, m + 10)]\n\n os.makedirs(FOLDER, exist_ok=True)\n\n with h5py.File(os.path.join(os.getcwd(), FOLDER, filename + str(NUM_PERMUTATIONS) + '.h5'), 'w') as h5f:\n h5f.create_dataset('max_hamming_set', data=P)\n\n print('file created --> ' + FOLDER + filename + str(NUM_PERMUTATIONS) + '.h5')\n\n\nif __name__ == \"__main__\":\n hamming_set()\n\n"
] | [
[
"numpy.delete",
"numpy.array",
"scipy.spatial.distance.cdist",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
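Editorial note: the hamming_set generator above greedily picks permutations that are maximally distant, in mean Hamming distance, from those already chosen. A small deterministic sketch of the same 'max' selection strategy, shrunk to 4 crops and 5 permutations for readability; the deterministic starting index is an assumption of the sketch.

# Sketch only: greedy max-Hamming permutation selection.
import itertools
import numpy as np
from scipy.spatial.distance import cdist

num_crops, num_permutations = 4, 5
P_hat = np.array(list(itertools.permutations(range(num_crops), num_crops)))

j = 0                                    # deterministic start for the sketch
P = P_hat[j].reshape((1, -1))
for _ in range(num_permutations - 1):
    P_hat = np.delete(P_hat, j, axis=0)  # drop the permutation just chosen
    # mean Hamming distance of every remaining candidate to the chosen set
    D = cdist(P, P_hat, metric='hamming').mean(axis=0)
    j = D.argmax()                       # the 'max' selection strategy
    P = np.vstack([P, P_hat[j]])

print(P)                                 # 5 mutually distant permutations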
Gnupur/scikit-learn | [
"513695f1e4f7613f988159333bfccc59358879dd"
] | [
"sklearn/ensemble/_voting.py"
] | [
"\"\"\"\nSoft Voting/Majority Rule classifier and Voting regressor.\n\nThis module contains:\n - A Soft Voting/Majority Rule classifier for classification estimators.\n - A Voting regressor for regression estimators.\n\"\"\"\n\n# Authors: Sebastian Raschka <[email protected]>,\n# Gilles Louppe <[email protected]>,\n# Ramil Nugmanov <[email protected]>\n# Mohamed Ali Jamaoui <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom abc import abstractmethod\n\nimport numpy as np\n\nfrom joblib import Parallel\n\nfrom ..base import ClassifierMixin\nfrom ..base import RegressorMixin\nfrom ..base import TransformerMixin\nfrom ..base import clone\nfrom ._base import _fit_single_estimator\nfrom ._base import _BaseHeterogeneousEnsemble\nfrom ..preprocessing import LabelEncoder\nfrom ..utils import Bunch\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.multiclass import check_classification_targets\nfrom ..utils.validation import column_or_1d\nfrom ..exceptions import NotFittedError\nfrom ..utils._estimator_html_repr import _VisualBlock\nfrom ..utils.fixes import delayed\n\n\nclass _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble):\n \"\"\"Base class for voting.\n\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n def _log_message(self, name, idx, total):\n if not self.verbose:\n return None\n return \"(%d of %d) Processing %s\" % (idx, total, name)\n\n @property\n def _weights_not_none(self):\n \"\"\"Get the weights of not `None` estimators.\"\"\"\n if self.weights is None:\n return None\n return [w for est, w in zip(self.estimators, self.weights) if est[1] != \"drop\"]\n\n def _predict(self, X):\n \"\"\"Collect results from clf.predict calls.\"\"\"\n return np.asarray([est.predict(X) for est in self.estimators_]).T\n\n @abstractmethod\n def fit(self, X, y, sample_weight=None):\n \"\"\"Get common fit operations.\"\"\"\n names, clfs = self._validate_estimators()\n\n if self.weights is not None and len(self.weights) != len(self.estimators):\n raise ValueError(\n \"Number of `estimators` and weights must be equal\"\n \"; got %d weights, %d estimators\"\n % (len(self.weights), len(self.estimators))\n )\n\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_single_estimator)(\n clone(clf),\n X,\n y,\n sample_weight=sample_weight,\n message_clsname=\"Voting\",\n message=self._log_message(names[idx], idx + 1, len(clfs)),\n )\n for idx, clf in enumerate(clfs)\n if clf != \"drop\"\n )\n\n self.named_estimators_ = Bunch()\n\n # Uses 'drop' as placeholder for dropped estimators\n est_iter = iter(self.estimators_)\n for name, est in self.estimators:\n current_est = est if est == \"drop\" else next(est_iter)\n self.named_estimators_[name] = current_est\n\n return self\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Return class labels or probabilities for each estimator.\n\n Return predictions for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features)\n Input samples.\n\n y : ndarray of shape (n_samples,), default=None\n Target values (None for unsupervised transformations).\n\n **fit_params : dict\n Additional fit parameters.\n\n Returns\n -------\n X_new : ndarray array of shape (n_samples, n_features_new)\n Transformed array.\n \"\"\"\n return super().fit_transform(X, y, **fit_params)\n\n @property\n def n_features_in_(self):\n \"\"\"Number of features seen during :term:`fit`.\"\"\"\n # For consistency with other estimators we raise a 
AttributeError so\n # that hasattr() fails if the estimator isn't fitted.\n try:\n check_is_fitted(self)\n except NotFittedError as nfe:\n raise AttributeError(\n \"{} object has no n_features_in_ attribute.\".format(\n self.__class__.__name__\n )\n ) from nfe\n\n return self.estimators_[0].n_features_in_\n\n def _sk_visual_block_(self):\n names, estimators = zip(*self.estimators)\n return _VisualBlock(\"parallel\", estimators, names=names)\n\n def _more_tags(self):\n return {\"preserves_dtype\": []}\n\n\nclass VotingClassifier(ClassifierMixin, _BaseVoting):\n \"\"\"Soft Voting/Majority Rule classifier for unfitted estimators.\n\n Read more in the :ref:`User Guide <voting_classifier>`.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n estimators : list of (str, estimator) tuples\n Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones\n of those original estimators that will be stored in the class attribute\n ``self.estimators_``. An estimator can be set to ``'drop'``\n using ``set_params``.\n\n .. versionchanged:: 0.21\n ``'drop'`` is accepted. Using None was deprecated in 0.22 and\n support was removed in 0.24.\n\n voting : {'hard', 'soft'}, default='hard'\n If 'hard', uses predicted class labels for majority rule voting.\n Else if 'soft', predicts the class label based on the argmax of\n the sums of the predicted probabilities, which is recommended for\n an ensemble of well-calibrated classifiers.\n\n weights : array-like of shape (n_classifiers,), default=None\n Sequence of weights (`float` or `int`) to weight the occurrences of\n predicted class labels (`hard` voting) or class probabilities\n before averaging (`soft` voting). Uses uniform weights if `None`.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel for ``fit``.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n .. versionadded:: 0.18\n\n flatten_transform : bool, default=True\n Affects shape of transform output only when voting='soft'\n If voting='soft' and flatten_transform=True, transform method returns\n matrix with shape (n_samples, n_classifiers * n_classes). If\n flatten_transform=False, it returns\n (n_classifiers, n_samples, n_classes).\n\n verbose : bool, default=False\n If True, the time elapsed while fitting will be printed as it\n is completed.\n\n .. versionadded:: 0.23\n\n Attributes\n ----------\n estimators_ : list of classifiers\n The collection of fitted sub-estimators as defined in ``estimators``\n that are not 'drop'.\n\n named_estimators_ : :class:`~sklearn.utils.Bunch`\n Attribute to access any fitted sub-estimators by name.\n\n .. versionadded:: 0.20\n\n le_ : :class:`~sklearn.preprocessing.LabelEncoder`\n Transformer used to encode the labels during fit and decode during\n prediction.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`. Only defined if the\n underlying classifier exposes such an attribute when fit.\n\n .. 
versionadded:: 0.24\n\n See Also\n --------\n VotingRegressor : Prediction voting regressor.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.naive_bayes import GaussianNB\n >>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier\n >>> clf1 = LogisticRegression(multi_class='multinomial', random_state=1)\n >>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1)\n >>> clf3 = GaussianNB()\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> eclf1 = VotingClassifier(estimators=[\n ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')\n >>> eclf1 = eclf1.fit(X, y)\n >>> print(eclf1.predict(X))\n [1 1 1 2 2 2]\n >>> np.array_equal(eclf1.named_estimators_.lr.predict(X),\n ... eclf1.named_estimators_['lr'].predict(X))\n True\n >>> eclf2 = VotingClassifier(estimators=[\n ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],\n ... voting='soft')\n >>> eclf2 = eclf2.fit(X, y)\n >>> print(eclf2.predict(X))\n [1 1 1 2 2 2]\n >>> eclf3 = VotingClassifier(estimators=[\n ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],\n ... voting='soft', weights=[2,1,1],\n ... flatten_transform=True)\n >>> eclf3 = eclf3.fit(X, y)\n >>> print(eclf3.predict(X))\n [1 1 1 2 2 2]\n >>> print(eclf3.transform(X).shape)\n (6, 6)\n \"\"\"\n\n def __init__(\n self,\n estimators,\n *,\n voting=\"hard\",\n weights=None,\n n_jobs=None,\n flatten_transform=True,\n verbose=False,\n ):\n super().__init__(estimators=estimators)\n self.voting = voting\n self.weights = weights\n self.n_jobs = n_jobs\n self.flatten_transform = flatten_transform\n self.verbose = verbose\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n .. 
versionadded:: 0.18\n\n Returns\n -------\n self : object\n Returns the instance itself.\n\n \"\"\"\n check_classification_targets(y)\n if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:\n raise NotImplementedError(\n \"Multilabel and multi-output classification is not supported.\"\n )\n\n if self.voting not in (\"soft\", \"hard\"):\n raise ValueError(\n \"Voting must be 'soft' or 'hard'; got (voting=%r)\" % self.voting\n )\n\n self.le_ = LabelEncoder().fit(y)\n self.classes_ = self.le_.classes_\n transformed_y = self.le_.transform(y)\n\n return super().fit(X, transformed_y, sample_weight)\n\n def predict(self, X):\n \"\"\"Predict class labels for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n maj : array-like of shape (n_samples,)\n Predicted class labels.\n \"\"\"\n check_is_fitted(self)\n if self.voting == \"soft\":\n maj = np.argmax(self.predict_proba(X), axis=1)\n\n else: # 'hard' voting\n predictions = self._predict(X)\n maj = np.apply_along_axis(\n lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),\n axis=1,\n arr=predictions,\n )\n\n maj = self.le_.inverse_transform(maj)\n\n return maj\n\n def _collect_probas(self, X):\n \"\"\"Collect results from clf.predict calls.\"\"\"\n return np.asarray([clf.predict_proba(X) for clf in self.estimators_])\n\n def _predict_proba(self, X):\n \"\"\"Predict class probabilities for X in 'soft' voting.\"\"\"\n check_is_fitted(self)\n avg = np.average(\n self._collect_probas(X), axis=0, weights=self._weights_not_none\n )\n return avg\n\n @property\n def predict_proba(self):\n \"\"\"Compute probabilities of possible outcomes for samples in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n avg : array-like of shape (n_samples, n_classes)\n Weighted average probability for each class per sample.\n \"\"\"\n if self.voting == \"hard\":\n raise AttributeError(\n \"predict_proba is not available when voting=%r\" % self.voting\n )\n return self._predict_proba\n\n def transform(self, X):\n \"\"\"Return class labels or probabilities for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n probabilities_or_labels\n If `voting='soft'` and `flatten_transform=True`:\n returns ndarray of shape (n_classifiers, n_samples *\n n_classes), being class probabilities calculated by each\n classifier.\n If `voting='soft' and `flatten_transform=False`:\n ndarray of shape (n_classifiers, n_samples, n_classes)\n If `voting='hard'`:\n ndarray of shape (n_samples, n_classifiers), being\n class labels predicted by each classifier.\n \"\"\"\n check_is_fitted(self)\n\n if self.voting == \"soft\":\n probas = self._collect_probas(X)\n if not self.flatten_transform:\n return probas\n return np.hstack(probas)\n\n else:\n return self._predict(X)\n\n\nclass VotingRegressor(RegressorMixin, _BaseVoting):\n \"\"\"Prediction voting regressor for unfitted estimators.\n\n A voting regressor is an ensemble meta-estimator that fits several base\n regressors, each on the whole dataset. Then it averages the individual\n predictions to form a final prediction.\n\n Read more in the :ref:`User Guide <voting_regressor>`.\n\n .. 
versionadded:: 0.21\n\n Parameters\n ----------\n estimators : list of (str, estimator) tuples\n Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones\n of those original estimators that will be stored in the class attribute\n ``self.estimators_``. An estimator can be set to ``'drop'`` using\n ``set_params``.\n\n .. versionchanged:: 0.21\n ``'drop'`` is accepted. Using None was deprecated in 0.22 and\n support was removed in 0.24.\n\n weights : array-like of shape (n_regressors,), default=None\n Sequence of weights (`float` or `int`) to weight the occurrences of\n predicted values before averaging. Uses uniform weights if `None`.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel for ``fit``.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n verbose : bool, default=False\n If True, the time elapsed while fitting will be printed as it\n is completed.\n\n .. versionadded:: 0.23\n\n Attributes\n ----------\n estimators_ : list of regressors\n The collection of fitted sub-estimators as defined in ``estimators``\n that are not 'drop'.\n\n named_estimators_ : :class:`~sklearn.utils.Bunch`\n Attribute to access any fitted sub-estimators by name.\n\n .. versionadded:: 0.20\n\n n_features_in_ : int\n Number of features seen during :term:`fit`. Only defined if the\n underlying regressor exposes such an attribute when fit.\n\n .. versionadded:: 0.24\n\n See Also\n --------\n VotingClassifier : Soft Voting/Majority Rule classifier.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.linear_model import LinearRegression\n >>> from sklearn.ensemble import RandomForestRegressor\n >>> from sklearn.ensemble import VotingRegressor\n >>> r1 = LinearRegression()\n >>> r2 = RandomForestRegressor(n_estimators=10, random_state=1)\n >>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])\n >>> y = np.array([2, 6, 12, 20, 30, 42])\n >>> er = VotingRegressor([('lr', r1), ('rf', r2)])\n >>> print(er.fit(X, y).predict(X))\n [ 3.3 5.7 11.8 19.7 28. 40.3]\n \"\"\"\n\n def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False):\n super().__init__(estimators=estimators)\n self.weights = weights\n self.n_jobs = n_jobs\n self.verbose = verbose\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. 
If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n y = column_or_1d(y, warn=True)\n return super().fit(X, y, sample_weight)\n\n def predict(self, X):\n \"\"\"Predict regression target for X.\n\n The predicted regression target of an input sample is computed as the\n mean predicted regression targets of the estimators in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n \"\"\"\n check_is_fitted(self)\n return np.average(self._predict(X), axis=1, weights=self._weights_not_none)\n\n def transform(self, X):\n \"\"\"Return predictions for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n\n Returns\n -------\n predictions : ndarray of shape (n_samples, n_classifiers)\n Values predicted by each regressor.\n \"\"\"\n check_is_fitted(self)\n return self._predict(X)\n"
] | [
[
"numpy.hstack",
"numpy.bincount"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
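Editorial note: the VotingClassifier code above resolves hard votes with a weighted np.bincount per sample. A standalone sketch of that aggregation on made-up encoded predictions and hypothetical per-classifier weights.

# Sketch only: weighted hard-vote aggregation as in VotingClassifier.predict.
import numpy as np

# rows = samples, columns = encoded predictions of three classifiers
predictions = np.array([[0, 0, 1],
                        [1, 1, 0],
                        [2, 1, 1]])
weights = np.array([2.0, 1.0, 1.0])      # hypothetical classifier weights

maj = np.apply_along_axis(
    lambda x: np.argmax(np.bincount(x, weights=weights)),
    axis=1,
    arr=predictions,
)
print(maj)   # -> [0 1 1]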
andreasala98/pykeen | [
"205af6c2604b3882bf0adf275610bceb6ac53c0c"
] | [
"src/pykeen/triples/triples_numeric_literals_factory.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Implementation of factory that create instances containing of triples and numeric literals.tsv.\"\"\"\n\nimport logging\nimport pathlib\nfrom typing import Any, Dict, Optional, TextIO, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom .triples_factory import TriplesFactory\nfrom .utils import load_triples\nfrom ..typing import EntityMapping, LabeledTriples, MappedTriples\n\n__all__ = [\n \"TriplesNumericLiteralsFactory\",\n]\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_matrix_of_literals(\n numeric_triples: np.array,\n entity_to_id: EntityMapping,\n) -> Tuple[np.ndarray, Dict[str, int]]:\n \"\"\"Create matrix of literals where each row corresponds to an entity and each column to a literal.\"\"\"\n data_relations = np.unique(np.ndarray.flatten(numeric_triples[:, 1:2]))\n data_rel_to_id: Dict[str, int] = {value: key for key, value in enumerate(data_relations)}\n # Prepare literal matrix, set every literal to zero, and afterwards fill in the corresponding value if available\n num_literals = np.zeros([len(entity_to_id), len(data_rel_to_id)], dtype=np.float32)\n\n # TODO vectorize code\n for h, r, lit in numeric_triples:\n try:\n # row define entity, and column the literal. Set the corresponding literal for the entity\n num_literals[entity_to_id[h], data_rel_to_id[r]] = lit\n except KeyError:\n logger.info(\"Either entity or relation to literal doesn't exist.\")\n continue\n\n return num_literals, data_rel_to_id\n\n\nclass TriplesNumericLiteralsFactory(TriplesFactory):\n \"\"\"Create multi-modal instances given the path to triples.\"\"\"\n\n def __init__(\n self,\n *,\n path: Union[None, str, pathlib.Path, TextIO] = None,\n triples: Optional[LabeledTriples] = None,\n path_to_numeric_triples: Union[None, str, pathlib.Path, TextIO] = None,\n numeric_triples: Optional[np.ndarray] = None,\n **kwargs,\n ) -> None:\n \"\"\"Initialize the multi-modal triples factory.\n\n :param path: The path to a 3-column TSV file with triples in it. If not specified,\n you should specify ``triples``.\n :param triples: A 3-column numpy array with triples in it. If not specified,\n you should specify ``path``\n :param path_to_numeric_triples: The path to a 3-column TSV file with triples and\n numeric. If not specified, you should specify ``numeric_triples``.\n :param numeric_triples: A 3-column numpy array with numeric triples in it. 
If not\n specified, you should specify ``path_to_numeric_triples``.\n \"\"\"\n if path is not None:\n base = TriplesFactory.from_path(path=path, **kwargs)\n elif triples is None:\n base = TriplesFactory(**kwargs)\n else:\n base = TriplesFactory.from_labeled_triples(triples=triples, **kwargs)\n super().__init__(\n entity_to_id=base.entity_to_id,\n relation_to_id=base.relation_to_id,\n mapped_triples=base.mapped_triples,\n create_inverse_triples=base.create_inverse_triples,\n )\n\n if path_to_numeric_triples is None and numeric_triples is None:\n raise ValueError(\"Must specify one of path_to_numeric_triples or numeric_triples\")\n elif path_to_numeric_triples is not None and numeric_triples is not None:\n raise ValueError(\"Must not specify both path_to_numeric_triples and numeric_triples\")\n elif path_to_numeric_triples is not None:\n self.numeric_triples = load_triples(path_to_numeric_triples)\n else:\n self.numeric_triples = numeric_triples\n\n assert self.entity_to_id is not None\n self.numeric_literals, self.literals_to_id = create_matrix_of_literals(\n numeric_triples=self.numeric_triples,\n entity_to_id=self.entity_to_id,\n )\n\n def get_numeric_literals_tensor(self) -> torch.FloatTensor:\n \"\"\"Return the numeric literals as a tensor.\"\"\"\n return torch.as_tensor(self.numeric_literals, dtype=torch.float32)\n\n def extra_repr(self) -> str: # noqa: D102\n return super().extra_repr() + (f\"num_literals={len(self.literals_to_id)}\")\n\n def clone_and_exchange_triples(\n self,\n mapped_triples: MappedTriples,\n extra_metadata: Optional[Dict[str, Any]] = None,\n keep_metadata: bool = True,\n create_inverse_triples: Optional[bool] = None,\n ) -> \"TriplesNumericLiteralsFactory\": # noqa: D102\n if create_inverse_triples is None:\n create_inverse_triples = self.create_inverse_triples\n return TriplesNumericLiteralsFactory(\n numeric_triples=self.numeric_triples,\n mapped_triples=mapped_triples,\n entity_to_id=self.entity_to_id,\n relation_to_id=self.relation_to_id,\n create_inverse_triples=create_inverse_triples,\n metadata={\n **(extra_metadata or {}),\n **(self.metadata if keep_metadata else {}), # type: ignore\n },\n )\n"
] | [
[
"numpy.ndarray.flatten",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
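Editorial note: create_matrix_of_literals in the pykeen record above turns numeric triples into an entity-by-relation matrix of literal values. A toy sketch with invented triples and an invented entity_to_id mapping; the explicit float() cast is an assumption for clarity.

# Sketch only: build an entity x literal-relation matrix from numeric triples.
import numpy as np

numeric_triples = np.array([
    ["berlin", "population", "3.6"],
    ["berlin", "area", "891.7"],
    ["paris",  "population", "2.1"],
])
entity_to_id = {"berlin": 0, "paris": 1}

data_relations = np.unique(numeric_triples[:, 1])
data_rel_to_id = {rel: i for i, rel in enumerate(data_relations)}

num_literals = np.zeros((len(entity_to_id), len(data_rel_to_id)), dtype=np.float32)
for h, r, lit in numeric_triples:
    # row = entity, column = literal relation; unknown keys would be skipped
    num_literals[entity_to_id[h], data_rel_to_id[r]] = float(lit)

print(num_literals)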
Fanping/ai.example | [
"452a8c00e58a6c3a27ab78a2b4a30bdd054ef165",
"452a8c00e58a6c3a27ab78a2b4a30bdd054ef165"
] | [
"10.Face-Generated-With-GAN/face_generated_gan_model.py",
"02.Slim-Basic-Classifier/slim_basic_classifier_service.py"
] | [
"import tensorflow as tf\n\n\nclass FaceGANModel(object):\n def __init__(self, batch_size=64, learning_rate=1e-3):\n # 1. Define input.\n self.input_image = tf.placeholder(tf.float32, [batch_size, 40 * 55],\n name=\"input_image\")\n self.input_prior = tf.placeholder(tf.float32, [batch_size, 100],\n name=\"input_prior\")\n self.keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n\n # 2. Define generator.\n generator_w1 = tf.Variable(tf.truncated_normal([100, 150], stddev=0.1),\n name=\"g_w1\", dtype=tf.float32)\n generator_b1 = tf.Variable(tf.zeros([150]), name=\"g_b1\",\n dtype=tf.float32)\n generator_layer1 = tf.nn.relu(\n tf.matmul(self.input_prior, generator_w1) + generator_b1)\n generator_w2 = tf.Variable(tf.truncated_normal([150, 300], stddev=0.1),\n name=\"g_w2\", dtype=tf.float32)\n generator_b2 = tf.Variable(tf.zeros([300]), name=\"g_b2\",\n dtype=tf.float32)\n generator_layer2 = tf.nn.relu(\n tf.matmul(generator_layer1, generator_w2) + generator_b2)\n generator_w3 = tf.Variable(\n tf.truncated_normal([300, 40 * 55], stddev=0.1),\n name=\"g_w3\", dtype=tf.float32)\n generator_b3 = tf.Variable(tf.zeros([40 * 55]), name=\"g_b3\",\n dtype=tf.float32)\n generator_layer3 = tf.matmul(generator_layer2,\n generator_w3) + generator_b3\n self.generator = tf.nn.tanh(generator_layer3)\n\n # 3. Define discriminator.\n x_in = tf.concat([self.input_image, self.generator], 0)\n discriminator_w1 = tf.Variable(\n tf.truncated_normal([40 * 55, 300], stddev=0.1),\n name=\"d_w1\", dtype=tf.float32)\n discriminator_b1 = tf.Variable(tf.zeros([300]), name=\"d_b1\",\n dtype=tf.float32)\n discriminator_layer1 = tf.nn.dropout(\n tf.nn.relu(tf.matmul(x_in, discriminator_w1) + discriminator_b1),\n self.keep_prob)\n discriminator_w2 = tf.Variable(\n tf.truncated_normal([300, 150], stddev=0.1),\n name=\"d_w2\", dtype=tf.float32)\n discriminator_b2 = tf.Variable(tf.zeros([150]), name=\"d_b2\",\n dtype=tf.float32)\n discriminator_layer2 = tf.nn.dropout(tf.nn.relu(\n tf.matmul(discriminator_layer1,\n discriminator_w2) + discriminator_b2), self.keep_prob)\n discriminator_w3 = tf.Variable(\n tf.truncated_normal([150, 1], stddev=0.1),\n name=\"d_w3\",\n dtype=tf.float32)\n discriminator_b3 = tf.Variable(tf.zeros([1]), name=\"d_b3\",\n dtype=tf.float32)\n discriminator_h3 = tf.matmul(discriminator_layer2,\n discriminator_w3) + discriminator_b3\n y_data = tf.nn.sigmoid(\n tf.slice(discriminator_h3, [0, 0], [batch_size, -1]))\n self.discriminator = tf.nn.sigmoid(\n tf.slice(discriminator_h3, [batch_size, 0], [-1, -1]))\n\n # 4.Define loss\n discriminator_loss = - (tf.log(y_data) + tf.log(1 - self.discriminator))\n generator_loss = - tf.log(self.discriminator)\n self.optimizer = tf.train.AdamOptimizer(learning_rate)\n self.discriminator_trainer = self.optimizer.minimize(discriminator_loss,\n var_list=[\n discriminator_w1,\n discriminator_b1,\n discriminator_w2,\n discriminator_b2,\n discriminator_w3,\n discriminator_b3])\n self.generator_trainer = self.optimizer.minimize(generator_loss,\n var_list=[generator_w1,\n generator_b1,\n generator_w2,\n generator_b2,\n generator_w3,\n generator_b3])\n",
"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport progressbar\n\nfrom slim_basic_classifier_model import SlimBasicClassifierModel\n\n\nclass SlimBasicClassifierService(object):\n def __init__(self, input_size, output_size, learning_rate=0.001):\n self.model = SlimBasicClassifierModel(input_size, output_size)\n self.model.create(learning_rate)\n\n def train(self, train_x, train_y, epochs=1000):\n variables = tf.initialize_all_variables()\n with tf.Session() as sess:\n sess.run(variables)\n saver = tf.train.Saver(max_to_keep=None)\n with progressbar.ProgressBar(max_value=epochs) as bar:\n for epoch in range(epochs):\n avg_cost = 0.0\n length = len(train_x)\n for index in range(length):\n input_data = np.asarray([[train_x[index]]])\n label_data = np.asarray([train_y[index]])\n _, c = sess.run([self.model.optimizer, self.model.cost],\n feed_dict={\n self.model.input: input_data,\n self.model.output: label_data})\n avg_cost += c\n avg_cost = avg_cost / length\n plt.plot(epoch, avg_cost, 'bo')\n bar.update(epoch)\n plt.title(self.model.name + ' training line')\n plt.xlabel('Epoch')\n plt.ylabel('Cost')\n plt.savefig('epoch.png', dpi=200)\n print('Epoch:', '%04d' % (epoch + 1), 'final cost=',\n '{:.9f}'.format(avg_cost))\n saver.save(sess, 'model/' + self.model.name)\n\n def test(self, test_x, test_y):\n variables = tf.initialize_all_variables()\n with tf.Session() as sess:\n sess.run(variables)\n saver = tf.train.Saver(max_to_keep=None)\n last_ckpt_path = tf.train.latest_checkpoint('model/')\n if last_ckpt_path is not None:\n saver.restore(sess, last_ckpt_path)\n else:\n print('Not found the model.')\n return None\n return self.model.accuracy.eval({self.model.input: np.asarray(\n [[test_x]]),\n self.model.output: np.asarray([test_y])})\n\n def predict(self, data_x):\n variables = tf.initialize_all_variables()\n actual_results = []\n with tf.Session() as sess:\n sess.run(variables)\n saver = tf.train.Saver(max_to_keep=None)\n last_ckpt_path = tf.train.latest_checkpoint('model/')\n if last_ckpt_path is not None:\n saver.restore(sess, last_ckpt_path)\n else:\n print('Not found the model.')\n return None\n for x in data_x:\n y = sess.run([self.model.net],\n feed_dict={self.model.input: np.asarray(\n [[x]])})\n actual_results.append(y)\n return actual_results\n"
] | [
[
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.slice",
"tensorflow.placeholder",
"tensorflow.nn.tanh",
"tensorflow.log",
"tensorflow.train.AdamOptimizer"
],
[
"tensorflow.train.latest_checkpoint",
"matplotlib.pyplot.title",
"numpy.asarray",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.Saver",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
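Editorial note: FaceGANModel above wires up the classic minimax pair, with the discriminator minimising -(log D(x) + log(1 - D(G(z)))) and the generator minimising -log D(G(z)). A plain-NumPy sketch of those two objectives on made-up discriminator scores; the eps term is an assumption added only to keep the logs finite.

# Sketch only: the two GAN objectives used in FaceGANModel.
import numpy as np

def discriminator_loss(d_real, d_fake, eps=1e-8):
    # -(log D(x) + log(1 - D(G(z)))), averaged over the batch
    return float(np.mean(-(np.log(d_real + eps) + np.log(1.0 - d_fake + eps))))

def generator_loss(d_fake, eps=1e-8):
    # -log D(G(z)): the generator gains when the discriminator is fooled
    return float(np.mean(-np.log(d_fake + eps)))

d_real = np.array([0.9, 0.8, 0.95])   # hypothetical D outputs on real images
d_fake = np.array([0.1, 0.2, 0.05])   # hypothetical D outputs on generated ones
print(discriminator_loss(d_real, d_fake), generator_loss(d_fake))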
Fngg/contact_tracing | [
"0df5d859fe7b89ac6539d409af8e6cec86f32137"
] | [
"src/service.py"
] | [
"'''\n流调查询\n'''\nfrom settings_class import settings_obj\nimport pandas as pd\nfrom util.logger import logger\nfrom sqlalchemy import desc\nfrom datetime import datetime,timedelta\n\n\ndef get_forward_backward_time(current_time, minutes):\n # 获取向前和向后的时间\n if isinstance(current_time,str):\n current = datetime.strptime(current_time, \"%Y-%m-%d %H:%M:%S\")\n else:\n current =current_time\n backward_time = current + timedelta(minutes=minutes)\n forward_time = current - timedelta(minutes=minutes)\n return forward_time, backward_time\n\n\ndef get_forward_contact(forward_time, current_time, location_condition_str, user_id, conn,table_obj,close_contact_record_num):\n '''\n 先向前查询,逻辑为 查询在向前时间到当前时间范围内,在该校区和该地点内的,并以时间倒序排序查询的限制close_contact_record_num\n :return:\n '''\n # 查(在限制的时间范围内)close_contact_record_num 条记录\n s = f\"conn.session.query({settings_obj.result_filed_names_str}).filter(table_obj.c[settings_obj.user_id_field_name]!=user_id, table_obj.c[settings_obj.time_field_name]>=forward_time, table_obj.c[settings_obj.time_field_name]<=current_time, {location_condition_str}).order_by(desc(table_obj.c[settings_obj.time_field_name])).limit(close_contact_record_num).all()\"\n records = eval(s)\n forward_results = [list(result) for result in records]\n return forward_results\n\n\ndef get_backward_contact(backward_time,current_time,location_condition_str,user_id,conn,table_obj,close_contact_record_num):\n '''\n 再向后查询,逻辑为 查询在当前时间到向后时间范围内,在该校区和该地点内的,并以时间正序排序查询的限制close_contact_record_num 条消费记录\n :param close_contact_record_num:\n :param backward_time:\n :param current_time:\n :param location_condition_str:\n :param user_id:\n :param conn:\n :return:\n '''\n # 先查(在限制的时间范围内))close_contact_record_num 条记录\n s =f\"conn.session.query({settings_obj.result_filed_names_str}).filter(table_obj.c[settings_obj.user_id_field_name]!=user_id, table_obj.c[settings_obj.time_field_name]>=current_time, table_obj.c[settings_obj.time_field_name]<=backward_time, {location_condition_str}).order_by(table_obj.c[settings_obj.time_field_name]).limit(close_contact_record_num).all()\"\n records = eval(s)\n backward_results = [list(result) for result in records]\n return backward_results\n\n\ndef get_backward_contacts(backward_time, current_time, location_condition_str, user_id, conn,table_obj, close_contact_record_num=settings_obj.close_contact_people_num, backward_result_df=None):\n if backward_result_df is None:\n backward_result_df = pd.DataFrame(columns=settings_obj.result_filed_names)\n backward_results = get_backward_contact(backward_time, current_time, location_condition_str, user_id, conn,table_obj, close_contact_record_num)\n if len(backward_results)>0:\n tem_df = pd.DataFrame(backward_results,columns=backward_result_df.columns)\n backward_result_df = pd.concat([backward_result_df, tem_df], ignore_index=True)\n backward_result_df.sort_values(by=settings_obj.time_field_name, inplace=True, ascending=True) # 升序\n if len(backward_results)>=close_contact_record_num:\n close_contact_record_num = settings_obj.close_contact_people_num - backward_result_df[settings_obj.user_id_field_name].nunique()\n if close_contact_record_num>0 and len(backward_result_df)>0:\n current_time = backward_result_df.loc[len(backward_result_df)-1,settings_obj.time_field_name]\n user_id = backward_result_df.loc[len(backward_result_df)-1,settings_obj.user_id_field_name]\n backward_result_df = get_backward_contacts(backward_time,current_time, location_condition_str, user_id, 
conn,table_obj,close_contact_record_num=close_contact_record_num,backward_result_df=backward_result_df)\n return backward_result_df\n\n\ndef get_forward_contacts(forward_time, current_time, location_condition_str, user_id, conn, table_obj, close_contact_record_num=settings_obj.close_contact_people_num, forward_result_df=None):\n '''\n 递归查询\n :param table_obj:\n :param forward_time:\n :param current_time:\n :param location_condition_str:\n :param user_id:\n :param conn:\n :param close_contact_record_num:\n :param forward_result_df:\n :return: 返回dataframe格式\n '''\n if forward_result_df is None:\n forward_result_df = pd.DataFrame(columns=settings_obj.result_filed_names)\n forward_results = get_forward_contact(forward_time, current_time, location_condition_str, user_id, conn,table_obj,close_contact_record_num)\n if len(forward_results)>0:\n tem_df = pd.DataFrame(forward_results, columns=forward_result_df.columns)\n forward_result_df = pd.concat([forward_result_df, tem_df], ignore_index=True)\n forward_result_df.sort_values(by=settings_obj.time_field_name, inplace=True, ascending=True) # 升序\n if len(forward_results)>=close_contact_record_num:\n # 这时候在forward_time, current_time时间范围内所有记录都已经被查询出\n close_contact_record_num = settings_obj.close_contact_people_num - forward_result_df[settings_obj.user_id_field_name].nunique()\n if close_contact_record_num>0 and len(forward_result_df)>0:\n current_time = forward_result_df.loc[0,settings_obj.time_field_name]\n user_id = forward_result_df.loc[0,settings_obj.user_id_field_name]\n forward_result_df = get_forward_contacts(forward_time,current_time, location_condition_str, user_id, conn, table_obj,close_contact_record_num=close_contact_record_num,forward_result_df=forward_result_df)\n return forward_result_df\n\n\ndef change_time_type(time,table_obj):\n '''\n 转换str_time的类型\n :param time: 字符串或者datetime两种类型\n :param table_obj:\n :return:\n '''\n if isinstance(time,datetime) or isinstance(time,str):\n if table_obj.c[settings_obj.time_field_name].type.python_type==datetime:\n if isinstance(time,str):\n return datetime.strptime(time,'%Y-%m-%d %H:%M:%S')\n else:\n if isinstance(time,datetime):\n return str(time)\n return time\n\n\ndef change_userid_type(user_id,table_obj):\n if table_obj.c[settings_obj.user_id_field_name].type.python_type==int:\n return int(user_id)\n return str(user_id)\n\n\ndef get_contact(user_id, conn, table_obj):\n # 针对user_id进行密接查询\n # 首先查询user_id在flow_tone_start_time和flow_tone_end_time时间内的消费记录\n # 因为user_id对应的消费记录也需要输出,所以查询时要查出包括location_filed_names、time_field_name、result_filed_names中的所有字段\n flow_tone_start_time = change_time_type(settings_obj.flow_tone_start_time, table_obj)\n flow_tone_end_time = change_time_type(settings_obj.flow_tone_end_time, table_obj)\n user_id = change_userid_type(user_id,table_obj)\n s = f\"conn.session.query({settings_obj.filed_names_str}).filter(table_obj.c[settings_obj.user_id_field_name]== user_id, table_obj.c[settings_obj.time_field_name]>=flow_tone_start_time, table_obj.c[settings_obj.time_field_name]<=flow_tone_end_time).order_by(table_obj.c[settings_obj.time_field_name]).all()\"\n records = eval(s)\n result_df = pd.DataFrame(columns=settings_obj.result_filed_names)\n for record in records:\n # 针对每一条消费记录,分别查询在该时间前后close_contact_time分钟内在该地点消费的相关记录,不超过多少人\n current_time = record[settings_obj.time_field_name]\n forward_time, backward_time = get_forward_backward_time(current_time, settings_obj.close_contact_time)\n forward_time = change_time_type(forward_time, table_obj)\n backward_time = 
change_time_type(backward_time, table_obj)\n current_time = change_time_type(current_time, table_obj)\n location_condition_list = []\n for location_filed_name in settings_obj.location_filed_names:\n location_condition = f\"table_obj.c['{location_filed_name}']=='{record[location_filed_name]}'\"\n location_condition_list.append(location_condition)\n location_condition_str = \", \".join(location_condition_list)\n # 先向前查询\n forward_results = get_forward_contacts(forward_time,current_time,location_condition_str,user_id,conn,table_obj)\n forward_results.drop_duplicates(subset=[settings_obj.user_id_field_name], keep ='last', inplace = True) # 去重\n # 先向后查询\n backward_results = get_backward_contacts(backward_time,current_time,location_condition_str,user_id,conn,table_obj)\n backward_results.drop_duplicates(subset=[settings_obj.user_id_field_name], keep ='first', inplace = True)\n # 放在一起\n result_df = pd.concat([result_df, forward_results], ignore_index=True)\n userid_result = [record[i] for i in settings_obj.result_filed_names]\n result_df.loc[len(result_df)] = userid_result # 记录user_id对应的消费记录\n result_df = pd.concat([result_df, backward_results], ignore_index=True)\n result_df[\"contact_\" + settings_obj.user_id_field_name] = user_id\n return result_df\n\n\ndef change_settings_obj():\n '''\n 将settings_obj 中的列表参数转为字符串\n :return:\n '''\n tmp_result_filed_names2 = settings_obj.result_filed_names.copy()\n tmp_result_filed_names2.append(settings_obj.time_field_name)\n tmp_result_filed_names2.extend(settings_obj.location_filed_names)\n tmp_result_filed_names2= [\"table_obj.c['\"+i+\"']\" for i in set(tmp_result_filed_names2)]\n filed_names_str = \", \".join(tmp_result_filed_names2)\n settings_obj.filed_names_str = filed_names_str\n tmp_result_filed_names3= [\"table_obj.c['\"+i+\"']\" for i in settings_obj.result_filed_names]\n result_filed_names_str = \", \".join(tmp_result_filed_names3)\n settings_obj.result_filed_names_str = result_filed_names_str\n\n\ndef flow_tone(user_ids, conn):\n # 定义输出的df\n tmp_result_filed_names = settings_obj.result_filed_names.copy()\n result_columns = tmp_result_filed_names.append(\"contact_\" + settings_obj.user_id_field_name)\n result_df = pd.DataFrame(columns=result_columns)\n change_settings_obj()\n # 获取数据库表的对象\n table_obj = conn.get_table_obj(settings_obj.table_name,settings_obj)\n for user_id in user_ids:\n # 针对每一条 user_id 进行密接查询\n logger.info(f\"针对 {settings_obj.user_id_field_name} 为 {user_id} 的人员进行密接查询\")\n sub_result_df = get_contact(user_id, conn, table_obj)\n result_df = pd.concat([result_df, sub_result_df], ignore_index=True)\n return result_df\n"
] | [
[
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
HsiuWen/FairMOT | [
"67c4fe4b5ce11f960251bd8d0cdfd37622194e29"
] | [
"src/lib/datasets/dataset/jde.py"
] | [
"import glob\nimport math\nimport os\nimport os.path as osp\nimport random\nimport time\nfrom collections import OrderedDict\n\nimport cv2\nimport json\nimport numpy as np\nimport torch\nimport copy\n\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import transforms as T\nfrom cython_bbox import bbox_overlaps as bbox_ious\nfrom opts import opts\nfrom utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian\nfrom utils.utils import xyxy2xywh, generate_anchors, xywh2xyxy, encode_delta\n\n\nclass LoadImages: # for inference\n def __init__(self, path, img_size=(1088, 608)):\n if os.path.isdir(path):\n image_format = ['.jpg', '.jpeg', '.png', '.tif']\n self.files = sorted(glob.glob('%s/*.*' % path))\n self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))\n elif os.path.isfile(path):\n self.files = [path]\n\n self.nF = len(self.files) # number of image files\n self.width = img_size[0]\n self.height = img_size[1]\n self.count = 0\n\n assert self.nF > 0, 'No images found in ' + path\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if self.count == self.nF:\n raise StopIteration\n img_path = self.files[self.count]\n\n # Read image\n img0 = cv2.imread(img_path) # BGR\n assert img0 is not None, 'Failed to load ' + img_path\n\n # Padded resize\n img, _, _, _ = letterbox(img0, height=self.height, width=self.width)\n\n # Normalize RGB\n img = img[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n # cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image\n return img_path, img, img0\n\n def __getitem__(self, idx):\n idx = idx % self.nF\n img_path = self.files[idx]\n\n # Read image\n img0 = cv2.imread(img_path) # BGR\n assert img0 is not None, 'Failed to load ' + img_path\n\n # Padded resize\n img, _, _, _ = letterbox(img0, height=self.height, width=self.width)\n\n # Normalize RGB\n img = img[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n return img_path, img, img0\n\n def __len__(self):\n return self.nF # number of files\n\n\nclass LoadVideo: # for inference\n def __init__(self, path, img_size=(1088, 608)):\n self.cap = cv2.VideoCapture(path)\n self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))\n self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n self.width = img_size[0]\n self.height = img_size[1]\n self.count = 0\n\n self.w, self.h = 1920, 1080\n print('Lenth of the video: {:d} frames'.format(self.vn))\n\n def get_size(self, vw, vh, dw, dh):\n wa, ha = float(dw) / vw, float(dh) / vh\n a = min(wa, ha)\n return int(vw * a), int(vh * a)\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if self.count == len(self):\n raise StopIteration\n # Read image\n res, img0 = self.cap.read() # BGR\n assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)\n img0 = cv2.resize(img0, (self.w, self.h))\n\n # Padded resize\n img, _, _, _ = letterbox(img0, height=self.height, width=self.width)\n\n # Normalize RGB\n img = img[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n # cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image\n return 
self.count, img, img0\n\n def __len__(self):\n return self.vn # number of files\n\n\nclass LoadImagesAndLabels: # for training\n def __init__(self, path, img_size=(1088, 608), augment=False, transforms=None):\n with open(path, 'r') as file:\n self.img_files = file.readlines()\n self.img_files = [x.replace('\\n', '') for x in self.img_files]\n self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))\n\n self.label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')\n for x in self.img_files]\n\n self.nF = len(self.img_files) # number of image files\n self.width = img_size[0]\n self.height = img_size[1]\n self.augment = augment\n self.transforms = transforms\n\n def __getitem__(self, files_index):\n img_path = self.img_files[files_index]\n label_path = self.label_files[files_index]\n return self.get_data(img_path, label_path)\n\n def get_data(self, img_path, label_path):\n height = self.height\n width = self.width\n img = cv2.imread(img_path) # BGR\n if img is None:\n raise ValueError('File corrupt {}'.format(img_path))\n augment_hsv = True\n if self.augment and augment_hsv:\n # SV augmentation by 50%\n fraction = 0.50\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n S = img_hsv[:, :, 1].astype(np.float32)\n V = img_hsv[:, :, 2].astype(np.float32)\n\n a = (random.random() * 2 - 1) * fraction + 1\n S *= a\n if a > 1:\n np.clip(S, a_min=0, a_max=255, out=S)\n\n a = (random.random() * 2 - 1) * fraction + 1\n V *= a\n if a > 1:\n np.clip(V, a_min=0, a_max=255, out=V)\n\n img_hsv[:, :, 1] = S.astype(np.uint8)\n img_hsv[:, :, 2] = V.astype(np.uint8)\n cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)\n\n h, w, _ = img.shape\n img, ratio, padw, padh = letterbox(img, height=height, width=width)\n\n # Load labels\n if os.path.isfile(label_path):\n labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)\n\n # Normalized xywh to pixel xyxy format\n labels = labels0.copy()\n labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw\n labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh\n labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw\n labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh\n else:\n labels = np.array([])\n\n # Augment image and labels\n if self.augment:\n img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))\n\n plotFlag = False\n if plotFlag:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n plt.figure(figsize=(50, 50))\n plt.imshow(img[:, :, ::-1])\n plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')\n plt.axis('off')\n plt.savefig('test.jpg')\n time.sleep(10)\n\n nL = len(labels)\n if nL > 0:\n # convert xyxy to xywh\n labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy()) # / height\n labels[:, 2] /= width\n labels[:, 3] /= height\n labels[:, 4] /= width\n labels[:, 5] /= height\n if self.augment:\n # random left-right flip\n lr_flip = True\n if lr_flip & (random.random() > 0.5):\n img = np.fliplr(img)\n if nL > 0:\n labels[:, 2] = 1 - labels[:, 2]\n\n img = np.ascontiguousarray(img[:, :, ::-1]) # BGR to RGB\n\n if self.transforms is not None:\n img = self.transforms(img)\n\n return img, labels, img_path, (h, w)\n\n def __len__(self):\n return self.nF # number of batches\n\n\ndef letterbox(img, height=608, width=1088,\n color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular\n shape = img.shape[:2] # shape = [height, width]\n 
ratio = min(float(height) / shape[0], float(width) / shape[1])\n new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]\n dw = (width - new_shape[0]) / 2 # width padding\n dh = (height - new_shape[1]) / 2 # height padding\n top, bottom = round(dh - 0.1), round(dh + 0.1)\n left, right = round(dw - 0.1), round(dw + 0.1)\n img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular\n return img, ratio, dw, dh\n\n\ndef random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),\n borderValue=(127.5, 127.5, 127.5)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4\n\n border = 0 # width of added border (optional)\n height = img.shape[0]\n width = img.shape[1]\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.random() * (degrees[1] - degrees[0]) + degrees[0]\n # a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations\n s = random.random() * (scale[1] - scale[0]) + scale[0]\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)\n T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)\n\n M = S @ T @ R # Combined rotation matrix. 
ORDER IS IMPORTANT HERE!!\n imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,\n borderValue=borderValue) # BGR order borderValue\n\n # Return warped points also\n if targets is not None:\n if len(targets) > 0:\n n = targets.shape[0]\n points = targets[:, 2:6].copy()\n area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])\n\n # warp points\n xy = np.ones((n * 4, 3))\n xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = (xy @ M.T)[:, :2].reshape(n, 8)\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # apply angle-based reduction\n radians = a * math.pi / 180\n reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5\n x = (xy[:, 2] + xy[:, 0]) / 2\n y = (xy[:, 3] + xy[:, 1]) / 2\n w = (xy[:, 2] - xy[:, 0]) * reduction\n h = (xy[:, 3] - xy[:, 1]) * reduction\n xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T\n\n # reject warped points outside of image\n #np.clip(xy[:, 0], 0, width, out=xy[:, 0])\n #np.clip(xy[:, 2], 0, width, out=xy[:, 2])\n #np.clip(xy[:, 1], 0, height, out=xy[:, 1])\n #np.clip(xy[:, 3], 0, height, out=xy[:, 3])\n w = xy[:, 2] - xy[:, 0]\n h = xy[:, 3] - xy[:, 1]\n area = w * h\n ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))\n i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)\n\n targets = targets[i]\n targets[:, 2:6] = xy[i]\n\n return imw, targets, M\n else:\n return imw\n\n\ndef collate_fn(batch):\n imgs, labels, paths, sizes = zip(*batch)\n batch_size = len(labels)\n imgs = torch.stack(imgs, 0)\n max_box_len = max([l.shape[0] for l in labels])\n labels = [torch.from_numpy(l) for l in labels]\n filled_labels = torch.zeros(batch_size, max_box_len, 6)\n labels_len = torch.zeros(batch_size)\n\n for i in range(batch_size):\n isize = labels[i].shape[0]\n if len(labels[i]) > 0:\n filled_labels[i, :isize, :] = labels[i]\n labels_len[i] = isize\n\n return imgs, filled_labels, paths, sizes, labels_len.unsqueeze(1)\n\n\nclass JointDataset(LoadImagesAndLabels): # for training\n default_resolution = [1088, 608]\n mean = None\n std = None\n num_classes = 1\n\n def __init__(self, opt, root, paths, img_size=(1088, 608), augment=False, transforms=None):\n self.opt = opt\n dataset_names = paths.keys()\n self.img_files = OrderedDict()\n self.label_files = OrderedDict()\n self.tid_num = OrderedDict()\n self.tid_start_index = OrderedDict()\n self.num_classes = 1 #TODO JointDataset limit the tracking to one class type only\n\n for ds, path in paths.items():\n with open(path, 'r') as file:\n self.img_files[ds] = file.readlines()\n self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]\n self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))\n\n self.label_files[ds] = [\n x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')\n for x in self.img_files[ds]]\n\n for ds, label_paths in self.label_files.items():\n max_index = -1\n print('Processing labels files ({}), please be patiend.'.format(len(label_paths)))\n every_10_percent = int(len(label_paths)/10)\n count = 0\n for lp in label_paths:\n count +=1\n if count % every_10_percent == 0:\n print('Finished {}0%'.format(int(count/every_10_percent)))\n lb = np.loadtxt(lp)\n if len(lb) < 1:\n continue\n if len(lb.shape) < 2:\n img_max = lb[1]\n else:\n img_max = np.max(lb[:, 1])\n if img_max > max_index:\n 
max_index = img_max\n self.tid_num[ds] = max_index + 1\n # For fast debugging of UA-DETRAC\n # self.tid_num[ds] = 342\n \n last_index = 0\n for i, (k, v) in enumerate(self.tid_num.items()):\n self.tid_start_index[k] = last_index\n last_index += v\n\n self.nID = int(last_index + 1)\n self.nds = [len(x) for x in self.img_files.values()]\n self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]\n self.nF = sum(self.nds)\n self.width = img_size[0]\n self.height = img_size[1]\n self.max_objs = opt.K\n self.augment = augment\n self.transforms = transforms\n\n print('=' * 80)\n print('dataset summary')\n print(self.tid_num)\n print('total # identities:', self.nID)\n print('start index')\n print(self.tid_start_index)\n print('=' * 80)\n\n def __getitem__(self, files_index):\n\n for i, c in enumerate(self.cds):\n if files_index >= c:\n ds = list(self.label_files.keys())[i]\n start_index = c\n\n img_path = self.img_files[ds][files_index - start_index]\n label_path = self.label_files[ds][files_index - start_index]\n\n imgs, labels, img_path, (input_h, input_w) = self.get_data(img_path, label_path)\n for i, _ in enumerate(labels):\n if labels[i, 1] > -1:\n labels[i, 1] += self.tid_start_index[ds]\n\n output_h = imgs.shape[1] // self.opt.down_ratio\n output_w = imgs.shape[2] // self.opt.down_ratio\n num_classes = self.num_classes\n num_objs = labels.shape[0]\n hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)\n if self.opt.ltrb:\n wh = np.zeros((self.max_objs, 4), dtype=np.float32)\n else:\n wh = np.zeros((self.max_objs, 2), dtype=np.float32)\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\n ind = np.zeros((self.max_objs, ), dtype=np.int64)\n reg_mask = np.zeros((self.max_objs, ), dtype=np.uint8)\n ids = np.zeros((self.max_objs, ), dtype=np.int64)\n bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)\n\n draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian\n for k in range(num_objs):\n label = labels[k]\n bbox = label[2:]\n cls_id = int(label[0])\n bbox[[0, 2]] = bbox[[0, 2]] * output_w\n bbox[[1, 3]] = bbox[[1, 3]] * output_h\n bbox_amodal = copy.deepcopy(bbox)\n bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.\n bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.\n bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]\n bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]\n bbox[0] = np.clip(bbox[0], 0, output_w - 1)\n bbox[1] = np.clip(bbox[1], 0, output_h - 1)\n h = bbox[3]\n w = bbox[2]\n\n bbox_xy = copy.deepcopy(bbox)\n bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2\n bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2\n bbox_xy[2] = bbox_xy[0] + bbox_xy[2]\n bbox_xy[3] = bbox_xy[1] + bbox_xy[3]\n\n if h > 0 and w > 0:\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n radius = 6 if self.opt.mse_loss else radius\n #radius = max(1, int(radius)) if self.opt.mse_loss else radius\n ct = np.array(\n [bbox[0], bbox[1]], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n draw_gaussian(hm[cls_id], ct_int, radius)\n if self.opt.ltrb:\n wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \\\n bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]\n else:\n wh[k] = 1. * w, 1. 
* h\n ind[k] = ct_int[1] * output_w + ct_int[0]\n reg[k] = ct - ct_int\n reg_mask[k] = 1\n ids[k] = label[1]\n bbox_xys[k] = bbox_xy\n\n ret = {'input': imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids, 'bbox': bbox_xys}\n return ret\n\n\nclass DetDataset(LoadImagesAndLabels): # for training\n def __init__(self, root, paths, img_size=(1088, 608), augment=False, transforms=None):\n\n dataset_names = paths.keys()\n self.img_files = OrderedDict()\n self.label_files = OrderedDict()\n self.tid_num = OrderedDict()\n self.tid_start_index = OrderedDict()\n for ds, path in paths.items():\n with open(path, 'r') as file:\n self.img_files[ds] = file.readlines()\n self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]\n self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))\n\n self.label_files[ds] = [\n x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')\n for x in self.img_files[ds]]\n\n for ds, label_paths in self.label_files.items():\n max_index = -1\n for lp in label_paths:\n lb = np.loadtxt(lp)\n if len(lb) < 1:\n continue\n if len(lb.shape) < 2:\n img_max = lb[1]\n else:\n img_max = np.max(lb[:, 1])\n if img_max > max_index:\n max_index = img_max\n self.tid_num[ds] = max_index + 1\n\n last_index = 0\n for i, (k, v) in enumerate(self.tid_num.items()):\n self.tid_start_index[k] = last_index\n last_index += v\n\n self.nID = int(last_index + 1)\n self.nds = [len(x) for x in self.img_files.values()]\n self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]\n self.nF = sum(self.nds)\n self.width = img_size[0]\n self.height = img_size[1]\n self.augment = augment\n self.transforms = transforms\n\n print('=' * 80)\n print('dataset summary')\n print(self.tid_num)\n print('total # identities:', self.nID)\n print('start index')\n print(self.tid_start_index)\n print('=' * 80)\n\n def __getitem__(self, files_index):\n\n for i, c in enumerate(self.cds):\n if files_index >= c:\n ds = list(self.label_files.keys())[i]\n start_index = c\n\n img_path = self.img_files[ds][files_index - start_index]\n label_path = self.label_files[ds][files_index - start_index]\n if os.path.isfile(label_path):\n labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)\n\n imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)\n for i, _ in enumerate(labels):\n if labels[i, 1] > -1:\n labels[i, 1] += self.tid_start_index[ds]\n\n return imgs, labels0, img_path, (h, w)\n\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.zeros",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"numpy.clip",
"numpy.fliplr",
"numpy.eye",
"torch.from_numpy",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.ascontiguousarray",
"matplotlib.pyplot.savefig",
"torch.stack",
"numpy.array",
"numpy.maximum",
"matplotlib.use",
"numpy.ones",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
llichengtong/yx1 | [
"6619532e43799fea739a21fb14b999f7d1898fe9"
] | [
"h_RNN/Mnist.py"
] | [
"import time\r\nimport tflearn\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom h_RNN.RNN import RNNWrapper, Generator\r\nfrom h_RNN.SpRNN import SparseRNN\r\nfrom Util.Util import DataUtil\r\n\r\n\r\nclass MnistGenerator(Generator):\r\n def __init__(self, im=None, om=None, one_hot=True):\r\n super(MnistGenerator, self).__init__(im, om)\r\n self._x, self._y = DataUtil.get_dataset(\"mnist\", \"../_Data/mnist.txt\", quantized=True, one_hot=one_hot)\r\n self._x = self._x.reshape(-1, 28, 28)\r\n self._x_train, self._x_test = self._x[:1800], self._x[1800:]\r\n self._y_train, self._y_test = self._y[:1800], self._y[1800:]\r\n\r\n def gen(self, batch, test=False, **kwargs):\r\n if batch == 0:\r\n if test:\r\n return self._x_test, self._y_test\r\n return self._x_train, self._y_train\r\n batch = np.random.choice(len(self._x_train), batch)\r\n return self._x_train[batch], self._y_train[batch]\r\n\r\nif __name__ == '__main__':\r\n n_history = 3\r\n print(\"=\" * 60, \"\\n\" + \"Normal LSTM\", \"\\n\" + \"-\" * 60)\r\n generator = MnistGenerator()\r\n t = time.time()\r\n tf.reset_default_graph()\r\n rnn = RNNWrapper()\r\n rnn.fit(28, 10, generator, n_history=n_history, epoch=10, squeeze=True)\r\n print(\"Time Cost: {}\".format(time.time() - t))\r\n rnn.draw_err_logs()\r\n\r\n print(\"=\" * 60, \"\\n\" + \"Sparse LSTM\" + \"\\n\" + \"-\" * 60)\r\n generator = MnistGenerator(one_hot=False)\r\n t = time.time()\r\n tf.reset_default_graph()\r\n rnn = SparseRNN()\r\n rnn.fit(28, 10, generator, n_history=n_history, epoch=10)\r\n print(\"Time Cost: {}\".format(time.time() - t))\r\n rnn.draw_err_logs()\r\n\r\n print(\"=\" * 60, \"\\n\" + \"Tflearn\", \"\\n\" + \"-\" * 60)\r\n generator = MnistGenerator()\r\n t = time.time()\r\n tf.reset_default_graph()\r\n net = tflearn.input_data(shape=[None, 28, 28])\r\n net = tf.concat(tflearn.lstm(net, 128, return_seq=True)[-n_history:], axis=1)\r\n net = tflearn.fully_connected(net, 10, activation='softmax')\r\n net = tflearn.regression(net, optimizer='adam', batch_size=64,\r\n loss='categorical_crossentropy')\r\n model = tflearn.DNN(net, tensorboard_verbose=0)\r\n model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)\r\n print(\"Time Cost: {}\".format(time.time() - t))\r\n"
] | [
[
"tensorflow.reset_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
vamsigp/EVA5 | [
"e03603cbf41f8d18d2e0ac149ddc5718371a360e"
] | [
"trainer/trainer.py"
] | [
"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tqdm import tqdm\n\n\nclass Trainer():\n\n def __init__(self, model, device, train_loader, test_loader, optimizer, loss_func, lr_scheduler):\n self.is_last_epoch = False\n# self.train_losses = [] # detailed training loss\n self.test_losses = []\n# self.train_acc = [] # detailed training accuracy\n self.train_acc_total = [] # per epoch training accuracy\n self.train_loss_total = [] # per epoch train loss\n self.test_acc = []\n self.model = model\n self.device = device\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.optimizer = optimizer\n self.loss_func = loss_func\n self.lr_scheduler = lr_scheduler\n\n def train_model(self, lambda_l1, epochs=5):\n for epoch in range(epochs):\n print(\"\\nCurrent EPOCH:\", epoch)\n\n if self.lr_scheduler is not None:\n if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n print(\"Current EPOCH:\", epoch, \"last LR=\", self.lr_scheduler.get_last_lr(), \"LR = \", self.lr_scheduler.get_lr())\n else:\n print(\"Learning Rate(ReduceLROnPlateau) = \", self.optimizer.param_groups[0]['lr'])\n # https://discuss.pytorch.org/t/how-to-retrieve-learning-rate-from-reducelronplateau-scheduler/54234\n\n self.train(epoch, lambda_l1)\n self.is_last_epoch = epoch == epochs\n tst_metric = self.test()\n\n if self.lr_scheduler is not None:\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n val_loss = tst_metric # test_losses[-1]\n print(\"ReduceLROnPlateau, ReduceLROnPlateau::step(), val_loss\", val_loss)\n self.lr_scheduler.step(val_loss)\n\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.StepLR):\n self.lr_scheduler.step()\n\n\n return (self.train_loss_total, self.train_acc_total, self.test_losses, self.test_acc)\n\n# def get_detailed_train_stats(self):\n# return (self.train_losses, self.train_acc)\n\n def train(self, epoch, lambda_l1):\n self.model.train()\n pbar = tqdm(self.train_loader)\n correct = 0\n processed = 0\n loss = 0\n for batch_idx, (data, target) in enumerate(pbar):\n # get samples\n data, target = data.to(self.device), target.to(self.device)\n\n # Init\n self.optimizer.zero_grad()\n # In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch accumulates the gradients on subsequent backward passes.\n # Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly.\n\n # Predict\n y_pred = self.model(data)\n\n # Calculate loss\n loss = self.loss_func(y_pred, target)\n\n # L2 loss\n\n # L1 loss\n loss_l1 = 0\n # lambda_l1 = 0.05\n if lambda_l1 > 0:\n for p in self.model.parameters():\n loss_l1 = loss_l1 + p.abs().sum()\n loss = loss + lambda_l1 * loss_l1\n\n# self.train_losses.append(loss)\n\n # Backpropagation\n loss.backward()\n self.optimizer.step()\n\n pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n processed += len(data)\n\n pbar.set_description(\n desc=f'Train set: Loss={loss.item()} Batch_id={batch_idx} Accuracy={100 * correct / processed:0.2f}')\n# self.train_acc.append(100 * correct / processed)\n\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.OneCycleLR):\n self.lr_scheduler.step()\n\n\n training_accuracy_perepoch = 100 * correct / processed\n 
self.train_acc_total.append(training_accuracy_perepoch)\n self.train_loss_total.append(loss)\n\n def test(self):\n self.model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.test_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n test_loss += self.loss_func(output, target).item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n is_correct = pred.eq(target.view_as(pred))\n correct += is_correct.sum().item()\n\n test_loss /= len(self.test_loader.dataset)\n self.test_losses.append(test_loss)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(self.test_loader.dataset),\n 100. * correct / len(self.test_loader.dataset)))\n\n self.test_acc.append(100. * correct / len(self.test_loader.dataset))\n return test_loss\n\n def getValues(self):\n return (self.train_loss_total, self.train_acc_total, self.test_losses, self.test_acc)\n\n def get_misclassified(self):\n self.model.eval()\n misclassified_imgs = []\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.test_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n is_correct = pred.eq(target.view_as(pred))\n if True:\n misclassified_inds = (is_correct == 0).nonzero()[:, 0]\n for mis_ind in misclassified_inds:\n if len(misclassified_imgs) == 25:\n break\n misclassified_imgs.append({\n \"target\": target[mis_ind].cpu().numpy(),\n \"pred\": pred[mis_ind][0].cpu().numpy(),\n \"img\": data[mis_ind].cpu().numpy()\n })\n correct += is_correct.sum().item()\n\n return misclassified_imgs\n\n def classwise_acc(self, classes):\n class_correct = list(0. for i in range(10))\n class_total = list(0. for i in range(10))\n with torch.no_grad():\n for images, labels in self.test_loader:\n images, labels = images.to(self.device), labels.to(self.device)\n outputs = self.model(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n # print class-wise test accuracies\n print()\n for i in range(10):\n print('Accuracy of %5s : %2d %%' % (\n classes[i], 100 * class_correct[i] / class_total[i]))\n print()\n"
] | [
[
"torch.no_grad",
"torch.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AlantheBenign/Minecraft-Stronghold-Finder | [
"05d3eb89eaccd3620fa25cd4c828c907aecf2178"
] | [
"finder.py"
] | [
"import numpy as np\n\n#hitCircle calculates where the first stronghold generation ring starts, where the player would \"hit\" it, moving directly forward\n#and calculates the position to the second ender eye throw\ndef hitCircle(pX,pZ,angle):\n xHit = None\n yHit = None\n cos = np.cos(angle*np.pi/180)\n #if the stronghold is at the +X\n if cos >= 0:\n x = np.linspace(pX-10, 2700,2700)\n a = np.tan(angle*np.pi/180)\n b = pZ - pX * a\n y = a*x + b\n for i in range(len(x)):\n if x[i]*x[i] + y[i]*y[i] >= 1408*1408:\n xHit = x[i]\n yHit = y[i]\n break\n pos1 = (xHit,yHit)\n x2 = np.linspace(xHit, xHit+100,500)\n a2 = -1/np.tan(angle*np.pi/180)\n b2 = yHit - xHit * a2\n y2 = a2*x2 + b2\n for i in range(len(x2)):\n if abs(x2[i] - xHit)**2 + abs(y2[i] - yHit)**2 >= 42*42:\n xST = x2[i]\n yST = y2[i]\n pos2 = (xST,yST)\n #if the stronghold is at the -X\n else:\n x = np.linspace(pX+10, -2700,2700)\n a = np.tan(angle*np.pi/180)\n b = pZ - pX * a\n y = a*x + b\n for i in range(len(x)):\n if x[i]*x[i] + y[i]*y[i] >= 1408*1408:\n xHit = x[i]\n yHit = y[i]\n break\n pos1 = (xHit,yHit)\n x2 = np.linspace(xHit, xHit+100,500)\n a2 = -1/np.tan(angle*np.pi/180)\n b2 = yHit - xHit * a2\n y2 = a2*x2 + b2\n for i in range(len(x2)):\n if abs(x2[i] - xHit)**2 + abs(y2[i] - yHit)**2 >= 42*42:\n xST = x2[i]\n yST = y2[i]\n pos2 = (xST,yST)\n \n return (pos1,pos2)\n \ndef StrongholdCoords():\n #stabilishing the variables\n f3c0 = input()\n f3c0 = f3c0[42:]\n f3c0 = f3c0.split()\n px0 = float(f3c0[0]) \n pz0 = float(f3c0[2])\n angle0 = float(f3c0[3])%360\n\n #translating minecraft angles to daily life cartesian angles\n if angle0 >= 0:\n angle0 = (angle0+90)%360\n else:\n angle0 = (angle0-270)%360\n \n #distance from origin\n distOrigin = np.sqrt(px0*px0 + pz0*pz0)\n #print(\"You're this far from the Origin: \", distOrigin)\n \n if distOrigin >= 1400:\n print(\"Move 27 blocks perpendicularly to the Ender Eye flight direction and throw the second one. (27 blocks = 4 seconds sprinting)\")\n \n else:\n circlePoint, secThrowPoint = hitCircle(px0,pz0,angle0)\n print(\"Go to: \", secThrowPoint, \"\\nCircle starts at: \", circlePoint)\n \n #stabilishing the variables\n f3c1 = input()\n f3c1 = f3c1[42:]\n f3c1 = f3c1.split()\n px1 = float(f3c1[0])\n pz1 = float(f3c1[2])\n angle1 = float(f3c1[3])%360\n \n #translating minecraft angles to daily life cartesian angles \n if angle1 >= 0:\n angle1 = (angle1+90)%360\n else:\n angle1 = (angle1-270)%360\n \n #calculating stronghold position\n a0 = np.tan(angle0*np.pi/180)\n a1 = np.tan(angle1*np.pi/180)\n b0 = pz0 - px0 * a0\n b1 = pz1 - px1 * a1\n pxS = (b1 - b0)/(a0 - a1)\n pzS = pxS * a0 + b0\n \n #printing\n print(\"Stronghold is at: \", (pxS, pzS), \" GOOD LUCK :D\")\n"
] | [
[
"numpy.tan",
"numpy.sqrt",
"numpy.cos",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OliverScherf/mlir-emitc | [
"af6a34bee5563bf71a218a93139da0e25cd9b2a5"
] | [
"scripts/optimize_tf_dialect.py"
] | [
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Forked from https://github.com/google/iree\n\nimport argparse\n\nfrom tensorflow.python import pywrap_mlir # pylint: disable=no-name-in-module\n\n\ndef optimize(model_path: str, output_path: str):\n pass_pipeline = \",\".join([\n \"symbol-dce\", \"tf-standard-pipeline\",\n \"func(tf-device-index-selector)\", \"inline\", \"canonicalize\",\n \"func(tf-device-decompose-resource-ops)\",\n \"func(tf-functional-control-flow-to-cfg)\", \"inline\", \"symbol-dce\",\n \"canonicalize\", \"tf-saved-model-optimize-global-tensors\",\n \"tf-saved-model-freeze-global-tensors\"\n ])\n with open(model_path) as file:\n mlir = file.read()\n\n with open(output_path, \"w\") as file:\n file.write(\n pywrap_mlir.experimental_run_pass_pipeline(mlir, pass_pipeline,\n True))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Optimize model in tf dialect\")\n parser.add_argument(\"model_path\",\n metavar=\"model-path\",\n help=\"Path to tf mlir model\")\n parser.add_argument(\"output_path\",\n metavar=\"output-path\",\n help=\"Output path\")\n args = parser.parse_args()\n\n optimize(args.model_path, args.output_path)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.python.pywrap_mlir.experimental_run_pass_pipeline"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
V4I2021/V4IBack | [
"88c6215d65eccae42cde18515d1da84d94c45ba6"
] | [
"dataService/dataService.py"
] | [
"import datetime\nimport os\nimport pandas as pd\nfrom flask_caching import Cache\nimport seaborn as sns\nfrom scipy.stats import pearsonr\n\nimport numpy as np\nimport math\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\n\ncache = Cache()\n\nFILE_ABS_PATH = os.path.dirname(__file__)\nROOT_PATH = os.path.join(FILE_ABS_PATH, '../')\nEDGE_FOLDER = os.path.join(ROOT_PATH, 'data/edge')\nINSIGHT_FOLDER = os.path.join(ROOT_PATH, 'data/insight')\nRECORD_FOLDER = os.path.join(ROOT_PATH, 'data/record')\nSID_CID_FOLDER = os.path.join(ROOT_PATH, 'data/sid_cid')\nSUBSPACE_FOLDER = os.path.join(ROOT_PATH, 'data/subspace')\n\nsns_counter = 0\n\n\nclass DataService():\n def __init__(self):\n pass\n\n def read_data_names(self):\n return [p.split('.')[0].split('_')[-1] for p in os.listdir(INSIGHT_FOLDER)]\n\n @cache.memoize(timeout=50)\n def __get_edge_by_name(self, name):\n edges_path = os.path.join(EDGE_FOLDER, 'edge_{}.csv'.format(name))\n df = pd.read_csv(edges_path)\n return df\n\n @cache.memoize(timeout=50)\n def __get_insight_by_name(self, name):\n insight_path = os.path.join(INSIGHT_FOLDER, 'insight_{}.csv'.format(name))\n df = pd.read_csv(insight_path)\n return df, df['insight'].unique().tolist(), df['insight_type'].unique().tolist()\n\n @cache.memoize(timeout=50)\n def __get_record_by_name(self, name):\n record_path = os.path.join(RECORD_FOLDER, 'record_{}.csv'.format(name))\n df = pd.read_csv(record_path)\n return df\n\n @cache.memoize(timeout=50)\n def __get_sid_cid_by_name(self, name):\n sid_cid_path = os.path.join(SID_CID_FOLDER, 'sid_cid_{}.csv'.format(name))\n df = pd.read_csv(sid_cid_path)\n return df\n\n @cache.memoize(timeout=50)\n def __get_subspace_by_name(self, name):\n subspace_path = os.path.join(SUBSPACE_FOLDER, 'subspace_{}.csv'.format(name))\n df = pd.read_csv(subspace_path)\n return df, df.columns.values.tolist()[0:-1]\n\n def __get_record_by_subspace(self, name, sid):\n sid_cid_data = self.__get_sid_cid_by_name(name)\n record_data = self.__get_record_by_name(name)\n\n df = pd.merge(sid_cid_data, record_data, on=['cid'])\n df = df.loc[df['sid'] == sid]\n df = df.drop(['sid', 'cid'], axis=1)\n return df\n\n def __get_subspace_str(self, subspace_col, row):\n style_before = '<span style=\"color:#f7cd59; display:inline;\">'\n style_after = '</span>'\n\n subspace = ''\n for i in range(len(subspace_col)):\n col = subspace_col[i]\n if row[col].tolist()[0] != '*':\n subspace += col + ' is ' + style_before + row[col].tolist()[0] + style_after + ', '\n if subspace[-2:] == ', ':\n subspace = subspace[0:-2]\n return subspace\n\n def get_data_by_name(self, name):\n edge_data = self.__get_edge_by_name(name)\n insight_data, insight_name, insight_type = self.__get_insight_by_name(name)\n measure_col = insight_data['measure'].unique()\n measures = []\n for measure in measure_col:\n if ';' not in measure:\n measures.append(measure)\n if len(measures) > 1:\n measures = ['All Measures'] + measures\n # record_data = self.__get_record_by_name(name)\n subspace_data, feature_data = self.__get_subspace_by_name(name)\n insight_cnt = insight_data['insight'].value_counts().to_dict()\n return {\n # 'record': record_data.to_dict('records'),\n 'insight': insight_data.to_dict('records'),\n 'edge': edge_data.to_dict('records'),\n 'feature': feature_data,\n 'insight_name': insight_name,\n 'insight_count': [insight_cnt[x] for x in insight_name],\n 'insight_type': insight_type,\n 'subspace': 
subspace_data.to_dict('index'),\n 'measures': measures\n }\n\n def get_insight_count_for_record_by_name(self, name):\n sid_cid_df = self.__get_sid_cid_by_name(name)\n insight_data, _, _ = self.__get_insight_by_name(name)\n iids_df = pd.merge(insight_data, sid_cid_df, on='sid', how='inner')\n iids_df = iids_df.groupby('cid')['iid'].apply(list).reset_index(name='iids')\n iids_df['iid_count'] = [len(id_list) for id_list in iids_df['iids']]\n iids_df.sort_values(by='iid_count', inplace=True, ascending=False)\n iids_df.reset_index(inplace=True, drop=True)\n res = iids_df.to_dict('index')\n return res\n\n def get_insight_by_iid(self, iid, name):\n insight_data, insight_name, insight_type = self.__get_insight_by_name(name)\n subspace_data, feature_data = self.__get_subspace_by_name(name)\n insight = insight_data.loc[insight_data['iid'] == iid]\n insight = pd.merge(insight, subspace_data, on='sid', how='inner')\n record = self.__get_record_by_subspace(name, insight['sid'].iloc[0])\n\n insight_name = insight['insight'].iloc[0]\n breakdown = insight['breakdown'].iloc[0]\n breakdown_value = insight['breakdown_value'].iloc[0]\n if breakdown_value.isdigit():\n breakdown_value = int(breakdown_value)\n measure = insight['measure'].iloc[0]\n subspace = self.__get_subspace_str(feature_data, insight)\n\n if insight_name == 'Top1':\n record = record.groupby(breakdown, as_index=False).agg({measure: 'sum'})\n record = record.sort_values(by=measure, ascending=False).iloc[0:10]\n measure_value = record[measure].tolist()\n sentence = '<span style=\"display:inline;\">The highest {} among {} is {} with {} ' \\\n 'equals <span style=\"color:#f7cd59; display:inline;\">{}</span> {}.</span>' \\\n .format(measure, breakdown, round(measure_value[0], 2),\n breakdown, breakdown_value,\n 'when ' + subspace if subspace != '' else '')\n return {\n 'insight_name': insight_name,\n 'breakdown': breakdown,\n 'breakdown_value': record[breakdown].tolist(),\n 'measure': measure,\n 'measure_value': measure_value,\n 'sentence': sentence\n }\n elif insight_name == 'Trend':\n record = record.groupby(breakdown, as_index=False).agg(\n {breakdown: 'first', measure: 'sum'})\n\n breakdown_value = record[breakdown]\n\n if breakdown == 'date' or breakdown == 'Date':\n record[breakdown] = pd.to_datetime(record[breakdown])\n try:\n x = record[breakdown].map(datetime.datetime.toordinal).values\n except:\n x = record[breakdown].values\n\n x = np.array(x).reshape(-1, 1)\n y = np.array(record[measure]).reshape(-1, 1)\n reg = LinearRegression().fit(x, y)\n slope = reg.coef_[0][0]\n sentence = '<span style=\"display:inline;\">The sum of {} over {} is ' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> {}.</span>' \\\n .format(measure, breakdown,\n ('increasing' if slope >= 0 else 'decreasing'),\n ('when ' + ' and '.join(subspace.rsplit(', ', 1)) if subspace != '' else ''), )\n # sentence = 'The trend of the {} {} over {}s' \\\n # '{}{} is {}.' 
\\\n # .format('total', measure, breakdown,\n # (' when ' if subspace != '' else ' in all data'),\n # ' and '.join(subspace.rsplit(', ', 1)),\n # ('increasing' if slope >= 0 else 'decreasing'))\n return {\n 'insight_name': insight_name,\n 'breakdown': breakdown,\n 'measure': measure,\n 'breakdown_value': breakdown_value.tolist(),\n 'measure_value': record[measure].tolist(),\n 'sentence': sentence\n }\n elif insight_name == 'Correlation':\n time_col = \"\"\n if name == 'carSales1':\n time_col = 'Year'\n elif name == 'Emission':\n time_col = 'Year'\n elif name == 'Census':\n time_col = 'Birthday'\n elif name == 'NBA':\n time_col = 'year'\n\n breakdown_value = insight['breakdown_value'].values[0].split(';')\n _, col_list = self.__get_subspace_by_name(name)\n record = self.__get_record_by_name(name)\n record.drop(['cid'], axis=1, inplace=True)\n\n for i in range(len(col_list)):\n value = insight[col_list[i]].values[0]\n if value != '*':\n record = record.loc[record[col_list[i]] == value]\n if breakdown_value[1] != '*':\n corr_record = record.loc[record[insight['breakdown'].values[0]] == breakdown_value[1]]\n else:\n corr_record = record.copy()\n if breakdown_value[0] != '*':\n record = record.loc[record[insight['breakdown'].values[0]] == breakdown_value[0]]\n\n corr_record = corr_record.groupby(time_col, as_index=False).agg(\n {breakdown: 'first', measure: 'sum'})\n record = record.groupby(time_col, as_index=False).agg(\n {breakdown: 'first', measure: 'sum'})\n y1 = record[measure].values\n y2 = corr_record[measure].values\n corr, _ = pearsonr(y1, y2)\n\n sentence = '<span style=\"display:inline;\">The {} of ' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> and ' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> are correlated {}.</span>' \\\n .format(measure, breakdown_value[0],\n (breakdown_value[1] if breakdown_value[1] != '*' else 'all data'),\n ('in the dataset' if subspace == '' else 'when ' + subspace))\n\n y1 = y1.tolist()\n y2 = y2.tolist()\n\n return {\n 'insight_name': insight_name,\n 'time_col': time_col,\n 'time_col_value': record[time_col].tolist(),\n 'measure': measure,\n 'measure_value': [y1, y2],\n 'max_min': [max(max(y1), max(y2)), min(min(y1), min(y2))],\n 'sentence': sentence\n }\n elif insight_name == 'Change Point' or insight_name == 'Outlier':\n record = record.groupby(breakdown, as_index=False).agg(\n {breakdown: 'first', measure: 'sum'})\n # todo: int value might be read as string\n y = record.loc[record[breakdown] == breakdown_value][measure].iloc[0]\n\n if insight_name == 'Change Point':\n sentence = '<span style=\"display:inline;\">Among {}s{}{}, ' \\\n 'change occurs in <span style=\"color:#f7cd59; display:inline;\">{}</span> ' \\\n 'and its {} {} is {}.</span>' \\\n .format(breakdown, (' when ' if subspace != '' else ' in all data'),\n ' and '.join(subspace.rsplit(', ', 1)),\n breakdown_value, 'total', measure, round(y, 2))\n\n else:\n sentence = '<span style=\"display:inline;\">Among {}s{}{}, ' \\\n 'the {} {} of {} in ' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> is an anomaly.</span>' \\\n .format(breakdown, (' when ' if subspace != '' else ' in all data'),\n ' and '.join(subspace.rsplit(', ', 1)),\n 'total', measure, round(y, 2), breakdown_value)\n\n return {\n 'insight_name': insight_name,\n 'breakdown': breakdown,\n 'measure': measure,\n 'breakdown_value': record[breakdown].tolist(),\n 'measure_value': record[measure].tolist(),\n 'x': str(breakdown_value),\n 'y': str(y),\n 'sentence': sentence\n }\n elif 
insight_name == 'Attribution':\n record = record.groupby(breakdown, as_index=False).agg(\n {breakdown: 'first', measure: 'sum'})\n record = record.sort_values(by=measure, ascending=False)\n\n breakdown_value = record[breakdown].tolist()\n percentage = (record[measure] / record[measure].sum()).tolist()\n\n breakdown_value_list = ''\n percentage_list = ''\n for i in range(len(breakdown_value)):\n percentage[i] = round(percentage[i], 2)\n if i == 0:\n breakdown_value_list += '<span style=\"color:#f7cd59; display:inline;\">' \\\n + breakdown_value[i] + '</span>, '\n percentage_list += '<span style=\"color:#f7cd59; display:inline;\">' \\\n + str(percentage[i]) + '</span>, '\n else:\n breakdown_value_list += breakdown_value[i] + ', '\n percentage_list += str(percentage[i]) + ', '\n if breakdown_value_list[-2:] == ', ':\n breakdown_value_list = breakdown_value_list[0:-2]\n percentage_list = percentage_list[0:-2]\n sentence = '<span style=\"display:inline;\">{} makes up {} ' \\\n 'of the {} {}{}{}{}.</span>' \\\n .format(' and '.join(breakdown_value_list.rsplit(', ', 1)),\n ' and '.join(percentage_list.rsplit(', ', 1)),\n 'total', measure,\n ' respectively ' if len(percentage) > 1 else '',\n (' when ' if subspace != '' else ' in all data'),\n ' and '.join(subspace.rsplit(', ', 1)))\n\n return {\n 'insight_name': insight_name,\n 'breakdown_value': breakdown_value,\n 'measure_value': record[measure].tolist(),\n 'sentence': sentence,\n 'percentage': percentage\n }\n elif insight_name == 'Cross Measure Correlation':\n measures = measure.split(';')\n record = record.groupby(breakdown, as_index=False).agg(\n {breakdown: 'first', measures[0]: 'sum', measures[1]: 'sum'})\n record = record.sort_values(by=measures[0])\n\n x_value = record[measures[0]].values\n y_value = record[measures[1]].values\n reg = LinearRegression().fit(x_value.reshape(-1, 1), y_value.reshape(-1, 1))\n\n sentence = '<span style=\"display:inline;\">' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> and ' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> are linear correlated' \\\n '{}{}{}being grouped by {}.</span>' \\\n .format(measures[0], measures[1],\n (' when ' if subspace != '' else ' in all data'),\n ' and '.join(subspace.rsplit(', ', 1)),\n (' and ' if subspace != '' else ' when '),\n breakdown)\n\n return {\n 'insight_name': insight_name,\n 'x_value': x_value.tolist(),\n 'y_value': y_value.tolist(),\n 'line_y_value': [reg.predict(x_value[0].reshape(-1, 1))[0][0],\n reg.predict(x_value[-1].reshape(-1, 1))[0][0]],\n 'sentence': sentence\n }\n elif insight_name == 'Clustering':\n measures = measure.split(';')\n record = record.groupby(breakdown, as_index=False).agg(\n {breakdown: 'first', measures[0]: 'sum', measures[1]: 'sum'})\n record = record.sort_values(by=measures[0])\n\n x_value = record[measures[0]].values\n y_value = record[measures[1]].values\n X = np.vstack((x_value, y_value)).T\n X_scale = StandardScaler().fit_transform(X)\n db = DBSCAN(eps=0.3, min_samples=5).fit(X_scale)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n\n noise = ''\n if -1 in labels:\n breakdown_value = record[breakdown].values\n cnt = 2\n for i, label in enumerate(labels):\n if label == -1 and cnt > 0:\n noise += str(breakdown_value[i]) + ', '\n cnt -= 1\n noise += 'etc'\n\n sentence = '<span style=\"display:inline;\">' \\\n '<span style=\"color:#f7cd59; display:inline;\">{}</span> and ' \\\n '<span style=\"color:#f7cd59; 
display:inline;\">{}</span> form clusters' \\\n '{}{}{}being grouped by {}{}{}.</span>' \\\n .format(measures[0], measures[1],\n (' when ' if subspace != '' else ' in all data'),\n ' and '.join(subspace.rsplit(', ', 1)),\n (' and ' if subspace != '' else ' when '),\n breakdown,\n (', except for ' if noise != '' else ''),\n noise)\n\n return {\n 'insight_name': insight_name,\n 'x_value': x_value.tolist(),\n 'y_value': y_value.tolist(),\n 'label': labels.tolist(),\n 'sentence': sentence\n }\n else:\n return 0\n\n def get_insight_count_for_subspace_by_name(self, name):\n insight_data, _, _ = self.__get_insight_by_name(name)\n iid_sid_df = insight_data[['iid', 'sid']].groupby('sid')['iid'].apply(list).reset_index(name='iids')\n iid_sid_df['iid_count'] = [len(id_list) for id_list in iid_sid_df['iids']]\n subspace_data, _ = self.__get_subspace_by_name(name)\n iid_sid_df = pd.merge(iid_sid_df, subspace_data, on='sid', how='inner')\n iid_sid_df.sort_values(by=['iid_count'], inplace=True, ascending=False)\n iid_sid_df.reset_index(inplace=True, drop=True)\n res = iid_sid_df.to_dict('index')\n return res\n\n def get_subspace_count_for_record_by_name(self, name):\n sid_cid_df = self.__get_sid_cid_by_name(name)\n record_data = self.__get_record_by_name(name)\n df = pd.merge(record_data, sid_cid_df, on='cid', how='inner')\n df = df.groupby('cid')['sid'].apply(list).reset_index(name='sid')\n df['sid_count'] = [len(id_list) for id_list in df['sid']]\n df.sort_values(by='sid_count', inplace=True, ascending=False)\n df.reset_index(inplace=True, drop=True)\n res = df.to_dict('index')\n return res\n\n @cache.memoize(timeout=50)\n def get_data_info_by_name(self, name):\n global sns_counter\n record_data = self.__get_record_by_name(name)\n insight_data, _, _ = self.__get_insight_by_name(name)\n record_data = record_data.drop(columns=['cid'])\n\n data_info = {\n 'dataName': name,\n 'dataDescription': '',\n 'rowCnt': record_data.shape[0],\n 'colCnt': record_data.shape[1],\n 'colName': [],\n 'colType': [],\n 'colValueType': [],\n 'colValue': []\n }\n\n for value_type in record_data.dtypes.tolist():\n value_type = str(value_type)\n if value_type != 'int64' and value_type != 'float64':\n data_info['colValueType'].append('categorical')\n else:\n data_info['colValueType'].append(value_type.rstrip('64'))\n\n if name == 'carSales1':\n data_info['dataDescription'] = 'Describe vehicle sales.'\n elif name == 'Census':\n data_info['dataDescription'] = 'Describe demographic information.'\n elif name == 'COVID-19':\n data_info['dataDescription'] = 'Describe COVID-19 cases and deaths.'\n elif name == 'Emission':\n data_info['dataDescription'] = 'Describe emissions of harmful gases in the process of energy production.'\n elif name == 'NBA':\n data_info['dataDescription'] = 'Describe NBA players\\' stats since 1947.'\n\n data_info['colName'] = record_data.columns.values.tolist()\n measure_list = insight_data['measure'].unique().tolist()\n for measure in measure_list:\n if ';' in measure:\n measure = measure.split(';')\n measure_list.append(measure[0])\n measure_list.append(measure[1])\n\n for i in range(len(data_info['colName'])):\n col = data_info['colName'][i]\n if col in measure_list:\n data_info['colType'].append('measure')\n else:\n data_info['colType'].append('dimension')\n\n if data_info['colValueType'][i] == 'int' \\\n or data_info['colValueType'][i] == 'float':\n p = sns.kdeplot(record_data[col].values)\n lines = [obj for obj in p.findobj() if str(type(obj)) == \"<class 'matplotlib.lines.Line2D'>\"]\n x, y = 
lines[sns_counter].get_data()[0].tolist(), lines[sns_counter].get_data()[1].tolist()\n data_info['colValue'].append([x, y,\n round(min(x), 2), round(max(x), 2),\n min(y), max(y)])\n sns_counter += 1\n else:\n cnt_dict = record_data[col].value_counts().to_dict()\n value_list = list(cnt_dict.values())\n upper_bound = value_list[0]\n while upper_bound % 5 != 0 or upper_bound % 2 != 0:\n upper_bound += 1\n data_info['colValue'].append([list(cnt_dict), value_list, upper_bound])\n\n return data_info\n\n def get_data_attr_map_by_name(self, name):\n record_data = self.__get_record_by_name(name)\n _, feature_data = self.__get_subspace_by_name(name)\n attr_map = dict()\n for feature in feature_data:\n feature_list = record_data[feature].unique().tolist()\n attr_map[feature] = dict((k, i) for (i, k) in enumerate(feature_list))\n # {'Year': {2007: 0, 2008: 1, 2009: 2, 2010: 3, 2011: 4},\n return attr_map\n\n def get_subspace_range_by_name(self, name):\n insight, _, _ = self.__get_insight_by_name(name)\n sid_list = np.unique(insight['sid'].tolist())\n subspace, feature_data = self.__get_subspace_by_name(name)\n subspace = subspace.loc[subspace['sid'].isin(sid_list)]\n subspace_range = dict()\n for feature in feature_data:\n feature_list = subspace[feature].unique().tolist()\n subspace_range[feature] = feature_list\n return subspace_range\n\n def get_data_feature_attribution_by_name(self, name):\n record_data = self.__get_record_by_name(name)\n # result = {'feature_name': {'value_name' : [start_angle, end_angle]}}\n _, feature_data = self.__get_subspace_by_name(name)\n result = {}\n for feature in feature_data:\n value_count = record_data[feature].value_counts().sort_values(ascending=False)\n value_angle = (value_count / value_count.sum() * 2 * math.pi).tolist()\n start_angle = np.concatenate(([0.0], np.cumsum(value_angle)))\n end_angle = np.cumsum(value_angle)\n feature_res = {str(val): [start_angle[idx], end_angle[idx]] for (idx, val) in enumerate(value_count.keys())}\n result[feature] = feature_res\n # feature_cid_count = {feature: record_data[feature].value_counts().to_dict() for feature in feature_data}\n return result\n\n def get_similar_insight(self, feature, sid, name, breakdown, breakdown_value):\n insight_data, insight_name, insight_type = self.__get_insight_by_name(name)\n subspace_data, feature_data = self.__get_subspace_by_name(name)\n subspace_data = pd.merge(subspace_data, insight_data, on='sid', how='inner')\n\n subspace = subspace_data.loc[subspace_data['sid'] == sid]\n feature_value = subspace[feature].tolist()[0]\n if feature_value == '*':\n if breakdown == feature:\n feature_value = breakdown_value\n else:\n return {\n 'similar_iid': [],\n 'similar_sid': [],\n 'similar_insight_name': []\n }\n\n if ';' in feature_value:\n feature_value = breakdown_value.split(';')\n else:\n feature_value = [feature_value]\n\n similar_iid = []\n similar_sid = []\n similar_insight_name = []\n for value in feature_value:\n if value == '*':\n continue\n similar_subspace = subspace_data.loc[subspace_data[feature] == value]\n similar_iid.extend(similar_subspace['iid'].tolist())\n similar_sid.extend(similar_subspace['sid'].tolist())\n similar_insight_name.extend(similar_subspace['insight'].tolist())\n\n similar_subspace = subspace_data.loc[subspace_data['breakdown_value'] == value]\n similar_iid.extend(similar_subspace['iid'].tolist())\n similar_sid.extend(similar_subspace['sid'].tolist())\n similar_insight_name.extend(similar_subspace['insight'].tolist())\n\n return {\n 'similar_iid': similar_iid,\n 
'similar_sid': similar_sid,\n 'similar_insight_name': similar_insight_name\n }\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"pandas.to_datetime",
"scipy.stats.pearsonr",
"numpy.cumsum",
"sklearn.cluster.DBSCAN",
"numpy.zeros_like",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
tfederico/human_dynamics | [
"ab7ab6aefce5ec33c208091710a37624a1a6ef4a",
"ab7ab6aefce5ec33c208091710a37624a1a6ef4a"
] | [
"src/discriminators.py",
"src/datasets/h36/read_human36m.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n\nclass PoseDiscriminator(object):\n def __init__(self, weight_decay):\n self.vars = []\n self.reuse = False\n self.wd = weight_decay\n\n def get_output(self, poses):\n \"\"\"\n Gets discriminator's predictions for each pose and all poses.\n\n Args:\n poses (Nx23x1x9).\n\n Returns:\n Predictions (Nx[23+1]).\n \"\"\"\n data_format = 'NHWC'\n with tf.variable_scope('D_pose', reuse=self.reuse) as scope:\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n weights_regularizer=slim.l2_regularizer(self.wd)):\n with slim.arg_scope([slim.conv2d], data_format=data_format):\n poses = slim.conv2d(\n inputs=poses,\n num_outputs=32,\n kernel_size=[1, 1],\n reuse=self.reuse,\n scope='D_conv1')\n poses = slim.conv2d(\n inputs=poses,\n num_outputs=32,\n kernel_size=[1, 1],\n reuse=self.reuse,\n scope='D_conv2')\n theta_out = []\n for i in range(0, 23):\n theta_out.append(\n slim.fully_connected(\n inputs=poses[:, i, :, :],\n num_outputs=1,\n activation_fn=None,\n reuse=self.reuse,\n scope='pose_out_j{}'.format(i)))\n theta_out_all = tf.squeeze(tf.stack(theta_out, axis=1))\n\n # Compute joint correlation prior!\n nz_feat = 1024\n poses_all = slim.flatten(poses, scope='vectorize')\n poses_all = slim.fully_connected(\n inputs=poses_all,\n num_outputs=nz_feat,\n reuse=self.reuse,\n scope='D_alljoints_fc1')\n poses_all = slim.fully_connected(\n inputs=poses_all,\n num_outputs=nz_feat,\n reuse=self.reuse,\n scope='D_alljoints_fc2')\n poses_all_out = slim.fully_connected(\n inputs=poses_all,\n num_outputs=1,\n activation_fn=None,\n reuse=self.reuse,\n scope='D_alljoints_out')\n out = tf.concat([theta_out_all, poses_all_out], 1)\n\n if not self.reuse:\n self.update(tf.contrib.framework.get_variables(scope))\n\n return out\n\n def get_vars(self):\n return self.vars\n\n def update(self, vars):\n self.reuse = True\n self.vars.extend(vars)\n",
"\"\"\"\nTakes in original H3.6M, renames and writes frames to file,\ncreate annotations in a single pickle file that the later\nh36_to_tfrecords_video.py can read in.\n\nMosh data is not available.\n\nRequires spacepy & CDF:\n- Get CDF (NASA): https://cdf.gsfc.nasa.gov/html/sw_and_docs.html\n- Follow the instructions in README.install\n- need to run . ~/Downloads/cdf36_4-dist/bin/definitions.B\nfor spacepy to work.\n\nThen run:\npython -m src.datasets.h36.read_human36m\n\nOriginally developed by Federica Bogo, edited by Angjoo Kanazawa\n\"\"\"\n\nfrom glob import glob\nimport os\nfrom os import makedirs, system\nfrom os.path import join, getsize, exists\nimport pickle\nfrom spacepy import pycdf\nimport sys\n\nimport cv2\nimport numpy as np\n\nfrom absl import flags\n\n\nflags.DEFINE_string(\n 'source_dir', '/scratch1/storage/human36m_full_raw',\n 'Root dir of the original Human3.6M dataset unpacked with metadata.xml'\n)\nflags.DEFINE_string('out_dir', '/scratch1/storage/human36m_25fps',\n 'Output directory')\nflags.DEFINE_integer('frame_skip', 2,\n 'subsample factor, 5 corresponds to 10fps, 2=25fps')\n\nFLAGS = flags.FLAGS\n\ncolors = np.random.randint(0, 255, size=(17, 3))\njoint_ids = [0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]\n\n# Mapping from H36M joints to LSP joints (0:13). In this roder:\n_COMMON_JOINT_IDS = np.array([\n 3, # R ankle\n 2, # R knee\n 1, # R hip\n 4, # L hip\n 5, # L knee\n 6, # L ankle\n 16, # R Wrist\n 15, # R Elbow\n 14, # R shoulder\n 11, # L shoulder\n 12, # L Elbow\n 13, # L Wrist\n 8, # Neck top\n 10, # Head top\n])\n\n\ndef plot_points(points_, img, points0_=None, radius=10):\n global colors\n tmp_img = img.copy()\n\n num_colors = len(colors)\n\n points = points_.T if points_.shape[0] == 2 else points_\n points0 = None if points0_ is None else (points0_.T\n if points0_.shape[0] == 2 else\n points0_)\n\n for i, coord in enumerate(points.astype(int)):\n if coord[0] < img.shape[1] and coord[1] < img.shape[0] and coord[0] > 0 and coord[1] > 0:\n cv2.circle(tmp_img,\n tuple(coord), radius, colors[i % num_colors].tolist(),\n -1)\n\n if points0 is not None:\n for i, coord in enumerate(points0.astype(int)):\n if coord[0] < img.shape[1] and coord[1] < img.shape[0] and coord[0] > 0 and coord[1] > 0:\n cv2.circle(tmp_img,\n tuple(coord), radius + 2,\n colors[i % num_colors].tolist(), 2)\n\n return tmp_img\n\n\ndef rotation_matrix(args):\n\n (x, y, z) = args\n\n X = np.vstack([[1, 0, 0], [0, np.cos(x), -np.sin(x)],\n [0, np.sin(x), np.cos(x)]])\n Y = np.vstack([[np.cos(y), 0, np.sin(y)], [0, 1, 0],\n [-np.sin(y), 0, np.cos(y)]])\n Z = np.vstack([[np.cos(z), -np.sin(z), 0], [np.sin(z),\n np.cos(z), 0], [0, 0, 1]])\n\n return (X.dot(Y)).dot(Z)\n\n\ndef project_point_radial(P, R, t, f, c, all_k):\n k = np.array(list(all_k[:2]) + list(all_k[-1:]))\n p = all_k[2:4]\n\n N = P.shape[0]\n\n X = R.dot(P.T - np.tile(t.reshape((-1, 1)), (1, len(P))))\n\n XX = X[:2, :] / np.tile(X[2, :], (2, 1))\n\n r2 = XX[0, :]**2 + XX[1, :]**2\n radial = 1 + np.sum(\n np.tile(k.reshape((-1, 1)), (1, N)) * np.vstack((r2, r2**2, r2**3)),\n axis=0)\n tan = p[0] * XX[1, :] + p[1] * XX[0, :]\n\n XXX = XX * np.tile(radial + tan, (2, 1)) + p[::-1].reshape(\n (-1, 1)).dot(r2.reshape((1, -1)))\n\n proj = (np.tile(f, (N, 1)) * XXX.T) + np.tile(c, (N, 1))\n return proj\n\n\ndef read_cam_parameters(xml_path, sbj_id, cam_id):\n import xml.etree.ElementTree\n\n # use the notation from 0 -- more practical to access array\n sbj_id = sbj_id - 1\n cam_id = cam_id - 1\n\n n_sbjs = 11\n n_cams = 
4\n\n root = xml.etree.ElementTree.parse(xml_path).getroot()\n\n for child in root:\n if child.tag == 'w0':\n all_cameras = child.text\n tokens = all_cameras.split(' ')\n tokens[0] = tokens[0].replace('[', '')\n tokens[-1] = tokens[-1].replace(']', '')\n\n start = (cam_id * n_sbjs) * 6 + sbj_id * 6\n extrs = tokens[start:start + 6]\n\n start = (n_cams * n_sbjs * 6) + cam_id * 9\n intrs = tokens[start:start + 9]\n\n rot = rotation_matrix(np.array(extrs[:3], dtype=float))\n\n rt = rot\n t = np.array(extrs[3:], dtype=float)\n\n f = np.array(intrs[:2], dtype=float)\n c = np.array(intrs[2:4], dtype=float)\n\n distortion = np.array(intrs[4:], dtype=float)\n\n k = np.hstack((distortion[:2], distortion[3:5], distortion[2:3]))\n\n return (rt, t, f, c, k)\n\n\ndef read_action_name(xml_path, sbj_id, actionno, trialno):\n import xml.etree.ElementTree\n\n root = xml.etree.ElementTree.parse(xml_path).getroot()\n myactionno = actionno + 1 # otherwise we take ALL into account\n for child in root:\n if child.tag == 'mapping':\n for tr in child.getchildren():\n if tr.getchildren()[0].text == str(myactionno):\n if tr.getchildren()[1].text == str(trialno):\n return tr.getchildren()[2 + sbj_id - 1].text\n\n\ndef read_fua_results(path, sbj_id, trial_id, cam_id):\n from scipy.io import loadmat\n\n # sbj_id already follows the standard convention\n choose_id = sbj_id * (2 * 4) + (trial_id - 1) * 4 + (cam_id - 1)\n print(choose_id)\n res = loadmat(path)\n joints = res['Pred'].squeeze()\n return [j.reshape((-1, 3)) for j in joints[choose_id]]\n\n\ndef get_num_frames(path):\n vid = cv2.VideoCapture(path)\n return int(vid.get(cv2.CAP_PROP_FRAME_COUNT))\n\n\ndef read_frames(path, n_frames=None):\n vid = cv2.VideoCapture(path)\n\n imgs = []\n\n if n_frames is None:\n n_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))\n\n for _ in range(n_frames):\n success, img = vid.read()\n imgs.append(img)\n\n return imgs\n\n\ndef read_silhouettes(path, n_frames=None):\n import h5py\n f = h5py.File(path, 'r')\n refs = f['Masks']\n masks = []\n\n if n_frames is None:\n n_frames = len(refs)\n\n for i in range(n_frames):\n mask = np.array(f[refs[i, 0]], dtype=bool)\n mask = np.fliplr(np.rot90(mask, 3))\n masks.append(mask)\n return masks\n\n\ndef read_poses(path, n_frames=None, is_3d=False, joint_ids=range(32)):\n data = pycdf.CDF(path)\n\n # <CDF:\n # Pose: CDF_FLOAT [1, N, 64]\n # >\n poses = data['Pose'][...][0]\n\n if n_frames is None:\n n_frames = poses.shape[0]\n\n dim = 2 if not is_3d else 3\n packed_poses = [\n poses[i].reshape((-1, dim))[joint_ids] for i in range(n_frames)\n ]\n\n return packed_poses\n\n\ndef compute_fua_joints(joints, new_root_joint, in_meter=False):\n new_joints = np.zeros_like(joints)\n\n new_joints[0, :] = new_root_joint\n\n for i, offset in enumerate(joints[1:]):\n new_joints[i + 1] = new_root_joint + offset\n\n if in_meter:\n new_joints = np.vstack([j / 1000. 
for j in new_joints])\n\n return new_joints\n\n\ndef crop_image(silhs):\n res = np.asarray(silhs).any(axis=0)\n cnts, hier = cv2.findContours(\n np.uint8(res) * 255, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n \"\"\"\n checks = []\n for cnt in cnts:\n kk = np.zeros((1000, 1000, 3), dtype=np.uint8)\n hull = cv2.convexHull(cnt)\n cv2.drawContours(kk, [cnt], 0, (255,255,255), -1)\n checks.append(kk)\n \"\"\"\n\n max_id = 0\n max_length = len(cnts[0])\n for i in range(1, len(cnts)):\n if len(cnts[i]) > max_length:\n max_id = i\n max_length = len(cnts[i])\n\n (x, y, w, h) = cv2.boundingRect(cnts[max_id])\n return (x, y, w, h)\n\n\ndef crop_and_clean_mask_to_int(mask, x, y, w, h):\n mask = np.uint8(mask[y:y + h, x:x + w]) * 255\n\n # TODO: put this into a function (it's used above as well)\n cnts, hier = cv2.findContours(mask.copy(), cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n\n max_id = 0\n max_length = len(cnts[0])\n for i in range(1, len(cnts)):\n if len(cnts[i]) > max_length:\n max_id = i\n max_length = len(cnts[i])\n\n tmp_mask = np.dstack((mask, mask, mask))\n for i, cnt in enumerate(cnts):\n if i != max_id:\n cv2.drawContours(tmp_mask, [cnt], 0, (0, 0, 0), -1)\n return cv2.split(tmp_mask)[0]\n\n\ndef main(raw_data_root, output_root, frame_skip):\n xml_path = join(raw_data_root, 'metadata.xml')\n\n # <actionno> for each different action class:\n # 1) Directions, 2) Discussion, 3) Eating, 4) Greeting,\n # 5) Phone Talk, 6) Posing, 7) Buying, 8) Sitting,\n # 9) Sitting Down, 10) Smoking, 11) Taking Photo, 12) Waiting,\n # 13) Walking, 14) Walking Dog, 15) Walking Pair\n action_names = [\n 'Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Posing',\n 'Purchases', 'Sitting', 'SittingDown', 'Smoking', 'TakingPhoto',\n 'Waiting', 'Walking', 'WakingDog', 'WalkTogether'\n ]\n\n n_frames = None\n\n sub_ids = [1, 6, 7, 8, 5, 9, 11]\n\n # Action, camera, suject id starts from 1 Matlab convention\n cam_ids = range(1, 5)\n trial_ids = [1, 2]\n action_ids = range(1, 16)\n import itertools\n all_pairs = [\n p\n for p in list(\n itertools.product(*[sub_ids, action_ids, trial_ids, cam_ids]))\n ]\n\n def has_num(str):\n return any(i.isdigit() for i in str)\n\n for (sbj_id, action_id, trial_id, cam_id) in all_pairs:\n seq_name = read_action_name(xml_path, sbj_id, action_id, trial_id)\n\n save_seq_name = '%s_%d' % (action_names[action_id - 1], trial_id - 1)\n\n output_base = join(output_root, 'S%s' % str(sbj_id), save_seq_name)\n output_dir = join(output_base, 'cam_%01d' % (cam_id - 1))\n\n print('Sub: {}, action {}, trial {}, cam {}'.format(\n sbj_id, action_id, trial_id, cam_id))\n print('Orig seq_name %s, new_seq_name %s' % (seq_name, save_seq_name))\n print('Saving to {}'.format(output_dir))\n\n if sbj_id == 11 and 'Phoning 2' in seq_name:\n print('Skipping.. 
{}'.format(output_dir))\n continue\n\n if not exists(output_dir):\n makedirs(output_dir)\n\n # Save orig name\n name_path = join(output_base, 'orig_seq_name.txt')\n if not exists(name_path):\n with open(name_path, \"w\") as f:\n f.write(seq_name)\n\n video_paths = sorted(\n glob(\n join(raw_data_root, 'S%s' % str(sbj_id), 'Videos',\n '%s.*mp4' % seq_name)))\n pose2d_paths = sorted(\n glob(\n join(raw_data_root, 'S%s' % str(sbj_id),\n 'MyPoseFeatures/D2_Positions', '%s.*cdf' % seq_name)))\n pose3d_paths = sorted(\n glob(\n join(raw_data_root, 'S%s' % str(sbj_id),\n 'MyPoseFeatures/D3_Positions_mono',\n '%s.*cdf' % seq_name)))\n\n (rot, t, flen, c, k) = read_cam_parameters(xml_path, sbj_id, cam_id)\n cam_path = join(output_dir, 'camera_wext.pkl')\n print('Writing %s' % cam_path)\n if not exists(cam_path):\n with open(cam_path, 'wb') as fw:\n pickle.dump({'f': flen, 'c': c, 'k': k, 'rt': rot, 't': t}, fw)\n\n # AJ: frames\n poses2d = read_poses(pose2d_paths[cam_id - 1], joint_ids=joint_ids)\n poses3d = read_poses(\n pose3d_paths[cam_id - 1], is_3d=True, joint_ids=joint_ids)\n\n # Check if we're done here.\n want_length = len(poses2d[::frame_skip])\n written_images = glob(join(output_dir, '*.png'))\n num_imgs_written = len(written_images)\n if want_length == num_imgs_written:\n is_done = True\n for fname in written_images:\n if getsize(fname) == 0:\n is_done = False\n break\n if is_done:\n print('Done!')\n continue\n\n # Write images..\n print('reading images...')\n imgs = read_frames(video_paths[cam_id - 1], n_frames=n_frames)\n # For some reason, len(poses2d) < len(imgs) by few frames sometimes\n # len(poses2d) == len(poses3d) always.\n # clip the images according to them..\n imgs = imgs[:len(poses2d)]\n\n # Subsample\n imgs = imgs[::frame_skip]\n poses2d = poses2d[::frame_skip]\n poses3d = poses3d[::frame_skip]\n gt_path = join(output_dir, 'gt_poses.pkl')\n if not exists(gt_path):\n with open(gt_path, 'wb') as fgt:\n pickle.dump({'2d': poses2d, '3d': poses3d}, fgt)\n\n # Link the mp4 with the new name to the old name.\n # video_paths[cam_id - 1]\n action_name = action_names[action_id - 1]\n out_video_name = 'S{}_{}_{}_cam_{}.mp4'.format(\n sbj_id, action_name, trial_id - 1, cam_id - 1)\n out_video_path = join(output_dir, out_video_name)\n if not exists(out_video_path):\n orig_vid_path = video_paths[cam_id - 1].replace(\" \", \"\\ \")\n cmd = 'ln -s {} {}'.format(orig_vid_path, out_video_path)\n ret = system(cmd)\n if ret > 0:\n print('something went wrong!')\n import ipdb\n ipdb.set_trace()\n\n for i, (img) in enumerate(imgs):\n if exists(join(output_dir, 'frame%04d.png' % i)):\n if getsize(join(output_dir, 'frame%04d.png' % i)) > 0:\n continue\n if i % 50 == 0:\n import matplotlib.pyplot as plt\n plt.ion()\n plt.imshow(plot_points(poses2d[i], img)[:, :, ::-1])\n plt.draw()\n plt.pause(1e-3)\n cv2.imwrite(join(output_dir, 'frame%04d.png' % i), img)\n if i % 50 == 0:\n print(join(output_dir, 'frame%04d.png' % i))\n\n\nif __name__ == '__main__':\n FLAGS(sys.argv)\n frame_skip = FLAGS.frame_skip\n raw_data_root = FLAGS.source_dir\n output_root = FLAGS.out_dir\n\n main(raw_data_root, output_root, frame_skip)\n \n"
] | [
[
"tensorflow.concat",
"tensorflow.contrib.framework.get_variables",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.stack",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.contrib.slim.flatten",
"tensorflow.contrib.slim.conv2d",
"tensorflow.variable_scope"
],
[
"numpy.hstack",
"numpy.rot90",
"numpy.asarray",
"numpy.uint8",
"scipy.io.loadmat",
"numpy.tile",
"numpy.dstack",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.draw",
"numpy.zeros_like",
"numpy.array",
"matplotlib.pyplot.pause",
"numpy.vstack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mohanadhammad/dlnd-sagemaker-deployment | [
"ad1da6fc928e351f6782d1a9c180e0910b67b702"
] | [
"Project/train/train.py"
] | [
"import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n optimizer - The optimizer to use during training.\n loss_fn - The loss function used for training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n \n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n optimizer.zero_grad()\n prediction = model(batch_X)\n loss = loss_fn(prediction, batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n\n\nif __name__ == '__main__':\n # All of the model parameters and training parameters are sent as arguments when the script\n # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n\t# Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n\t# Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n"
] | [
[
"torch.load",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuhongsun96/PySyft | [
"36f624ec47336d58fb73504c0817aa988a678626",
"36f624ec47336d58fb73504c0817aa988a678626"
] | [
"packages/syft/src/syft/core/node/common/client.py",
"packages/syft/src/syft/core/tensor/autodp/single_entity_phi.py"
] | [
"# stdlib\nimport sys\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n# third party\nfrom google.protobuf.reflection import GeneratedProtocolMessageType\nfrom nacl.signing import SigningKey\nfrom nacl.signing import VerifyKey\nimport pandas as pd\n\n# syft absolute\nimport syft as sy\n\n# relative\nfrom ....logger import critical\nfrom ....logger import debug\nfrom ....logger import error\nfrom ....logger import info\nfrom ....logger import traceback_and_raise\nfrom ....proto.core.node.common.client_pb2 import Client as Client_PB\nfrom ....proto.core.node.common.metadata_pb2 import Metadata as Metadata_PB\nfrom ....util import get_fully_qualified_name\nfrom ...common.message import EventualSyftMessageWithoutReply\nfrom ...common.message import ImmediateSyftMessageWithReply\nfrom ...common.message import ImmediateSyftMessageWithoutReply\nfrom ...common.message import SignedEventualSyftMessageWithoutReply\nfrom ...common.message import SignedImmediateSyftMessageWithReply\nfrom ...common.message import SignedImmediateSyftMessageWithoutReply\nfrom ...common.message import SyftMessage\nfrom ...common.serde.serializable import serializable\nfrom ...common.uid import UID\nfrom ...io.location import Location\nfrom ...io.location import SpecificLocation\nfrom ...io.route import Route\nfrom ...pointer.garbage_collection import GarbageCollection\nfrom ...pointer.garbage_collection import gc_get_default_strategy\nfrom ...pointer.pointer import Pointer\nfrom ..abstract.node import AbstractNodeClient\nfrom .action.exception_action import ExceptionMessage\nfrom .node_service.object_search.obj_search_service import ObjectSearchMessage\n\n\n@serializable()\nclass Client(AbstractNodeClient):\n \"\"\"Client is an incredibly powerful abstraction in Syft. We assume that,\n no matter where a client is, it can figure out how to communicate with\n the Node it is supposed to point to. 
If I send you a client I have\n with all of the metadata in it, you should have all the information\n you need to know to interact with a node (although you might not\n have permissions - clients should not store private keys).\"\"\"\n\n def __init__(\n self,\n name: Optional[str],\n routes: List[Route],\n network: Optional[Location] = None,\n domain: Optional[Location] = None,\n device: Optional[Location] = None,\n vm: Optional[Location] = None,\n signing_key: Optional[SigningKey] = None,\n verify_key: Optional[VerifyKey] = None,\n ):\n name = f\"{name}\" if name is not None else None\n super().__init__(\n name=name, network=network, domain=domain, device=device, vm=vm\n )\n\n self.routes = routes\n self.default_route_index = 0\n\n gc_strategy_name = gc_get_default_strategy()\n self.gc = GarbageCollection(gc_strategy_name)\n\n # create a signing key if one isn't provided\n if signing_key is None:\n self.signing_key = SigningKey.generate()\n else:\n self.signing_key = signing_key\n\n # if verify key isn't provided, get verify key from signing key\n if verify_key is None:\n self.verify_key = self.signing_key.verify_key\n else:\n self.verify_key = verify_key\n\n self.install_supported_frameworks()\n\n self.store = StoreClient(client=self)\n\n def obj_exists(self, obj_id: UID) -> bool:\n raise NotImplementedError\n\n @property\n def icon(self) -> str:\n icon = \"📡\"\n sub = []\n if self.vm is not None:\n sub.append(\"🍰\")\n if self.device is not None:\n sub.append(\"📱\")\n if self.domain is not None:\n sub.append(\"🏰\")\n if self.network is not None:\n sub.append(\"🔗\")\n\n if len(sub) > 0:\n icon = f\"{icon} [\"\n for s in sub:\n icon += s\n icon += \"]\"\n return icon\n\n @staticmethod\n def deserialize_client_metadata_from_node(\n metadata: Metadata_PB,\n ) -> Tuple[SpecificLocation, str, UID]:\n # string of bytes\n meta = sy.deserialize(blob=metadata)\n return meta.node, meta.name, meta.id\n\n def install_supported_frameworks(self) -> None:\n self.lib_ast = sy.lib.create_lib_ast(client=self)\n\n # first time we want to register for future updates\n self.lib_ast.register_updates(self)\n\n if self.lib_ast is not None:\n for attr_name, attr in self.lib_ast.attrs.items():\n setattr(self, attr_name, attr)\n\n # shortcut syft.lib.python to just python\n if hasattr(self.lib_ast, \"syft\"):\n try:\n lib_attr = getattr(self.lib_ast.syft, \"lib\", None)\n\n if lib_attr is not None:\n python_attr = getattr(lib_attr, \"python\", None)\n setattr(self, \"python\", python_attr)\n python_attr = getattr(lib_attr, \"adp\", None)\n setattr(self, \"adp\", python_attr)\n\n except Exception as e:\n critical(f\"Failed to set python attribute on client. 
{e}\")\n\n def configure(self, **kwargs: Any) -> Any:\n # relative\n from .node_service.node_setup.node_setup_messages import UpdateSetupMessage\n\n if \"daa_document\" in kwargs.keys():\n kwargs[\"daa_document\"] = open(kwargs[\"daa_document\"], \"rb\").read()\n else:\n kwargs[\"daa_document\"] = b\"\"\n response = self._perform_grid_request( # type: ignore\n grid_msg=UpdateSetupMessage, content=kwargs\n ).content\n info(response)\n\n @property\n def settings(self, **kwargs: Any) -> Dict[Any, Any]: # type: ignore\n # relative\n from .node_service.node_setup.node_setup_messages import GetSetUpMessage\n\n return self._perform_grid_request( # type: ignore\n grid_msg=GetSetUpMessage, content=kwargs\n ).content # type : ignore\n\n def join_network(\n self,\n client: Optional[AbstractNodeClient] = None,\n host_or_ip: Optional[str] = None,\n ) -> None:\n # this asks for a VPN key so it must be on a public interface hence the\n # client or a public host_or_ip\n try:\n if client is None and host_or_ip is None:\n raise ValueError(\n \"join_network requires a Client object or host_or_ip string\"\n )\n if client is not None:\n # connection.host has a http protocol\n connection_host = client.routes[0].connection.host # type: ignore\n parts = connection_host.split(\"://\")\n host_or_ip = parts[1]\n # if we are using localhost to connect we need to change to docker-host\n # so that the domain container can connect to the host not itself\n host_or_ip = str(host_or_ip).replace(\"localhost\", \"docker-host\")\n return self.vpn.join_network(host_or_ip=str(host_or_ip)) # type: ignore\n except Exception as e:\n print(f\"Failed to join network with {host_or_ip}. {e}\")\n\n @property\n def id(self) -> UID:\n \"\"\"This client points to an node, this returns the id of that node.\"\"\"\n traceback_and_raise(NotImplementedError)\n\n # TODO fix the msg type but currently tensor needs SyftMessage\n\n def send_immediate_msg_with_reply(\n self,\n msg: Union[\n SignedImmediateSyftMessageWithReply,\n ImmediateSyftMessageWithReply,\n Any, # TEMPORARY until we switch everything to NodeRunnableMessage types.\n ],\n route_index: int = 0,\n ) -> SyftMessage:\n\n # relative\n from .node_service.simple.simple_messages import NodeRunnableMessageWithReply\n\n # TEMPORARY: if message is instance of NodeRunnableMessageWithReply then we need to wrap it in a SimpleMessage\n if isinstance(msg, NodeRunnableMessageWithReply):\n msg = msg.prepare(address=self.address, reply_to=self.address)\n\n route_index = route_index or self.default_route_index\n\n if isinstance(msg, ImmediateSyftMessageWithReply):\n output = (\n f\"> {self.pprint} Signing {msg.pprint} with \"\n + f\"{self.key_emoji(key=self.signing_key.verify_key)}\"\n )\n debug(output)\n msg = msg.sign(signing_key=self.signing_key)\n\n response = self.routes[route_index].send_immediate_msg_with_reply(msg=msg)\n if response.is_valid:\n # check if we have an ExceptionMessage to trigger a local exception\n # from a remote exception that we caused\n if isinstance(response.message, ExceptionMessage):\n exception_msg = response.message\n exception = exception_msg.exception_type(exception_msg.exception_msg)\n error(str(exception))\n traceback_and_raise(exception)\n else:\n return response.message\n\n traceback_and_raise(\n Exception(\"Response was signed by a fake key or was corrupted in transit.\")\n )\n\n # TODO fix the msg type but currently tensor needs SyftMessage\n\n def send_immediate_msg_without_reply(\n self,\n msg: Union[\n SignedImmediateSyftMessageWithoutReply, 
ImmediateSyftMessageWithoutReply\n ],\n route_index: int = 0,\n ) -> None:\n route_index = route_index or self.default_route_index\n\n if isinstance(msg, ImmediateSyftMessageWithoutReply):\n output = (\n f\"> {self.pprint} Signing {msg.pprint} with \"\n + f\"{self.key_emoji(key=self.signing_key.verify_key)}\"\n )\n debug(output)\n msg = msg.sign(signing_key=self.signing_key)\n debug(f\"> Sending {msg.pprint} {self.pprint} ➡️ {msg.address.pprint}\")\n self.routes[route_index].send_immediate_msg_without_reply(msg=msg)\n\n def send_eventual_msg_without_reply(\n self, msg: EventualSyftMessageWithoutReply, route_index: int = 0\n ) -> None:\n route_index = route_index or self.default_route_index\n output = (\n f\"> {self.pprint} Signing {msg.pprint} with \"\n + f\"{self.key_emoji(key=self.signing_key.verify_key)}\"\n )\n debug(output)\n signed_msg: SignedEventualSyftMessageWithoutReply = msg.sign(\n signing_key=self.signing_key\n )\n\n self.routes[route_index].send_eventual_msg_without_reply(msg=signed_msg)\n\n def __repr__(self) -> str:\n return f\"<Client pointing to node with id:{self.id}>\"\n\n def register_route(self, route: Route) -> None:\n self.routes.append(route)\n\n def set_default_route(self, route_index: int) -> None:\n self.default_route = route_index\n\n def _object2proto(self) -> Client_PB:\n client_pb = Client_PB(\n obj_type=get_fully_qualified_name(obj=self),\n id=sy.serialize(self.id),\n name=self.name,\n routes=[sy.serialize(route) for route in self.routes],\n network=self.network._object2proto() if self.network else None,\n domain=self.domain._object2proto() if self.domain else None,\n device=self.device._object2proto() if self.device else None,\n vm=self.vm._object2proto() if self.vm else None,\n )\n return client_pb\n\n @staticmethod\n def _proto2object(proto: Client_PB) -> \"Client\":\n module_parts = proto.obj_type.split(\".\")\n klass = module_parts.pop()\n obj_type = getattr(sys.modules[\".\".join(module_parts)], klass)\n\n obj = obj_type(\n name=proto.name,\n routes=[sy.deserialize(route) for route in proto.routes],\n network=sy.deserialize(proto.network)\n if proto.HasField(\"network\")\n else None,\n domain=sy.deserialize(proto.domain) if proto.HasField(\"domain\") else None,\n device=sy.deserialize(proto.device) if proto.HasField(\"device\") else None,\n vm=sy.deserialize(proto.vm) if proto.HasField(\"vm\") else None,\n )\n\n if type(obj) != obj_type:\n traceback_and_raise(\n TypeError(\n f\"Deserializing Client. Expected type {obj_type}. Got {type(obj)}\"\n )\n )\n\n return obj\n\n @staticmethod\n def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return Client_PB\n\n @property\n def keys(self) -> str:\n verify = (\n self.key_emoji(key=self.signing_key.verify_key)\n if self.signing_key is not None\n else \"🚫\"\n )\n keys = f\"🔑 {verify}\"\n\n return keys\n\n def __hash__(self) -> Any:\n return hash(self.id)\n\n\nclass StoreClient:\n def __init__(self, client: Client) -> None:\n self.client = client\n\n @property\n def store(self) -> List[Pointer]:\n msg = ObjectSearchMessage(\n address=self.client.address, reply_to=self.client.address\n )\n\n results = getattr(\n self.client.send_immediate_msg_with_reply(msg=msg), \"results\", None\n )\n if results is None:\n traceback_and_raise(ValueError(\"TODO\"))\n\n # This is because of a current limitation in Pointer where we cannot\n # serialize a client object. 
TODO: Fix limitation in Pointer so that we don't need this.\n for result in results:\n result.gc_enabled = False\n result.client = self.client\n\n return results\n\n def __len__(self) -> int:\n \"\"\"Return the number of items in the object store we're allowed to know about\"\"\"\n return len(self.store)\n\n def __iter__(self) -> Iterator[Any]:\n return self.store.__iter__()\n\n def __getitem__(self, key: Union[str, int]) -> Pointer:\n if isinstance(key, str):\n matches = 0\n match_obj: Optional[Pointer] = None\n\n for obj in self.store:\n if key in obj.tags:\n matches += 1\n match_obj = obj\n if matches == 1 and match_obj is not None:\n return match_obj\n elif matches > 1:\n traceback_and_raise(KeyError(\"More than one item with tag:\" + str(key)))\n else:\n # If key does not math with any tags, we then try to match it with id string.\n # But we only do this if len(key)>=5, because if key is too short, for example\n # if key=\"a\", there are chances of mismatch it with id string, and I don't\n # think the user pass a key such short as part of id string.\n if len(key) >= 5:\n for obj in self.store:\n if key in str(obj.id_at_location.value).replace(\"-\", \"\"):\n return obj\n else:\n traceback_and_raise(\n KeyError(\n f\"No such item found for tag: {key}, and we \"\n + \"don't consider it as part of id string because its too short.\"\n )\n )\n\n traceback_and_raise(KeyError(\"No such item found for id:\" + str(key)))\n if isinstance(key, int):\n return self.store[key]\n else:\n traceback_and_raise(KeyError(\"Please pass in a string or int key\"))\n\n def __repr__(self) -> str:\n return repr(self.store)\n\n @property\n def pandas(self) -> pd.DataFrame:\n obj_lines: List[Dict[str, Any]] = list()\n for obj in self.store:\n obj_lines.append(\n {\n \"ID\": obj.id_at_location,\n \"Tags\": obj.tags,\n \"Description\": obj.description,\n \"object_type\": obj.object_type,\n }\n )\n return pd.DataFrame(obj_lines)\n\n def _repr_html_(self) -> str:\n return self.pandas._repr_html_()\n",
"# future\nfrom __future__ import annotations\n\n# stdlib\nimport operator\nimport typing\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple as TypeTuple\nfrom typing import Union\n\n# third party\nfrom google.protobuf.reflection import GeneratedProtocolMessageType\nfrom nacl.signing import VerifyKey\nimport numpy as np\nimport numpy.typing as npt\nimport torch\n\n# relative\nfrom .... import lib\nfrom ....ast.klass import pointerize_args_and_kwargs\nfrom ....proto.core.tensor.single_entity_phi_tensor_pb2 import (\n TensorWrappedSingleEntityPhiTensorPointer as TensorWrappedSingleEntityPhiTensorPointer_PB,\n)\nfrom ....util import inherit_tags\nfrom ...adp.entity import Entity\nfrom ...adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager\nfrom ...common.serde.deserialize import _deserialize as deserialize\nfrom ...common.serde.serializable import serializable\nfrom ...common.serde.serialize import _serialize as serialize\nfrom ...common.uid import UID\nfrom ...node.abstract.node import AbstractNodeClient\nfrom ...node.common.action.run_class_method_action import RunClassMethodAction\nfrom ...pointer.pointer import Pointer\nfrom ..ancestors import AutogradTensorAncestor\nfrom ..broadcastable import is_broadcastable\nfrom ..passthrough import AcceptableSimpleType # type: ignore\nfrom ..passthrough import PassthroughTensor # type: ignore\nfrom ..passthrough import implements # type: ignore\nfrom ..passthrough import is_acceptable_simple_type # type: ignore\nfrom ..smpc import utils\nfrom ..smpc.mpc_tensor import MPCTensor\nfrom ..smpc.share_tensor import ShareTensor\nfrom ..smpc.utils import TYPE_TO_RING_SIZE\nfrom ..tensor import Tensor\nfrom ..types import SupportedChainType # type: ignore\nfrom ..util import inputs2child # type: ignore\nfrom .adp_tensor import ADPTensor\nfrom .dp_tensor_converter import convert_to_gamma_tensor\nfrom .initial_gamma import InitialGammaTensor\nfrom .initial_gamma import IntermediateGammaTensor\n\n\n@serializable()\nclass TensorWrappedSingleEntityPhiTensorPointer(Pointer):\n \"\"\"\n This tensor represents a pointer to a very specific tensor chain. Eventually we'll have some sort\n of more intelligent/general representation for pointers to chains of objects, but for now this is\n what we're going with. 
This pointer represents all the arguments of the objects in the chain as its\n attributes.\n\n Thus, this class has two groups of attributes: one set are the attributes for SingeEntityPhiTensor:\n child: SupportedChainType,\n entity: Entity,\n min_vals: np.ndarray,\n max_vals: np.ndarray,\n scalar_manager: Optional[VirtualMachinePrivateScalarManager] = None,\n\n And the others are for initializing a Pointer object:\n client=self.client,\n id_at_location=self.id_at_location,\n object_type=self.object_type,\n tags=self.tags,\n description=self.description,\n \"\"\"\n\n __name__ = \"TensorWrappedSingleEntityPhiTensorPointer\"\n __module__ = \"syft.core.tensor.autodp.single_entity_phi\"\n\n def __init__(\n self,\n entity: Entity,\n min_vals: np.typing.ArrayLike,\n max_vals: np.typing.ArrayLike,\n client: Any,\n scalar_manager: Optional[VirtualMachinePrivateScalarManager] = None,\n id_at_location: Optional[UID] = None,\n object_type: str = \"\",\n tags: Optional[List[str]] = None,\n description: str = \"\",\n public_shape: Optional[TypeTuple[int, ...]] = None,\n public_dtype: Optional[np.dtype] = None,\n ):\n\n super().__init__(\n client=client,\n id_at_location=id_at_location,\n object_type=object_type,\n tags=tags,\n description=description,\n )\n\n self.min_vals = min_vals\n self.max_vals = max_vals\n self.entity = entity\n self.scalar_manager = scalar_manager\n self.public_shape = public_shape\n self.public_dtype = public_dtype\n\n def share(self, *parties: TypeTuple[AbstractNodeClient, ...]) -> MPCTensor:\n all_parties = list(parties) + [self.client]\n ring_size = TYPE_TO_RING_SIZE.get(self.public_dtype, None)\n self_mpc = MPCTensor(\n secret=self,\n shape=self.public_shape,\n ring_size=ring_size,\n parties=all_parties,\n )\n return self_mpc\n\n def _apply_tensor_op(self, other: Any, op_str: str) -> Any:\n # we want to get the return type which matches the attr_path_and_name\n # so we ask lib_ast for the return type name that matches out\n # attr_path_and_name and then use that to get the actual pointer klass\n # then set the result to that pointer klass\n\n attr_path_and_name = f\"syft.core.tensor.tensor.Tensor.__{op_str}__\"\n\n result = TensorWrappedSingleEntityPhiTensorPointer(\n entity=self.entity,\n min_vals=self.min_vals,\n max_vals=self.max_vals,\n client=self.client,\n scalar_manager=self.scalar_manager,\n )\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=[other], kwargs={})\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args,\n kwargs=downcast_kwargs,\n client=self.client,\n gc_enabled=False,\n )\n\n cmd = RunClassMethodAction(\n path=attr_path_and_name,\n _self=self,\n args=pointer_args,\n kwargs=pointer_kwargs,\n id_at_location=result_id_at_location,\n address=self.client.address,\n )\n self.client.send_immediate_msg_without_reply(msg=cmd)\n\n inherit_tags(\n attr_path_and_name=attr_path_and_name,\n result=result,\n self_obj=self,\n args=[other],\n kwargs={},\n )\n\n result_public_shape = None\n\n if isinstance(other, TensorWrappedSingleEntityPhiTensorPointer):\n other_shape = other.public_shape\n other_dtype = other.public_dtype\n elif isinstance(other, (int, float)):\n other_shape = (1,)\n other_dtype = np.int32\n 
elif isinstance(other, bool):\n other_shape = (1,)\n other_dtype = np.dtype(\"bool\")\n elif isinstance(other, np.ndarray):\n other_shape = other.shape\n other_dtype = other.dtype\n else:\n raise ValueError(\n f\"Invalid Type for TensorWrappedSingleEntityPhiTensorPointer:{type(other)}\"\n )\n\n if self.public_shape is not None and other_shape is not None:\n result_public_shape = utils.get_shape(\n op_str, self.public_shape, other_shape\n )\n\n if self.public_dtype is not None and other_dtype is not None:\n if self.public_dtype != other_dtype:\n raise ValueError(\n f\"Type for self and other do not match ({self.public_dtype} vs {other_dtype})\"\n )\n result_public_dtype = self.public_dtype\n\n result.public_shape = result_public_shape\n result.public_dtype = result_public_dtype\n\n return result\n\n @staticmethod\n def _apply_op(\n self: TensorWrappedSingleEntityPhiTensorPointer,\n other: Union[\n TensorWrappedSingleEntityPhiTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n op_str: str,\n ) -> Union[MPCTensor, TensorWrappedSingleEntityPhiTensorPointer]:\n \"\"\"Performs the operation based on op_str\n\n Args:\n other (Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor,int,float,np.ndarray]): second operand.\n\n Returns:\n Tuple[MPCTensor,Union[MPCTensor,int,float,np.ndarray]] : Result of the operation\n \"\"\"\n op = getattr(operator, op_str)\n\n if (\n isinstance(other, TensorWrappedSingleEntityPhiTensorPointer)\n and self.client != other.client\n ):\n\n parties = [self.client, other.client]\n\n self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)\n other_mpc = MPCTensor(\n secret=other, shape=other.public_shape, parties=parties\n )\n\n return op(self_mpc, other_mpc)\n\n elif isinstance(other, MPCTensor):\n\n return op(other, self)\n\n return self._apply_tensor_op(other=other, op_str=op_str)\n\n def __add__(\n self,\n other: Union[\n TensorWrappedSingleEntityPhiTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedSingleEntityPhiTensorPointer, MPCTensor]:\n \"\"\"Apply the \"add\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedSingleEntityPhiTensorPointer._apply_op(self, other, \"add\")\n\n def __sub__(\n self,\n other: Union[\n TensorWrappedSingleEntityPhiTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedSingleEntityPhiTensorPointer, MPCTensor]:\n \"\"\"Apply the \"sub\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedSingleEntityPhiTensorPointer._apply_op(self, other, \"sub\")\n\n def __mul__(\n self,\n other: Union[\n TensorWrappedSingleEntityPhiTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedSingleEntityPhiTensorPointer, MPCTensor]:\n \"\"\"Apply the \"mul\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedSingleEntityPhiTensorPointer._apply_op(self, other, 
\"mul\")\n\n def __gt__(\n self,\n other: Union[\n TensorWrappedSingleEntityPhiTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedSingleEntityPhiTensorPointer, MPCTensor]:\n \"\"\"Apply the \"gt\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedSingleEntityPhiTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedSingleEntityPhiTensorPointer._apply_op(self, other, \"gt\")\n\n def to_local_object_without_private_data_child(self) -> SingleEntityPhiTensor:\n \"\"\"Convert this pointer into a partial version of the SingleEntityPhiTensor but without\n any of the private data therein.\"\"\"\n\n public_shape = getattr(self, \"public_shape\", None)\n public_dtype = getattr(self, \"public_dtype\", None)\n return Tensor(\n child=SingleEntityPhiTensor(\n child=None,\n entity=self.entity,\n min_vals=self.min_vals, # type: ignore\n max_vals=self.max_vals, # type: ignore\n scalar_manager=self.scalar_manager,\n ),\n public_shape=public_shape,\n public_dtype=public_dtype,\n )\n\n def _object2proto(self) -> \"TensorWrappedSingleEntityPhiTensorPointer_PB\":\n\n _entity = serialize(self.entity)\n _min_vals = serialize(self.min_vals)\n _max_vals = serialize(self.max_vals)\n _location = serialize(self.client.address)\n _scalar_manager = serialize(self.scalar_manager, to_bytes=True)\n _id_at_location = serialize(self.id_at_location)\n _object_type = self.object_type\n _tags = self.tags\n _description = self.description\n _public_shape = serialize(getattr(self, \"public_shape\", None), to_bytes=True)\n _public_dtype = serialize(getattr(self, \"public_dtype\", None), to_bytes=True)\n\n return TensorWrappedSingleEntityPhiTensorPointer_PB(\n entity=_entity,\n min_vals=_min_vals,\n max_vals=_max_vals,\n location=_location,\n scalar_manager=_scalar_manager,\n id_at_location=_id_at_location,\n object_type=_object_type,\n tags=_tags,\n description=_description,\n public_shape=_public_shape,\n public_dtype=_public_dtype,\n )\n\n @staticmethod\n def _proto2object(\n proto: TensorWrappedSingleEntityPhiTensorPointer_PB,\n ) -> \"TensorWrappedSingleEntityPhiTensorPointer\":\n\n entity = deserialize(blob=proto.entity)\n min_vals = deserialize(blob=proto.min_vals)\n max_vals = deserialize(blob=proto.max_vals)\n client = deserialize(blob=proto.location)\n scalar_manager = deserialize(blob=proto.scalar_manager, from_bytes=True)\n id_at_location = deserialize(blob=proto.id_at_location)\n object_type = proto.object_type\n tags = proto.tags\n public_shape = deserialize(blob=proto.public_shape, from_bytes=True)\n public_dtype = deserialize(blob=proto.public_dtype, from_bytes=True)\n description = proto.description\n\n return TensorWrappedSingleEntityPhiTensorPointer(\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n client=client,\n scalar_manager=scalar_manager,\n id_at_location=id_at_location,\n object_type=object_type,\n tags=tags,\n description=description,\n public_shape=public_shape,\n public_dtype=public_dtype,\n )\n\n @staticmethod\n def get_protobuf_schema() -> GeneratedProtocolMessageType:\n \"\"\"Return the type of protobuf object which stores a class of this type\n\n As a part of serialization and deserialization, we need the ability to\n lookup the protobuf object type directly from the object type. 
This\n static method allows us to do this.\n\n Importantly, this method is also used to create the reverse lookup ability within\n the metaclass of Serializable. In the metaclass, it calls this method and then\n it takes whatever type is returned from this method and adds an attribute to it\n with the type of this class attached to it. See the MetaSerializable class for details.\n\n :return: the type of protobuf object which corresponds to this class.\n :rtype: GeneratedProtocolMessageType\n\n \"\"\"\n\n return TensorWrappedSingleEntityPhiTensorPointer_PB\n\n\n@serializable(recursive_serde=True)\nclass SingleEntityPhiTensor(PassthroughTensor, AutogradTensorAncestor, ADPTensor):\n\n PointerClassOverride = TensorWrappedSingleEntityPhiTensorPointer\n\n __attr_allowlist__ = [\n \"child\",\n \"_min_vals\",\n \"_max_vals\",\n \"entity\",\n \"scalar_manager\",\n \"n_entities\",\n ]\n\n def __init__(\n self,\n child: SupportedChainType,\n entity: Entity,\n min_vals: np.ndarray,\n max_vals: np.ndarray,\n scalar_manager: Optional[VirtualMachinePrivateScalarManager] = None,\n ) -> None:\n\n # child = the actual private data\n super().__init__(child)\n\n # identically shaped tensor to \"child\" but making the LOWEST possible value of this private value\n self._min_vals = min_vals\n\n # identically shaped tensor to \"child\" but making the HIGHEST possible value of this private value\n self._max_vals = max_vals\n\n # the identity of the data subject\n self.entity = entity\n\n if scalar_manager is None:\n self.scalar_manager = VirtualMachinePrivateScalarManager()\n else:\n self.scalar_manager = scalar_manager\n\n # Number of entities in a SEPT is by definition 1\n self.n_entities = 1\n\n def init_pointer(\n self,\n client: Any,\n id_at_location: Optional[UID] = None,\n object_type: str = \"\",\n tags: Optional[List[str]] = None,\n description: str = \"\",\n ) -> TensorWrappedSingleEntityPhiTensorPointer:\n return TensorWrappedSingleEntityPhiTensorPointer(\n # Arguments specifically for SEPhiTensor\n entity=self.entity,\n min_vals=self._min_vals,\n max_vals=self._max_vals,\n scalar_manager=self.scalar_manager,\n # Arguments required for a Pointer to work\n client=client,\n id_at_location=id_at_location,\n object_type=object_type,\n tags=tags,\n description=description,\n )\n\n @property\n def gamma(self) -> InitialGammaTensor:\n\n \"\"\"Property to cast this tensor into a GammaTensor\"\"\"\n return self.create_gamma()\n\n def create_gamma(\n self, scalar_manager: Optional[VirtualMachinePrivateScalarManager] = None\n ) -> InitialGammaTensor:\n\n \"\"\"Return a new Gamma tensor based on this phi tensor\"\"\"\n\n if scalar_manager is None:\n scalar_manager = self.scalar_manager\n\n # Gamma expects an entity for each scalar\n entities = np.array([self.entity] * np.array(self.child.shape).prod()).reshape(\n self.shape\n )\n\n return InitialGammaTensor(\n values=self.child,\n min_vals=self.min_vals,\n max_vals=self.max_vals,\n entities=entities,\n scalar_manager=scalar_manager,\n )\n\n def publish(\n self, acc: Any, sigma: float, user_key: VerifyKey\n ) -> AcceptableSimpleType:\n print(\"PUBLISHING TO GAMMA:\")\n print(self.child)\n return self.gamma.publish(acc=acc, sigma=sigma, user_key=user_key)\n\n @property\n def min_vals(self) -> np.ndarray:\n\n return self._min_vals\n\n @property\n def max_vals(self) -> np.ndarray:\n\n return self._max_vals\n\n def __repr__(self) -> str:\n\n \"\"\"Pretty print some information, optimized for Jupyter notebook viewing.\"\"\"\n return (\n 
f\"{self.__class__.__name__}(entity={self.entity.name}, child={self.child})\"\n )\n\n def __and__(self, other: Any) -> SingleEntityPhiTensor:\n \"\"\"Note: this is bitwise and, not logical and\"\"\"\n return SingleEntityPhiTensor(\n child=self.child & other,\n min_vals=np.zeros_like(self.child),\n max_vals=np.ones_like(self.child),\n entity=self.entity,\n )\n\n def __or__(self, other: Any) -> SingleEntityPhiTensor:\n return SingleEntityPhiTensor(\n child=self.child | other,\n min_vals=np.zeros_like(self.child),\n max_vals=np.ones_like(self.child),\n entity=self.entity,\n )\n\n # Check for shape1 = (1,s), and shape2 = (,s) --> as an example\n def __eq__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n if is_acceptable_simple_type(other):\n if isinstance(other, np.ndarray):\n # If other is a Numpy Array, we need to check if shapes can be broadcast\n if is_broadcastable(other.shape, self.child.shape): # type: ignore\n data = self.child == other\n else:\n raise Exception(\n f\"Tensor shapes do not match \" # type: ignore\n f\"for __eq__: {self.child.shape} != {other.child.shape}\" # type: ignore\n )\n else:\n data = self.child == other\n elif isinstance(other, SingleEntityPhiTensor):\n if self.entity != other.entity:\n return convert_to_gamma_tensor(self) == convert_to_gamma_tensor(other)\n else:\n if is_broadcastable(self.child.shape, other.child.shape): # type: ignore\n data = self.child == other.child\n else:\n raise Exception(\n f\"Tensor shapes do not match for __eq__: {self.child.shape} != {other.child.shape}\"\n )\n elif isinstance(other, PassthroughTensor):\n if is_broadcastable(self.child.shape, other.child.shape): # type: ignore\n data = self.child == other.child\n else:\n raise Exception(\n f\"Tensor shapes do not match for __eq__: {self.child} != len{other}\"\n )\n return SingleEntityPhiTensor(\n child=data,\n entity=self.entity,\n min_vals=self.min_vals * 0.0,\n max_vals=self.max_vals * 0.0 + 1.0,\n scalar_manager=self.scalar_manager,\n )\n\n def __ne__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n # Make use of the equal operator we just implemented, and invert the result\n opposite_result = self.__eq__(other)\n\n if isinstance(opposite_result, SingleEntityPhiTensor):\n return SingleEntityPhiTensor(\n child=np.invert(opposite_result.child),\n entity=opposite_result.entity,\n min_vals=opposite_result.min_vals,\n max_vals=opposite_result.max_vals,\n scalar_manager=opposite_result.scalar_manager,\n )\n elif isinstance(opposite_result, InitialGammaTensor):\n return InitialGammaTensor(\n values=opposite_result.values,\n entities=opposite_result.entities,\n min_vals=opposite_result.min_vals,\n max_vals=opposite_result.max_vals,\n scalar_manager=opposite_result.scalar_manager,\n )\n else:\n raise Exception\n\n def __abs__(self) -> SingleEntityPhiTensor:\n\n data = self.child.abs()\n\n # create true/false gate inputs\n minvals_is_gt0 = self.min_vals > 0\n minvals_is_le0 = -minvals_is_gt0 + 1\n maxvals_is_gt0 = self.max_vals >= 0\n maxvals_is_le0 = -maxvals_is_gt0 + 1\n\n # create true/false gates\n is_strict_gt0 = minvals_is_gt0\n is_gtlt0 = minvals_is_le0 * maxvals_is_gt0\n is_strict_lt0 = minvals_is_le0 * maxvals_is_le0\n\n # if min_vals > 0, then new min_vals doesn't change\n min_vals_strict_gt0 = self.min_vals\n\n # if min_vals < 0 and max_vals > 0, then new min_vals = 0\n min_vals_gtlt0 = self.min_vals * 0\n\n # if min_vals < 0 and max_vals < 0, then new min_vals = -max_vals\n 
min_vals_strict_lt0 = -self.max_vals\n\n # sum of masked options\n min_vals = is_strict_gt0 * min_vals_strict_gt0\n min_vals = min_vals + (is_gtlt0 * min_vals_gtlt0)\n min_vals = min_vals + (is_strict_lt0 * min_vals_strict_lt0)\n\n # if min_vals > 0, then new min_vals doesn't change\n max_vals_strict_gt0 = self.max_vals\n\n # if min_vals < 0 and max_vals > 0, then new min_vals = 0\n max_vals_gtlt0 = np.max([self.max_vals, -self.min_vals]) # type: ignore\n\n # if min_vals < 0 and max_vals < 0, then new min_vals = -max_vals\n max_vals_strict_lt0 = -self.min_vals\n\n # sum of masked options\n max_vals = is_strict_gt0 * max_vals_strict_gt0\n max_vals = max_vals + (is_gtlt0 * max_vals_gtlt0)\n max_vals = max_vals + (is_strict_lt0 * max_vals_strict_lt0)\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def __add__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n\n # if the tensor being added is also private\n if isinstance(other, SingleEntityPhiTensor):\n if self.entity.name != other.entity.name:\n return convert_to_gamma_tensor(self) + convert_to_gamma_tensor(other)\n\n data = self.child + other.child\n min_vals = self.min_vals + other.min_vals\n max_vals = self.max_vals + other.max_vals\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n # if the tensor being added is a public tensor / int / float / etc.\n elif is_acceptable_simple_type(other):\n\n data = self.child + other\n min_vals = self.min_vals + other\n max_vals = self.max_vals + other\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n raise NotImplementedError\n\n def __neg__(self) -> SingleEntityPhiTensor:\n\n data = self.child * -1\n min_vals = self.max_vals * -1\n max_vals = self.min_vals * -1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def __getitem__(self, key: Any) -> SingleEntityPhiTensor:\n\n data = self.child.__getitem__(key)\n min_vals = self.min_vals.__getitem__(key)\n max_vals = self.max_vals.__getitem__(key)\n\n if isinstance(data, (np.number, bool, int, float)):\n data = np.array([data]) # 1 dimensional np.array\n if isinstance(min_vals, (np.number, bool, int, float)):\n min_vals = np.array(min_vals) # 1 dimensional np.array\n if isinstance(max_vals, (np.number, bool, int, float)):\n max_vals = np.array(max_vals) # 1 dimensional np.array\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def __mul__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n\n if isinstance(other, SingleEntityPhiTensor):\n print(f\"SELF ENTITY:{self.entity.name}\")\n print(f\"OTHER ENTITY:{other.entity.name}\")\n if self.entity != other.entity:\n print(\"Entities are not the same?!?!?!\")\n return convert_to_gamma_tensor(self) * convert_to_gamma_tensor(other)\n\n data = self.child * other.child\n\n min_min = self.min_vals * other.min_vals\n min_max = self.min_vals * other.max_vals\n max_min = self.max_vals * 
other.min_vals\n max_max = self.max_vals * other.max_vals\n\n min_vals = np.min([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n max_vals = np.max([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n elif is_acceptable_simple_type(other):\n\n data = self.child * other\n\n min_min = self.min_vals * other\n min_max = self.min_vals * other\n max_min = self.max_vals * other\n max_max = self.max_vals * other\n\n min_vals = np.min([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n max_vals = np.max([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n return NotImplemented\n\n def __pos__(self) -> SingleEntityPhiTensor:\n \"\"\"Identity operator, returns itself\"\"\"\n return self\n\n def __sub__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n\n if isinstance(other, SingleEntityPhiTensor):\n if self.entity != other.entity:\n return convert_to_gamma_tensor(self) - convert_to_gamma_tensor(other)\n\n data = self.child - other.child\n min_vals = self.min_vals - other.min_vals\n max_vals = self.max_vals - other.max_vals\n entity = self.entity\n\n elif is_acceptable_simple_type(other):\n if isinstance(other, np.ndarray):\n if not is_broadcastable(other.shape, self.child.shape): # type: ignore\n raise Exception(\n f\"Shapes do not match for subtraction: {self.child.shape} and {other.shape}\"\n )\n data = self.child - other\n min_vals = self.min_vals - other\n max_vals = self.max_vals - other\n entity = self.entity\n else:\n raise NotImplementedError\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def __truediv__(self, other: SupportedChainType) -> SingleEntityPhiTensor:\n\n if isinstance(other, SingleEntityPhiTensor):\n\n if self.entity != other.entity:\n # this should return a GammaTensor\n return NotImplemented\n\n data = self.child / other.child\n\n if (other.min_vals == 0).any() or (other.max_vals == 0).any():\n\n raise Exception(\n \"Infinite sensitivity - we can support this in the future but not yet\"\n )\n\n else:\n\n min_min = self.min_vals / other.min_vals\n min_max = self.min_vals / other.max_vals\n max_min = self.max_vals / other.min_vals\n max_max = self.max_vals / other.max_vals\n\n min_vals = np.min([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n max_vals = np.max([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n else:\n # Ignoring unsupported operand error b/c other logic is taken care of\n return self * (1 / other) # type: ignore\n\n def compress(\n self,\n condition: np.typing.ArrayLike,\n axis: Optional[int] = None,\n out: Optional[np.ndarray] = None,\n ) -> SingleEntityPhiTensor:\n \"\"\"Return selected slices of this array along a given axis\"\"\"\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the compress operation is meaningless, so don't change 
them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, compress operation had no effect.\"\n )\n else:\n data = self.child.compress(condition, axis, out)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data types, the compress operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, compress operation had no effect.')\n else:\n min_vals = self.min_vals.compress(condition, axis, out)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the compress operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, compress operation had no effect.')\n else:\n max_vals = self.max_vals.compress(condition, axis, out)\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def dot(self, other: SupportedChainType) -> SingleEntityPhiTensor:\n return self.manual_dot(other)\n\n # ndarray.flatten(order='C')\n def flatten(self, order: str = \"C\") -> SingleEntityPhiTensor:\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the flatten operation is meaningless, so don't change them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, flatten operation had no effect.\"\n )\n else:\n data = self.child.flatten(order)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data types, the flatten operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, flatten operation had no effect.')\n else:\n min_vals = self.min_vals.flatten(order)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the flatten operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, flatten operation had no effect.')\n else:\n max_vals = self.max_vals.flatten(order)\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n # def partition(\n # self,\n # kth: Union[int, List[int], np.ndarray],\n # axis: Optional[int] = -1,\n # kind: Optional[str] = \"introselect\",\n # order: Optional[Union[str, List[str]]] = None,\n # ) -> SingleEntityPhiTensor:\n # # this method mutates self\n # \"\"\"Interchange two axes of the Tensor\"\"\"\n # if (\n # isinstance(self.child, int)\n # or isinstance(self.child, float)\n # or isinstance(self.child, bool)\n # ):\n # # For these singleton data types, the partition operation is meaningless, so don't change them.\n # print(\n # f\"Warning: Tensor data was of type {type(self.child)}, partition operation had no effect.\"\n # )\n # else:\n # self.child.partition(kth, axis, kind, 
order)\n #\n # # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n # if (\n # isinstance(self.min_vals, int)\n # or isinstance(self.min_vals, float)\n # or isinstance(self.min_vals, bool)\n # ):\n # # For these singleton data types, the partition operation is meaningless, so don't change them.\n # print(\n # f\"Warning: Min_vals metadata was of type {type(self.min_vals)}, partition operation had no effect.\"\n # )\n # else:\n # self.min_vals.partition(kth, axis, kind, order)\n #\n # if (\n # isinstance(self.max_vals, int)\n # or isinstance(self.max_vals, float)\n # or isinstance(self.max_vals, bool)\n # ):\n # # For these singleton data types, the partition operation is meaningless, so don't change them.\n # print(\n # f\"Warning: Max_vals metadata was of type {type(self.max_vals)}, partition operation had no effect.\"\n # )\n # else:\n # self.max_vals.partition(kth, axis, kind, order)\n #\n # return self\n\n # ndarray.ravel(order='C')\n def ravel(self, order: str = \"C\") -> SingleEntityPhiTensor:\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the ravel operation is meaningless, so don't change them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, ravel operation had no effect.\"\n )\n else:\n data = self.child.ravel(order)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data types, the ravel operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, ravel operation had no effect.')\n else:\n min_vals = self.min_vals.ravel(order)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the ravel operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, ravel operation had no effect.')\n else:\n max_vals = self.max_vals.ravel(order)\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def repeat(\n self, repeats: Union[int, TypeTuple[int, ...]], axis: Optional[int] = None\n ) -> SingleEntityPhiTensor:\n\n data = self.child.repeat(repeats, axis=axis)\n min_vals = self.min_vals.repeat(repeats, axis=axis)\n max_vals = self.max_vals.repeat(repeats, axis=axis)\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def reshape(self, *args: Any) -> SingleEntityPhiTensor:\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the reshape operation is meaningless, so don't change them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, reshape operation had no effect.\"\n )\n else:\n data = self.child.reshape(*args)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data 
types, the reshape operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, reshape operation had no effect.')\n else:\n min_vals = self.min_vals.reshape(*args)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the reshape operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, reshape operation had no effect.')\n else:\n max_vals = self.max_vals.reshape(*args)\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def resize(\n self,\n new_shape: Union[TypeTuple[int], int, typing.Iterable],\n refcheck: bool = True,\n ) -> None:\n \"\"\"Change shape and size of array, in-place.\"\"\"\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the resize operation is meaningless, so don't change them.\n pass\n print(\n f\"Warning: Tensor data was of type {type(self.child)}, resize operation had no effect.\"\n )\n else:\n # self.child = self.child.resize(new_shape, refcheck)\n self.child = np.resize(self.child, new_shape)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data types, the resize operation is meaningless, so don't change them.\n pass\n # print(f'Warning: min_vals data was of type {type(self.min_vals)}, resize operation had no effect.')\n else:\n self._min_vals = np.reshape(self.min_vals, new_shape)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the resize operation is meaningless, so don't change them.\n pass\n # print(f'Warning: max_vals data was of type {type(data)}, resize operation had no effect.')\n else:\n self._max_vals = np.reshape(self.max_vals, new_shape)\n\n return None\n\n def squeeze(self, axis: Optional[int] = None) -> SingleEntityPhiTensor:\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the squeeze operation is meaningless, so don't change them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, squeeze operation had no effect.\"\n )\n else:\n data = self.child.squeeze(axis)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data types, the squeeze operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, squeeze operation had no effect.')\n else:\n min_vals = self.min_vals.squeeze(axis)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the squeeze operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, squeeze operation had no effect.')\n else:\n max_vals = self.max_vals.squeeze(axis)\n\n 
entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def sum(self, *args: Any, **kwargs: Any) -> SingleEntityPhiTensor:\n\n data = self.child.sum(*args, **kwargs)\n min_vals = self.min_vals.sum(*args, **kwargs)\n max_vals = self.max_vals.sum(*args, **kwargs)\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def swapaxes(self, axis1: int, axis2: int) -> SingleEntityPhiTensor:\n \"\"\"Interchange two axes of the Tensor\"\"\"\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these singleton data types, the swapaxes operation is meaningless, so don't change them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, swapaxes operation had no effect.\"\n )\n else:\n data = self.child.swapaxes(axis1, axis2)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these singleton data types, the swapaxes operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, swapaxes operation had no effect.')\n else:\n min_vals = self.min_vals.swapaxes(axis1, axis2)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these singleton data types, the swapaxes operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, swapaxes operation had no effect.')\n else:\n max_vals = self.max_vals.swapaxes(axis1, axis2)\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def transpose(self, *args: Any, **kwargs: Any) -> SingleEntityPhiTensor:\n \"\"\"Transposes self.child, min_vals, and max_vals if these can be transposed, otherwise doesn't change them.\"\"\"\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n # For these data types, the transpose operation is meaningless, so don't change them.\n data = self.child\n print(\n f\"Warning: Tensor data was of type {type(data)}, transpose operation had no effect.\"\n )\n else:\n data = self.child.transpose(*args)\n\n # TODO: Should we give warnings for min_val and max_val being single floats/integers/booleans too?\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n # For these data types, the transpose operation is meaningless, so don't change them.\n min_vals = self.min_vals\n # print(f'Warning: Tensor data was of type {type(data)}, transpose operation had no effect.')\n else:\n min_vals = self.min_vals.transpose(*args)\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n # For these data types, the transpose operation is meaningless, so don't change them.\n max_vals = self.max_vals\n # print(f'Warning: Tensor data was of type {type(data)}, transpose operation had no effect.')\n else:\n max_vals = 
self.max_vals.transpose(*args)\n\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def __le__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n\n # if the tensor being compared is also private\n if isinstance(other, SingleEntityPhiTensor):\n\n if self.entity != other.entity:\n # this should return a GammaTensor\n return convert_to_gamma_tensor(self) <= convert_to_gamma_tensor(other)\n\n if len(self.child) != len(other.child):\n raise Exception(\n f\"Tensor dims do not match for __le__: {len(self.child)} != {len(other.child)}\" # type: ignore\n )\n\n data = (\n self.child <= other.child\n ) * 1 # the * 1 just makes sure it returns integers instead of True/False\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n # if the tensor being compared is a public tensor / int / float / etc.\n elif is_acceptable_simple_type(other):\n\n data = (self.child <= other) * 1\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n return NotImplemented\n\n def __ge__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n\n # if the tensor being compared is also private\n if isinstance(other, SingleEntityPhiTensor):\n\n if self.entity != other.entity:\n # this should return a GammaTensor\n return convert_to_gamma_tensor(self) >= convert_to_gamma_tensor(other)\n\n if len(self.child) != len(other.child):\n raise Exception(\n f\"Tensor dims do not match for __ge__: {len(self.child)} != {len(other.child)}\" # type: ignore\n )\n\n data = (\n self.child >= other.child\n ) * 1 # the * 1 just makes sure it returns integers instead of True/False\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n # if the tensor being compared is a public tensor / int / float / etc.\n elif is_acceptable_simple_type(other):\n\n data = (self.child >= other) * 1\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n return NotImplemented\n\n def __lt__(\n self, other: SupportedChainType\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n\n # if the tensor being compared is also private\n if isinstance(other, SingleEntityPhiTensor):\n\n if self.entity != other.entity:\n # this should return a GammaTensor\n return convert_to_gamma_tensor(self) < convert_to_gamma_tensor(other)\n\n if len(self.child) != len(other.child):\n raise Exception(\n f\"Tensor dims do not match for __lt__: {len(self.child)} != {len(other.child)}\" # type: ignore\n )\n\n data = (\n self.child < other.child\n ) * 1 # the * 1 just makes sure it returns integers instead of True/False\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n 
return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n # if the tensor being compared is a public tensor / int / float / etc.\n elif is_acceptable_simple_type(other):\n\n data = (self.child < other) * 1\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n return NotImplemented\n\n def __gt__(self, other: SupportedChainType) -> SingleEntityPhiTensor:\n\n # if the tensor being compared is also private\n if isinstance(other, SingleEntityPhiTensor):\n\n if self.entity != other.entity:\n # this should return a GammaTensor\n return convert_to_gamma_tensor(self) > convert_to_gamma_tensor(other)\n\n if len(self.child) != len(other.child):\n raise Exception(\n f\"Tensor dims do not match for __gt__: {len(self.child)} != {len(other.child)}\" # type: ignore\n )\n\n data = (\n self.child > other.child\n ) * 1 # the * 1 just makes sure it returns integers instead of True/False\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n # if the tensor being compared is a public tensor / int / float / etc.\n elif is_acceptable_simple_type(other):\n\n data = (self.child > other) * 1\n min_vals = self.min_vals * 0\n max_vals = (self.max_vals * 0) + 1\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n return NotImplemented\n\n def clip(\n self, a_min: npt.ArrayLike, a_max: npt.ArrayLike, *args: Any\n ) -> SingleEntityPhiTensor:\n\n if a_min is None and a_max is None:\n raise Exception(\"ValueError: clip: must set either max or min\")\n\n if is_acceptable_simple_type(self.child):\n if isinstance(self.child, np.ndarray):\n data = self.child.clip(a_min, a_max)\n else:\n # self.child is a singleton\n data = max(a_min, min(self.child, a_max)) if a_min <= a_max else a_max\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().clip(a_min, a_max)\n\n if isinstance(self.min_vals, np.ndarray):\n min_vals = np.clip(self.min_vals, a_min=a_min, a_max=a_max, *args)\n else:\n min_vals = (\n max(a_min, min(self.min_vals, a_max)) if a_min <= a_max else a_max\n )\n\n if isinstance(self.max_vals, np.ndarray):\n max_vals = np.clip(self.max_vals, a_min=a_min, a_max=a_max, *args)\n else:\n max_vals = (\n max(a_min, min(self.max_vals, a_max)) if a_min <= a_max else a_max\n )\n\n return SingleEntityPhiTensor(\n child=data,\n entity=self.entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def any(\n self,\n axis: Optional[int] = None,\n keepdims: Optional[bool] = False,\n where: Optional[bool] = True,\n ) -> SingleEntityPhiTensor:\n \"\"\"Test whether any element along a given axis evaluates to True\"\"\"\n\n if is_acceptable_simple_type(self.child):\n if isinstance(self.child, np.ndarray):\n data = self.child.any(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n else:\n # self.child is a singleton\n data = self.child != 0\n\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().any(\n axis=axis, out=np.array(True), 
keepdims=keepdims, where=where\n )\n\n if isinstance(self.min_vals, np.ndarray):\n # test whether any min val evaluates to True\n min_vals = self.min_vals.any(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n else:\n min_vals = self.min_vals != 0\n\n if isinstance(self.max_vals, np.ndarray):\n # test whether any max val evaluates to True\n max_vals = self.max_vals.any(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n else:\n max_vals = self.max_vals != 0\n\n # * 1 just makes sure it returns integers instead of True/False\n return SingleEntityPhiTensor(\n child=data * 1,\n entity=self.entity,\n min_vals=min_vals * 1,\n max_vals=max_vals * 1,\n scalar_manager=self.scalar_manager,\n )\n\n def all(\n self,\n axis: Optional[int] = None,\n keepdims: Optional[bool] = False,\n where: Optional[bool] = True,\n ) -> SingleEntityPhiTensor:\n \"\"\"Test whether all elements along a given axis evaluates to True\"\"\"\n\n if is_acceptable_simple_type(self.child):\n if isinstance(self.child, np.ndarray):\n data = self.child.all(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n else:\n # self.child is a singleton\n data = self.child != 0\n\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().all(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n\n if isinstance(self.min_vals, np.ndarray):\n # test whether all min vals evaluate to True\n min_vals = self.min_vals.all(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n else:\n min_vals = self.min_vals != 0\n\n if isinstance(self.max_vals, np.ndarray):\n # test whether all max vals evaluate to True\n max_vals = self.max_vals.all(\n axis=axis, out=np.array(True), keepdims=keepdims, where=where\n )\n else:\n max_vals = self.max_vals != 0\n\n # * 1 just makes sure it returns integers instead of True/False\n return SingleEntityPhiTensor(\n child=data * 1,\n entity=self.entity,\n min_vals=min_vals * 1,\n max_vals=max_vals * 1,\n scalar_manager=self.scalar_manager,\n )\n\n def abs(\n self,\n out: Optional[np.ndarray] = None,\n ) -> SingleEntityPhiTensor:\n \"\"\"Calculate the absolute value element-wise\"\"\"\n if is_acceptable_simple_type(self.child):\n data = self.child.__abs__()\n\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().__abs__()\n\n if isinstance(self.min_vals, np.ndarray):\n min_vals = np.abs(self.min_vals, out)\n else:\n min_vals = abs(self.min_vals)\n\n if isinstance(self.max_vals, np.ndarray):\n max_vals = np.abs(self.max_vals, out)\n else:\n max_vals = abs(self.max_vals)\n\n return SingleEntityPhiTensor(\n child=data,\n entity=self.entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def pow(self, value: SupportedChainType) -> SingleEntityPhiTensor:\n \"\"\"Return elements raised to powers from value, element-wise\"\"\"\n\n if isinstance(value, SingleEntityPhiTensor):\n\n if self.entity != value.entity:\n return NotImplemented\n\n data = self.child ** value.child\n\n min_min = self.min_vals ** value.min_vals\n min_max = self.min_vals ** value.max_vals\n max_min = self.max_vals ** value.min_vals\n max_max = self.max_vals ** value.max_vals\n\n min_vals = np.min([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n max_vals = np.max([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n 
scalar_manager=self.scalar_manager,\n )\n\n elif is_acceptable_simple_type(value):\n\n data = self.child ** value\n\n min_min = self.min_vals ** value\n min_max = self.min_vals ** value\n max_min = self.max_vals ** value\n max_max = self.max_vals ** value\n\n min_vals = np.min([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n max_vals = np.max([min_min, min_max, max_min, max_max], axis=0) # type: ignore\n entity = self.entity\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n else:\n return NotImplemented\n\n def copy(\n self, order: Optional[str] = \"K\", subok: Optional[bool] = True\n ) -> SingleEntityPhiTensor:\n \"\"\"Return copy of the given object\"\"\"\n if is_acceptable_simple_type(self.child):\n data = self.child.copy()\n\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().copy()\n\n elif isinstance(self.child, ShareTensor):\n data = self.child.copy()\n\n if isinstance(self.min_vals, np.ndarray):\n min_vals = np.array(self.min_vals, order=order, subok=subok, copy=True)\n else:\n min_vals = self.min_vals\n\n if isinstance(self.max_vals, np.ndarray):\n max_vals = np.array(self.max_vals, order=order, subok=subok, copy=True)\n else:\n max_vals = self.max_vals\n\n return SingleEntityPhiTensor(\n child=data,\n entity=self.entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n @staticmethod\n def sept_like(tensor: SingleEntityPhiTensor, ent: Entity) -> SingleEntityPhiTensor:\n \"\"\"Create a SEPT with identical data, but belonging to a different entity\"\"\"\n return SingleEntityPhiTensor(\n child=tensor.child,\n entity=ent,\n min_vals=tensor.min_vals,\n max_vals=tensor.max_vals,\n scalar_manager=tensor.scalar_manager,\n )\n\n @staticmethod\n def zeros_like(\n tensor: SingleEntityPhiTensor, ent: Optional[Entity] = None\n ) -> SingleEntityPhiTensor:\n \"\"\"Create a SEPT of the same shape, but with zeros as its data.\n By default, the tensor will have the same entity, but a different entity can be passed instead.\n \"\"\"\n if ent:\n return SingleEntityPhiTensor(\n child=np.zeros_like(tensor.child),\n entity=ent,\n min_vals=np.zeros_like(tensor.min_vals),\n max_vals=np.ones_like(tensor.max_vals),\n scalar_manager=tensor.scalar_manager,\n )\n else:\n return SingleEntityPhiTensor(\n child=np.zeros_like(tensor.child),\n entity=tensor.entity,\n min_vals=np.zeros_like(tensor.min_vals),\n max_vals=np.ones_like(tensor.max_vals),\n scalar_manager=tensor.scalar_manager,\n )\n\n @staticmethod\n def ones_like(\n tensor: SingleEntityPhiTensor, ent: Optional[Entity] = None\n ) -> SingleEntityPhiTensor:\n \"\"\"Create a SEPT of the same shape, but with ones as its data\n By default, the tensor will have the same entity, but a different entity can be passed instead.\n \"\"\"\n if ent:\n return SingleEntityPhiTensor(\n child=np.ones_like(tensor.child),\n entity=ent,\n min_vals=np.zeros_like(tensor.min_vals),\n max_vals=np.ones_like(tensor.max_vals),\n scalar_manager=tensor.scalar_manager,\n )\n else:\n return SingleEntityPhiTensor(\n child=np.ones_like(tensor.child),\n entity=tensor.entity,\n min_vals=np.zeros_like(tensor.min_vals),\n max_vals=np.ones_like(tensor.max_vals),\n scalar_manager=tensor.scalar_manager,\n )\n\n def take(\n self,\n indices: np.ArrayLike,\n axis: Optional[int] = None,\n mode: Optional[str] = \"raise\",\n ) -> SingleEntityPhiTensor:\n \"\"\"Take elements from an array along an axis\"\"\"\n if 
is_acceptable_simple_type(self.child):\n data = self.child.take(indices=indices, axis=axis, mode=mode)\n\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().take(indices=indices, axis=axis, mode=mode)\n\n min_vals = self.min_vals.take(indices=indices, axis=axis, mode=mode)\n max_vals = self.max_vals.take(indices=indices, axis=axis, mode=mode)\n\n return SingleEntityPhiTensor(\n child=data,\n entity=self.entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def diagonal(\n self,\n offset: Optional[int] = 0,\n axis1: Optional[int] = 0,\n axis2: Optional[int] = 1,\n ) -> SingleEntityPhiTensor:\n \"\"\"Return specified diagonals\"\"\"\n if is_acceptable_simple_type(self.child):\n if (\n isinstance(self.child, int)\n or isinstance(self.child, float)\n or isinstance(self.child, bool)\n ):\n raise Exception(\n \"ValueError: diag requires an array of at least two dimensions\"\n )\n\n elif isinstance(self.child, np.matrix):\n # Make diagonal of matrix 1-D to preserve backward compatibility.\n data = np.asarray(self.child).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n else:\n data = np.asanyarray(self.child).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n elif isinstance(self.child, torch.Tensor):\n if isinstance(self.child.numpy(), np.matrix):\n # Make diagonal of matrix 1-D to preserve backward compatibility.\n data = np.asarray(self.child.numpy()).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n else:\n data = np.asanyarray(self.child.numpy()).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n\n if (\n isinstance(self.min_vals, int)\n or isinstance(self.min_vals, float)\n or isinstance(self.min_vals, bool)\n ):\n raise Exception(\n \"ValueError: diag requires an array of at least two dimensions\"\n )\n\n elif isinstance(self.min_vals, np.matrix):\n # Make diagonal of matrix 1-D to preserve backward compatibility.\n min_vals = np.asarray(self.min_vals).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n else:\n min_vals = np.asanyarray(self.min_vals).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n\n if (\n isinstance(self.max_vals, int)\n or isinstance(self.max_vals, float)\n or isinstance(self.max_vals, bool)\n ):\n raise Exception(\n \"ValueError: diag requires an array of at least two dimensions\"\n )\n\n elif isinstance(self.max_vals, np.matrix):\n # Make diagonal of matrix 1-D to preserve backward compatibility.\n max_vals = np.asarray(self.max_vals).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n else:\n max_vals = np.asanyarray(self.max_vals).diagonal(\n offset=offset, axis1=axis1, axis2=axis2\n )\n\n return SingleEntityPhiTensor(\n child=data,\n entity=self.entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=self.scalar_manager,\n )\n\n def max(\n self,\n axis: Optional[int] = None,\n out: Optional[np.ndarray] = None,\n keepdims: Optional[bool] = False,\n initial: Optional[int] = None,\n where: bool = True,\n ) -> SingleEntityPhiTensor:\n # Note: Who knew this method had SO MANY ARGUMENTS?!?!?\n if is_acceptable_simple_type(self.child):\n if isinstance(self.child, np.ndarray):\n data = self.child.max(axis, out, keepdims, initial, where)\n else:\n # This implies self.child is a singleton (int, float, bool, etc)\n data = self.child\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().max(axis, out, keepdims, initial, where)\n else:\n raise NotImplementedError\n\n if isinstance(self.min_vals, np.ndarray):\n min_vals = 
self.min_vals.max(axis, out, keepdims, initial, where)\n else:\n min_vals = self.min_vals\n\n if isinstance(self.max_vals, np.ndarray):\n max_vals = self.max_vals.max(axis, out, keepdims, initial, where)\n else:\n max_vals = self.max_vals\n\n return SingleEntityPhiTensor(\n child=data,\n max_vals=max_vals,\n min_vals=min_vals,\n entity=self.entity,\n )\n\n def min(\n self,\n axis: Optional[int] = None,\n out: Optional[np.ndarray] = None,\n keepdims: Optional[bool] = False,\n initial: Optional[int] = None,\n where: bool = True,\n ) -> SingleEntityPhiTensor:\n # Note: Who knew this method had SO MANY ARGUMENTS?!?!?\n if is_acceptable_simple_type(self.child):\n if isinstance(self.child, np.ndarray):\n data = self.child.min(axis, out, keepdims, initial, where)\n else:\n # This implies self.child is a singleton (int, float, bool, etc)\n data = self.child\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().min(axis, out, keepdims, initial, where)\n else:\n raise NotImplementedError\n\n if isinstance(self.min_vals, np.ndarray):\n min_vals = self.min_vals.min(axis, out, keepdims, initial, where)\n else:\n min_vals = self.min_vals\n\n if isinstance(self.max_vals, np.ndarray):\n max_vals = self.max_vals.min(axis, out, keepdims, initial, where)\n else:\n max_vals = self.max_vals\n\n return SingleEntityPhiTensor(\n child=data,\n max_vals=max_vals,\n min_vals=min_vals,\n entity=self.entity,\n )\n\n # TODO: Figure out how to do type annotation for dtype\n def trace(\n self,\n offset: int = 0,\n axis1: Optional[int] = 0,\n axis2: Optional[int] = 1,\n dtype: Optional[Any] = None,\n out: np.ndarray = None,\n ) -> SingleEntityPhiTensor:\n if isinstance(self.child, np.ndarray):\n data = self.child.trace(offset, axis1, axis2, dtype, out)\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().trace(offset, axis1, axis2, dtype, out)\n else:\n data = self.child * len(self.child)\n\n if isinstance(self.min_vals, np.ndarray):\n mins = self.min_vals.trace(offset, axis1, axis2, dtype, out)\n else:\n mins = self.min_vals * len(self.child)\n\n if isinstance(self.max_vals, np.ndarray):\n maxes = self.max_vals.trace(offset, axis1, axis2, dtype, out)\n else:\n maxes = self.max_vals * len(self.child)\n\n return SingleEntityPhiTensor(\n child=data,\n min_vals=mins,\n max_vals=maxes,\n entity=self.entity,\n )\n\n def prod(\n self,\n axis: Optional[int] = None,\n dtype: Optional[Any] = None,\n out: Optional[np.ndarray] = None,\n keepdims: Optional[bool] = False,\n initial: int = 1,\n where: Optional[bool] = True,\n ) -> SingleEntityPhiTensor:\n return SingleEntityPhiTensor(\n child=self.child.prod(axis, dtype, out, keepdims, initial, where),\n min_vals=self.min_vals.prod(axis, dtype, out, keepdims, initial, where),\n max_vals=self.max_vals.prod(axis, dtype, out, keepdims, initial, where),\n entity=self.entity,\n )\n\n def round(self, decimals: int = 0) -> SingleEntityPhiTensor:\n if decimals != 0:\n raise Exception(\n \"We currently only support np.int32. 
Sorry about the inconvenience-\"\n \"we plan to support more types soon!\"\n )\n return SingleEntityPhiTensor(\n child=self.child.astype(dtype=np.int32),\n min_vals=self.min_vals.astype(dtype=np.int32),\n max_vals=self.max_vals.astype(dtype=np.int32),\n scalar_manager=self.scalar_manager,\n entity=self.entity,\n )\n\n def __floordiv__(\n self, other: Union[AcceptableSimpleType, SingleEntityPhiTensor]\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n if is_acceptable_simple_type(other):\n if isinstance(other, np.ndarray) and not is_broadcastable(\n self.shape, other.shape\n ):\n raise Exception(\n f\"Shapes not broadcastable: {self.shape} and {other.shape}\"\n )\n else:\n data = self.child // other\n mins = self.min_vals // other\n maxes = self.max_vals // other\n elif isinstance(other, SingleEntityPhiTensor):\n if is_broadcastable(self.shape, other.shape):\n if self.entity == other.entity:\n data = self.child // other.child\n mins = self.min_vals // other.min_vals\n maxes = self.max_vals // other.max_vals\n else:\n # return convert_to_gamma_tensor(self) // convert_to_gamma_tensor(other)\n raise NotImplementedError\n else:\n raise Exception(\n f\"Shapes not broadcastable: {self.shape} and {other.shape}\"\n )\n else:\n raise NotImplementedError\n return SingleEntityPhiTensor(\n child=data,\n max_vals=maxes,\n min_vals=mins,\n entity=self.entity,\n scalar_manager=self.scalar_manager,\n )\n\n #\n # # TODO: Check to see if non-integers are ever introduced\n # def __mod__(\n # self, other: Union[AcceptableSimpleType, SingleEntityPhiTensor]\n # ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n # if is_acceptable_simple_type(other):\n # if isinstance(other, np.ndarray) and not is_broadcastable(\n # self.shape, other.shape\n # ):\n # raise Exception(\n # f\"Shapes not broadcastable: {self.shape} and {other.shape}\"\n # )\n # else:\n # data = self.child % other\n # mins = self.min_vals % other\n # maxes = self.max_vals % other\n # elif isinstance(other, SingleEntityPhiTensor):\n # if is_broadcastable(self.shape, other.shape):\n # if self.entity == other.entity:\n # data = self.child % other.child\n # mins = self.min_vals % other.min_vals\n # maxes = self.max_vals % other.max_vals\n # else:\n # # return convert_to_gamma_tensor(self) % convert_to_gamma_tensor(other)\n # raise NotImplementedError\n # else:\n # raise Exception(\n # f\"Shapes not broadcastable: {self.shape} and {other.shape}\"\n # )\n # else:\n # raise NotImplementedError\n # return SingleEntityPhiTensor(\n # child=data,\n # max_vals=maxes,\n # min_vals=mins,\n # entity=self.entity,\n # scalar_manager=self.scalar_manager,\n # )\n # #\n # # def __divmod__(\n # # self, other: Union[AcceptableSimpleType, SingleEntityPhiTensor]\n # # ) -> TypeTuple:\n # # return self // other, self % other\n\n def __matmul__(\n self, other: Union[np.ndarray, SingleEntityPhiTensor]\n ) -> Union[SingleEntityPhiTensor, IntermediateGammaTensor]:\n if not isinstance(other, (np.ndarray, SingleEntityPhiTensor)):\n raise Exception(\n f\"Matrix multiplication not yet implemented for type {type(other)}\"\n )\n else:\n if not is_broadcastable(self.shape, other.shape):\n raise Exception(\n f\"Shapes not broadcastable: {self.shape} and {other.shape}\"\n )\n else:\n if isinstance(other, np.ndarray):\n data = self.child.__matmul__(other)\n mins = self.min_vals.__matmul__(other)\n maxes = self.max_vals.__matmul__(other)\n elif isinstance(other, SingleEntityPhiTensor):\n if self.entity != other.entity:\n # return 
convert_to_gamma_tensor(self).__matmul__(convert_to_gamma_tensor(other))\n raise NotImplementedError\n else:\n data = self.child.__matmul__(other.child)\n mins = self.min_vals.__matmul__(other.min_vals)\n maxes = self.max_vals.__matmul__(other.max_vals)\n else:\n raise NotImplementedError\n return SingleEntityPhiTensor(\n child=data,\n max_vals=maxes,\n min_vals=mins,\n entity=self.entity,\n scalar_manager=self.scalar_manager,\n )\n\n def cumsum(\n self,\n axis: Optional[int] = None,\n dtype: Optional[Any] = None,\n out: np.ndarray = None,\n ) -> SingleEntityPhiTensor:\n if dtype and dtype != np.int32:\n raise Exception(\n \"We currently only support np.int32 dtypes. \"\n \"We have plans to support more in the future though!\"\n )\n if isinstance(self.child, np.ndarray):\n data = self.child.cumsum(axis, dtype, out)\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().cumsum(axis, dtype, out)\n else:\n data = self.child * len(self.child)\n\n if isinstance(self.min_vals, np.ndarray):\n mins = self.min_vals.cumsum(axis, dtype, out)\n else:\n mins = self.min_vals * len(self.child)\n\n if isinstance(self.max_vals, np.ndarray):\n maxes = self.max_vals.cumsum(axis, dtype, out)\n else:\n maxes = self.max_vals * len(self.child)\n\n return SingleEntityPhiTensor(\n child=data, min_vals=mins, max_vals=maxes, entity=self.entity\n )\n\n def cumprod(\n self,\n axis: Optional[int] = None,\n dtype: Optional[Any] = None,\n out: np.ndarray = None,\n ) -> SingleEntityPhiTensor:\n if dtype and dtype != np.int32:\n raise Exception(\n \"We currently only support np.int32 dtypes. \"\n \"We have plans to support more in the future though!\"\n )\n if isinstance(self.child, np.ndarray):\n data = self.child.cumprod(axis, dtype, out)\n elif isinstance(self.child, torch.Tensor):\n data = self.child.numpy().cumprod(axis, dtype, out)\n else:\n data = self.child * len(self.child)\n\n if isinstance(self.min_vals, np.ndarray):\n mins = self.min_vals.cumprod(axis, dtype, out)\n else:\n mins = self.min_vals * len(self.child)\n\n if isinstance(self.max_vals, np.ndarray):\n maxes = self.max_vals.cumprod(axis, dtype, out)\n else:\n maxes = self.max_vals * len(self.child)\n\n return SingleEntityPhiTensor(\n child=data, min_vals=mins, max_vals=maxes, entity=self.entity\n )\n\n\n@implements(SingleEntityPhiTensor, np.expand_dims)\ndef expand_dims(a: npt.ArrayLike, axis: Optional[int] = None) -> SingleEntityPhiTensor:\n\n entity = a.entity # type: ignore\n\n min_vals = np.expand_dims(a=a.min_vals, axis=axis) # type: ignore\n max_vals = np.expand_dims(a=a.max_vals, axis=axis) # type: ignore\n\n data = np.expand_dims(a.child, axis=axis) # type: ignore\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n scalar_manager=a.scalar_manager, # type: ignore\n )\n\n\n@implements(SingleEntityPhiTensor, np.mean)\ndef mean(*args: Any, **kwargs: Any) -> SingleEntityPhiTensor:\n entity = args[0].entity\n scalar_manager = args[0].scalar_manager\n\n for arg in args[1:]:\n if not isinstance(arg, SingleEntityPhiTensor):\n raise Exception(\"Can only call np.mean on objects of the same type.\")\n\n if arg.entity != entity:\n return NotImplemented\n\n min_vals = np.mean([x.min_vals for x in args], **kwargs)\n max_vals = np.mean([x.max_vals for x in args], **kwargs)\n\n args, kwargs = inputs2child(*args, **kwargs) # type: ignore\n\n data = np.mean(args, **kwargs)\n\n return SingleEntityPhiTensor(\n child=data,\n entity=entity,\n min_vals=min_vals,\n max_vals=max_vals,\n 
scalar_manager=scalar_manager,\n )\n"
] | [
[
"pandas.DataFrame"
],
[
"numpy.expand_dims",
"numpy.resize",
"numpy.abs",
"numpy.ones_like",
"numpy.min",
"numpy.reshape",
"numpy.clip",
"numpy.invert",
"numpy.asarray",
"numpy.dtype",
"numpy.max",
"numpy.asanyarray",
"numpy.mean",
"numpy.zeros_like",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
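
The SingleEntityPhiTensor methods in the row above (__mul__, __truediv__, pow) all derive new min_vals/max_vals bounds the same way: compute the four corner combinations of the operands' bounds and reduce them with numpy.min/numpy.max along axis 0. A minimal, standalone sketch of that interval-propagation pattern for element-wise multiplication; the function and variable names (mul_bounds, a_min, ...) are illustrative and do not come from the dataset row.

import numpy as np

def mul_bounds(a_min, a_max, b_min, b_max):
    # Tightest element-wise bounds on a * b given bounds on a and b:
    # take the min/max over the four corner products, mirroring the
    # __mul__ / __truediv__ / pow bound logic in the tensor code above.
    corners = np.stack([a_min * b_min, a_min * b_max,
                        a_max * b_min, a_max * b_max])
    return corners.min(axis=0), corners.max(axis=0)

a_min, a_max = np.array([-2.0, 0.0]), np.array([3.0, 1.0])
b_min, b_max = np.array([-1.0, 2.0]), np.array([4.0, 5.0])
lo, hi = mul_bounds(a_min, a_max, b_min, b_max)
print(lo, hi)  # [-8.  0.] [12.  5.]
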
ibivu/protein-glue | [
"47f68b4789c6750dfb9bf6d6ae382a9061514bfd",
"47f68b4789c6750dfb9bf6d6ae382a9061514bfd"
] | [
"dataset/ss3.py",
"generate_embed.py"
] | [
"from numpy import float32\nimport tensorflow as tf\nimport constants as c\n\ndef _parse_example(example_proto):\n features = {\n \"sequence\": tf.io.FixedLenFeature((), tf.string, default_value=\"\"),\n \"ss3\": tf.io.VarLenFeature(tf.int64),\n \"ss8\": tf.io.VarLenFeature(tf.int64)\n }\n parsed_features = tf.io.parse_single_example(example_proto, features)\n\n input_seq = tf.io.decode_raw(parsed_features[\"sequence\"], tf.dtypes.uint8)\n # We get raw ASCII bytes from the tensorflow file format, shift their values so 'A' maps to index 3,\n # because we reserve 0 for padding / masked values, 1 for the start of sequence marker, and 2 for\n # the end of sequence marker\n input_seq = input_seq - 65 + c.NUM_SPECIAL_SYMBOLS\n input_seq = tf.cast(input_seq, tf.int32)\n\n segment_label = tf.ones_like(input_seq, dtype=tf.float32)\n\n target_seq = tf.sparse.to_dense(parsed_features['ss3'],\n default_value=0)\n target_seq = target_seq + 1\n target_seq = tf.cast(target_seq, tf.int32)\n\n return (input_seq, target_seq, segment_label)\n\ndef create_dataset_ss3(filenames, batch_size=16, max_length=128):\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(_parse_example)\n if max_length:\n dataset = dataset.filter(lambda x, y, z: tf.shape(x)[0] <= max_length)\n dataset = dataset.padded_batch(batch_size, padded_shapes=([None], [None], [None])).prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset\n",
"import tensorflow as tf\nimport numpy as np\nimport click\n\nfrom dataset.fasta import read_fasta, to_seq_array\nfrom model import BERTTransformer\n\npredict_step_signature = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n]\n\[email protected]()\[email protected]('checkpoint_dir')\[email protected]('input_fasta')\[email protected]('output_file')\[email protected]('--num-layers', default=12)\[email protected]('--num-heads', default=8)\[email protected]('--d-ff', default=-1)\[email protected]('--d-model', default=768)\[email protected]('--reduced-target-alphabet/--no-reduced-target-alphabet', default=False)\[email protected]('--embed-num-layers', default=4)\ndef main(num_layers, num_heads, d_ff, d_model, reduced_target_alphabet, embed_num_layers, input_fasta, checkpoint_dir, output_file):\n @tf.function(input_signature=predict_step_signature)\n def predict_step(inp):\n predictions, activations = transformer(inp, False, True)\n predictions = tf.argmax(predictions, axis=2)\n\n return predictions, activations\n\n if d_ff == -1:\n d_ff = d_model * 4\n input_vocab_size = 30\n target_vocab_size = 12 if reduced_target_alphabet else 30\n\n transformer = BERTTransformer(\n num_layers,\n d_model,\n num_heads,\n d_ff,\n input_vocab_size,\n target_vocab_size,\n pe=513,\n rate=0.0,\n )\n\n ckpt = tf.train.Checkpoint(transformer=transformer)\n ckpt.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()\n\n outputs = []\n\n for name, _, seq in read_fasta(input_fasta):\n inp = tf.constant(to_seq_array([seq]))\n pred, emb = predict_step(inp)\n pred = tf.cast(pred, tf.int32)\n\n emb_arr = tf.squeeze(emb, 0).numpy()\n emb_arr = emb_arr[:, -embed_num_layers:, :]\n emb_arr = np.reshape(emb_arr, (emb_arr.shape[0], emb_arr.shape[1] * emb_arr.shape[2]))\n\n outputs.append((name, emb_arr))\n\n np.save(output_file, np.array(outputs, dtype=object))\n\n\nif __name__ == \"__main__\":\n # pylint: disable=no-value-for-parameter\n main()"
] | [
[
"tensorflow.sparse.to_dense",
"tensorflow.shape",
"tensorflow.data.TFRecordDataset",
"tensorflow.cast",
"tensorflow.io.parse_single_example",
"tensorflow.ones_like",
"tensorflow.io.decode_raw",
"tensorflow.io.VarLenFeature",
"tensorflow.io.FixedLenFeature"
],
[
"tensorflow.train.latest_checkpoint",
"numpy.reshape",
"tensorflow.train.Checkpoint",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.function",
"tensorflow.argmax",
"numpy.array",
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
}
] |
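
In the ss3.py file above, _parse_example turns the raw ASCII bytes of a protein sequence into integer ids by subtracting 65 (ord('A')) and adding c.NUM_SPECIAL_SYMBOLS, so that indices 0-2 stay reserved for padding and the start/end markers. A plain-numpy sketch of just that mapping; the value 3 for NUM_SPECIAL_SYMBOLS is an assumption, since the constant lives in the repo's constants module, which is not shown in this row.

import numpy as np

NUM_SPECIAL_SYMBOLS = 3  # assumed: 0 = pad, 1 = start-of-sequence, 2 = end-of-sequence

def encode_sequence(seq: str) -> np.ndarray:
    # Same arithmetic as _parse_example: ASCII code - 65 + offset,
    # so 'A' maps to 3, 'B' to 4, and so on.
    raw = np.frombuffer(seq.encode("ascii"), dtype=np.uint8)
    return raw.astype(np.int32) - 65 + NUM_SPECIAL_SYMBOLS

print(encode_sequence("ACDE"))  # [3 5 6 7]
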
gabrieldemarmiesse/tracks_separation | [
"81b0b1cb0bce269a76403ad5e16c3e0469160f40"
] | [
"converter.py"
] | [
"from glob import glob\nfrom tqdm import tqdm as tq\nfrom scipy.io.wavfile import read, write\nfrom resampy import resample\nnew_rate = 16000\npath = \"./data/DSD100_16kHz/Sources/*/*/*.wav\"\n\nfor file in tq(glob(path)):\n rate, array = read(file)\n new_array = resample(array,rate, new_rate, axis=0)\n write(file, new_rate, new_array)\n"
] | [
[
"scipy.io.wavfile.write",
"scipy.io.wavfile.read"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
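
converter.py above overwrites every WAV matched by its glob with a 16 kHz copy, using scipy.io.wavfile for I/O and resampy for the resampling itself. A single-file variant of the same pattern; 'in.wav' and 'out.wav' are placeholder paths, and writing to a separate output file (rather than in place) is a deliberate change for the sketch.

from scipy.io.wavfile import read, write
from resampy import resample

TARGET_RATE = 16000

# Read the source audio, resample along the time axis (axis=0, as in
# converter.py), and write the result to a new file.
rate, samples = read("in.wav")
resampled = resample(samples, rate, TARGET_RATE, axis=0)
write("out.wav", TARGET_RATE, resampled)
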
Yu-AnChen/tabbi | [
"bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5"
] | [
"tabbi/gmm.py"
] | [
"import sklearn.mixture\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom matplotlib import ticker\r\nimport matplotlib.patheffects as mpatheffects\r\n\r\n\r\ndef get_gmm_and_pos_label(\r\n array, n_components=2, n_steps=5000\r\n):\r\n gmm = sklearn.mixture.GaussianMixture(\r\n n_components=n_components, covariance_type='spherical', random_state=0\r\n )\r\n gmm.fit(array.reshape(-1, 1))\r\n label = np.argmax(gmm.means_)\r\n\r\n # low = array.min()\r\n # high = array.max()\r\n\r\n low = gmm.means_.min() - 2*np.sqrt(gmm.covariances_[np.argmin(gmm.means_)])\r\n high = gmm.means_.max() + 2*np.sqrt(gmm.covariances_[np.argmax(gmm.means_)])\r\n\r\n ref_space = np.linspace(low, high, n_steps)\r\n result = gmm.predict(ref_space.reshape(-1, 1))\r\n\r\n idx = np.where(np.ediff1d(result) != 0)\r\n cutoffs = ref_space[idx]\r\n\r\n return gmm, label, cutoffs\r\n\r\n\r\ndef _get_gmm_and_pos_label(array, n_components=2):\r\n gmm = sklearn.mixture.GaussianMixture(\r\n n_components=n_components, covariance_type='spherical', random_state=0\r\n )\r\n gmm.fit(array.reshape(-1, 1))\r\n label = np.argmax(gmm.means_)\r\n\r\n low = np.expm1(array.min())\r\n high = np.expm1(array.max())\r\n ref_space = np.arange(low, high)\r\n ref_space = np.log1p(ref_space)\r\n result = gmm.predict(ref_space.reshape(-1, 1))\r\n\r\n idx = np.where(np.ediff1d(result) != 0)\r\n _cutoffs = ref_space[idx]\r\n\r\n diff_mean = np.absolute(_cutoffs - np.mean(array))\r\n diff_high = np.absolute(_cutoffs - np.log1p(high))\r\n\r\n cutoffs = _cutoffs[diff_mean < diff_high]\r\n cutoff = np.expm1(cutoffs.max())\r\n\r\n # cutoff = cutoffs[np.argmin(diff_mean < diff_high)]\r\n # return gmm, label, cutoff\r\n return gmm, label, _cutoffs\r\n\r\n diff_mean = np.absolute(_cutoffs - np.mean(np.expm1(array)))\r\n diff_high = np.absolute(_cutoffs - high)\r\n diff_low = np.absolute(_cutoffs - low)\r\n\r\n between = (diff_mean < diff_high) & (diff_mean < diff_low)\r\n cutoffs = _cutoffs[between]\r\n\r\n cutoff = cutoffs[np.argmax(between)]\r\n return gmm, label, cutoff\r\n\r\n\r\ndef plot_gmm_fitting(array, gmm, ax):\r\n plt.sca(ax)\r\n _ = plt.hist(array.flatten(), color='lightgray', bins=200, density=True)\r\n x = np.linspace(array.min(), array.max(), 200)\r\n\r\n log_prob = gmm.score_samples(x.reshape(-1, 1))\r\n responsibilities = gmm.predict_proba(x.reshape(-1, 1))\r\n pdf = np.exp(log_prob)\r\n pdf_individual = responsibilities * pdf[:, np.newaxis]\r\n\r\n mean_index = np.argmax(pdf_individual, axis=0)\r\n rank_map = mean_index.argsort().argsort()\r\n\r\n ax.set_prop_cycle(\r\n color=plt.get_cmap('Dark2')(rank_map)\r\n )\r\n ax.plot(x, pdf_individual)\r\n ax.plot(x, pdf, '--k')\r\n return ax\r\n\r\n\r\ndef auto_gate_func(array, n_components=3, n_stds=3, log_transform=True):\r\n gmm = sklearn.mixture.GaussianMixture(\r\n n_components=n_components, covariance_type='spherical', random_state=0\r\n )\r\n if log_transform:\r\n gmm.fit(np.log1p(array).reshape(-1, 1))\r\n else:\r\n gmm.fit(array.reshape(-1, 1))\r\n means = gmm.means_\r\n stds = np.sqrt(gmm.covariances_)\r\n idx = np.argmax(means)\r\n lower_bound = means[idx] - n_stds * stds[idx]\r\n if log_transform:\r\n return np.expm1(lower_bound)\r\n else:\r\n return lower_bound\r\n\r\n\r\ndef plot_cumulative(array, ax, hist_kwargs={}):\r\n formatter = ticker.ScalarFormatter(useMathText=True)\r\n formatter.set_scientific(True) \r\n formatter.set_powerlimits((-1,1))\r\n ax.yaxis.set_major_formatter(formatter) \r\n _ = ax.hist(array, histtype='step', bins=300, cumulative=1, 
**hist_kwargs)\r\n \r\n return ax\r\n\r\n\r\ndef gmm_label_map_by_mean(gmm):\r\n return {\r\n o:n \r\n for o, n in zip(\r\n range(len(gmm.means_)),\r\n sorted(range(len(gmm.means_)), key=lambda x: gmm.means_[x][0])\r\n )\r\n }\r\n\r\n\r\ndef sort_predict_label(gmm, labels):\r\n mapping = gmm_label_map_by_mean(gmm)\r\n sorted_labels = labels.copy()\r\n for k, v in mapping.iteritems():\r\n sorted_labels[labels==k] = v\r\n return sorted_labels\r\n\r\n\r\ndef plot_hist_gmm(\r\n df,\r\n markers,\r\n n_components=2,\r\n subplot_grid_shape=None,\r\n transform_log=True,\r\n xlim_percentiles=(0, 100),\r\n cum_density=False,\r\n hide_yaxis_left=True\r\n): \r\n if transform_log:\r\n df = df.transform(np.log1p)\r\n revert_func = np.expm1\r\n else:\r\n revert_func = np.array\r\n if subplot_grid_shape is None:\r\n subplot_grid_shape = (1, len(markers))\r\n n_rows, n_cols = subplot_grid_shape\r\n fig, axes = plt.subplots(n_rows, n_cols, sharex=True)\r\n axes = np.array(axes)\r\n\r\n for m, ax in zip(markers, axes.ravel()):\r\n gmm, _, cutoffs = get_gmm_and_pos_label(\r\n df[m].values, n_components=n_components\r\n )\r\n plot_gmm_fitting(df[m].values, gmm, ax)\r\n ax.title.set_text(m)\r\n if hide_yaxis_left:\r\n ax.yaxis.set_visible(False)\r\n\r\n p1, p2 = np.array(xlim_percentiles) / 100\r\n axis_min = df.loc[:, markers].quantile(p1).min()\r\n axis_max = df.loc[:, markers].quantile(p2).max()\r\n\r\n color_cum = 'gray'\r\n\r\n pax = ax.twinx()\r\n pax = plot_cumulative(\r\n df[m].values, pax, \r\n hist_kwargs=dict(color=color_cum, density=cum_density)\r\n )\r\n pax.tick_params(axis='y', labelsize=8, colors=color_cum)\r\n\r\n print(cutoffs)\r\n\r\n cutoff_range = np.ptp(cutoffs)\r\n if cutoff_range == 0: cutoff_range = 1\r\n cutoff_colors = plt.get_cmap('plasma')(\r\n (cutoffs - np.min(cutoffs)) / cutoff_range\r\n )\r\n\r\n for co, cc in zip(cutoffs, cutoff_colors):\r\n ax.axvline(x=co, c=cc, alpha=0.2)\r\n ax.annotate(\r\n '',\r\n xy=(co, 0), xytext=(co, -0.05),\r\n xycoords=('data', 'axes fraction'),\r\n arrowprops=dict(arrowstyle='wedge, tail_width=0.7, shrink_factor=0.5', color=cc)\r\n )\r\n ax.set_xlim(axis_min, axis_max)\r\n # cutoff_string = np.round(revert_func(cutoffs)).astype(int)\r\n\r\n for i, (co, cc) in enumerate(\r\n zip(revert_func(cutoffs)[::-1], cutoff_colors[::-1])\r\n ):\r\n text = ax.text(\r\n ax.get_xlim()[0] + 0.02*np.diff(ax.get_xlim()), \r\n ax.get_ylim()[1] - 0.05*(i+1)*np.diff(ax.get_ylim()), \r\n f'{np.round(co).astype(int)}', \r\n fontsize=10, c=cc\r\n )\r\n text_outline = mpatheffects.Stroke(linewidth=1, foreground='#000')\r\n text.set_path_effects(\r\n [text_outline, mpatheffects.Normal()]\r\n )\r\n plt.tight_layout()\r\n for aax in fig.axes:\r\n aax.spines['right'].set_color(color_cum)\r\n power_label = aax.yaxis.get_offset_text()\r\n power_label.set_visible(False)\r\n aax.annotate(\r\n power_label.get_text(), xy=(1.02, 1.01),\r\n xycoords='axes fraction', fontsize=10,\r\n color=color_cum\r\n )\r\n plt.sca(ax)\r\n"
] | [
[
"matplotlib.patheffects.Normal",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.get_cmap",
"numpy.round",
"numpy.mean",
"numpy.argmin",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.argmax",
"matplotlib.ticker.ScalarFormatter",
"numpy.log1p",
"numpy.min",
"numpy.array",
"numpy.absolute",
"numpy.ediff1d",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.subplots",
"numpy.expm1",
"numpy.ptp",
"matplotlib.patheffects.Stroke"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Dtananaev/tf_lstm_depth | [
"94f83e8671e8928eba24eac6936a02cd9d123686"
] | [
"layers/conv.py"
] | [
"#\n# Author: Denis Tananaev\n# File: conv.py\n# Date: 9.02.2017\n# Description: convolution functions for neural networks\n#\n\n#include libs\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n#import os\nfrom six.moves import xrange\n#import os\n#import re\n#import sys\n#import tarfile\n#import math \nimport tensorflow as tf\nimport layers.summary as sm\n\ndef _variable_on_cpu(name, shape, initializer, FLOAT16=False):\n \"\"\"Helper to create a Variable stored on CPU memory.\n\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n\n Returns:\n Variable Tensor\n \"\"\"\n with tf.device('/cpu:0'):\n if(FLOAT16==True):\n dtype = tf.float16 \n else:\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var\n\n\ndef _variable_with_weight_decay(name, shape, stddev, wd,FLOAT16=False):\n \"\"\"Helper to create an initialized Variable with weight decay.\n \n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n\n Returns:\n Variable Tensor\n \"\"\"\n if(FLOAT16==True):\n dtype = tf.float16 \n else:\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\ndef _const_variable_with_weight_decay(name, shape, stddev, wd,FLOAT16=False):\n if(FLOAT16==True):\n dtype = tf.float16 \n else:\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.constant_initializer(1.0))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\ndef conv(data,scope,shape,stride=[1, 1, 1, 1],padding='SAME',wd=0.0,FLOAT16=False,reuse=None):\n with tf.variable_scope(scope, 'Conv', [data], reuse=reuse):\n STDdev=1/tf.sqrt(shape[0]*shape[1]*shape[2]/2) #Xavier/2 initialization \n kernel = _variable_with_weight_decay('weights',\n shape=shape,\n stddev=STDdev,\n wd=wd,FLOAT16=FLOAT16)\n conv = tf.nn.conv2d(data, kernel, stride, padding=padding)\n biases = _variable_on_cpu('biases', [shape[3]], tf.constant_initializer(0.0001))#positive biases\n pre_activation = tf.nn.bias_add(conv, biases)\n sm._activation_summary(pre_activation)\n return pre_activation \n\ndef dilated_conv(data,scope,shape,rate=1,padding='SAME',wd=0.0,FLOAT16=False,reuse=None):\n with tf.variable_scope(scope, 'Dilated_Conv', [data], reuse=reuse):\n STDdev=1/tf.sqrt(shape[0]*shape[1]*shape[2]/2) #Xavier/2 initialization \n kernel = _variable_with_weight_decay('weights',\n shape=shape,\n stddev=STDdev,\n wd=wd,FLOAT16=FLOAT16)\n conv = tf.nn.atrous_conv2d(data, kernel, rate, padding=padding)\n biases = _variable_on_cpu('biases', [shape[3]], tf.constant_initializer(0.0001))#positive biases\n pre_activation = tf.nn.bias_add(conv, biases)\n sm._activation_summary(pre_activation)\n return pre_activation \n\ndef fclayer(data,batch_size,hidden,scope,wd=0.0,FLOAT16=False,reuse=None):\n with tf.variable_scope(scope, 'fc',[data],reuse=reuse):\n # Move everything into depth so we can 
perform a single matrix multiply.\n reshape = tf.reshape(data, [batch_size,-1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, hidden],\n stddev=0.04, wd=wd,FLOAT16=FLOAT16)\n biases = _variable_on_cpu('biases', [hidden], tf.constant_initializer(0.00001))\n pre_activation = tf.matmul(reshape, weights) + biases\n sm._activation_summary(pre_activation)\n return pre_activation \n\n\n\n\n\n\n"
] | [
[
"tensorflow.nn.bias_add",
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.truncated_normal_initializer",
"tensorflow.constant_initializer",
"tensorflow.nn.l2_loss",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"tensorflow.add_to_collection",
"tensorflow.nn.atrous_conv2d",
"tensorflow.nn.conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ravi-0841/spect-pitch-gan | [
"ea4b9ea8396df753e25e0b2cb210288f683d3903",
"ea4b9ea8396df753e25e0b2cb210288f683d3903"
] | [
"utils/convert.py",
"modules/obsolete_modules/modules_spect_mmd.py"
] | [
"import argparse\nimport os\nimport numpy as np\nimport librosa\nimport scipy.io.wavfile as scwav\nimport scipy.signal as scisig\nimport pylab\nimport numpy.matlib as npmat\n\nimport utils.preprocess as preproc\nfrom utils.helper import smooth, generate_interpolation\n#from nn_models.model_embedding_wasserstein import VariationalCycleGAN as VCGAN_embedding\nfrom nn_models.model_pitch_mfc_discriminate_wasserstein import VariationalCycleGAN as VCGAN_embedding\nfrom nn_models.model_separate_discriminate_id import VariationalCycleGAN as VCGAN\nfrom encoder_decoder import AE\n#from model_pair_lvi import CycleGAN as CycleGAN_f0s\n\n\nnum_mfcc = 23\nnum_pitch = 1\nsampling_rate = 16000\nframe_period = 5.0\n\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n\ndef conversion(model_dir=None, model_name=None, audio_file=None, \n data_dir=None, conversion_direction=None, \n output_dir=None, embedding=True, only_energy=False):\n \n if embedding:\n ae_model = AE(dim_mfc=num_mfcc)\n ae_model.load(filename='./model/AE_cmu_pre_trained_noise_std_1.ckpt')\n model = VCGAN_embedding(dim_mfc=1, dim_pitch=1, mode='test')\n model.load(filepath=os.path.join(model_dir, model_name))\n else:\n model = VCGAN(dim_mfc=23, dim_pitch=1, mode='test')\n model.load(filepath=os.path.join(model_dir, model_name))\n \n if audio_file is not None:\n wav, sr = librosa.load(audio_file, sr=sampling_rate, mono=True)\n assert (sr==sampling_rate)\n wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \\\n frame_period=frame_period, multiple=4)\n f0, sp, ap = preproc.world_decompose(wav=wav, \\\n fs=sampling_rate, frame_period=frame_period)\n coded_sp = preproc.world_encode_spectral_envelope(sp=sp, \\\n fs=sampling_rate, dim=num_mfcc)\n \n coded_sp = np.expand_dims(coded_sp, axis=0)\n coded_sp = np.transpose(coded_sp, (0,2,1))\n \n if embedding:\n sp_embedding = ae_model.get_embedding(mfc_features=coded_sp)\n coded_sp = sp_embedding\n \n f0 = scisig.medfilt(f0, kernel_size=3)\n z_idx = np.where(f0<10.0)[0]\n f0 = generate_interpolation(f0)\n f0 = smooth(f0, window_len=13)\n f0 = np.reshape(f0, (1,1,-1))\n\n f0_converted, coded_sp_converted = model.test(input_pitch=f0, \n input_mfc=coded_sp, \n direction=conversion_direction)\n \n\n if embedding:\n coded_sp_converted = ae_model.get_mfcc(embeddings=coded_sp_converted)\n\n coded_sp_converted = np.asarray(np.transpose(coded_sp_converted[0]), np.float64)\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n f0_converted = np.asarray(np.reshape(f0_converted[0], (-1,)), np.float64)\n f0_converted = np.ascontiguousarray(f0_converted)\n f0_converted[z_idx] = 0\n \n decoded_sp_converted = preproc.world_decode_spectral_envelope(coded_sp=coded_sp_converted, \n fs=sampling_rate)\n # Normalization of converted features\n decoded_sp_converted = decoded_sp_converted / np.max(decoded_sp_converted)\n wav_transformed = preproc.world_speech_synthesis(f0=f0_converted, \n decoded_sp=decoded_sp_converted, \n ap=ap, fs=sampling_rate, \n frame_period=frame_period)\n scwav.write(os.path.join('/home/ravi/Desktop', \n os.path.basename(audio_file)), \n sampling_rate, wav_transformed)\n print('Processed: ' + audio_file)\n \n else:\n os.makedirs(output_dir, exist_ok=True)\n \n for file in os.listdir(data_dir):\n \n filepath = os.path.join(data_dir, file)\n \n wav, sr = librosa.load(filepath, sr=sampling_rate, mono=True)\n wav = (wav - np.min(wav)) / (np.max(wav) - np.min(wav))\n \n assert (sr==sampling_rate)\n wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \\\n frame_period=frame_period, 
multiple=4)\n f0, sp, ap = preproc.world_decompose(wav=wav, \\\n fs=sampling_rate, frame_period=frame_period)\n coded_sp = preproc.world_encode_spectral_envelope(sp=sp, \\\n fs=sampling_rate, dim=num_mfcc)\n \n coded_sp = np.expand_dims(coded_sp, axis=0)\n coded_sp = np.transpose(coded_sp, (0,2,1))\n \n if embedding:\n sp_embedding = ae_model.get_embedding(mfc_features=coded_sp)\n else:\n sp_embedding = coded_sp\n \n f0 = scisig.medfilt(f0, kernel_size=3)\n z_idx = np.where(f0<10.0)[0]\n f0 = generate_interpolation(f0)\n f0 = smooth(f0, window_len=13)\n f0 = np.reshape(f0, (1,1,-1))\n \n f0_converted, coded_sp_converted = model.test(input_pitch=f0, \n input_mfc=sp_embedding, \n direction=conversion_direction)\n \n# f0_converted = cgan_f0.test(input_pitch=f0, input_mfc=coded_sp, \n# direction='A2B')\n \n if embedding:\n coded_sp_converted = ae_model.get_mfcc(embeddings=coded_sp_converted)\n\n coded_sp_converted = np.asarray(np.transpose(np.squeeze(coded_sp_converted)), np.float64)\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n f0_converted = np.asarray(np.reshape(f0_converted[0], (-1,)), np.float64)\n f0_converted = np.ascontiguousarray(f0_converted)\n f0_converted[z_idx] = 0\n \n # Mixing the mfcc features\n print(np.min(coded_sp_converted), np.min(coded_sp))\n \n if embedding and not only_energy:\n coded_sp_converted = 0.6*coded_sp_converted + 0.4*np.transpose(np.squeeze(coded_sp))\n elif embedding and only_energy:\n energy_contour_converted = np.sum(coded_sp_converted**2, axis=1, keepdims=True)\n energy_contour = np.sum(np.squeeze(coded_sp).T**2, axis=1, keepdims=True)\n factor = (energy_contour_converted / energy_contour)**(0.5)\n coded_sp_converted = np.squeeze(coded_sp).T * (npmat.repmat(factor, 1,coded_sp.shape[1]))\n \n # Pyworld decoding\n decoded_sp_converted = preproc.world_decode_spectral_envelope(coded_sp=coded_sp_converted, \n fs=sampling_rate)\n \n # Normalization of converted features\n# decoded_sp_converted = decoded_sp_converted / np.max(decoded_sp_converted)\n wav_transformed = preproc.world_speech_synthesis(f0=f0_converted, \n decoded_sp=decoded_sp_converted, \n ap=ap, fs=sampling_rate, \n frame_period=frame_period)\n \n wav_transformed = (wav_transformed - np.min(wav_transformed)) \\\n / (np.max(wav_transformed) - np.min(wav_transformed))\n wav_transformed = wav_transformed - np.mean(wav_transformed)\n \n scwav.write(os.path.join(output_dir, os.path.basename(file)), \n 16000, wav_transformed)\n print('Processed: ' + file)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description = 'Convert Emotion using pre-trained VariationalCycleGAN model.')\n\n model_dir_default = './model/neu-ang/lp_1e-05_lm_0.1_lmo_1e-06_lrg_2e-06_lrd_1e-07_li_0.05_pre_trained_pitch_mfc_discriminate_wasserstein_all_spk'\n model_name_default = 'neu-ang_450.ckpt'\n data_dir_default = 'data/evaluation/neu-ang/test/neutral'\n conversion_direction_default = 'A2B'\n output_dir_default = '/home/ravi/Desktop/AE_wasserstein_energy'\n audio_file_default = None\n\n parser.add_argument('--model_dir', type = str, help='Directory for the pre-trained model.', default=model_dir_default)\n parser.add_argument('--model_name', type = str, help='Filename for the pre-trained model.', default=model_name_default)\n parser.add_argument('--data_dir', type=str, help='Directory for the voices for conversion.', default=data_dir_default)\n parser.add_argument('--conversion_direction', type=str, help='Conversion direction for VCGAN, A2B or B2A', default=conversion_direction_default)\n 
parser.add_argument('--output_dir', type=str, help='Directory for the converted voices.', default=output_dir_default)\n parser.add_argument('--audio_file', type=str, help='convert a single audio file', default=audio_file_default)\n\n argv = parser.parse_args()\n\n model_dir = argv.model_dir\n model_name = argv.model_name\n data_dir = argv.data_dir\n conversion_direction = argv.conversion_direction\n output_dir = argv.output_dir\n audio_file = argv.audio_file\n \n conversion(model_dir=model_dir, model_name=model_name, audio_file=audio_file, \n data_dir=data_dir, conversion_direction=conversion_direction, \n output_dir=output_dir, embedding=True, only_energy=True)\n\n\n",
"import tensorflow as tf \nfrom modules.base_modules_default_init import *\n\n\ndef sampler(input_pitch, input_mfc, final_filters=1, reuse=False, \\\n scope_name='sampler'):\n\n # Inputs have shape [batch_size, num_features, time]\n inputs = tf.concat([input_mfc, input_pitch], axis=1, \\\n name='sampler_input')\n \n # Cnvert it to [batch_size, time, num_features] for 1D convolution\n inputs_tranposed = tf.transpose(inputs, perm = [0, 2, 1], \\\n name='sampler_input_transpose')\n\n with tf.variable_scope(scope_name) as scope:\n # Discriminator would be reused in CycleGAN\n if reuse:\n scope.reuse_variables()\n else:\n assert scope.reuse is False\n\n h1 = conv1d_layer(inputs=inputs_tranposed, filters=64, \\\n kernel_size=15, strides=1, \\\n activation=None, name='h1_conv')\n h1_gates = conv1d_layer(inputs=inputs_tranposed, filters=64, \\\n kernel_size=15, strides=1, \\\n activation=None, name='h1_conv_gates')\n h1_glu = gated_linear_layer(inputs=h1, \\\n gates=h1_gates, name='h1_glu')\n \n # Downsample\n d1 = downsample1d_block(inputs=h1_glu, filters=128, \\\n kernel_size=5, strides=2, \\\n name_prefix='downsample1d_block1_')\n d2 = downsample1d_block(inputs=d1, filters=256, \\\n kernel_size=5, strides=2, \\\n name_prefix='downsample1d_block2_')\n\n # Residual blocks\n r1 = residual1d_block(inputs=d2, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block1_')\n r2 = residual1d_block(inputs=r1, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block2_')\n# r3 = residual1d_block(inputs=r2, filters=512, \\\n# kernel_size=3, strides=1, \\\n# name_prefix='residual1d_block3_')\n\n # Upsample\n u1 = upsample1d_block(inputs=r2, filters=512, \\\n kernel_size=5, strides=1, \\\n shuffle_size=2, name_prefix='upsample1d_block1_')\n u2 = upsample1d_block(inputs=u1, filters=256, \\\n kernel_size=5, strides=1, \\\n shuffle_size=2, name_prefix='upsample1d_block2_')\n \n # Dropout for stochasticity\n u2 = tf.nn.dropout(u2, keep_prob=0.5)\n\n # Output\n o1 = conv1d_layer(inputs=u2, filters=final_filters, \\\n kernel_size=15, strides=1, \\\n activation=None, name='o1_conv')\n\n o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')\n return o2\n\n\ndef generator(input_pitch, input_mfc, final_filters=23, reuse=False, \\\n scope_name='generator'):\n\n # Inputs have shape [batch_size, num_features, time]\n inputs = tf.concat([input_mfc, input_pitch], axis=1, \\\n name='generator_input')\n \n # Cnvert it to [batch_size, time, num_features] for 1D convolution\n inputs_tranposed = tf.transpose(inputs, perm = [0, 2, 1], \\\n name='generator_input_transpose')\n\n with tf.variable_scope(scope_name) as scope:\n # Discriminator would be reused in CycleGAN\n if reuse:\n scope.reuse_variables()\n else:\n assert scope.reuse is False\n\n h1 = conv1d_layer(inputs=inputs_tranposed, filters=64, \\\n kernel_size=15, strides=1, \\\n activation=None, name='h1_conv')\n h1_gates = conv1d_layer(inputs=inputs_tranposed, filters=64, \\\n kernel_size=15, strides=1, \\\n activation=None, name='h1_conv_gates')\n h1_glu = gated_linear_layer(inputs=h1, \\\n gates=h1_gates, name='h1_glu')\n \n # Downsample\n d1 = downsample1d_block(inputs=h1_glu, filters=128, \\\n kernel_size=5, strides=2, \\\n name_prefix='downsample1d_block1_')\n d2 = downsample1d_block(inputs=d1, filters=256, \\\n kernel_size=5, strides=2, \\\n name_prefix='downsample1d_block2_')\n\n # Residual blocks\n r1 = residual1d_block(inputs=d2, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block1_')\n 
r2 = residual1d_block(inputs=r1, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block2_')\n r3 = residual1d_block(inputs=r2, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block3_')\n\n # Upsample\n u1 = upsample1d_block(inputs=r3, filters=512, \\\n kernel_size=5, strides=1, \\\n shuffle_size=2, name_prefix='upsample1d_block1_')\n u2 = upsample1d_block(inputs=u1, filters=256, \\\n kernel_size=5, strides=1, \\\n shuffle_size=2, name_prefix='upsample1d_block2_')\n \n # Dropout for stochasticity\n u2 = tf.nn.dropout(u2, keep_prob=0.5)\n\n # Output\n o1 = conv1d_layer(inputs=u2, filters=final_filters, \\\n kernel_size=15, strides=1, \\\n activation=None, name='o1_conv')\n\n o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')\n return o2\n \n\ndef joint_discriminator(input_mfc, input_pitch, \n reuse=False, scope_name='joint_discriminator'):\n\n # input_mfc and input_pitch has shape [batch_size, num_features, time]\n input_mfc = tf.transpose(input_mfc, perm=[0,2,1], \n name='joint_discriminator_mfc_transpose')\n input_pitch = tf.transpose(input_pitch, perm=[0,2,1], \n name='joint_discriminator_pitch_transpose')\n\n with tf.variable_scope(scope_name) as scope:\n # Discriminator would be reused in CycleGAN\n if reuse:\n scope.reuse_variables()\n else:\n assert scope.reuse is False\n\n h1_mfc = conv1d_layer(inputs=input_mfc, filters=64, \n kernel_size=3, strides=1, \n activation=None, name='h1_mfc_conv')\n h1_mfc_gates = conv1d_layer(inputs=input_mfc, filters=64, \n kernel_size=3, strides=1, \n activation=None, name='h1_mfc_conv_gates')\n h1_mfc_glu = gated_linear_layer(inputs=h1_mfc, \n gates=h1_mfc_gates, name='h1_mfc_glu')\n\n h1_pitch = conv1d_layer(inputs=input_pitch, filters=64, \n kernel_size=3, strides=1, \n activation=None, name='h1_pitch_conv')\n h1_pitch_gates = conv1d_layer(inputs=input_pitch, filters=64, \n kernel_size=3, strides=1, \n activation=None, name='h1_pitch_conv_gates')\n h1_pitch_glu = gated_linear_layer(inputs=h1_pitch, \n gates=h1_pitch_gates, name='h1_pitch_glu')\n\n\n h1_glu = tf.concat([h1_mfc_glu, h1_pitch_glu], axis=-1, \n name='concat_inputs')\n \n d1 = downsample1d_block(inputs=h1_glu, filters=128, \n kernel_size=3, strides=2, \n name_prefix='downsample2d_block1_')\n d2 = downsample1d_block(inputs=d1, filters=256, \n kernel_size=3, strides=2, \n name_prefix='downsample2d_block2_')\n d3 = downsample1d_block(inputs=d2, filters=256, \n kernel_size=3, strides=2, \n name_prefix='downsample2d_block3_')\n\n # Output\n o1 = tf.layers.dense(inputs=d3, units=1, \\\n activation=tf.nn.sigmoid)\n\n return o1\n\n\ndef spect_kernel(input_mfc, reuse=False, \n scope_name='spect_kernel'):\n\n # input_mfc and input_pitch has shape [batch_size, num_features, time]\n input_mfc = tf.transpose(input_mfc, perm=[0,2,1], \n name='spect_kernel_mfc_transpose')\n\n with tf.variable_scope(scope_name) as scope:\n # Discriminator would be reused in CycleGAN\n if reuse:\n scope.reuse_variables()\n else:\n assert scope.reuse is False\n\n h1 = conv1d_layer(inputs=input_mfc, filters=64, \n kernel_size=3, strides=1, \n activation=None, name='h1_conv')\n h1_gates = conv1d_layer(inputs=input_mfc, filters=64, \n kernel_size=3, strides=1, \n activation=None, name='h1_conv_gates')\n h1_glu = gated_linear_layer(inputs=h1, \n gates=h1_gates, name='h1_glu')\n\n # Downsample\n d1 = downsample1d_block(inputs=h1_glu, filters=128, \\\n kernel_size=5, strides=2, \\\n name_prefix='downsample1d_block1_')\n d2 = downsample1d_block(inputs=d1, filters=256, 
\\\n kernel_size=5, strides=2, \\\n name_prefix='downsample1d_block2_')\n\n # Residual blocks\n r1 = residual1d_block(inputs=d2, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block1_')\n r2 = residual1d_block(inputs=r1, filters=512, \\\n kernel_size=3, strides=1, \\\n name_prefix='residual1d_block2_')\n\n # Upsample\n u1 = upsample1d_block(inputs=r2, filters=512, \\\n kernel_size=5, strides=1, \\\n shuffle_size=2, name_prefix='upsample1d_block1_')\n u2 = upsample1d_block(inputs=u1, filters=256, \\\n kernel_size=5, strides=1, \\\n shuffle_size=2, name_prefix='upsample1d_block2_')\n \n # Output\n o1 = conv1d_layer(inputs=u2, filters=1, \\\n kernel_size=15, strides=1, \\\n activation=None, name='o1_conv')\n\n o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')\n\n return o2\n"
] | [
[
"numpy.expand_dims",
"numpy.min",
"scipy.signal.medfilt",
"numpy.ascontiguousarray",
"numpy.reshape",
"numpy.squeeze",
"numpy.max",
"numpy.mean",
"numpy.transpose",
"numpy.matlib.repmat",
"numpy.where",
"numpy.sum"
],
[
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.layers.dense",
"tensorflow.variable_scope",
"tensorflow.nn.dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
fabibo3/pytorch3d | [
"36b7656753ae759aed2eb7ffb432b6eca4d42fe2",
"36b7656753ae759aed2eb7ffb432b6eca4d42fe2"
] | [
"pytorch3d/io/experimental_gltf_io.py",
"tests/test_chamfer_curvature.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"\nThis module implements loading meshes from glTF 2 assets stored in a\nGLB container file or a glTF JSON file with embedded binary data.\nIt is experimental.\n\nThe module provides a MeshFormatInterpreter called\nMeshGlbFormat which must be used explicitly.\ne.g.\n\n.. code-block:: python\n\n from pytorch3d.io import IO\n from pytorch3d.io.experimental_gltf_io import MeshGlbFormat\n\n io = IO()\n io.register_meshes_format(MeshGlbFormat())\n io.load_mesh(...)\n\nThis implementation is quite restricted in what it supports.\n\n - It does not try to validate the input against the standard.\n - It loads the default scene only.\n - Only triangulated geometry is supported.\n - The geometry of all meshes of the entire scene is aggregated into a single mesh.\n Use `load_meshes()` instead to get un-aggregated (but transformed) ones.\n - All material properties are ignored except for either vertex color, baseColorTexture\n or baseColorFactor. If available, one of these (in this order) is exclusively\n used which does not match the semantics of the standard.\n\"\"\"\n\nimport json\nimport struct\nimport warnings\nfrom base64 import b64decode\nfrom collections import deque\nfrom enum import IntEnum\nfrom io import BytesIO\nfrom typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union, cast\n\nimport numpy as np\nimport torch\nfrom iopath.common.file_io import PathManager\nfrom PIL import Image\nfrom pytorch3d.io.utils import PathOrStr, _open_file\nfrom pytorch3d.renderer.mesh import TexturesBase, TexturesUV, TexturesVertex\nfrom pytorch3d.structures import Meshes, join_meshes_as_scene\nfrom pytorch3d.transforms import Transform3d, quaternion_to_matrix\n\nfrom .pluggable_formats import MeshFormatInterpreter, endswith\n\n\n_GLTF_MAGIC = 0x46546C67\n_JSON_CHUNK_TYPE = 0x4E4F534A\n_BINARY_CHUNK_TYPE = 0x004E4942\n_DATA_URI_PREFIX = \"data:application/octet-stream;base64,\"\n\n\nclass _PrimitiveMode(IntEnum):\n POINTS = 0\n LINES = 1\n LINE_LOOP = 2\n LINE_STRIP = 3\n TRIANGLES = 4\n TRIANGLE_STRIP = 5\n TRIANGLE_FAN = 6\n\n\nclass _ComponentType(IntEnum):\n BYTE = 5120\n UNSIGNED_BYTE = 5121\n SHORT = 5122\n UNSIGNED_SHORT = 5123\n UNSIGNED_INT = 5125\n FLOAT = 5126\n\n\n_ITEM_TYPES: Dict[int, Any] = {\n 5120: np.int8,\n 5121: np.uint8,\n 5122: np.int16,\n 5123: np.uint16,\n 5125: np.uint32,\n 5126: np.float32,\n}\n\n\n_ElementShape = Union[Tuple[int], Tuple[int, int]]\n_ELEMENT_SHAPES: Dict[str, _ElementShape] = {\n \"SCALAR\": (1,),\n \"VEC2\": (2,),\n \"VEC3\": (3,),\n \"VEC4\": (4,),\n \"MAT2\": (2, 2),\n \"MAT3\": (3, 3),\n \"MAT4\": (4, 4),\n}\n\n\ndef _read_header(stream: BinaryIO) -> Optional[Tuple[int, int]]:\n header = stream.read(12)\n magic, version, length = struct.unpack(\"<III\", header)\n\n if magic != _GLTF_MAGIC:\n return None\n\n return version, length\n\n\ndef _read_chunks(\n stream: BinaryIO, length: int\n) -> Optional[Tuple[Dict[str, Any], np.ndarray]]:\n \"\"\"\n Get the json header and the binary data from a\n GLB file.\n \"\"\"\n json_data = None\n binary_data = None\n\n while stream.tell() < length:\n chunk_header = stream.read(8)\n chunk_length, chunk_type = struct.unpack(\"<II\", chunk_header)\n chunk_data = stream.read(chunk_length)\n if chunk_type == _JSON_CHUNK_TYPE:\n json_data = json.loads(chunk_data)\n elif chunk_type == _BINARY_CHUNK_TYPE:\n 
binary_data = chunk_data\n else:\n warnings.warn(\"Unsupported chunk type\")\n return None\n\n if json_data is None:\n raise ValueError(\"Missing json header\")\n\n if binary_data is not None:\n binary_data = np.frombuffer(binary_data, dtype=np.uint8)\n\n return json_data, binary_data\n\n\ndef _make_node_transform(node: Dict[str, Any]) -> Transform3d:\n \"\"\"\n Convert a transform from the json data in to a PyTorch3D\n Transform3d format.\n \"\"\"\n array = node.get(\"matrix\")\n if array is not None: # Stored in column-major order\n M = np.array(array, dtype=np.float32).reshape(4, 4, order=\"F\")\n return Transform3d(matrix=torch.from_numpy(M))\n\n out = Transform3d()\n\n # Given some of (scale/rotation/translation), we do them in that order to\n # get points in to the world space.\n # See https://github.com/KhronosGroup/glTF/issues/743 .\n\n array = node.get(\"scale\", None)\n if array is not None:\n scale_vector = torch.FloatTensor(array)\n out = out.scale(scale_vector[None])\n\n # Rotation quaternion (x, y, z, w) where w is the scalar\n array = node.get(\"rotation\", None)\n if array is not None:\n x, y, z, w = array\n # We negate w. This is equivalent to inverting the rotation.\n # This is needed as quaternion_to_matrix makes a matrix which\n # operates on column vectors, whereas Transform3d wants a\n # matrix which operates on row vectors.\n rotation_quaternion = torch.FloatTensor([-w, x, y, z])\n rotation_matrix = quaternion_to_matrix(rotation_quaternion)\n out = out.rotate(R=rotation_matrix)\n\n array = node.get(\"translation\", None)\n if array is not None:\n translation_vector = torch.FloatTensor(array)\n out = out.translate(x=translation_vector[None])\n\n return out\n\n\nclass _GLTFLoader:\n def __init__(self, stream: BinaryIO) -> None:\n self._json_data = None\n # Map from buffer index to (decoded) binary data\n self._binary_data = {}\n\n version_and_length = _read_header(stream)\n if version_and_length is None: # GLTF\n stream.seek(0)\n json_data = json.load(stream)\n else: # GLB\n version, length = version_and_length\n if version != 2:\n warnings.warn(\"Unsupported version\")\n return\n json_and_binary_data = _read_chunks(stream, length)\n if json_and_binary_data is None:\n raise ValueError(\"Data not found\")\n json_data, binary_data = json_and_binary_data\n self._binary_data[0] = binary_data\n\n self._json_data = json_data\n self._accessors = json_data.get(\"accessors\", [])\n self._buffer_views = json_data.get(\"bufferViews\", [])\n self._buffers = json_data.get(\"buffers\", [])\n self._texture_map_images = {}\n\n def _access_image(self, image_index: int) -> np.ndarray:\n \"\"\"\n Get the data for an image from the file. 
This is only called\n by _get_texture_map_image which caches it.\n \"\"\"\n\n image_json = self._json_data[\"images\"][image_index]\n buffer_view = self._buffer_views[image_json[\"bufferView\"]]\n if \"byteStride\" in buffer_view:\n raise NotImplementedError(\"strided buffer views\")\n\n length = buffer_view[\"byteLength\"]\n offset = buffer_view.get(\"byteOffset\", 0)\n\n binary_data = self.get_binary_data(buffer_view[\"buffer\"])\n\n bytesio = BytesIO(binary_data[offset : offset + length].tobytes())\n # pyre-fixme[16]: `Image.Image` has no attribute `__enter__`.\n with Image.open(bytesio) as f:\n array = np.array(f)\n if array.dtype == np.uint8:\n return array.astype(np.float32) / 255.0\n else:\n return array\n\n def _get_texture_map_image(self, image_index: int) -> torch.Tensor:\n \"\"\"\n Return a texture map image as a torch tensor.\n Calling this function repeatedly with the same arguments returns\n the very same tensor, this allows a memory optimization to happen\n later in TexturesUV.join_scene.\n Any alpha channel is ignored.\n \"\"\"\n im = self._texture_map_images.get(image_index)\n if im is not None:\n return im\n\n im = torch.from_numpy(self._access_image(image_index))[:, :, :3]\n self._texture_map_images[image_index] = im\n return im\n\n def _access_data(self, accessor_index: int) -> np.ndarray:\n \"\"\"\n Get the raw data from an accessor as a numpy array.\n \"\"\"\n accessor = self._accessors[accessor_index]\n\n buffer_view_index = accessor.get(\"bufferView\")\n # Undefined buffer view (all zeros) are not (yet) supported\n if buffer_view_index is None:\n raise NotImplementedError(\"Undefined buffer view\")\n\n accessor_byte_offset = accessor.get(\"byteOffset\", 0)\n component_type = accessor[\"componentType\"]\n element_count = accessor[\"count\"]\n element_type = accessor[\"type\"]\n\n # Sparse accessors are not (yet) supported\n if accessor.get(\"sparse\") is not None:\n raise NotImplementedError(\"Sparse Accessors\")\n\n buffer_view = self._buffer_views[buffer_view_index]\n buffer_index = buffer_view[\"buffer\"]\n buffer_byte_length = buffer_view[\"byteLength\"]\n element_byte_offset = buffer_view.get(\"byteOffset\", 0)\n element_byte_stride = buffer_view.get(\"byteStride\", 0)\n if element_byte_stride != 0 and element_byte_stride < 4:\n raise ValueError(\"Stride is too small.\")\n if element_byte_stride > 252:\n raise ValueError(\"Stride is too big.\")\n\n element_shape = _ELEMENT_SHAPES[element_type]\n item_type = _ITEM_TYPES[component_type]\n item_dtype = np.dtype(item_type)\n item_count = np.prod(element_shape)\n item_size = item_dtype.itemsize\n size = element_count * item_count * item_size\n if size > buffer_byte_length:\n raise ValueError(\"Buffer did not have enough data for the accessor\")\n\n buffer_ = self._buffers[buffer_index]\n binary_data = self.get_binary_data(buffer_index)\n if len(binary_data) < buffer_[\"byteLength\"]:\n raise ValueError(\"Not enough binary data for the buffer\")\n\n if element_byte_stride == 0:\n element_byte_stride = item_size * item_count\n # The same buffer can store interleaved elements\n if element_byte_stride < item_size * item_count:\n raise ValueError(\"Items should not overlap\")\n\n dtype = np.dtype(\n {\n \"names\": [\"element\"],\n \"formats\": [str(element_shape) + item_dtype.str],\n \"offsets\": [0],\n \"itemsize\": element_byte_stride,\n }\n )\n\n byte_offset = accessor_byte_offset + element_byte_offset\n if byte_offset % item_size != 0:\n raise ValueError(\"Misaligned data\")\n byte_length = element_count * 
element_byte_stride\n buffer_view = binary_data[byte_offset : byte_offset + byte_length].view(dtype)[\n \"element\"\n ]\n\n # Convert matrix data from column-major (OpenGL) to row-major order\n if element_type in (\"MAT2\", \"MAT3\", \"MAT4\"):\n buffer_view = np.transpose(buffer_view, (0, 2, 1))\n\n return buffer_view\n\n def _get_primitive_attribute(\n self, primitive_attributes: Dict[str, Any], key: str, dtype\n ) -> Optional[np.ndarray]:\n accessor_index = primitive_attributes.get(key)\n if accessor_index is None:\n return None\n primitive_attribute = self._access_data(accessor_index)\n if key == \"JOINTS_0\":\n pass\n elif dtype == np.uint8:\n primitive_attribute /= 255.0\n elif dtype == np.uint16:\n primitive_attribute /= 65535.0\n else:\n if dtype != np.float32:\n raise ValueError(\"Unexpected data type\")\n primitive_attribute = primitive_attribute.astype(dtype)\n return primitive_attribute\n\n def get_binary_data(self, buffer_index: int):\n \"\"\"\n Get the binary data from a buffer as a 1D numpy array of bytes.\n This is implemented for explicit uri data buffers or the main GLB data\n segment.\n \"\"\"\n buffer_ = self._buffers[buffer_index]\n binary_data = self._binary_data.get(buffer_index)\n if binary_data is None: # Lazily decode binary data\n uri = buffer_.get(\"uri\")\n if not uri.startswith(_DATA_URI_PREFIX):\n raise NotImplementedError(\"Unexpected URI type\")\n binary_data = b64decode(uri[len(_DATA_URI_PREFIX) :])\n binary_data = np.frombuffer(binary_data, dtype=np.uint8)\n self._binary_data[buffer_index] = binary_data\n return binary_data\n\n def get_texture_for_mesh(\n self, primitive: Dict[str, Any], indices: torch.Tensor\n ) -> Optional[TexturesBase]:\n \"\"\"\n Get the texture object representing the given mesh primitive.\n\n Args:\n primitive: the mesh primitive being loaded.\n indices: the face indices of the mesh\n \"\"\"\n attributes = primitive[\"attributes\"]\n vertex_colors = self._get_primitive_attribute(attributes, \"COLOR_0\", np.float32)\n if vertex_colors is not None:\n return TexturesVertex(torch.from_numpy(vertex_colors))\n\n vertex_texcoords_0 = self._get_primitive_attribute(\n attributes, \"TEXCOORD_0\", np.float32\n )\n if vertex_texcoords_0 is not None:\n verts_uvs = torch.from_numpy(vertex_texcoords_0)\n verts_uvs[:, 1] = 1 - verts_uvs[:, -1]\n faces_uvs = indices\n material_index = primitive.get(\"material\", 0)\n material = self._json_data[\"materials\"][material_index]\n material_roughness = material[\"pbrMetallicRoughness\"]\n if \"baseColorTexture\" in material_roughness:\n texture_index = material_roughness[\"baseColorTexture\"][\"index\"]\n texture_json = self._json_data[\"textures\"][texture_index]\n # Todo - include baseColorFactor when also given\n # Todo - look at the sampler\n image_index = texture_json[\"source\"]\n map = self._get_texture_map_image(image_index)\n elif \"baseColorFactor\" in material_roughness:\n # Constant color?\n map = torch.FloatTensor(material_roughness[\"baseColorFactor\"])[\n None, None, :3\n ]\n texture = TexturesUV(\n # pyre-fixme[61]: `map` may not be initialized here.\n maps=[map], # alpha channel ignored\n faces_uvs=[faces_uvs],\n verts_uvs=[verts_uvs],\n )\n return texture\n\n return None\n\n def load(self, include_textures: bool) -> List[Tuple[Optional[str], Meshes]]:\n \"\"\"\n Attempt to load all the meshes making up the default scene from\n the file as a list of possibly-named Meshes objects.\n\n Args:\n include_textures: Whether to try loading textures.\n\n Returns:\n Meshes object containing one 
mesh.\n \"\"\"\n if self._json_data is None:\n raise ValueError(\"Initialization problem\")\n\n # This loads the default scene from the file.\n # This is usually the only one.\n # It is possible to have multiple scenes, in which case\n # you could choose another here instead of taking the default.\n scene_index = self._json_data.get(\"scene\")\n\n if scene_index is None:\n raise ValueError(\"Default scene is not specified.\")\n\n scene = self._json_data[\"scenes\"][scene_index]\n nodes = self._json_data.get(\"nodes\", [])\n meshes = self._json_data.get(\"meshes\", [])\n root_node_indices = scene[\"nodes\"]\n\n mesh_transform = Transform3d()\n names_meshes_list: List[Tuple[Optional[str], Meshes]] = []\n\n # Keep track and apply the transform of the scene node to mesh vertices\n Q = deque([(Transform3d(), node_index) for node_index in root_node_indices])\n\n while Q:\n parent_transform, current_node_index = Q.popleft()\n\n current_node = nodes[current_node_index]\n\n transform = _make_node_transform(current_node)\n current_transform = transform.compose(parent_transform)\n\n if \"mesh\" in current_node:\n mesh_index = current_node[\"mesh\"]\n mesh = meshes[mesh_index]\n mesh_name = mesh.get(\"name\", None)\n mesh_transform = current_transform\n\n for primitive in mesh[\"primitives\"]:\n attributes = primitive[\"attributes\"]\n accessor_index = attributes[\"POSITION\"]\n positions = torch.from_numpy(\n self._access_data(accessor_index).copy()\n )\n positions = mesh_transform.transform_points(positions)\n\n mode = primitive.get(\"mode\", _PrimitiveMode.TRIANGLES)\n if mode != _PrimitiveMode.TRIANGLES:\n raise NotImplementedError(\"Non triangular meshes\")\n\n if \"indices\" in primitive:\n accessor_index = primitive[\"indices\"]\n indices = self._access_data(accessor_index).astype(np.int64)\n else:\n indices = np.arange(0, len(positions), dtype=np.int64)\n indices = torch.from_numpy(indices.reshape(-1, 3))\n\n texture = None\n if include_textures:\n texture = self.get_texture_for_mesh(primitive, indices)\n\n mesh_obj = Meshes(\n verts=[positions], faces=[indices], textures=texture\n )\n names_meshes_list.append((mesh_name, mesh_obj))\n\n if \"children\" in current_node:\n children_node_indices = current_node[\"children\"]\n Q.extend(\n [\n (current_transform, node_index)\n for node_index in children_node_indices\n ]\n )\n\n return names_meshes_list\n\n\ndef load_meshes(\n path: PathOrStr,\n path_manager: PathManager,\n include_textures: bool = True,\n) -> List[Tuple[Optional[str], Meshes]]:\n \"\"\"\n Loads all the meshes from the default scene in the given GLB file.\n and returns them separately.\n\n Args:\n path: path to read from\n path_manager: PathManager object for interpreting the path\n include_textures: whether to load textures\n\n Returns:\n List of (name, mesh) pairs, where the name is the optional name property\n from the GLB file, or None if it is absent, and the mesh is a Meshes\n object containing one mesh.\n \"\"\"\n with _open_file(path, path_manager, \"rb\") as f:\n loader = _GLTFLoader(cast(BinaryIO, f))\n names_meshes_list = loader.load(include_textures=include_textures)\n return names_meshes_list\n\n\nclass MeshGlbFormat(MeshFormatInterpreter):\n \"\"\"\n Implements loading meshes from glTF 2 assets stored in a\n GLB container file or a glTF JSON file with embedded binary data.\n\n This implementation is quite restricted in what it supports.\n\n - It does not try to validate the input against the standard.\n - It loads the default scene only.\n - Only triangulated geometry 
is supported.\n - The geometry of all meshes of the entire scene is aggregated into a single mesh.\n Use `load_meshes()` instead to get un-aggregated (but transformed) ones.\n - All material properties are ignored except for either vertex color, baseColorTexture\n or baseColorFactor. If available, one of these (in this order) is exclusively\n used which does not match the semantics of the standard.\n \"\"\"\n\n def __init__(self) -> None:\n self.known_suffixes = (\".glb\",)\n\n def read(\n self,\n path: PathOrStr,\n include_textures: bool,\n device,\n path_manager: PathManager,\n **kwargs,\n ) -> Optional[Meshes]:\n if not endswith(path, self.known_suffixes):\n return None\n\n names_meshes_list = load_meshes(\n path=path,\n path_manager=path_manager,\n include_textures=include_textures,\n )\n\n meshes_list = [mesh for name, mesh in names_meshes_list]\n mesh = join_meshes_as_scene(meshes_list)\n return mesh.to(device)\n\n def save(\n self,\n data: Meshes,\n path: PathOrStr,\n path_manager: PathManager,\n binary: Optional[bool],\n **kwargs,\n ) -> bool:\n return False\n",
"\n\"\"\" Test the implementation of Chamfer loss + curvature \"\"\"\n\n__author__ = \"Fabi Bongratz\"\n__email__ = \"[email protected]\"\n\nimport unittest\n\nimport torch\n\nfrom pytorch3d.structures import Meshes\nfrom pytorch3d.utils import ico_sphere\nfrom pytorch3d.loss.chamfer import chamfer_distance\nfrom pytorch3d.ops import cot_laplacian\n\nclass TestChamferCurvature(unittest.TestCase):\n def setUp(self):\n self.mesh1 = ico_sphere(level=1)\n self.mesh2 = Meshes(self.mesh1.verts_padded() + 0.01,\n self.mesh1.faces_padded())\n\n def test_curvature_comparison(self):\n curv= []\n\n for m in (self.mesh1, self.mesh2):\n verts_packed, faces_packed = m.verts_packed(), m.faces_packed()\n L, inv_areas = cot_laplacian(verts_packed, faces_packed)\n L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1,1)\n norm_w = 0.25 * inv_areas\n\n curv.append(torch.norm(\n (L.mm(verts_packed) - L_sum * verts_packed) * norm_w,\n dim=1\n ))\n\n d, _, d_curv = chamfer_distance(self.mesh1.verts_padded(),\n self.mesh2.verts_padded(),\n x_curvatures=curv[0].unsqueeze(0).unsqueeze(-1),\n y_curvatures=curv[1].unsqueeze(0).unsqueeze(-1))\n\n print(\"Chamfer: \", str(d))\n print(\"Difference in curvature: \", str(d_curv))\n\n self.assertTrue(torch.allclose(d_curv, torch.tensor(0.0), atol=1e-6))\n\n def test_curvature_weights(self):\n\n verts_packed, faces_packed = self.mesh2.verts_packed(), self.mesh2.faces_packed()\n L, inv_areas = cot_laplacian(verts_packed, faces_packed)\n L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1,1)\n norm_w = 0.25 * inv_areas\n\n curv = torch.norm(\n (L.mm(verts_packed) - L_sum* verts_packed) * norm_w,\n dim=1\n )\n\n d, _, _ = chamfer_distance(self.mesh1.verts_padded(), self.mesh2.verts_padded(),\n point_weights = 1 + curv)\n\n print(\"Chamfer: \", str(d))\n"
] | [
[
"torch.from_numpy",
"numpy.dtype",
"numpy.frombuffer",
"torch.FloatTensor",
"numpy.prod",
"numpy.transpose",
"numpy.array"
],
[
"torch.sparse.sum",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dmnlk/namedivider-python | [
"d87a488d4696bc26d2f6444ed399d83a6a1911a7"
] | [
"namedivider/name_divider.py"
] | [
"import numpy as np\nimport pandas as pd\nimport regex\nfrom namedivider.divided_name import DividedName\nfrom namedivider.kanji_statistics import KanjiStatistics\nfrom pathlib import Path\nfrom typing import Optional\nCURRENT_DIR = Path(__file__).resolve().parent\n\n\nclass NameDivider:\n def __init__(self, path_csv: str = f\"{CURRENT_DIR}/assets/kanji.csv\", separator: str = \" \"):\n \"\"\"\n Class for dividing an undivided name\n (\"undivided name\" means names with no space between the family name and given name.)\n :param path_csv: Path of the file containing the kanji information\n :param separator: Characters to separate first and last names\n \"\"\"\n kanji_records = pd.read_csv(path_csv).to_numpy()\n kanjis = kanji_records[:, 0]\n orders = kanji_records[:, 1:7]\n lengths = kanji_records[:, 7:]\n\n self.kanji_dict = {}\n for _kanji, _order, _length in zip(kanjis, orders, lengths):\n self.kanji_dict[_kanji] = KanjiStatistics(kanji=_kanji, order_counts=_order, length_counts=_length)\n\n self.default_kanji = KanjiStatistics.default()\n self.separator = separator\n self.compiled_regex_kanji = regex.compile(r'\\p{Script=Han}+')\n\n def _create_divided_name(self, family: str, given: str, score: float = 1., algorithm: str = \"\") -> DividedName:\n \"\"\"\n Generates DividedName.\n :param family: Family name\n :param given: Given name\n :param score: Confidence level, from 0 to 1\n :param algorithm: The name of dividing algorithm\n :return: Divided name\n :rtype: DividedName\n \"\"\"\n return DividedName(family, given, separator=self.separator, score=score, algorithm=algorithm)\n\n @staticmethod\n def _create_order_mask(full_name_length: int, char_idx: int) -> np.ndarray:\n \"\"\"\n Create order mask.\n Order mask is one-hot mask for calculate order score.\n :param full_name_length: Length of full name.\n :param char_idx: The order of the character in full name\n :return: Order mask\n :rtype: np.ndarray\n \"\"\"\n if char_idx == 0 or char_idx == full_name_length - 1:\n raise ValueError(\"First character and last character must not be created order mask.\")\n\n if full_name_length == 3:\n return np.array([0, 0, 1, 1, 0, 0])\n\n if char_idx == 1:\n return np.array([0, 1, 1, 1, 0, 0])\n\n if char_idx == full_name_length - 2:\n return np.array([0, 0, 1, 1, 1, 0])\n\n return np.array([0, 1, 1, 1, 1, 0])\n\n @staticmethod\n def _create_length_mask(full_name_length, char_idx) -> np.ndarray:\n \"\"\"\n Create length mask.\n Length mask is one-hot mask for calculate length score.\n :param full_name_length: Length of full name.\n :param char_idx: The order of the character in full name\n :return: Length mask\n :rtype: np.ndarray\n \"\"\"\n min_family = char_idx + 1\n max_family = full_name_length - 1\n max_family = 4 if max_family > 4 else max_family\n min_given = full_name_length - char_idx\n max_given = full_name_length - 1\n max_given = 4 if max_given > 4 else max_given\n lc_family = np.array([0, 0, 0, 0])\n if min_family <= max_family:\n lc_family[min_family - 1: max_family] = 1\n lc_given = np.array([0, 0, 0, 0])\n if min_given <= max_given:\n lc_given[min_given - 1: max_given] = 1\n return np.concatenate([lc_family, lc_given])\n\n @staticmethod\n def _calc_current_order_status(piece_of_divided_name: str,\n idx_in_piece_of_divided_name: int,\n is_family: bool) -> int:\n \"\"\"\n Determine which index of order_counts the kanji corresponds to.\n :param piece_of_divided_name: Family name or given name\n :param idx_in_piece_of_divided_name: Index in family or given name\n :param is_family: True if 
piece_of_divided_name is family name\n :return: The index of order_counts\n :rtype: int\n \"\"\"\n if idx_in_piece_of_divided_name == 0:\n return 0 if is_family else 3\n if idx_in_piece_of_divided_name == len(piece_of_divided_name) - 1:\n return 2 if is_family else 5\n else:\n return 1 if is_family else 4\n\n @staticmethod\n def _calc_current_length_status(piece_of_divided_name: str, is_family: bool) -> int:\n \"\"\"\n Determine which index of length_counts the kanji corresponds to.\n :param piece_of_divided_name: Family name or given name\n :param is_family: True if piece_of_divided_name is family name\n :return: The index of length_counts\n :rtype: int\n \"\"\"\n piece_of_divided_name_length = len(piece_of_divided_name) if len(piece_of_divided_name) <= 4 else 4\n return piece_of_divided_name_length - 1 if is_family else piece_of_divided_name_length - 1 + 4\n\n def _calc_order_score(self, piece_of_divided_name: str, full_name_length: int, start_index: int = 0) -> float:\n \"\"\"\n Calculates order score.\n Order score is a feature, which is a kind of frequency, calculated from where each kanji in full name is used.\n See this link if you need more explanation: https://rskmoi.hatenablog.com/entry/2017/01/15/190837\n :param piece_of_divided_name: Family name or given name\n :param full_name_length: Length of fullname\n :param start_index: The order of the first charactar of piece_of_divided_name in full name\n :return: Order score\n :rtype: float\n\n example:\n -----------------------------------------------------\n >>> namedivider = NameDivider()\n >>> # Full name: 新海誠\n >>> namedivider._calc_order_score(piece_of_divided_name='新海', full_name_length=3, start_index=0)\n 0.8305084745762712\n >>> namedivider._calc_order_score(piece_of_divided_name='誠', full_name_length=3, start_index=2)\n 0\n >>> # Full name: 清武弘嗣\n >>> namedivider._calc_order_score(piece_of_divided_name='清武', full_name_length=4, start_index=0)\n 0.2222222222222222\n >>> namedivider._calc_order_score(piece_of_divided_name='弘嗣', full_name_length=4, start_index=2)\n 0.9919571045576407\n -----------------------------------------------------\n \"\"\"\n is_family = True if start_index == 0 else False\n scores = 0\n for idx_in_piece_of_divided_name, _kanji in enumerate(piece_of_divided_name):\n current_idx = start_index + idx_in_piece_of_divided_name\n if current_idx == 0:\n continue\n if current_idx == full_name_length - 1:\n continue\n mask = self._create_order_mask(full_name_length, current_idx)\n current_order_status_idx = self._calc_current_order_status(piece_of_divided_name,\n idx_in_piece_of_divided_name,\n is_family)\n masked_order = self.kanji_dict.get(_kanji, self.default_kanji).order_counts * mask\n if np.sum(masked_order) == 0:\n continue\n scores += masked_order[current_order_status_idx] / np.sum(masked_order)\n return scores\n\n def _calc_length_score(self, piece_of_divided_name, full_name_length, start_index=0) -> float:\n \"\"\"\n Calculates length score.\n Length score is a feature, which is a kind of frequency,\n calculated from how long is family/given name containing the kanji.\n See this link if you need more explanation: https://rskmoi.hatenablog.com/entry/2017/01/15/190837\n :param piece_of_divided_name: Family name or given name\n :param full_name_length: Length of fullname\n :param start_index: The order of the first charactar of piece_of_divided_name in full name\n :return: Length score\n :rtype: float\n\n example:\n -----------------------------------------------------\n >>> namedivider = NameDivider()\n 
>>> # Full name: 新海誠\n >>> namedivider._calc_length_score(piece_of_divided_name='新海', full_name_length=3, start_index=0)\n 1.6721919841662545\n >>> namedivider._calc_length_score(piece_of_divided_name='誠', full_name_length=3, start_index=2)\n 0.5414201183431953\n >>> # Full name: 清武弘嗣\n >>> namedivider._calc_length_score(piece_of_divided_name='清武', full_name_length=4, start_index=0)\n 1.9431977559607292\n >>> namedivider._calc_length_score(piece_of_divided_name='弘嗣', full_name_length=4, start_index=2)\n 1.982873228774868\n -----------------------------------------------------\n \"\"\"\n is_family = True if start_index == 0 else False\n scores = 0\n for i, _kanji in enumerate(piece_of_divided_name):\n current_idx = start_index + i\n mask = self._create_length_mask(full_name_length, current_idx)\n current_length_status_idx = self._calc_current_length_status(piece_of_divided_name, is_family)\n masked_length_scores = self.kanji_dict.get(_kanji, self.default_kanji).length_counts * mask\n if np.sum(masked_length_scores) == 0:\n continue\n scores += masked_length_scores[current_length_status_idx] / np.sum(masked_length_scores)\n return scores\n\n def calc_score(self, family: str, given: str) -> float:\n \"\"\"\n Calculates the score for correct division.\n :param family: Family name\n :param given: Given name\n :return: Score for correct division\n :rtype: float\n \"\"\"\n name = family + given\n order_score_family = self._calc_order_score(family, len(name), 0)\n order_score_given = self._calc_order_score(given, len(name), len(family))\n order_score = (order_score_family + order_score_given) / (len(name) - 2)\n\n # If full name consists of 4 chars, the accuracy is better when using only order score.\n if len(name) == 4:\n return order_score\n\n length_score_family = self._calc_length_score(family, len(name), 0)\n length_score_given = self._calc_length_score(given, len(name), len(family))\n length_score = (length_score_family + length_score_given) / len(name)\n\n return (order_score + length_score) / 2\n\n @staticmethod\n def _validate(undivided_name: str):\n \"\"\"\n Determines if it is an assumed input.\n :param undivided_name: Names with no space between the first and last name\n \"\"\"\n if len(undivided_name) < 2:\n raise ValueError(\"Name length needs at least 2 chars\")\n\n def _divide_by_rule_base(self, undivided_name: str) -> Optional[DividedName]:\n \"\"\"\n Divides undivided name without using kanji statistics.\n :param undivided_name: Names with no space between the family name and given name\n :return:\n if fits the rules: Divided name\n else: None\n :rtype:\n if fits the rules: DividedName\n else: None\n \"\"\"\n # If the undivided name consists of 2 characters,\n # the first characters is family name, and the last characters is given name.\n if len(undivided_name) == 2:\n return self._create_divided_name(family=undivided_name[0],\n given=undivided_name[-1],\n algorithm=\"rule\")\n\n # If the undivided name consists of kanji and other types of characters (hiragana, katakana, etc...),\n # the undivided name will be divided where the kanji and other character types are switched.\n # The criterion for determining switched is whether \"two\" consecutive characters are having\n # different type of characters from first character type.\n # The reason of \"two\" is some family names consist of some kanji and one katakana.\n # (ex: \"井ノ原\", \"三ツ又\", \"関ヶ原\" contains \"ノ\", \"ツ\", \"ヶ\". 
They are all katakana.)\n is_kanji_list = []\n for i, _char in enumerate(undivided_name):\n is_kanji = True if self.compiled_regex_kanji.fullmatch(_char) else False\n is_kanji_list.append(is_kanji)\n if i >= 2:\n if is_kanji_list[0] != is_kanji and is_kanji_list[-2] == is_kanji:\n return self._create_divided_name(family=undivided_name[:i - 1],\n given=undivided_name[i - 1:],\n algorithm=\"rule\")\n\n @staticmethod\n def _softmax(x) -> np.ndarray:\n \"\"\"\n Calculates softmax score\n :param x: array_like\n :return: Softmax scores\n :rtype: np.ndarray\n \"\"\"\n u = np.sum(np.exp(x))\n return np.exp(x) / u\n\n def _divide_by_statistics(self, undivided_name: str) -> DividedName:\n \"\"\"\n Divides undivided name using kanji statistics.\n :param undivided_name: Names with no space between the family name and given name\n :return: Divided name\n :rtype: DividedName\n \"\"\"\n total_scores = []\n for i in range(1, len(undivided_name)):\n family = undivided_name[:i]\n given = undivided_name[i:]\n score = self.calc_score(family, given)\n total_scores.append(score)\n\n total_scores = self._softmax(total_scores)\n max_idx = np.argmax(np.array(total_scores)) + 1\n return self._create_divided_name(family=undivided_name[:max_idx],\n given=undivided_name[max_idx:],\n score=total_scores[max_idx - 1],\n algorithm=\"kanji_feature\")\n\n def divide_name(self, undivided_name: str) -> DividedName:\n \"\"\"\n Divides undivided name.\n :param undivided_name: Names with no space between the family name and given name\n :return: Divided name\n :rtype: DividedName\n\n example:\n -----------------------------------------------------\n >>> namedivider = NameDivider()\n >>> divided_name = namedivider.divide_name(\"菅義偉\")\n >>> print(divided_name)\n \"菅 義偉\"\n >>> print(divided_name.to_dict())\n {'family': '菅', 'given': '義偉', 'separator': ' ', 'score': 0.6328842762252201, 'algorithm': 'kanji_feature'}\n -----------------------------------------------------\n \"\"\"\n self._validate(undivided_name)\n divided_name_by_rule_base = self._divide_by_rule_base(undivided_name)\n if divided_name_by_rule_base:\n return divided_name_by_rule_base\n return self._divide_by_statistics(undivided_name)\n"
] | [
[
"pandas.read_csv",
"numpy.concatenate",
"numpy.exp",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
siril-pivotchain/ETAG-OCR | [
"216c27e8ab63acc5b3686da2948a6881da70350e"
] | [
"keras_ocr/recognition.py"
] | [
"# pylint: disable=invalid-name,too-many-locals,too-many-arguments\nimport typing\nimport string\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport cv2\n\nfrom . import tools\n\nDEFAULT_BUILD_PARAMS = {\n 'height': 31,\n 'width': 200,\n 'color': False,\n 'filters': (64, 128, 256, 256, 512, 512, 512),\n 'rnn_units': (128, 128),\n 'dropout': 0.25,\n 'rnn_steps_to_discard': 2,\n 'pool_size': 2,\n 'stn': True,\n}\n\nDEFAULT_ALPHABET = string.digits + string.ascii_lowercase\n\nPRETRAINED_WEIGHTS = {\n 'kurapan': {\n 'alphabet': DEFAULT_ALPHABET,\n 'build_params': DEFAULT_BUILD_PARAMS,\n 'weights': {\n 'notop': {\n 'url': 'https://www.mediafire.com/file/n9yfn5wueu82rgf/crnn_kurapan_notop.h5/file',\n 'filename': 'crnn_kurapan_notop.h5',\n 'sha256': '027fd2cced3cbea0c4f5894bb8e9e85bac04f11daf96b8fdcf1e4ee95dcf51b9'\n },\n 'top': {\n 'url': 'https://www.mediafire.com/file/pkj2p29b1f6fpil/crnn_kurapan.h5/file',\n 'filename': 'crnn_kurapan.h5',\n 'sha256': 'a7d8086ac8f5c3d6a0a828f7d6fbabcaf815415dd125c32533013f85603be46d'\n }\n }\n },\n 'captcha': {\n 'alphabet': DEFAULT_ALPHABET,\n 'build_params': DEFAULT_BUILD_PARAMS,\n 'weights': {\n 'notop': {\n 'url': 'https://drive.google.com/uc?export=download&id=1drJ9rlIb6WtnW8ysoWU9kTp3TBAS-q6v',\n 'filename': 'captcha.hdf5',\n 'sha256': '701e0947beab802624ba562200da7b7684b87fd516a7d720c6a7453d0e3db805'\n },\n 'top': {\n 'url': 'https://drive.google.com/uc?export=download&id=1drJ9rlIb6WtnW8ysoWU9kTp3TBAS-q6v',\n 'filename': 'captcha.hdf5',\n 'sha256': '701e0947beab802624ba562200da7b7684b87fd516a7d720c6a7453d0e3db805'\n }\n }\n },\n 'etag': {\n 'alphabet': DEFAULT_ALPHABET,\n 'build_params': DEFAULT_BUILD_PARAMS,\n 'weights': {\n 'notop': {\n 'url': 'https://drive.google.com/uc?export=download&id=15rVYQCiMz1wLQUQFFbaJ3DbC3FBHPHX8',\n 'filename': 'recognizer_etag.h5',\n 'sha256': '00178a3ed1e5d1585bdf26252d35a4c03716963c9b2f868b733fe41b718c350e'\n },\n 'top': {\n 'url': 'https://drive.google.com/uc?export=download&id=15rVYQCiMz1wLQUQFFbaJ3DbC3FBHPHX8',\n 'filename': 'recognizer_etag.h5',\n 'sha256': '00178a3ed1e5d1585bdf26252d35a4c03716963c9b2f868b733fe41b718c350e'\n }\n }\n },\n 'etag_2': {\n 'alphabet': DEFAULT_ALPHABET,\n 'build_params': DEFAULT_BUILD_PARAMS,\n 'weights': {\n 'notop': {\n 'url': 'https://drive.google.com/uc?export=download&id=1mqYCyUD1cJzsCbYLzBUYYZ3-1VoPopOZ',\n 'filename': 'recognizer_etag_detect.h5',\n 'sha256': 'fc9f8acfdfe5a3a5dd4880a2bb4a3d0ce353c1469dda507a41faaf52d4dc3afc'\n },\n 'top': {\n 'url': 'https://drive.google.com/uc?export=download&id=1mqYCyUD1cJzsCbYLzBUYYZ3-1VoPopOZ',\n 'filename': 'recognizer_etag_detect.h5',\n 'sha256': 'fc9f8acfdfe5a3a5dd4880a2bb4a3d0ce353c1469dda507a41faaf52d4dc3afc'\n }\n }\n },\n 'etag_3': {\n 'alphabet': DEFAULT_ALPHABET,\n 'build_params': DEFAULT_BUILD_PARAMS,\n 'weights': {\n 'notop': {\n 'url': 'https://drive.google.com/uc?export=download&id=1-1NK1D4AHT5zxQPdNraNT2pNVdgi5qvI',\n 'filename': 'recognizer_etag_detect_aug.h5',\n 'sha256': '84a8d8e56576d99d6ca9c7d792ef6b9b4fc42c16c316fbbb2e26c66bdbb46286'\n },\n 'top': {\n 'url': 'https://drive.google.com/uc?export=download&id=1-1NK1D4AHT5zxQPdNraNT2pNVdgi5qvI',\n 'filename': 'recognizer_etag_detect_aug.h5',\n 'sha256': '84a8d8e56576d99d6ca9c7d792ef6b9b4fc42c16c316fbbb2e26c66bdbb46286'\n }\n }\n }\n}\n\n\ndef swish(x, beta=1):\n return x * keras.backend.sigmoid(beta * x)\n\n\nkeras.utils.get_custom_objects().update({'swish': keras.layers.Activation(swish)})\n\n\ndef _repeat(x, num_repeats):\n ones = tf.ones((1, 
num_repeats), dtype='int32')\n x = tf.reshape(x, shape=(-1, 1))\n x = tf.matmul(x, ones)\n return tf.reshape(x, [-1])\n\n\ndef _meshgrid(height, width):\n x_linspace = tf.linspace(-1., 1., width)\n y_linspace = tf.linspace(-1., 1., height)\n x_coordinates, y_coordinates = tf.meshgrid(x_linspace, y_linspace)\n x_coordinates = tf.reshape(x_coordinates, shape=(1, -1))\n y_coordinates = tf.reshape(y_coordinates, shape=(1, -1))\n ones = tf.ones_like(x_coordinates)\n indices_grid = tf.concat([x_coordinates, y_coordinates, ones], 0)\n return indices_grid\n\n\n# pylint: disable=too-many-statements\ndef _transform(inputs):\n locnet_x, locnet_y = inputs\n output_size = locnet_x.shape[1:]\n batch_size = tf.shape(locnet_x)[0]\n height = tf.shape(locnet_x)[1]\n width = tf.shape(locnet_x)[2]\n num_channels = tf.shape(locnet_x)[3]\n\n locnet_y = tf.reshape(locnet_y, shape=(batch_size, 2, 3))\n\n locnet_y = tf.reshape(locnet_y, (-1, 2, 3))\n locnet_y = tf.cast(locnet_y, 'float32')\n\n output_height = output_size[0]\n output_width = output_size[1]\n indices_grid = _meshgrid(output_height, output_width)\n indices_grid = tf.expand_dims(indices_grid, 0)\n indices_grid = tf.reshape(indices_grid, [-1]) # flatten?\n indices_grid = tf.tile(indices_grid, tf.stack([batch_size]))\n indices_grid = tf.reshape(indices_grid, tf.stack([batch_size, 3, -1]))\n\n transformed_grid = tf.matmul(locnet_y, indices_grid)\n x_s = tf.slice(transformed_grid, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(transformed_grid, [0, 1, 0], [-1, 1, -1])\n x = tf.reshape(x_s, [-1])\n y = tf.reshape(y_s, [-1])\n\n # Interpolate\n height_float = tf.cast(height, dtype='float32')\n width_float = tf.cast(width, dtype='float32')\n\n output_height = output_size[0]\n output_width = output_size[1]\n\n x = tf.cast(x, dtype='float32')\n y = tf.cast(y, dtype='float32')\n x = .5 * (x + 1.0) * width_float\n y = .5 * (y + 1.0) * height_float\n\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n max_y = tf.cast(height - 1, dtype='int32')\n max_x = tf.cast(width - 1, dtype='int32')\n zero = tf.zeros([], dtype='int32')\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n\n flat_image_dimensions = width * height\n pixels_batch = tf.range(batch_size) * flat_image_dimensions\n flat_output_dimensions = output_height * output_width\n base = _repeat(pixels_batch, flat_output_dimensions)\n base_y0 = base + y0 * width\n base_y1 = base + y1 * width\n indices_a = base_y0 + x0\n indices_b = base_y1 + x0\n indices_c = base_y0 + x1\n indices_d = base_y1 + x1\n\n flat_image = tf.reshape(locnet_x, shape=(-1, num_channels))\n flat_image = tf.cast(flat_image, dtype='float32')\n pixel_values_a = tf.gather(flat_image, indices_a)\n pixel_values_b = tf.gather(flat_image, indices_b)\n pixel_values_c = tf.gather(flat_image, indices_c)\n pixel_values_d = tf.gather(flat_image, indices_d)\n\n x0 = tf.cast(x0, 'float32')\n x1 = tf.cast(x1, 'float32')\n y0 = tf.cast(y0, 'float32')\n y1 = tf.cast(y1, 'float32')\n\n area_a = tf.expand_dims(((x1 - x) * (y1 - y)), 1)\n area_b = tf.expand_dims(((x1 - x) * (y - y0)), 1)\n area_c = tf.expand_dims(((x - x0) * (y1 - y)), 1)\n area_d = tf.expand_dims(((x - x0) * (y - y0)), 1)\n transformed_image = tf.add_n([\n area_a * pixel_values_a, area_b * pixel_values_b, area_c * pixel_values_c,\n area_d * pixel_values_d\n ])\n # Finished interpolation\n\n transformed_image = 
tf.reshape(transformed_image,\n shape=(batch_size, output_height, output_width, num_channels))\n return transformed_image\n\n\ndef CTCDecoder():\n def decoder(y_pred):\n input_shape = tf.keras.backend.shape(y_pred)\n input_length = tf.ones(shape=input_shape[0]) * tf.keras.backend.cast(\n input_shape[1], 'float32')\n unpadded = tf.keras.backend.ctc_decode(y_pred, input_length)[0][0]\n unpadded_shape = tf.keras.backend.shape(unpadded)\n padded = tf.pad(unpadded,\n paddings=[[0, 0], [0, input_shape[1] - unpadded_shape[1]]],\n constant_values=-1)\n return padded\n\n return tf.keras.layers.Lambda(decoder, name='decode')\n\n\ndef build_model(alphabet,\n height,\n width,\n color,\n filters,\n rnn_units,\n dropout,\n rnn_steps_to_discard,\n pool_size,\n stn=True):\n \"\"\"Build a Keras CRNN model for character recognition.\n\n Args:\n height: The height of cropped images\n width: The width of cropped images\n color: Whether the inputs should be in color (RGB)\n filters: The number of filters to use for each of the 7 convolutional layers\n rnn_units: The number of units for each of the RNN layers\n dropout: The dropout to use for the final layer\n rnn_steps_to_discard: The number of initial RNN steps to discard\n pool_size: The size of the pooling steps\n stn: Whether to add a Spatial Transformer layer\n \"\"\"\n assert len(filters) == 7, '7 CNN filters must be provided.'\n assert len(rnn_units) == 2, '2 RNN filters must be provided.'\n inputs = keras.layers.Input((height, width, 3 if color else 1))\n x = keras.layers.Permute((2, 1, 3))(inputs)\n x = keras.layers.Lambda(lambda x: x[:, :, ::-1])(x)\n x = keras.layers.Conv2D(filters[0], (3, 3), activation='relu', padding='same', name='conv_1')(x)\n x = keras.layers.Conv2D(filters[1], (3, 3), activation='relu', padding='same', name='conv_2')(x)\n x = keras.layers.Conv2D(filters[2], (3, 3), activation='relu', padding='same', name='conv_3')(x)\n x = keras.layers.BatchNormalization(name='bn_3')(x)\n x = keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size), name='maxpool_3')(x)\n x = keras.layers.Conv2D(filters[3], (3, 3), activation='relu', padding='same', name='conv_4')(x)\n x = keras.layers.Conv2D(filters[4], (3, 3), activation='relu', padding='same', name='conv_5')(x)\n x = keras.layers.BatchNormalization(name='bn_5')(x)\n x = keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size), name='maxpool_5')(x)\n x = keras.layers.Conv2D(filters[5], (3, 3), activation='relu', padding='same', name='conv_6')(x)\n x = keras.layers.Conv2D(filters[6], (3, 3), activation='relu', padding='same', name='conv_7')(x)\n x = keras.layers.BatchNormalization(name='bn_7')(x)\n if stn:\n # pylint: disable=pointless-string-statement\n \"\"\"Spatial Transformer Layer\n Implements a spatial transformer layer as described in [1]_.\n Borrowed from [2]_:\n downsample_fator : float\n A value of 1 will keep the orignal size of the image.\n Values larger than 1 will down sample the image. Values below 1 will\n upsample the image.\n example image: height= 100, width = 200\n downsample_factor = 2\n output image will then be 50, 100\n References\n ----------\n .. [1] Spatial Transformer Networks\n Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu\n Submitted on 5 Jun 2015\n .. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py\n .. 
[3] https://github.com/EderSantana/seya/blob/keras1/seya/layers/attention.py\n \"\"\"\n stn_input_output_shape = (width // pool_size**2, height // pool_size**2, filters[6])\n stn_input_layer = keras.layers.Input(shape=stn_input_output_shape)\n locnet_y = keras.layers.Conv2D(16, (5, 5), padding='same',\n activation='relu')(stn_input_layer)\n locnet_y = keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu')(locnet_y)\n locnet_y = keras.layers.Flatten()(locnet_y)\n locnet_y = keras.layers.Dense(64, activation='relu')(locnet_y)\n locnet_y = keras.layers.Dense(6,\n weights=[\n np.zeros((64, 6), dtype='float32'),\n np.float32([[1, 0, 0], [0, 1, 0]]).flatten()\n ])(locnet_y)\n localization_net = keras.models.Model(inputs=stn_input_layer, outputs=locnet_y)\n x = keras.layers.Lambda(_transform,\n output_shape=stn_input_output_shape)([x, localization_net(x)])\n x = keras.layers.Reshape(target_shape=(width // pool_size**2,\n (height // pool_size**2) * filters[-1]),\n name='reshape')(x)\n\n x = keras.layers.Dense(rnn_units[0], activation='relu', name='fc_9')(x)\n\n rnn_1_forward = keras.layers.LSTM(rnn_units[0],\n kernel_initializer=\"he_normal\",\n return_sequences=True,\n name='lstm_10')(x)\n rnn_1_back = keras.layers.LSTM(rnn_units[0],\n kernel_initializer=\"he_normal\",\n go_backwards=True,\n return_sequences=True,\n name='lstm_10_back')(x)\n rnn_1_add = keras.layers.Add()([rnn_1_forward, rnn_1_back])\n rnn_2_forward = keras.layers.LSTM(rnn_units[1],\n kernel_initializer=\"he_normal\",\n return_sequences=True,\n name='lstm_11')(rnn_1_add)\n rnn_2_back = keras.layers.LSTM(rnn_units[1],\n kernel_initializer=\"he_normal\",\n go_backwards=True,\n return_sequences=True,\n name='lstm_11_back')(rnn_1_add)\n x = keras.layers.Concatenate()([rnn_2_forward, rnn_2_back])\n backbone = keras.models.Model(inputs=inputs, outputs=x)\n x = keras.layers.Dropout(dropout, name='dropout')(x)\n x = keras.layers.Dense(len(alphabet) + 1,\n kernel_initializer='he_normal',\n activation='softmax',\n name='fc_12')(x)\n x = keras.layers.Lambda(lambda x: x[:, rnn_steps_to_discard:])(x)\n model = keras.models.Model(inputs=inputs, outputs=x)\n\n prediction_model = keras.models.Model(inputs=inputs, outputs=CTCDecoder()(model.output))\n labels = keras.layers.Input(name='labels', shape=[model.output_shape[1]], dtype='float32')\n label_length = keras.layers.Input(shape=[1])\n input_length = keras.layers.Input(shape=[1])\n loss = keras.layers.Lambda(lambda inputs: keras.backend.ctc_batch_cost(\n y_true=inputs[0], y_pred=inputs[1], input_length=inputs[2], label_length=inputs[3]))(\n [labels, model.output, input_length, label_length])\n training_model = keras.models.Model(inputs=[model.input, labels, input_length, label_length],\n outputs=loss)\n return backbone, model, training_model, prediction_model\n\n\nclass Recognizer:\n \"\"\"A text detector using the CRNN architecture.\n\n Args:\n alphabet: The alphabet the model should recognize.\n build_params: A dictionary of build parameters for the model.\n See `keras_ocr.recognition.build_model` for details.\n weights: The starting weight configuration for the model.\n include_top: Whether to include the final classification layer in the model (set\n to False to use a custom alphabet).\n \"\"\"\n def __init__(self, alphabet=None, weights='kurapan', build_params=None):\n assert alphabet or weights, 'At least one of alphabet or weights must be provided.'\n if weights is not None:\n build_params = build_params or PRETRAINED_WEIGHTS[weights]['build_params']\n alphabet = alphabet or 
PRETRAINED_WEIGHTS[weights]['alphabet']\n build_params = build_params or DEFAULT_BUILD_PARAMS\n if alphabet is None:\n alphabet = DEFAULT_ALPHABET\n self.alphabet = alphabet\n self.blank_label_idx = len(alphabet)\n self.backbone, self.model, self.training_model, self.prediction_model = build_model(\n alphabet=alphabet, **build_params)\n if weights is not None:\n weights_dict = PRETRAINED_WEIGHTS[weights]\n if alphabet == weights_dict['alphabet']:\n self.model.load_weights(\n tools.download_and_verify(url=weights_dict['weights']['top']['url'],\n filename=weights_dict['weights']['top']['filename'],\n sha256=weights_dict['weights']['top']['sha256']))\n else:\n print('Provided alphabet does not match pretrained alphabet. '\n 'Using backbone weights only.')\n self.backbone.load_weights(\n tools.download_and_verify(url=weights_dict['weights']['notop']['url'],\n filename=weights_dict['weights']['notop']['filename'],\n sha256=weights_dict['weights']['notop']['sha256']))\n\n def get_batch_generator(self, image_generator, batch_size=8, lowercase=False):\n \"\"\"\n Generate batches of training data from an image generator. The generator\n should yield tuples of (image, sentence) where image contains a single\n line of text and sentence is a string representing the contents of\n the image. If a sample weight is desired, it can be provided as a third\n entry in the tuple, making each tuple an (image, sentence, weight) tuple.\n\n Args:\n image_generator: An image / sentence tuple generator. The images should\n be in color even if the OCR is setup to handle grayscale as they\n will be converted here.\n batch_size: How many images to generate at a time.\n lowercase: Whether to convert all characters to lowercase before\n encoding.\n \"\"\"\n y = np.zeros((batch_size, 1))\n if self.training_model is None:\n raise Exception('You must first call create_training_model().')\n max_string_length = self.training_model.input_shape[1][1]\n while True:\n batch = [sample for sample, _ in zip(image_generator, range(batch_size))]\n if not self.model.input_shape[-1] == 3:\n images = [\n cv2.cvtColor(sample[0], cv2.COLOR_RGB2GRAY)[..., np.newaxis] for sample in batch\n ]\n else:\n images = [sample[0] for sample in batch]\n images = np.array([image.astype('float32') / 255 for image in images])\n sentences = [sample[1].strip() for sample in batch]\n if lowercase:\n sentences = [sentence.lower() for sentence in sentences]\n assert all(c in self.alphabet\n for c in ''.join(sentences)), 'Found illegal characters in sentence.'\n assert all(sentences), 'Found a zero length sentence.'\n assert all(\n len(sentence) <= max_string_length\n for sentence in sentences), 'A sentence is longer than this model can predict.'\n assert all(\" \" not in sentence for sentence in sentences), (\n 'Strings with multiple sequential spaces are not permitted. 
'\n 'See https://github.com/faustomorales/keras-ocr/issues/54')\n label_length = np.array([len(sentence) for sentence in sentences])[:, np.newaxis]\n labels = np.array([[self.alphabet.index(c)\n for c in sentence] + [-1] * (max_string_length - len(sentence))\n for sentence in sentences])\n input_length = np.ones((batch_size, 1)) * max_string_length\n if len(batch[0]) == 3:\n sample_weights = np.array([sample[2] for sample in batch])\n yield (images, labels, input_length, label_length), y, sample_weights\n else:\n yield (images, labels, input_length, label_length), y\n\n def recognize(self, image):\n \"\"\"Recognize text from a single image.\n\n Args:\n image: A pre-cropped image containing characters\n \"\"\"\n image = tools.read_and_fit(filepath_or_array=image,\n width=self.prediction_model.input_shape[2],\n height=self.prediction_model.input_shape[1],\n cval=0)\n if self.prediction_model.input_shape[-1] == 1 and image.shape[-1] == 3:\n # Convert color to grayscale\n image = cv2.cvtColor(image, code=cv2.COLOR_RGB2GRAY)[..., np.newaxis]\n image = image.astype('float32') / 255\n return ''.join([\n self.alphabet[idx] for idx in self.prediction_model.predict(image[np.newaxis])[0]\n if idx not in [self.blank_label_idx, -1]\n ])\n\n def recognize_from_boxes(self, images, box_groups, **kwargs) -> typing.List[str]:\n \"\"\"Recognize text from images using lists of bounding boxes.\n\n Args:\n images: A list of input images, supplied as numpy arrays with shape\n (H, W, 3).\n boxes: A list of groups of boxes, one for each image\n \"\"\"\n assert len(box_groups) == len(images), \\\n 'You must provide the same number of box groups as images.'\n crops = []\n start_end = []\n for image, boxes in zip(images, box_groups):\n image = tools.read(image)\n if self.prediction_model.input_shape[-1] == 1 and image.shape[-1] == 3:\n # Convert color to grayscale\n image = cv2.cvtColor(image, code=cv2.COLOR_RGB2GRAY)\n for box in boxes:\n crops.append(\n tools.warpBox(image=image,\n box=box,\n target_height=self.model.input_shape[1],\n target_width=self.model.input_shape[2]))\n start = 0 if not start_end else start_end[-1][1]\n start_end.append((start, start + len(boxes)))\n if not crops:\n return [[] for image in images]\n X = np.float32(crops) / 255\n if len(X.shape) == 3:\n X = X[..., np.newaxis]\n predictions = [\n ''.join([self.alphabet[idx] for idx in row if idx not in [self.blank_label_idx, -1]])\n for row in self.prediction_model.predict(X, **kwargs)\n ]\n return [predictions[start:end] for start, end in start_end]\n\n def compile(self, *args, **kwargs):\n \"\"\"Compile the training model.\"\"\"\n if 'optimizer' not in kwargs:\n kwargs['optimizer'] = 'RMSprop'\n if 'loss' not in kwargs:\n kwargs['loss'] = lambda _, y_pred: y_pred\n self.training_model.compile(*args, **kwargs)\n"
] | [
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.keras.utils.get_custom_objects",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.pad",
"tensorflow.linspace",
"tensorflow.add_n",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Lambda",
"tensorflow.floor",
"tensorflow.keras.layers.Conv2D",
"tensorflow.gather",
"tensorflow.keras.layers.Permute",
"numpy.float32",
"tensorflow.keras.layers.Add",
"numpy.zeros",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.backend.sigmoid",
"tensorflow.matmul",
"tensorflow.keras.models.Model",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.ctc_batch_cost",
"tensorflow.meshgrid",
"tensorflow.keras.layers.Reshape",
"numpy.array",
"tensorflow.clip_by_value",
"tensorflow.keras.layers.Activation",
"tensorflow.range",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.keras.backend.cast",
"numpy.ones",
"tensorflow.keras.backend.shape",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.backend.ctc_decode",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
tomoino/GagRocket | [
"1f0bbef1a48cc12774d11bc380f0b72b9ffaf9f1"
] | [
"src/humorcalc.py"
] | [
"import numpy as np\nfrom keras.models import load_model\nimport MeCab\nimport re\n\ndef calc_humor_score(text, model, word_index):\n (words, reading) = morph(text)\n if not is_dajare(reading):\n return 0\n \n return predict(words, model, word_index)\n \ndef morph(text):\n words = [] # 単語の原形\n reading = \"\" # 単語の読み(発音)\n\n # 発音に関係のない不要な文字を削除\n char_to_trim = [\" \",\" \", \"、\", \"。\", \"!\", \"?\", \"!\", \"?\", \"「\", \"」\", \"『\", \"』\", \"(\", \")\", \"(\", \")\", \"・\", \"~\", \"\\\"\", \"\\'\"];\n for c in char_to_trim:\n text = text.replace(c, \"\")\n\n res = MeCab.Tagger().parse(text)\n\n lines = res.split('\\n')\n items = (re.split('[\\t]',line) for line in lines)\n for item in items:\n if len(item) == 1:\n continue\n\n info = re.split('[,]', item[1])\n words.append(info[6])\n\n if len(info) == 9:\n reading += info[8] if info[8] != \"ヲ\" else \"オ\"\n else:\n reading += item[0] # 登録されていない語やカタカナ語への対応\n\n return (words, reading)\n\ndef is_dajare(reading):\n reading = reading.replace(\"ッ\", \"\").replace(\"ー\", \"\") # 音韻変化を考慮してトリム\n reading_len = len(reading)\n\n for i in range(2, int(reading_len / 2) + 1): # i文字の繰り返しを検出する\n parts = [reading[j:j + i] for j in range(0, reading_len - i + 1)] # 1文字ずつずらしながら検出\n if len(parts) != len(set(parts)):\n return True\n\n return False\n\ndef predict(words, model, word_index):\n max_length = 32 # 含まれる単語の最大の数\n words = [word_index[word] for word in words if word in word_index]\n words = words + [0]*(max_length - len(words))\n ret = model.predict(np.array([words]))\n \n return ret[0][0]"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LukasDegitz/kge | [
"0ff3d2623d520e0634374e81d4184d525c189a25"
] | [
"kge/model/hmcn_model.py"
] | [
"import torch\nfrom torch import Tensor\nfrom kge import Config, Dataset\nfrom kge.model.kge_model import KgeModel\nimport json\nimport os\nimport numpy as np\nimport time\n\nclass hmcn_model(KgeModel):\n\n \"\"\"\n Implements hierarchical Multi-Label classification Network as defined in Wehrmann et al. (2018)\n Codes are adapted from:\n https://github.com/Tencent/NeuralNLP-NeuralClassifier/blob/master/model/classification/hmcn.py\n\n \"\"\"\n\n def __init__(\n self,\n config: Config,\n dataset: Dataset,\n configuration_key=None,\n init_for_load_only=False,\n ):\n self._init_configuration(config, configuration_key)\n\n # Initialize embedding model\n embedding_model = KgeModel.create(\n config=config,\n dataset=dataset,\n configuration_key=self.configuration_key + \".embedding_model\",\n init_for_load_only=init_for_load_only,\n )\n\n # Initialize this model\n super().__init__(\n config=config,\n dataset=dataset,\n scorer=embedding_model.get_scorer(),\n create_embedders=False,\n init_for_load_only=init_for_load_only,\n )\n self._embedding_model = embedding_model\n\n\n types_path = self.config.get('hmcn_model.types_path')\n y, idx, pos_weights, hier_tuple_ids, hier, hierarchical_depth, global2local, hierarchy_classes\\\n = self.load_types(types_dataset_path=types_path, num_entities=dataset.num_entities())\n self.types = y\n self.type_ids = idx\n self.hier_tuple_ids = hier_tuple_ids\n self.hier = hier\n self.pos_weights = pos_weights\n\n #HMCN setup\n self.hierarchical_depth = hierarchical_depth\n self.hierarchical_class = hierarchy_classes\n self.global2local = global2local\n hidden_dimension = self._embedding_model.get_s_embedder().dim\n\n #predictions\n self.beta = self.config.get(\"hmcn_model.beta\")\n self.p = self.config.get(\"hmcn_model.hiddenlayer_dropout\")\n self.lamb = torch.Tensor([self.config.get(\"hmcn_model.lambda\")])\n\n # Setup HMCN model according to Wehrmann et al. 
(2018)\n # Code adapted from\n # https://github.com/Tencent/NeuralNLP-NeuralClassifier/blob/master/model/classification/hmcn.py\n self.local_layers = torch.nn.ModuleList()\n self.global_layers = torch.nn.ModuleList()\n for i in range(1, len(self.hierarchical_depth)):\n self.global_layers.append(\n torch.nn.Sequential(\n torch.nn.Linear(hidden_dimension + self.hierarchical_depth[i - 1], self.hierarchical_depth[i]),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(self.hierarchical_depth[i]),\n torch.nn.Dropout(p=0.5)\n ))\n self.local_layers.append(\n torch.nn.Sequential(\n torch.nn.Linear(self.hierarchical_depth[i], self.global2local[i]),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(self.global2local[i]),\n torch.nn.Linear(self.global2local[i], self.hierarchical_class[i])\n ))\n\n self.global_layers.apply(self._init_weight)\n self.local_layers.apply(self._init_weight)\n self.linear = torch.nn.Linear(self.hierarchical_depth[-1], len(hier_tuple_ids))\n self.linear.apply(self._init_weight)\n self.dropout = torch.nn.Dropout(p=self.p)\n\n def prepare_job(self, job, **kwargs):\n self._embedding_model.prepare_job(job, **kwargs)\n\n def penalty(self, **kwargs):\n ''' penalty calculated in training as it depends on confidence estimates '''\n penalties = self._embedding_model.penalty(**kwargs)\n return penalties\n\n def get_lambda(self):\n return self.lamb\n\n def get_tuple_ids(self):\n return self.hier_tuple_ids\n\n def get_train_mask(self, idx):\n return self.train_mask[idx]\n\n # pass embedding methods down to wrapped embedder\n def get_s_embedder(self):\n return self._embedding_model.get_s_embedder()\n\n def get_o_embedder(self):\n return self._embedding_model.get_o_embedder()\n\n def get_p_embedder(self):\n return self._embedding_model.get_p_embedder()\n\n def get_scorer(self):\n return self._embedding_model.get_scorer()\n\n def score_spo(self, s, p, o, direction=None):\n return self._embedding_model.score_spo(s, p, o, direction)\n\n def score_po(self, p, o, s=None):\n return self._embedding_model.score_po(p, o, s)\n\n def score_so(self, s, o, p=None):\n return self._embedding_model.score_so(s, o, p)\n\n def score_sp_po(self, s, p, o, entity_subset=None):\n return self._embedding_model.score_sp_po(s, p, o, entity_subset)\n\n # mimics forward\n def predict_all(self, idx, device):\n\n entity_embeddings = self._embedding_model.get_s_embedder().to(device).embed(indexes=idx)\n local_layer_outputs = []\n global_layer_activation = entity_embeddings\n #batch_size = len(idx)\n for i, (local_layer, global_layer) in enumerate(zip(self.local_layers, self.global_layers)):\n local_layer_activation = global_layer(global_layer_activation)\n local_layer_outputs.append(local_layer(local_layer_activation))\n if i < len(self.global_layers) - 1:\n global_layer_activation = torch.cat((local_layer_activation, entity_embeddings), 1)\n else:\n global_layer_activation = local_layer_activation\n\n global_layer_output = self.linear(global_layer_activation)\n local_layer_output = torch.cat(local_layer_outputs, 1)\n probits = self.beta * torch.sigmoid(local_layer_output) + (1 - self.beta) * torch.sigmoid(global_layer_output)\n return global_layer_output, local_layer_output, probits\n\n # function to zero all child class predictions, that dont have the relative parent type assigned\n # type = 'proba' used for violation calculation by lagging parent confidence to child\n # type = 'binbary' used to ensure hierarchy consistent prediction\n def build_mask(self, y, type='binary', device=None):\n\n mask = []\n y_parent = {}\n # Assume root 
type is predicted for all instances\n for root_type in self.hier[1].keys():\n if type == 'binary':\n y_parent[(1, root_type)] = torch.ones(len(y), dtype=torch.int).to(device)\n else:\n y_parent[(1, root_type)] = torch.ones(len(y), dtype=torch.float).to(device)\n\n for hier_tuple, tuple_id in self.hier_tuple_ids.items():\n mask.append(y_parent[hier_tuple])\n type_level, type_id = hier_tuple\n for child in self.hier[type_level][type_id]:\n child_tuple = (type_level + 1, child)\n if child_tuple not in y_parent:\n y_parent[child_tuple] = y[:, tuple_id]\n # DAG!\n else:\n if type == 'binary':\n # Tie handling when both parent cast predictions: logical or\n y_parent[child_tuple] = torch.logical_or(y_parent[child_tuple], y[:, tuple_id]).int()\n else:\n # Tie Handling use maximum confidence of parent predictions\n y_parent[child_tuple] = torch.max(y_parent[child_tuple], y[:, tuple_id]).float()\n\n return torch.stack(mask).transpose(0, 1).float()\n\n def load_types(self, types_dataset_path, num_entities):\n \"\"\"\n @param types_dataset_path: Path to type dataset. Requires hier.json, train.del, valid.del and test.del.\n @param num_entities: Number of unique entities in the KG.\n @return:\n y: Binary map of types with shape (num_entities, num_types-1). Root type not considered.\n idx: dict with keys ['train', 'valid', 'test'] containing respective entity ids.\n pos_weights: positive weights of class computed from training split for weighted bce_with_logits_loss.\n hier_tuple_ids: dict with keys [(level, type_id)] for mapping y to type id\n hierarchical_depth: number of ReLU neurons per hierarchy level: 384.\n global2local: local ReLU neurons. same as hierarchy_classes.\n hierarchy_classes: Number of classes per hierarchy level. root class excluded.\n \"\"\"\n # load the hierarchy to receive type information\n hier_path = os.path.join(types_dataset_path, 'hier.json')\n with open(hier_path, 'r') as json_file:\n hier = json.load(json_file)\n\n # reshape hierarchy and build binary type map (usefull to map predictions to type_ids)\n # build required shapes for HMCN\n # ReLU neurons set to 384 per level see Wehrmann et al (2018)\n hier_t, train_freq, hier_tuple_ids = {}, [], {}\n for hier_level, parents in hier.items():\n hier_t[int(hier_level)] = {}\n if int(hier_level) == 0:\n #no prediction for level 0\n hierarchical_depth = [0] # Global ReLU neurons\n global2local = [0] # Local transfer neurons\n hierarchy_classes = [0] #number of classes per level\n continue\n else:\n hierarchical_depth.append(384) # Global ReLU neurons\n global2local.append(len(parents)) # Local transfer neurons\n hierarchy_classes.append(len(parents)) # number of classes per level\n for level_type in parents:\n hier_t[int(hier_level)][int(level_type)] = hier[hier_level][level_type].copy()\n if (int(hier_level), int(level_type)) not in hier_tuple_ids:\n hier_tuple_ids[(int(hier_level), int(level_type))] = len(hier_tuple_ids)\n train_freq.append(0)\n hier = hier_t\n\n # build type maps keeping track of ids of respective split\n type_idx = {}\n y = np.zeros((num_entities, len(hier_tuple_ids)))\n # load types\n for split in ['train', 'valid', 'test']:\n idx = []\n types_path = os.path.join(types_dataset_path, split + '.del')\n with open(types_path, 'r') as file:\n for line in file:\n entity_id, type_list = line.split(\"\\t\", maxsplit=1)\n type_list = type_list.rstrip(\"\\n\")\n # iterate through hierarchical type structure\n for level, level_types in enumerate(json.loads(type_list)):\n if level == 0:\n continue\n for type_id in 
level_types:\n bin_type_id = hier_tuple_ids[(level, int(type_id))]\n y[int(entity_id), bin_type_id] = 1\n if split == 'train':\n train_freq[bin_type_id] += 1\n idx.append(int(entity_id))\n type_idx[split] = idx.copy()\n\n # compute weights for loss function\n pos_weights = []\n for class_count in train_freq:\n if class_count == 0:\n pos_weight = len(type_idx['train'])\n else:\n neg_count = len(type_idx['train']) - class_count\n pos_weight = neg_count / class_count\n pos_weights.append(pos_weight)\n\n # create output numpy arrays and tensors\n y = torch.from_numpy(y)\n pos_weights = torch.from_numpy(np.array(pos_weights))\n idx = {split: torch.from_numpy(np.array(entity_ids)) for split, entity_ids in type_idx.items()}\n return y, idx, pos_weights, hier_tuple_ids, hier, hierarchical_depth, global2local, hierarchy_classes\n\n\n def _init_weight(self, m):\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.normal_(m.weight, std=0.1)"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.max",
"torch.cat",
"torch.nn.ModuleList",
"torch.from_numpy",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.stack",
"torch.nn.ReLU",
"numpy.array",
"torch.logical_or"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EricPWilliamson/bhattacharyya-distance | [
"d67498d58bed342151c9d820a520254a503abdc8"
] | [
"bhatta_dist.py"
] | [
"\"\"\"\r\nThe function bhatta_dist() calculates the Bhattacharyya distance between two classes on a single feature.\r\n The distance is positively correlated to the class separation of this feature. Four different methods are\r\n provided for calculating the Bhattacharyya coefficient.\r\n\r\nCreated on 4/14/2018\r\nAuthor: Eric Williamson ([email protected])\r\n\"\"\"\r\nimport numpy as np\r\nfrom math import sqrt\r\nfrom scipy.stats import gaussian_kde\r\n\r\ndef bhatta_dist(X1, X2, method='continuous'):\r\n #Calculate the Bhattacharyya distance between X1 and X2. X1 and X2 should be 1D numpy arrays representing the same\r\n # feature in two separate classes. \r\n\r\n def get_density(x, cov_factor=0.1):\r\n #Produces a continuous density function for the data in 'x'. Some benefit may be gained from adjusting the cov_factor.\r\n density = gaussian_kde(x)\r\n density.covariance_factor = lambda:cov_factor\r\n density._compute_covariance()\r\n return density\r\n\r\n #Combine X1 and X2, we'll use it later:\r\n cX = np.concatenate((X1,X2))\r\n\r\n if method == 'noiseless':\r\n ###This method works well when the feature is qualitative (rather than quantitative). Each unique value is\r\n ### treated as an individual bin.\r\n uX = np.unique(cX)\r\n A1 = len(X1) * (max(cX)-min(cX)) / len(uX)\r\n A2 = len(X2) * (max(cX)-min(cX)) / len(uX)\r\n bht = 0\r\n for x in uX:\r\n p1 = (X1==x).sum() / A1\r\n p2 = (X2==x).sum() / A2\r\n bht += sqrt(p1*p2) * (max(cX)-min(cX))/len(uX)\r\n\r\n elif method == 'hist':\r\n ###Bin the values into a hardcoded number of bins (This is sensitive to N_BINS)\r\n N_BINS = 10\r\n #Bin the values:\r\n h1 = np.histogram(X1,bins=N_BINS,range=(min(cX),max(cX)), density=True)[0]\r\n h2 = np.histogram(X2,bins=N_BINS,range=(min(cX),max(cX)), density=True)[0]\r\n #Calc coeff from bin densities:\r\n bht = 0\r\n for i in range(N_BINS):\r\n p1 = h1[i]\r\n p2 = h2[i]\r\n bht += sqrt(p1*p2) * (max(cX)-min(cX))/N_BINS\r\n\r\n elif method == 'autohist':\r\n ###Bin the values into bins automatically set by np.histogram:\r\n #Create bins from the combined sets:\r\n # bins = np.histogram(cX, bins='fd')[1]\r\n bins = np.histogram(cX, bins='doane')[1] #Seems to work best\r\n # bins = np.histogram(cX, bins='auto')[1]\r\n\r\n h1 = np.histogram(X1,bins=bins, density=True)[0]\r\n h2 = np.histogram(X2,bins=bins, density=True)[0]\r\n\r\n #Calc coeff from bin densities:\r\n bht = 0\r\n for i in range(len(h1)):\r\n p1 = h1[i]\r\n p2 = h2[i]\r\n bht += sqrt(p1*p2) * (max(cX)-min(cX))/len(h1)\r\n\r\n elif method == 'continuous':\r\n ###Use a continuous density function to calculate the coefficient (This is the most consistent, but also slightly slow):\r\n N_STEPS = 200\r\n #Get density functions:\r\n d1 = get_density(X1)\r\n d2 = get_density(X2)\r\n #Calc coeff:\r\n xs = np.linspace(min(cX),max(cX),N_STEPS)\r\n bht = 0\r\n for x in xs:\r\n p1 = d1(x)\r\n p2 = d2(x)\r\n bht += sqrt(p1*p2)*(max(cX)-min(cX))/N_STEPS\r\n\r\n else:\r\n raise ValueError(\"The value of the 'method' parameter does not match any known method\")\r\n\r\n ###Lastly, convert the coefficient into distance:\r\n if bht==0:\r\n return float('Inf')\r\n else:\r\n return -np.log(bht)\r\n\r\n\r\ndef bhatta_dist2(x, Y, Y_selection=None, method='continuous'):\r\n #Same as bhatta_dist, but takes different inputs. 
Takes a feature 'x' and separates it by class ('Y').\r\n if Y_selection is None:\r\n Y_selection = list(set(Y))\r\n #Make sure Y_selection is just 2 classes:\r\n if len(Y_selection) != 2:\r\n raise ValueError(\"Use parameter Y_selection to select just 2 classes.\")\r\n #Separate x into X1 and X2:\r\n X1 = np.array(x,dtype=np.float64)[Y==Y_selection[0]]\r\n X2 = np.array(x,dtype=np.float64)[Y==Y_selection[1]]\r\n #Plug X1 and X2 into bhatta_dist():\r\n return bhatta_dist(X1, X2, method=method)\r\n"
] | [
[
"numpy.log",
"numpy.unique",
"numpy.concatenate",
"scipy.stats.gaussian_kde",
"numpy.array",
"numpy.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
zenetio/AI-4-Clinical-Workflow | [
"7128f2eeafb5e5fc5a70e7c3770847ca1c924dea"
] | [
"src/inference/UNetInferenceAgent.py"
] | [
"\"\"\"\nContains class that runs inferencing\n\"\"\"\nimport torch\nimport numpy as np\n\nfrom networks.RecursiveUNet import UNet\n\nfrom utils.utils import med_reshape\n\nclass UNetInferenceAgent:\n \"\"\"\n Stores model and parameters and some methods to handle inferencing\n \"\"\"\n def __init__(self, parameter_file_path='', model=None, device=\"cpu\", patch_size=64):\n\n self.model = model\n self.patch_size = patch_size\n self.device = device\n\n if model is None:\n self.model = UNet(num_classes=3)\n\n if parameter_file_path:\n self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))\n\n self.model.to(device)\n\n def single_volume_inference_unpadded(self, volume):\n \"\"\"\n Runs inference on a single volume of arbitrary patch size,\n padding it to the conformant size first\n\n Arguments:\n volume {Numpy array} -- 3D array representing the volume\n\n Returns:\n 3D NumPy array with prediction mask\n \"\"\"\n \n # normalize the data volume \n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n # reshape the image volume to the same patch size used for training\n img_reshaped = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n # create a new 3d mask to store predicted results\n mask3d = np.zeros(img_reshaped.shape)\n # iterate over the image array and predict the all the slices\n for slc_idx in range(img_reshaped.shape[2]):\n # compute for each slice\n slc = torch.from_numpy(img_reshaped[:,:,slc_idx].astype(np.single)).unsqueeze(0).unsqueeze(0)\n # make prediction\n pred = self.model(slc.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n # store predicted data\n mask3d[:,:,slc_idx] = torch.argmax(pred, dim=0)\n # return the predicted volume\n return mask3d\n\n def single_volume_inference(self, volume):\n \"\"\"\n Runs inference on a single volume of conformant patch size\n\n Arguments:\n volume {Numpy array} -- 3D array representing the volume\n\n Returns:\n 3D NumPy array with prediction mask\n \"\"\"\n self.model.eval()\n\n # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis\n slices = []\n\n # Write code that will create mask for each slice across the X (0th) dimension. After \n # that, put all slices into a 3D Numpy array. You can verify if your method is \n # correct by running it on one of the volumes in your training set and comparing \n # with the label in 3D Slicer.\n \n # normalize\n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n \n new_image = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n mask3d = np.zeros(new_image.shape)\n \n for slc_ix in range(new_image.shape[2]):\n tsr_test = torch.from_numpy(new_image[:,:,slc_ix].astype(np.single)).unsqueeze(0).unsqueeze(0)\n #image = torch.from_numpy(self.data[slc[0]][\"image\"][:,:,slc[1]]).unsqueeze(0)\n #tsr_test = torch.from_numpy(slc.astype(np.single)).unsqueeze(0).unsqueeze(0)\n pred = self.model(tsr_test.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n mask3d[:,:,slc_ix] = torch.argmax(pred, dim=0)\n\n return mask3d\n"
] | [
[
"numpy.min",
"torch.load",
"numpy.max",
"numpy.zeros",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WouterKoch/Naturalis_data_preparation | [
"05d60d2913838297d30959ccf1388028ac8553bc"
] | [
"combine.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport os\nimport sys\nimport hashlib\n\ndef generateId(image_url, prefix, exisiting_ids):\n id = f\"{prefix}:{hashlib.md5(image_url.encode()).hexdigest()}\"\n # while id in exisiting_ids:\n # print(f\"id {id} exists!\")\n # image_url = image_url + \"1\"\n # id = f\"{prefix}:{hashlib.blake2s(image_url.encode()).hexdigest()}\"\n # print(f\"Changed to {id}\")\n return id\n\n\ndef combine(taxonfiles, imagefiles, outputfolder, previousImageList):\n tqdm.pandas()\n taxa = pd.read_csv(taxonfiles[0])\n for i, file in enumerate(taxonfiles):\n if i > 0:\n taxa = pd.concat([taxa, pd.read_csv(file)], ignore_index=True)\n\n taxa.drop_duplicates(subset=['taxon_id_at_source'], inplace=True)\n taxa['taxon_id_at_source'] = taxa['taxon_id_at_source'].apply(lambda x: \"NBIC:\" + str(int(x)))\n taxa['accepted_taxon_id_at_source'] = taxa['accepted_taxon_id_at_source'].apply(lambda x: \"NBIC:\" + str(int(x)))\n\n if 'specific_epithet' not in taxa.columns.values:\n taxa['specific_epithet'] = \"\"\n\n if 'infraspecific_epithet' not in taxa.columns.values:\n taxa['infraspecific_epithet'] = \"\"\n\n taxa.to_csv(os.path.join(outputfolder, 'taxa.csv'), index=False)\n\n print(f\"{len(taxa)} taxa\")\n\n\n images = pd.read_csv(imagefiles[0])\n images[\"dataset\"] = imagefiles[0].split(\"/\")[-1].split(\"_\")[0].upper()\n for i, file in enumerate(imagefiles):\n if i > 0:\n adding = pd.read_csv(file)\n adding[\"dataset\"] = imagefiles[i].split(\"/\")[-1].split(\"_\")[0].upper()\n images = pd.concat([images, adding], ignore_index=True)\n\n if 'sex' not in images.columns.values:\n images['sex'] = \"\"\n if 'morph' not in images.columns.values:\n images['morph'] = \"\"\n if 'morph_id' not in images.columns.values:\n images['morph_id'] = \"\"\n if 'rijkdriehoeksstelsel_x' not in images.columns.values:\n images['rijkdriehoeksstelsel_x'] = \"\"\n if 'rijkdriehoeksstelsel_y' not in images.columns.values:\n images['rijkdriehoeksstelsel_y'] = \"\"\n\n images['taxon_id_at_source'] = images['taxon_id_at_source'].apply(lambda x: \"NBIC:\" + str(int(x)))\n images['accepted_taxon_id_at_source'] = images['accepted_taxon_id_at_source'].apply(lambda x: \"NBIC:\" + str(int(x)))\n\n images['location_latitude'] = images['location_latitude'].apply(lambda x: x if np.absolute(x) < 90 else 0)\n images['location_longitude'] = images['location_longitude'].apply(lambda x: x if np.absolute(x) < 180 else 0)\n\n images.drop_duplicates(subset=['image_url'], inplace=True)\n\n url_replacements = {\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2011/P5159387.jpg': 'http://folk.ntnu.no/wouterk/replacements/P5159387.jpg',\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2013/P6230493.jpg': 'http://folk.ntnu.no/wouterk/replacements/P6230493.jpg',\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2011/P5159386.jpg': 'http://folk.ntnu.no/wouterk/replacements/P5159386.jpg',\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2011/P5159381.jpg': 'http://folk.ntnu.no/wouterk/replacements/P5159381.jpg',\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2011/P5159385.jpg': 'http://folk.ntnu.no/wouterk/replacements/P5159385.jpg',\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2011/P5159382.jpg': 'http://folk.ntnu.no/wouterk/replacements/P5159382.jpg',\n 'https://purl.org/gbifnorway/img/ipt-specimens/barstow-garden/new/2011/P5159383.jpg': 
'http://folk.ntnu.no/wouterk/replacements/P5159383.jpg',\n 'https://www.dagbladet.no/images/70576123.jpg?imageId=70576123&width=980&height=559&compression=80': 'http://folk.ntnu.no/wouterk/replacements/70576123.jpg',\n 'https://image.klikk.no/6732049.jpg?imageId=6732049&x=0&y=0&cropw=100&croph=85.581395348837&width=1600&height=913': 'http://folk.ntnu.no/wouterk/replacements/6732049.jpg',\n 'https://image.klikk.no/6732047.jpg?imageId=6732047&width=500&height=285': 'http://folk.ntnu.no/wouterk/replacements/6732047.jpg',\n 'https://image.forskning.no/1559391.jpg?imageId=1559391&width=706&height=403': 'http://folk.ntnu.no/wouterk/replacements/1559391.jpg',\n 'https://image.klikk.no/6732051.jpg?imageId=6732051&x=0&y=0&cropw=100&croph=85.549964054637&width=1600&height=912': 'http://folk.ntnu.no/wouterk/replacements/6732051.jpg',\n 'https://image.klikk.no/6732046.jpg?imageId=6732046&x=0&y=0&cropw=100&croph=83.4375&width=468&height=267': 'http://folk.ntnu.no/wouterk/replacements/6732046.jpg',\n 'https://image.klikk.no/6732048.jpg?imageId=6732048&width=1600&height=912': 'http://folk.ntnu.no/wouterk/replacements/6732048.jpg',\n 'https://1jv6g9n0pik3nvjug2t92dlh-wpengine.netdna-ssl.com/wp-content/uploads/2015/07/thinkstockphotos-178588991-352x431.jpg': 'http://folk.ntnu.no/wouterk/replacements/thinkstockphotos-178588991.jpg',\n 'https://1jv6g9n0pik3nvjug2t92dlh-wpengine.netdna-ssl.com/wp-content/uploads/2015/07/ulv-1100x551.jpg': 'http://folk.ntnu.no/wouterk/replacements/ulv-1100x551.jpg',\n 'https://1jv6g9n0pik3nvjug2t92dlh-wpengine.netdna-ssl.com/wp-content/uploads/2015/06/ulvespor_mogens_totsas-300x201.jpg': 'http://folk.ntnu.no/wouterk/replacements/ulvespor_mogens_totsas.jpg',\n 'https://1jv6g9n0pik3nvjug2t92dlh-wpengine.netdna-ssl.com/wp-content/uploads/2015/06/ulv_vinter-300x207.jpg': 'http://folk.ntnu.no/wouterk/replacements/ulv_vinter-300x207.jpg',\n 'https://1jv6g9n0pik3nvjug2t92dlh-wpengine.netdna-ssl.com/wp-content/uploads/2015/06/ulv_1-300x234.jpg': 'http://folk.ntnu.no/wouterk/replacements/ulv_1.jpg',\n 'https://image.forskning.no/1363982.jpg?imageId=1363982&x=0&y=11.714285714286&cropw=100&croph=86.714285714286&width=1050&height=608': 'http://folk.ntnu.no/wouterk/replacements/1363982.jpg',\n 'https://image.forskning.no/1347259.jpg?imageId=1347259&x=0&y=7.1005917159763&cropw=100&croph=42.011834319527&width=1058&height=604': 'http://folk.ntnu.no/wouterk/replacements/1347259.jpg',\n 'https://www.dagbladet.no/images/63196038.jpg?imageId=63196038&x=0&y=0&cropw=100.00&croph=100.00&width=980&height=552&compression=80': 'http://folk.ntnu.no/wouterk/replacements/63196038.jpg',\n 'https://www.dagbladet.no/images/63196036.jpg?imageId=63196036&x=0&y=0&cropw=100.00&croph=100.00&width=1470&height=754.5': 'http://folk.ntnu.no/wouterk/replacements/63196036.jpg',\n 'https://www.miljodirektoratet.no/globalassets/bilder/nyhetsbilder-2020/jerv-bard-bredesen.jpg?w=1150': 'http://folk.ntnu.no/wouterk/replacements/jerv-bard-bredesen.jpg',\n 'https://www.regjeringen.no/contentassets/b895dbabc684463fb805f5f0e970e2fc/dn_006326.jpg?preset=article&v=-444535606': 'http://folk.ntnu.no/wouterk/replacements/dn_006326.jpg',\n 'https://www.dagbladet.no/images/63445639.jpg?imageId=63445639&x=0&y=0&cropw=100.00&croph=100.00&width=1470&height=831': 'http://folk.ntnu.no/wouterk/replacements/63445639.jpg',\n 'https://www.dagbladet.no/images/63445641.jpg?imageId=63445641&x=0&y=0&cropw=100.00&croph=100.00&width=980&height=554&compression=80': 'http://folk.ntnu.no/wouterk/replacements/63445641.jpg',\n 
'https://www.dagbladet.no/images/67460750.jpg?imageId=67460750&width=980&height=559&compression=80': 'http://folk.ntnu.no/wouterk/replacements/67460750.jpg',\n 'https://www.dagbladet.no/images/63947838.jpg?imageId=63947838&x=2.0040080160321&y=31.108144192256&cropw=95.145631067961&croph=39.301874595992&width=938&height=582&compression=80': 'http://folk.ntnu.no/wouterk/replacements/63947838.jpg',\n 'https://www.dagbladet.no/images/63971492.jpg?imageId=63971492&width=980&height=559&compression=80': 'http://folk.ntnu.no/wouterk/replacements/63971492.jpg',\n 'https://www.nysgjerrigper.no/siteassets/bilder-artikler/2017-2/isbjorn-foto-shutterstock.jpg?transform=DownFit&width=700': 'http://folk.ntnu.no/wouterk/replacements/isbjorn-foto-shutterstock.jpg',\n 'https://image.forskning.no/1348957.jpg?imageId=1348957&width=1058&height=604': 'http://folk.ntnu.no/wouterk/replacements/1348957.jpg',\n }\n\n images['image_url'] = images['image_url'].apply(lambda x: url_replacements[x] if x in url_replacements.keys() else x)\n\n\n # Ensure using the same id's as last time\n lasttime = pd.read_csv(previousImageList, usecols=[\"image_url\", \"image_id\"])\n lasttime.columns = [\"image_url\", \"old_image_id\"]\n\n images = pd.merge(images, lasttime, on=\"image_url\", how=\"left\")\n existing_ids = lasttime[\"old_image_id\"].to_list()\n\n images[\"image_id\"] = images.progress_apply(lambda x: x[\"old_image_id\"] if not pd.isna(x[\"old_image_id\"]) else (x[\"image_id\"] if \"AO:\" in x[\"image_id\"] else generateId(x[\"image_url\"], x[\"dataset\"], existing_ids)) , axis=1)\n images.drop([\"old_image_id\",\"dataset\"], axis=1, inplace=True)\n\n images.to_csv(os.path.join(outputfolder, 'images.csv'), index=False)\n # images = images[images['image_url'].apply(lambda x: x in url_replacements.keys())]\n # images['image_url'] = images['image_url'].apply(lambda x: url_replacements[x])\n # images.to_csv('Output/images_replacements.csv', index=False)\n print(f\"{len(images)} images\")\n\n taxon_counts = pd.DataFrame(taxa.value_counts(subset=['taxon_full_name']))\n taxon_counts.columns = ['Count']\n\n print(f\"{len(images.drop_duplicates(subset=['image_id']))} image id's\")\n\n\n print('Taxon names occurring more than once in taxa:')\n print(taxon_counts[taxon_counts['Count'] > 1])\n print('Taxon names in images but not in taxa:', list(set(images['taxon_full_name'].to_list()) - set(taxa['taxon_full_name'].to_list())))\n\n\n\n\n\n\nif __name__ == \"__main__\":\n print(\"USAGE: combine(inputfiles, outputfolder)\")\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"numpy.absolute",
"pandas.concat",
"pandas.isna"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
dcdanko/MetaSUB_CAP | [
"db5672b0206afb3ffe3204b0577a4a5f84b9bcd4"
] | [
"scripts/beta_diversity_stats.py"
] | [
"#! /usr/bin/env python3\n\n\nimport sys\nimport click\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.stats import gmean, entropy\nfrom numpy.linalg import norm\nimport numpy as np\nfrom math import sqrt\nfrom json import dumps as jdumps\nimport pandas as pd\n\n\nclass LevelNotFoundException(Exception):\n pass\n\n\ndef checkLevel(taxon, level):\n if level == 'species':\n return ('s__' in taxon) and ('t__' not in taxon)\n elif level == 'genus':\n return ('g__' in taxon) and ('s__' not in taxon)\n raise LevelNotFoundException()\n\n\ndef clr(X):\n _X = X + 0.0000001\n _X = _X / norm(_X, ord=1)\n g = gmean(_X)\n _X = np.divide(_X, g)\n _X = np.log(_X)\n return _X\n\n\ndef rhoProportionality(P, Q):\n _P, _Q = clr(P), clr(Q)\n N = np.var(_P - _Q)\n D = np.var(_P) + np.var(_Q)\n return 1 - (N / D)\n\n\ndef jensenShannonDistance(P, Q):\n _P = P / norm(P, ord=1)\n _Q = Q / norm(Q, ord=1)\n _M = 0.5 * (_P + _Q)\n J = 0.5 * (entropy(_P, _M) + entropy(_Q, _M))\n return sqrt(J)\n\n\nclass SampleSet:\n\n def __init__(self, tool, mpas):\n self.tool = tool\n self.mpaFiles = mpas\n\n def parse(self, level):\n mpas = {name: Sample.parseMPA(name, mpaf, level).abunds\n for name, mpaf in self.mpaFiles}\n self.mpas = pd.DataFrame(mpas).transpose()\n self.mpas.fillna(value=0, inplace=True)\n\n def distanceMatrix(self, metric):\n X = self.mpas.as_matrix()\n if metric == 'jensen_shannon_distance':\n distm = squareform(pdist(X, jensenShannonDistance))\n elif metric == 'rho_proportionality':\n distm = squareform(pdist(X, rhoProportionality))\n distm = pd.DataFrame(distm, \n index=self.mpas.index,\n columns=self.mpas.index)\n return distm.to_dict()\n\n\nclass Sample:\n\n def __init__(self, sname, level):\n self.sname = sname\n self.level = level\n self.abunds = {}\n\n def addLine(self, line):\n taxon, abund = line.split()\n if checkLevel(taxon, self.level):\n self.abunds[taxon] = float(abund)\n\n @classmethod\n def parseMPA(ctype, name, mpaFile, level):\n sample = Sample(name, level)\n with open(mpaFile) as mF:\n for line in mF:\n sample.addLine(line)\n return sample\n\n\[email protected]()\[email protected]('-t', '--tool-set', nargs=3, multiple=True)\ndef main(tool_set):\n toolSets = tool_set\n condensed = {}\n for toolSet in toolSets:\n tool = toolSet[0]\n sampleName = toolSet[1]\n mpa = toolSet[2]\n try:\n condensed[tool].append((sampleName, mpa))\n except KeyError:\n condensed[tool] = [(sampleName, mpa)]\n sampleSets = [SampleSet(tool, mpas) for tool, mpas in condensed.items()]\n\n obj = {\n 'species': {\n 'jensen_shannon_distance': {},\n 'rho_proportionality': {},\n },\n 'genus': {\n 'jensen_shannon_distance': {},\n 'rho_proportionality': {},\n }\n }\n for level in obj.keys():\n for sampleSet in sampleSets:\n sampleSet.parse(level)\n tool = sampleSet.tool\n for metric in obj[level].keys():\n obj[level][metric][tool] = sampleSet.distanceMatrix(metric)\n sys.stdout.write(jdumps(obj))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"scipy.stats.gmean",
"numpy.log",
"numpy.linalg.norm",
"pandas.DataFrame",
"scipy.stats.entropy",
"scipy.spatial.distance.pdist",
"numpy.var",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
PratyushTripathy/momepy | [
"eac89eaff63dd6cb35dfd9a736981723ec77f496",
"eac89eaff63dd6cb35dfd9a736981723ec77f496"
] | [
"momepy/graph.py",
"tests/test_diversity.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# connectivity.py\n# definitions of connectivity characters\nimport math\nimport warnings\n\nimport networkx as nx\nimport numpy as np\nfrom tqdm import tqdm\n\n__all__ = [\n \"node_degree\",\n \"meshedness\",\n \"mean_node_dist\",\n \"cds_length\",\n \"mean_node_degree\",\n \"proportion\",\n \"cyclomatic\",\n \"edge_node_ratio\",\n \"gamma\",\n \"clustering\",\n \"local_closeness_centrality\",\n \"closeness_centrality\",\n \"betweenness_centrality\",\n \"local_betweenness_centrality\",\n \"local_straightness_centrality\",\n \"straightness_centrality\",\n \"subgraph\",\n \"mean_nodes\",\n]\n\n\ndef node_degree(graph, name=\"degree\"):\n \"\"\"\n Calculates node degree for each node.\n\n Wrapper around ``networkx.degree()``.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n name : str (default 'degree')\n calculated attribute name\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.node_degree(network_graph)\n \"\"\"\n netx = graph.copy()\n\n degree = dict(nx.degree(netx))\n nx.set_node_attributes(netx, degree, name)\n\n return netx\n\n\ndef _meshedness(graph):\n \"\"\"\n Calculates meshedness of a graph.\n \"\"\"\n e = graph.number_of_edges()\n v = graph.number_of_nodes()\n return (e - v + 1) / (2 * v - 5)\n\n\ndef meshedness(graph, radius=5, name=\"meshedness\", distance=None, verbose=True):\n \"\"\"\n Calculates meshedness for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in distance\n attribute.\n\n .. 
math::\n \\\\alpha=\\\\frac{e-v+1}{2 v-5}\n\n where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.\n\n Adapted from :cite:`feliciotti2018`.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int, optional\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n float\n meshedness for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.meshedness(network_graph, radius=800, distance='edge_length')\n \"\"\"\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _meshedness(\n sub\n ) # save value calulated for subgraph to node\n\n return netx\n\n return _meshedness(netx)\n\n\ndef mean_node_dist(graph, name=\"meanlen\", length=\"mm_len\", verbose=True):\n \"\"\"\n Calculates mean distance to neighbouring nodes.\n\n Mean of values in ``length`` attribute.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n name : str, optional\n calculated attribute name\n length : str, optional\n name of attribute of segment length (geographical)\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.mean_node_dist(network_graph)\n\n \"\"\"\n netx = graph.copy()\n\n for n, nbrs in tqdm(netx.adj.items(), total=len(netx), disable=not verbose):\n lengths = []\n for nbr, keydict in nbrs.items():\n for key, eattr in keydict.items():\n lengths.append(eattr[length])\n netx.nodes[n][name] = np.mean(lengths)\n\n return netx\n\n\ndef _cds_length(graph, mode, length):\n \"\"\"\n Calculates cul-de-sac length in a graph.\n \"\"\"\n lens = []\n for u, v, k, cds in graph.edges.data(\"cdsbool\", keys=True):\n if cds:\n lens.append(graph[u][v][k][length])\n if mode == \"sum\":\n return sum(lens)\n if mode == \"mean\":\n return np.mean(lens)\n raise ValueError(\"Mode {} is not supported. Use 'sum' or 'mean'.\".format(mode))\n\n\ndef cds_length(\n graph,\n radius=5,\n mode=\"sum\",\n name=\"cds_len\",\n degree=\"degree\",\n length=\"mm_len\",\n distance=None,\n verbose=True,\n):\n \"\"\"\n Calculates length of cul-de-sacs for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. 
If ``distance=None``,\n radius will define topological distance, otherwise it uses values in distance\n attribute.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius : int\n Include all neighbors of distance <= radius from n\n mode : str (default 'sum')\n if ``'sum'``, calculate total length, if ``'mean'`` calculate mean length\n name : str, optional\n calculated attribute name\n degree : str\n name of attribute of node degree (:py:func:`momepy.node_degree`)\n length : str, optional\n name of attribute of segment length (geographical)\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n float\n length of cul-de-sacs for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.cds_length(network_graph, radius=9, mode='mean')\n \"\"\"\n # node degree needed beforehand\n netx = graph.copy()\n\n for u, v, k in netx.edges(keys=True):\n if netx.nodes[u][degree] == 1 or netx.nodes[v][degree] == 1:\n netx[u][v][k][\"cdsbool\"] = True\n else:\n netx[u][v][k][\"cdsbool\"] = False\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _cds_length(\n sub, mode=mode, length=length\n ) # save value calculated for subgraph to node\n\n return netx\n\n return _cds_length(netx, mode=mode, length=length)\n\n\ndef _mean_node_degree(graph, degree):\n \"\"\"\n Calculates mean node degree in a graph.\n \"\"\"\n return np.mean(list(dict(graph.nodes(degree)).values()))\n\n\ndef mean_node_degree(\n graph, radius=5, name=\"mean_nd\", degree=\"degree\", distance=None, verbose=True\n):\n \"\"\"\n Calculates mean node degree for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. 
If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n radius defining the extent of subgraph\n name : str, optional\n calculated attribute name\n degree : str\n name of attribute of node degree (:py:func:`momepy.node_degree`)\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n float\n mean node degree for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.mean_node_degree(network_graph, radius=3)\n \"\"\"\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _mean_node_degree(sub, degree=degree)\n\n return netx\n\n return _mean_node_degree(netx, degree=degree)\n\n\ndef _proportion(graph, degree):\n \"\"\"\n Calculates the proportion of intersection types in a graph.\n \"\"\"\n import collections\n\n values = list(dict(graph.nodes(degree)).values())\n counts = collections.Counter(values)\n return counts\n\n\ndef proportion(\n graph,\n radius=5,\n three=None,\n four=None,\n dead=None,\n degree=\"degree\",\n distance=None,\n verbose=True,\n):\n \"\"\"\n Calculates the proportion of intersection types for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n three : str, optional\n attribute name for 3-way intersections proportion\n four : str, optional\n attribute name for 4-way intersections proportion\n dead : str, optional\n attribute name for deadends proportion\n degree : str\n name of attribute of node degree (:py:func:`momepy.node_degree`)\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n dict\n dict with proportions for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.proportion(network_graph, three='threeway', four='fourway', dead='deadends')\n \"\"\"\n if not three and not four and not dead:\n raise ValueError(\n \"Nothing to calculate. 
Define names for at least one proportion to be calculated: three, four, dead.\"\n )\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n counts = _proportion(sub, degree=degree)\n if three:\n netx.nodes[n][three] = counts[3] / len(sub)\n if four:\n netx.nodes[n][four] = counts[4] / len(sub)\n if dead:\n netx.nodes[n][dead] = counts[1] / len(sub)\n return netx\n\n # add example to docs explaining keys\n counts = _proportion(netx, degree=degree)\n result = {}\n if three:\n result[three] = counts[3] / len(netx)\n if four:\n result[four] = counts[4] / len(netx)\n if dead:\n result[dead] = counts[1] / len(netx)\n\n return result\n\n\ndef _cyclomatic(graph):\n \"\"\"\n Calculates the cyclomatic complexity of a graph.\n \"\"\"\n e = graph.number_of_edges()\n v = graph.number_of_nodes()\n return e - v + 1\n\n\ndef cyclomatic(graph, radius=5, name=\"cyclomatic\", distance=None, verbose=True):\n \"\"\"\n Calculates cyclomatic complexity for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute.\n\n .. math::\n \\\\alpha=e-v+1\n\n where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.\n\n Adapted from :cite:`bourdic2012`.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n float\n cyclomatic complexity for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.cyclomatic(network_graph, radius=3)\n \"\"\"\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _cyclomatic(\n sub\n ) # save value calculated for subgraph to node\n\n return netx\n\n return _cyclomatic(netx)\n\n\ndef _edge_node_ratio(graph):\n \"\"\"\n Calculates edge / node ratio of a graph.\n \"\"\"\n e = graph.number_of_edges()\n v = graph.number_of_nodes()\n return e / v\n\n\ndef edge_node_ratio(\n graph, radius=5, name=\"edge_node_ratio\", distance=None, verbose=True\n):\n \"\"\"\n Calculates edge / node ratio for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute.\n\n .. 
math::\n \\\\alpha=e/v\n\n where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.\n\n Adapted from :cite:`dibble2017`.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n float\n edge / node ratio for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.edge_node_ratio(network_graph, radius=3)\n \"\"\"\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _edge_node_ratio(\n sub\n ) # save value calculated for subgraph to node\n\n return netx\n\n return _edge_node_ratio(netx)\n\n\ndef _gamma(graph):\n \"\"\"\n Calculates gamma index of a graph.\n \"\"\"\n e = graph.number_of_edges()\n v = graph.number_of_nodes()\n if v == 2:\n return np.nan\n return e / (3 * (v - 2)) # save value calculated for subgraph to node\n\n\ndef gamma(graph, radius=5, name=\"gamma\", distance=None, verbose=True):\n \"\"\"\n Calculates connectivity gamma index for subgraph around each node if radius is set, or for\n whole graph, if ``radius=None``.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute.\n\n .. 
math::\n \\\\alpha=\\\\frac{e}{3(v-2)}\n\n where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.\n\n Adapted from :cite:`dibble2017`.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph if radius is set\n float\n gamma index for graph if ``radius=None``\n\n Examples\n --------\n >>> network_graph = mm.gamma(network_graph, radius=3)\n\n \"\"\"\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _gamma(sub)\n\n return netx\n\n return _gamma(netx)\n\n\ndef clustering(graph, name=\"cluster\"):\n \"\"\"\n Calculates the squares clustering coefficient for nodes.\n\n Wrapper around ``networkx.square_clustering``.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n name : str, optional\n calculated attribute name\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.clustering(network_graph)\n \"\"\"\n netx = graph.copy()\n\n vals = nx.square_clustering(netx)\n nx.set_node_attributes(netx, vals, name)\n\n return netx\n\n\ndef _closeness_centrality(G, u=None, length=None, wf_improved=True, len_graph=None):\n r\"\"\"Compute closeness centrality for nodes. Slight adaptation of networkx\n `closeness_centrality` to allow normalisation for local closeness.\n Adapted script used in networkx.\n\n Closeness centrality [1]_ of a node `u` is the reciprocal of the\n average shortest path distance to `u` over all `n-1` reachable nodes.\n\n .. math::\n\n C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n\n where `d(v, u)` is the shortest-path distance between `v` and `u`,\n and `n` is the number of nodes that can reach `u`. Notice that the\n closeness distance function computes the incoming distance to `u`\n for directed graphs. To use outward distance, act on `G.reverse()`.\n\n Notice that higher values of closeness indicate higher centrality.\n\n Wasserman and Faust propose an improved formula for graphs with\n more than one connected component. The result is \"a ratio of the\n fraction of actors in the group who are reachable, to the average\n distance\" from the reachable actors [2]_. You might think this\n scale factor is inverted but it is not. As is, nodes from small\n components receive a smaller closeness value. Letting `N` denote\n the number of nodes in the graph,\n\n .. 
math::\n\n C_{WF}(u) = \\frac{n-1}{N-1} \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n u : node, optional\n Return only the value for node u\n\n distance : edge attribute key, optional (default=None)\n Use the specified edge attribute as the edge distance in shortest\n path calculations\n\n len_graph : int\n length of complete graph\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with closeness centrality as the value.\n\n References\n ----------\n .. [1] Linton C. Freeman: Centrality in networks: I.\n Conceptual clarification. Social Networks 1:215-239, 1979.\n http://leonidzhukov.ru/hse/2013/socialnetworks/papers/freeman79-centrality.pdf\n .. [2] pg. 201 of Wasserman, S. and Faust, K.,\n Social Network Analysis: Methods and Applications, 1994,\n Cambridge University Press.\n \"\"\"\n\n if length is not None:\n import functools\n\n # use Dijkstra's algorithm with specified attribute as edge weight\n path_length = functools.partial(\n nx.single_source_dijkstra_path_length, weight=length\n )\n else:\n path_length = nx.single_source_shortest_path_length\n\n nodes = [u]\n closeness_centrality = {}\n for n in nodes:\n sp = dict(path_length(G, n))\n totsp = sum(sp.values())\n if totsp > 0.0 and len(G) > 1:\n closeness_centrality[n] = (len(sp) - 1.0) / totsp\n # normalize to number of nodes-1 in connected part\n s = (len(sp) - 1.0) / (len_graph - 1)\n closeness_centrality[n] *= s\n else:\n closeness_centrality[n] = 0.0\n\n return closeness_centrality[u]\n\n\ndef local_closeness_centrality(\n graph, radius=5, name=\"closeness\", distance=None, weight=None\n):\n \"\"\"\n Calculates local closeness for each node based on the defined distance.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute. Based on ``networkx.closeness_centrality``.\n\n Local closeness centrality of a node `u` is the reciprocal of the\n average shortest path distance to `u` over all `n-1` nodes within subgraph.\n\n .. math::\n\n C(u) = \\\\frac{n - 1}{\\\\sum_{v=1}^{n-1} d(v, u)},\n\n where :math:`d(v, u)` is the shortest-path distance between :math:`v` and :math:`u`,\n and :math:`n` is the number of nodes that can reach :math:`u`.\n\n Adapted from :cite:`porta2006`.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n during ego_graph generation.\n weight : str, optional\n Use the specified edge attribute as the edge distance in shortest\n path calculations in closeness centrality algorithm\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.local_closeness_centrality(network_graph, radius=400, distance='edge_length')\n\n \"\"\"\n warnings.warn(\n \"local_closeness_centrality() is deprecated and will be removed in momepy 0.4.0. 
\"\n \"Use closeness_centrality() instead.\",\n FutureWarning,\n )\n\n return closeness_centrality(\n graph=graph, radius=radius, name=name, distance=distance, weight=weight\n )\n\n\ndef closeness_centrality(\n graph,\n name=\"closeness\",\n weight=\"mm_len\",\n radius=None,\n distance=None,\n verbose=True,\n **kwargs\n):\n \"\"\"\n Calculates the closeness centrality for nodes.\n\n Wrapper around ``networkx.closeness_centrality``.\n\n Closeness centrality of a node `u` is the reciprocal of the\n average shortest path distance to `u` over all `n-1` nodes within reachable nodes.\n\n .. math::\n\n C(u) = \\\\frac{n - 1}{\\\\sum_{v=1}^{n-1} d(v, u)},\n\n where :math:`d(v, u)` is the shortest-path distance between :math:`v` and :math:`u`,\n and :math:`n` is the number of nodes that can reach :math:`u`.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n name : str, optional\n calculated attribute name\n weight : str (default 'mm_len')\n attribute holding the weight of edge (e.g. length, angle)\n radius: int\n Include all neighbors of distance <= radius from n\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n during ego_graph generation.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n **kwargs\n kwargs for ``networkx.closeness_centrality``\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.closeness_centrality(network_graph)\n \"\"\"\n netx = graph.copy()\n\n if radius:\n lengraph = len(netx)\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _closeness_centrality(\n sub, n, length=weight, len_graph=lengraph\n )\n else:\n vals = nx.closeness_centrality(netx, distance=weight, **kwargs)\n nx.set_node_attributes(netx, vals, name)\n\n return netx\n\n\ndef betweenness_centrality(\n graph,\n name=\"betweenness\",\n mode=\"nodes\",\n weight=\"mm_len\",\n endpoints=True,\n radius=None,\n distance=None,\n normalized=False,\n verbose=True,\n **kwargs\n):\n \"\"\"\n Calculates the shortest-path betweenness centrality for nodes.\n\n Wrapper around ``networkx.betweenness_centrality`` or ``networkx.edge_betweenness_centrality``.\n\n Betweenness centrality of a node `v` is the sum of the\n fraction of all-pairs shortest paths that pass through `v`\n\n .. math::\n\n c_B(v) =\\\\sum_{s,t \\\\in V} \\\\frac{\\\\sigma(s, t|v)}{\\\\sigma(s, t)}\n\n where `V` is the set of nodes, :math:`\\\\sigma(s, t)` is the number of\n shortest :math:`(s, t)`-paths, and :math:`\\\\sigma(s, t|v)` is the number of\n those paths passing through some node `v` other than `s, t`.\n If `s = t`, :math:`\\\\sigma(s, t) = 1`, and if `v` in `{s, t}``,\n :math:`\\\\sigma(s, t|v) = 0`.\n\n Betweenness centrality of an edge `e` is the sum of the\n fraction of all-pairs shortest paths that pass through `e`\n\n .. 
math::\n\n c_B(e) =\\\\sum_{s,t \\\\in V} \\\\frac{\\\\sigma(s, t|e)}{\\\\sigma(s, t)}\n\n where `V` is the set of nodes, :math:`\\\\sigma(s, t)` is the number of\n shortest :math:`(s, t)`-paths, and :math:`\\\\sigma(s, t|e)` is the number of\n those paths passing through edge `e`.\n\n Adapted from :cite:`porta2006`.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n name : str, optional\n calculated attribute name\n mode : str, default 'nodes'\n mode of betweenness calculation. 'node' for node-based, 'edges' for edge-based\n weight : str (default 'mm_len')\n attribute holding the weight of edge (e.g. length, angle)\n radius: int\n Include all neighbors of distance <= radius from n\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n during ego_graph generation.\n normalized : bool, optional\n If True the betweenness values are normalized by `2/((n-1)(n-2))`,\n where n is the number of nodes in subgraph.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n **kwargs\n kwargs for ``networkx.betweenness_centrality`` or ``networkx.edge_betweenness_centrality``\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.betweenness_centrality(network_graph)\n\n Notes\n -----\n In case of angular betweenness, implementation is based on \"Tasos Implementation\".\n \"\"\"\n netx = graph.copy()\n\n # has to be Graph not MultiGraph as MG is not supported by networkx2.4\n G = nx.Graph()\n for u, v, k, data in netx.edges(data=True, keys=True):\n if G.has_edge(u, v):\n if G[u][v][weight] > netx[u][v][k][weight]:\n nx.set_edge_attributes(G, {(u, v): data})\n else:\n G.add_edge(u, v, **data)\n\n if radius:\n for n in tqdm(G, total=len(G), disable=not verbose):\n sub = nx.ego_graph(\n G, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = nx.betweenness_centrality(\n sub, weight=weight, normalized=normalized, **kwargs\n )[n]\n\n elif mode == \"nodes\":\n vals = nx.betweenness_centrality(\n G, weight=weight, endpoints=endpoints, **kwargs\n )\n nx.set_node_attributes(netx, vals, name)\n elif mode == \"edges\":\n vals = nx.edge_betweenness_centrality(G, weight=weight, **kwargs)\n for u, v, k in netx.edges(keys=True):\n try:\n val = vals[u, v]\n except KeyError:\n val = vals[v, u]\n netx[u][v][k][name] = val\n else:\n raise ValueError(\n \"Mode {} is not supported. Use 'nodes' or 'edges'.\".format(mode)\n )\n\n return netx\n\n\ndef local_betweenness_centrality(\n graph,\n radius=5,\n name=\"betweenness\",\n distance=None,\n weight=None,\n normalized=False,\n **kwargs\n):\n \"\"\"\n Calculates the shortest-path betweenness centrality for nodes within subgraph.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute. Based on ``networkx.betweenness_centrality``.\n\n Betweenness centrality of a node `v` is the sum of the\n fraction of all-pairs shortest paths that pass through `v`\n\n .. 
math::\n\n c_B(v) =\\\\sum_{s,t \\\\in V} \\\\frac{\\\\sigma(s, t|v)}{\\\\sigma(s, t)}\n\n where `V` is the set of nodes, :math:`\\\\sigma(s, t)` is the number of\n shortest :math:`(s, t)`-paths, and :math:`\\\\sigma(s, t|v)` is the number of\n those paths passing through some node `v` other than `s, t`.\n If `s = t`, :math:`\\\\sigma(s, t) = 1`, and if `v` in `{s, t}``,\n :math:`\\\\sigma(s, t|v) = 0`.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n during ego_graph generation.\n weight : str, optional\n Use the specified edge attribute as the edge distance in shortest\n path calculations in closeness centrality algorithm\n normalized : bool, optional\n If True the betweenness values are normalized by `2/((n-1)(n-2))`,\n where n is the number of nodes in subgraph.\n **kwargs\n kwargs for ``networkx.betweenness_centrality_subset``\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.local_betweenness_centrality(network_graph, radius=800, distance='edge_length')\n\n \"\"\"\n warnings.warn(\n \"local_betweenness_centrality() is deprecated and will be removed in momepy 0.4.0. \"\n \"Use betweenness_centrality() instead.\",\n FutureWarning,\n )\n\n return betweenness_centrality(\n graph,\n radius=radius,\n name=name,\n distance=distance,\n weight=weight,\n normalized=normalized,\n **kwargs\n )\n\n\ndef _euclidean(n, m):\n \"\"\"helper for straightness\"\"\"\n return math.sqrt((n[0] - m[0]) ** 2 + (n[1] - m[1]) ** 2)\n\n\ndef _straightness_centrality(G, weight, normalized=True):\n \"\"\"\n Calculates straightness centrality.\n \"\"\"\n straightness_centrality = {}\n\n for n in G.nodes():\n straightness = 0\n sp = nx.single_source_dijkstra_path_length(G, n, weight=weight)\n\n if len(sp) > 0 and len(G) > 1:\n for target in sp:\n if n != target:\n network_dist = sp[target]\n euclidean_dist = _euclidean(n, target)\n straightness = straightness + (euclidean_dist / network_dist)\n straightness_centrality[n] = straightness * (1.0 / (len(G) - 1.0))\n # normalize to number of nodes-1 in connected part\n if normalized:\n if len(sp) > 1:\n s = (len(G) - 1.0) / (len(sp) - 1.0)\n straightness_centrality[n] *= s\n else:\n straightness_centrality[n] = 0\n else:\n straightness_centrality[n] = 0.0\n return straightness_centrality\n\n\ndef straightness_centrality(\n graph,\n weight=\"mm_len\",\n normalized=True,\n name=\"straightness\",\n radius=None,\n distance=None,\n verbose=True,\n):\n \"\"\"\n Calculates the straightness centrality for nodes.\n\n .. 
math::\n C_{S}(i)=\\\\frac{1}{n-1} \\\\sum_{j \\\\in V, j \\\\neq i} \\\\frac{d_{i j}^{E u}}{d_{i j}}\n\n where :math:`\\\\mathrm{d}^{\\\\mathrm{E} \\\\mathrm{u}}_{\\\\mathrm{ij}}` is the Euclidean distance\n between nodes `i` and `j` along a straight line.\n\n Adapted from :cite:`porta2006`.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n weight : str (default 'mm_len')\n attribute holding length of edge\n normalized : bool\n normalize to number of nodes-1 in connected part (for local straightness\n is recommended to set to normalized False)\n name : str, optional\n calculated attribute name\n radius: int\n Include all neighbors of distance <= radius from n\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n during ego_graph generation.\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.straightness_centrality(network_graph)\n \"\"\"\n netx = graph.copy()\n\n if radius:\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n netx.nodes[n][name] = _straightness_centrality(\n sub, weight=weight, normalized=normalized\n )[n]\n else:\n vals = _straightness_centrality(netx, weight=weight, normalized=normalized)\n nx.set_node_attributes(netx, vals, name)\n\n return netx\n\n\ndef local_straightness_centrality(\n graph, radius=5, name=\"straightness\", distance=None, weight=\"mm_len\"\n):\n \"\"\"\n Calculates local straightness for each node based on the defined distance.\n\n Subgraph is generated around each node within set radius. If ``distance=None``,\n radius will define topological distance, otherwise it uses values in ``distance``\n attribute.\n\n .. math::\n C_{S}(i)=\\\\frac{1}{n-1} \\\\sum_{j \\\\in V, j \\\\neq i} \\\\frac{d_{i j}^{E u}}{d_{i j}}\n\n where :math:`\\\\mathrm{d}^{\\\\mathrm{E} \\\\mathrm{u}}_{\\\\mathrm{ij}}` is the Euclidean distance\n between nodes `i` and `j` along a straight line.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n Include all neighbors of distance <= radius from n\n name : str, optional\n calculated attribute name\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n during ego_graph generation.\n weight : str, optional\n Use the specified edge attribute as the edge distance in shortest\n path calculations in closeness centrality algorithm\n\n Returns\n -------\n Graph\n networkx.Graph\n\n\n Examples\n --------\n >>> network_graph = mm.local_straightness_centrality(network_graph, radius=400, distance='edge_length')\n\n \"\"\"\n warnings.warn(\n \"local_straightness_centrality() is deprecated and will be removed in momepy 0.4.0. 
\"\n \"Use straightness_centrality() instead.\",\n FutureWarning,\n )\n\n return straightness_centrality(\n graph=graph, radius=radius, name=name, distance=distance, weight=weight\n )\n\n\ndef subgraph(\n graph,\n radius=5,\n distance=None,\n meshedness=True,\n cds_length=True,\n mode=\"sum\",\n degree=\"degree\",\n length=\"mm_len\",\n mean_node_degree=True,\n proportion={3: True, 4: True, 0: True},\n cyclomatic=True,\n edge_node_ratio=True,\n gamma=True,\n local_closeness=True,\n closeness_weight=None,\n verbose=True,\n):\n \"\"\"\n Calculates all subgraph-based characters.\n\n Generating subgraph might be a time consuming activity. If we want to use the same\n subgraph for more characters, ``subgraph`` allows this by generating subgraph and\n then analysing it using selected options.\n\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph representing street network.\n Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`\n radius: int\n radius defining the extent of subgraph\n distance : str, optional\n Use specified edge data key as distance.\n For example, setting ``distance=’weight’`` will use the edge ``weight`` to\n measure the distance from the node n.\n meshedness : bool, default True\n Calculate meshedness (True/False)\n cds_length : bool, default True\n Calculate cul-de-sac length (True/False)\n mode : str (defualt 'sum')\n if ``'sum'``, calculate total cds_length, if ``'mean'`` calculate mean cds_length\n degree : str\n name of attribute of node degree (:py:func:`momepy.node_degree`)\n length : str, default `mm_len`\n name of attribute of segment length (geographical)\n mean_node_degree : bool, default True\n Calculate mean node degree (True/False)\n proportion : dict, default {3: True, 4: True, 0: True}\n Calculate proportion {3: True/False, 4: True/False, 0: True/False}\n cyclomatic : bool, default True\n Calculate cyclomatic complexity (True/False)\n edge_node_ratio : bool, default True\n Calculate edge node ratio (True/False)\n gamma : bool, default True\n Calculate gamma index (True/False)\n local_closeness : bool, default True\n Calculate local closeness centrality (True/False)\n closeness_weight : str, optional\n Use the specified edge attribute as the edge distance in shortest\n path calculations in closeness centrality algorithm\n verbose : bool (default True)\n if True, shows progress bars in loops and indication of steps\n\n\n Returns\n -------\n Graph\n networkx.Graph\n\n Examples\n --------\n >>> network_graph = mm.subgraph(network_graph)\n \"\"\"\n\n netx = graph.copy()\n\n for n in tqdm(netx, total=len(netx), disable=not verbose):\n sub = nx.ego_graph(\n netx, n, radius=radius, distance=distance\n ) # define subgraph of steps=radius\n\n if meshedness:\n netx.nodes[n][\"meshedness\"] = _meshedness(sub)\n\n if cds_length:\n for u, v, k in netx.edges(keys=True):\n if netx.nodes[u][degree] == 1 or netx.nodes[v][degree] == 1:\n netx[u][v][k][\"cdsbool\"] = True\n else:\n netx[u][v][k][\"cdsbool\"] = False\n\n netx.nodes[n][\"cds_length\"] = _cds_length(sub, mode=mode, length=length)\n\n if mean_node_degree:\n netx.nodes[n][\"mean_node_degree\"] = _mean_node_degree(sub, degree=degree)\n\n if proportion:\n counts = _proportion(sub, degree=degree)\n if proportion[3]:\n netx.nodes[n][\"proportion_3\"] = counts[3] / len(sub)\n if proportion[4]:\n netx.nodes[n][\"proportion_4\"] = counts[4] / len(sub)\n if proportion[0]:\n netx.nodes[n][\"proportion_0\"] = counts[1] / len(sub)\n\n if cyclomatic:\n netx.nodes[n][\"cyclomatic\"] = _cyclomatic(sub)\n\n if 
edge_node_ratio:\n netx.nodes[n][\"edge_node_ratio\"] = _edge_node_ratio(sub)\n\n if gamma:\n netx.nodes[n][\"gamma\"] = _gamma(sub)\n\n if local_closeness:\n lengraph = len(netx)\n netx.nodes[n][\"local_closeness\"] = _closeness_centrality(\n sub, n, length=closeness_weight, len_graph=lengraph\n )\n\n return netx\n\n\ndef mean_nodes(G, attr):\n \"\"\"\n Calculates mean value of nodes attr for each edge.\n \"\"\"\n for u, v, k in G.edges(keys=True):\n mean = (G.nodes[u][attr] + G.nodes[v][attr]) / 2\n G[u][v][k][attr] = mean\n",
"import geopandas as gpd\nimport momepy as mm\nimport numpy as np\nimport pytest\nfrom momepy import sw_high\nfrom pytest import approx\n\n\nclass TestDiversity:\n def setup_method(self):\n\n test_file_path = mm.datasets.get_path(\"bubenec\")\n self.df_buildings = gpd.read_file(test_file_path, layer=\"buildings\")\n self.df_streets = gpd.read_file(test_file_path, layer=\"streets\")\n self.df_tessellation = gpd.read_file(test_file_path, layer=\"tessellation\")\n self.df_buildings[\"height\"] = np.linspace(10.0, 30.0, 144)\n self.df_tessellation[\"area\"] = mm.Area(self.df_tessellation).series\n self.sw = sw_high(k=3, gdf=self.df_tessellation, ids=\"uID\")\n self.sw.neighbors[100] = []\n self.sw_drop = sw_high(k=3, gdf=self.df_tessellation[2:], ids=\"uID\")\n\n def test_Range(self):\n full_sw = mm.Range(self.df_tessellation, \"area\", self.sw, \"uID\").series\n assert full_sw[0] == approx(8255.372, rel=1e-3)\n area = self.df_tessellation[\"area\"]\n full2 = mm.Range(self.df_tessellation, area, self.sw, \"uID\").series\n assert full2[0] == approx(8255.372, rel=1e-3)\n limit = mm.Range(\n self.df_tessellation, \"area\", self.sw, \"uID\", rng=(10, 90)\n ).series\n assert limit[0] == approx(4122.139, rel=1e-3)\n assert (\n mm.Range(self.df_tessellation, \"area\", self.sw_drop, \"uID\")\n .series.isna()\n .any()\n )\n\n def test_Theil(self):\n full_sw = mm.Theil(self.df_tessellation, \"area\", self.sw, \"uID\").series\n assert full_sw[0] == approx(0.25744684)\n limit = mm.Theil(\n self.df_tessellation,\n self.df_tessellation.area,\n self.sw,\n \"uID\",\n rng=(10, 90),\n ).series\n assert limit[0] == approx(0.1330295)\n zeros = mm.Theil(\n self.df_tessellation, np.zeros(len(self.df_tessellation)), self.sw, \"uID\"\n ).series\n assert zeros[0] == 0\n assert (\n mm.Theil(self.df_tessellation, \"area\", self.sw_drop, \"uID\")\n .series.isna()\n .any()\n )\n\n def test_Simpson(self):\n ht_sw = mm.Simpson(self.df_tessellation, \"area\", self.sw, \"uID\").series\n assert ht_sw[0] == 0.385\n quan_sw = mm.Simpson(\n self.df_tessellation,\n self.df_tessellation.area,\n self.sw,\n \"uID\",\n binning=\"quantiles\",\n k=3,\n ).series\n assert quan_sw[0] == 0.395\n with pytest.raises(ValueError):\n ht_sw = mm.Simpson(\n self.df_tessellation, \"area\", self.sw, \"uID\", binning=\"nonexistent\"\n )\n assert (\n mm.Simpson(self.df_tessellation, \"area\", self.sw_drop, \"uID\")\n .series.isna()\n .any()\n )\n gs = mm.Simpson(\n self.df_tessellation, \"area\", self.sw, \"uID\", gini_simpson=True\n ).series\n assert gs[0] == 1 - 0.385\n\n inv = mm.Simpson(\n self.df_tessellation, \"area\", self.sw, \"uID\", inverse=True\n ).series\n assert inv[0] == 1 / 0.385\n\n self.df_tessellation[\"cat\"] = list(range(8)) * 18\n cat = mm.Simpson(\n self.df_tessellation, \"cat\", self.sw, \"uID\", categorical=True\n ).series\n assert cat[0] == pytest.approx(0.15)\n\n cat2 = mm.Simpson(\n self.df_tessellation,\n \"cat\",\n self.sw,\n \"uID\",\n categorical=True,\n categories=range(15),\n ).series\n assert cat2[0] == pytest.approx(0.15)\n\n def test_Gini(self):\n full_sw = mm.Gini(self.df_tessellation, \"area\", self.sw, \"uID\").series\n assert full_sw[0] == approx(0.3945388)\n limit = mm.Gini(\n self.df_tessellation, \"area\", self.sw, \"uID\", rng=(10, 90)\n ).series\n assert limit[0] == approx(0.28532814)\n self.df_tessellation[\"negative\"] = (\n self.df_tessellation.area - self.df_tessellation.area.mean()\n )\n with pytest.raises(ValueError):\n mm.Gini(self.df_tessellation, \"negative\", self.sw, \"uID\").series\n assert (\n 
mm.Gini(self.df_tessellation, \"area\", self.sw_drop, \"uID\")\n .series.isna()\n .any()\n )\n\n def test_Shannon(self):\n ht_sw = mm.Shannon(self.df_tessellation, \"area\", self.sw, \"uID\").series\n assert ht_sw[0] == 1.094056456831614\n quan_sw = mm.Shannon(\n self.df_tessellation,\n self.df_tessellation.area,\n self.sw,\n \"uID\",\n binning=\"quantiles\",\n k=3,\n ).series\n assert quan_sw[0] == 0.9985793315873921\n with pytest.raises(ValueError):\n ht_sw = mm.Shannon(\n self.df_tessellation, \"area\", self.sw, \"uID\", binning=\"nonexistent\"\n )\n assert (\n mm.Shannon(self.df_tessellation, \"area\", self.sw_drop, \"uID\")\n .series.isna()\n .any()\n )\n\n self.df_tessellation[\"cat\"] = list(range(8)) * 18\n cat = mm.Shannon(\n self.df_tessellation, \"cat\", self.sw, \"uID\", categorical=True\n ).series\n assert cat[0] == pytest.approx(1.973)\n\n cat2 = mm.Shannon(\n self.df_tessellation,\n \"cat\",\n self.sw,\n \"uID\",\n categorical=True,\n categories=range(15),\n ).series\n assert cat2[0] == pytest.approx(1.973)\n\n def test_Unique(self):\n self.df_tessellation[\"cat\"] = list(range(8)) * 18\n un = mm.Unique(self.df_tessellation, \"cat\", self.sw, \"uID\").series\n assert un[0] == 8\n un = mm.Unique(self.df_tessellation, list(range(8)) * 18, self.sw, \"uID\").series\n assert un[0] == 8\n un = mm.Unique(self.df_tessellation, \"cat\", self.sw_drop, \"uID\").series\n assert un.isna().any()\n assert un[5] == 8\n\n def test_Percentile(self):\n perc = mm.Percentiles(self.df_tessellation, \"area\", self.sw, \"uID\").frame\n assert np.all(\n perc.loc[0].values - np.array([1085.11492833, 2623.9962661, 4115.47168328])\n < 0.00001\n )\n perc = mm.Percentiles(\n self.df_tessellation, list(range(8)) * 18, self.sw, \"uID\"\n ).frame\n assert np.all(perc.loc[0].values == np.array([1.0, 3.5, 6.0]))\n"
] | [
[
"numpy.mean"
],
[
"numpy.array",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yangguohao/Paddle | [
"81622708a7c904092185ef04897b1e81629f51a6",
"81622708a7c904092185ef04897b1e81629f51a6"
] | [
"python/paddle/fluid/tests/unittests/auto_parallel/test_dist_context.py",
"python/paddle/vision/transforms/functional.py"
] | [
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport os\nimport json\n\nimport paddle\nimport numpy as np\nimport paddle.nn as nn\nimport paddle.utils as utils\nimport paddle.static as static\nimport paddle.nn.functional as F\n\nfrom paddle.distributed import fleet\nimport paddle.distributed.auto_parallel as auto\nfrom paddle.distributed.auto_parallel.dist_context import DistributedContext\nfrom paddle.distributed.auto_parallel.utils import print_program_with_dist_attr\n\npaddle.enable_static()\n\nbatch_size = 4\nhidden_size = 1024\nsequence_len = 512\n_g_process_mesh = [[0, 1], [2, 3]]\n\n\ndef get_random_inputs_and_labels(input_shape, label_shape):\n input = np.random.random(size=input_shape).astype('float32')\n label = np.random.random(size=label_shape).astype('float32')\n return input, label\n\n\ndef batch_generator_creator():\n def __reader__():\n for _ in range(batch_size):\n batch_input, batch_label = get_random_inputs_and_labels(\n [batch_size, sequence_len, hidden_size],\n [batch_size, sequence_len, 1])\n yield batch_input, batch_label\n\n return __reader__\n\n\nclass MLPLayer(nn.Layer):\n def __init__(self,\n hidden_size=1024,\n intermediate_size=4 * 1024,\n dropout_ratio=0.1,\n initializer_range=0.02):\n super(MLPLayer, self).__init__()\n d_model = hidden_size\n dim_feedforward = intermediate_size\n param_initializer = nn.initializer.Normal(\n mean=0.0, std=initializer_range)\n\n self.norm = nn.LayerNorm(d_model, epsilon=1e-5)\n self.linear0 = nn.Linear(\n d_model,\n dim_feedforward,\n weight_attr=paddle.ParamAttr(initializer=param_initializer),\n bias_attr=None)\n self.linear1 = nn.Linear(\n dim_feedforward,\n d_model,\n weight_attr=paddle.ParamAttr(initializer=param_initializer),\n bias_attr=None)\n\n def forward(self, input):\n out = self.norm(input)\n auto.shard_tensor(\n self.linear0.weight,\n dist_attr={\n \"process_mesh\": _g_process_mesh[0],\n \"dims_mapping\": [-1, 0]\n })\n out = self.linear0(out)\n out = F.gelu(out, approximate=True)\n auto.shard_tensor(\n self.linear1.weight,\n dist_attr={\n \"process_mesh\": _g_process_mesh[1],\n \"dims_mapping\": [0, -1]\n })\n out = self.linear1(out)\n\n return out\n\n\ndef get_program():\n dist_strategy = fleet.DistributedStrategy()\n dist_strategy.semi_auto = True\n # fleet.init(is_collective=True, strategy=dist_strategy)\n\n train_program = static.Program()\n start_program = static.Program()\n with static.program_guard(train_program, start_program):\n # input\n input = static.data(\n name=\"input\",\n shape=[batch_size, sequence_len, hidden_size],\n dtype='float32')\n label = static.data(\n name=\"label\", shape=[batch_size, sequence_len, 1], dtype='float32')\n data_holder = [input, label]\n # dataloader\n dataloader = paddle.io.DataLoader.from_generator(\n feed_list=data_holder, capacity=4 * batch_size, iterable=False)\n dataloader.set_batch_generator(\n batch_generator_creator(), places=paddle.static.cuda_places())\n # data dist_attr\n 
auto.shard_tensor(\n input,\n dist_attr={\n \"process_mesh\": _g_process_mesh[0],\n \"dims_mapping\": [0, -1, -1]\n })\n auto.shard_tensor(\n label,\n dist_attr={\n \"process_mesh\": _g_process_mesh[0],\n \"dims_mapping\": [0, -1, -1]\n })\n\n mlp_start = MLPLayer(\n hidden_size=hidden_size,\n intermediate_size=4 * hidden_size,\n dropout_ratio=0.1,\n initializer_range=0.02)\n pred = mlp_start(input)\n\n mlp_mid = MLPLayer(\n hidden_size=hidden_size,\n intermediate_size=4 * hidden_size,\n dropout_ratio=0.1,\n initializer_range=0.02)\n pred = mlp_mid(pred)\n\n mlp_end = MLPLayer(\n hidden_size=hidden_size,\n intermediate_size=4 * hidden_size,\n dropout_ratio=0.1,\n initializer_range=0.02)\n pred = mlp_end(pred)\n\n error_cost = paddle.nn.functional.square_error_cost(pred, label)\n loss = paddle.mean(error_cost)\n\n optimizer = paddle.optimizer.Adam(\n learning_rate=0.00001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-08,\n grad_clip=None)\n\n feed_vars = {\"inputs\": [input], \"labels\": [label]}\n fetch_vars = {\"loss\": [loss]}\n\n return train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars\n\n\nclass TestDistributedContext(unittest.TestCase):\n def test_backup_restore(self):\n train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars = get_program(\n )\n dist_context = DistributedContext(train_program, start_program,\n optimizer, loss, feed_vars,\n fetch_vars)\n dist_context.initialize()\n\n dist_context._backup(serial=True, dist=True)\n dist_context._restore(\n serial=True,\n serial_mode=\"to_backup\",\n dist=True,\n dist_mode=\"to_backup\")\n\n dist_context._backup(serial=True, dist=True)\n dist_context._restore(\n serial=True,\n serial_mode=\"to_original\",\n dist=True,\n dist_mode=\"to_original\")\n\n dist_context._backup(serial=True, dist=True)\n dist_context._restore(serial=True, dist=True, dist_mode=\"to_default\")\n\n dist_context._backup(serial=True, dist=True)\n dist_context._restore(serial=True, dist=True, dist_mode=\"to_nothing\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\n\nimport sys\nimport math\nimport numbers\nimport warnings\nimport collections\n\nimport numpy as np\nfrom PIL import Image\nfrom numpy import sin, cos, tan\nimport paddle\n\nfrom . import functional_pil as F_pil\nfrom . import functional_cv2 as F_cv2\nfrom . import functional_tensor as F_t\n\n__all__ = []\n\n\ndef _is_pil_image(img):\n return isinstance(img, Image.Image)\n\n\ndef _is_tensor_image(img):\n return isinstance(img, paddle.Tensor)\n\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\n\ndef to_tensor(pic, data_format='CHW'):\n \"\"\"Converts a ``PIL.Image`` or ``numpy.ndarray`` to paddle.Tensor.\n\n See ``ToTensor`` for more details.\n\n Args:\n pic (PIL.Image|np.ndarray): Image to be converted to tensor.\n data_format (str, optional): Data format of output tensor, should be 'HWC' or \n 'CHW'. Default: 'CHW'.\n\n Returns:\n Tensor: Converted image. Data type is same as input img.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n tensor = F.to_tensor(fake_img)\n print(tensor.shape)\n\n \"\"\"\n if not (_is_pil_image(pic) or _is_numpy_image(pic) or\n _is_tensor_image(pic)):\n raise TypeError(\n 'pic should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(pic)))\n\n if _is_pil_image(pic):\n return F_pil.to_tensor(pic, data_format)\n elif _is_numpy_image(pic):\n return F_cv2.to_tensor(pic, data_format)\n else:\n return pic if data_format.lower() == 'chw' else pic.transpose((1, 2, 0))\n\n\ndef resize(img, size, interpolation='bilinear'):\n \"\"\"\n Resizes the image to given size\n\n Args:\n input (PIL.Image|np.ndarray): Image to be resized.\n size (int|list|tuple): Target size of input data, with (height, width) shape.\n interpolation (int|str, optional): Interpolation method. when use pil backend, \n support method are as following: \n - \"nearest\": Image.NEAREST, \n - \"bilinear\": Image.BILINEAR, \n - \"bicubic\": Image.BICUBIC, \n - \"box\": Image.BOX, \n - \"lanczos\": Image.LANCZOS, \n - \"hamming\": Image.HAMMING\n when use cv2 backend, support method are as following: \n - \"nearest\": cv2.INTER_NEAREST, \n - \"bilinear\": cv2.INTER_LINEAR, \n - \"area\": cv2.INTER_AREA, \n - \"bicubic\": cv2.INTER_CUBIC, \n - \"lanczos\": cv2.INTER_LANCZOS4\n\n Returns:\n PIL.Image or np.array: Resized image.\n\n Examples:\n .. 
code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n converted_img = F.resize(fake_img, 224)\n print(converted_img.size)\n # (262, 224)\n\n converted_img = F.resize(fake_img, (200, 150))\n print(converted_img.size)\n # (150, 200)\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.resize(img, size, interpolation)\n elif _is_tensor_image(img):\n return F_t.resize(img, size, interpolation)\n else:\n return F_cv2.resize(img, size, interpolation)\n\n\ndef pad(img, padding, fill=0, padding_mode='constant'):\n \"\"\"\n Pads the given PIL.Image or numpy.array on all sides with specified padding mode and fill value.\n\n Args:\n img (PIL.Image|np.array): Image to be padded.\n padding (int|list|tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If list/tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a list/tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill (float, optional): Pixel fill value for constant fill. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant. Default: 0. \n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default: 'constant'.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n PIL.Image or np.array: Padded image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n padded_img = F.pad(fake_img, padding=1)\n print(padded_img.size)\n\n padded_img = F.pad(fake_img, padding=(2, 1))\n print(padded_img.size)\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.pad(img, padding, fill, padding_mode)\n elif _is_tensor_image(img):\n return F_t.pad(img, padding, fill, padding_mode)\n else:\n return F_cv2.pad(img, padding, fill, padding_mode)\n\n\ndef crop(img, top, left, height, width):\n \"\"\"Crops the given Image.\n\n Args:\n img (PIL.Image|np.array): Image to be cropped. 
(0,0) denotes the top left \n corner of the image.\n top (int): Vertical component of the top left corner of the crop box.\n left (int): Horizontal component of the top left corner of the crop box.\n height (int): Height of the crop box.\n width (int): Width of the crop box.\n\n Returns:\n PIL.Image or np.array: Cropped image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n cropped_img = F.crop(fake_img, 56, 150, 200, 100)\n print(cropped_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.crop(img, top, left, height, width)\n elif _is_tensor_image(img):\n return F_t.crop(img, top, left, height, width)\n else:\n return F_cv2.crop(img, top, left, height, width)\n\n\ndef center_crop(img, output_size):\n \"\"\"Crops the given Image and resize it to desired size.\n\n Args:\n img (PIL.Image|np.array): Image to be cropped. (0,0) denotes the top left corner of the image.\n output_size (sequence or int): (height, width) of the crop box. If int,\n it is used for both directions\n \n Returns:\n PIL.Image or np.array: Cropped image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n cropped_img = F.center_crop(fake_img, (150, 100))\n print(cropped_img.size)\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.center_crop(img, output_size)\n elif _is_tensor_image(img):\n return F_t.center_crop(img, output_size)\n else:\n return F_cv2.center_crop(img, output_size)\n\n\ndef hflip(img):\n \"\"\"Horizontally flips the given Image or np.array.\n\n Args:\n img (PIL.Image|np.array): Image to be flipped.\n\n Returns:\n PIL.Image or np.array: Horizontall flipped image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n flpped_img = F.hflip(fake_img)\n print(flpped_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.hflip(img)\n elif _is_tensor_image(img):\n return F_t.hflip(img)\n else:\n return F_cv2.hflip(img)\n\n\ndef vflip(img):\n \"\"\"Vertically flips the given Image or np.array.\n\n Args:\n img (PIL.Image|np.array): Image to be flipped.\n\n Returns:\n PIL.Image or np.array: Vertically flipped image.\n\n Examples:\n .. 
code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n flpped_img = F.vflip(fake_img)\n print(flpped_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.vflip(img)\n elif _is_tensor_image(img):\n return F_t.vflip(img)\n else:\n return F_cv2.vflip(img)\n\n\ndef adjust_brightness(img, brightness_factor):\n \"\"\"Adjusts brightness of an Image.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.\n brightness_factor (float): How much to adjust the brightness. Can be\n any non negative number. 0 gives a black image, 1 gives the\n original image while 2 increases the brightness by a factor of 2.\n\n Returns:\n PIL.Image|np.array|paddle.Tensor: Brightness adjusted image.\n\n Examples:\n .. code-block:: python\n :name: code-example1\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n print(fake_img.size) # (300, 256)\n print(fake_img.load()[1,1]) # (95, 127, 202)\n converted_img = F.adjust_brightness(fake_img, 0.5)\n print(converted_img.size) # (300, 256)\n print(converted_img.load()[1,1]) # (47, 63, 101)\n\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.adjust_brightness(img, brightness_factor)\n elif _is_numpy_image(img):\n return F_cv2.adjust_brightness(img, brightness_factor)\n else:\n return F_t.adjust_brightness(img, brightness_factor)\n\n\ndef adjust_contrast(img, contrast_factor):\n \"\"\"Adjusts contrast of an Image.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.\n contrast_factor (float): How much to adjust the contrast. Can be any\n non negative number. 0 gives a solid gray image, 1 gives the\n original image while 2 increases the contrast by a factor of 2.\n\n Returns:\n PIL.Image|np.array|paddle.Tensor: Contrast adjusted image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n converted_img = F.adjust_contrast(fake_img, 0.4)\n print(converted_img.size)\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.adjust_contrast(img, contrast_factor)\n elif _is_numpy_image(img):\n return F_cv2.adjust_contrast(img, contrast_factor)\n else:\n return F_t.adjust_contrast(img, contrast_factor)\n\n\ndef adjust_saturation(img, saturation_factor):\n \"\"\"Adjusts color saturation of an image.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. 
0 will\n give a black and white image, 1 will give the original image while\n 2 will enhance the saturation by a factor of 2.\n\n Returns:\n PIL.Image|np.array|paddle.Tensor: Saturation adjusted image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n converted_img = F.adjust_saturation(fake_img, 0.4)\n print(converted_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.adjust_saturation(img, saturation_factor)\n elif _is_numpy_image(img):\n return F_cv2.adjust_saturation(img, saturation_factor)\n else:\n return F_t.adjust_saturation(img, saturation_factor)\n\n\ndef adjust_hue(img, hue_factor):\n \"\"\"Adjusts hue of an image.\n\n The image hue is adjusted by converting the image to HSV and\n cyclically shifting the intensities in the hue channel (H).\n The image is then converted back to original image mode.\n\n `hue_factor` is the amount of shift in H channel and must be in the\n interval `[-0.5, 0.5]`.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.\n hue_factor (float): How much to shift the hue channel. Should be in\n [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in\n HSV space in positive and negative direction respectively.\n 0 means no shift. Therefore, both -0.5 and 0.5 will give an image\n with complementary colors while 0 gives the original image.\n\n Returns:\n PIL.Image|np.array|paddle.Tensor: Hue adjusted image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n converted_img = F.adjust_hue(fake_img, 0.4)\n print(converted_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. 
Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.adjust_hue(img, hue_factor)\n elif _is_numpy_image(img):\n return F_cv2.adjust_hue(img, hue_factor)\n else:\n return F_t.adjust_hue(img, hue_factor)\n\n\ndef _get_affine_matrix(center, angle, translate, scale, shear):\n # Affine matrix is : M = T * C * RotateScaleShear * C^-1\n # Ihe inverse one is : M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1\n rot = math.radians(angle)\n sx = math.radians(shear[0])\n sy = math.radians(shear[1])\n\n # Rotate and Shear without scaling \n a = math.cos(rot - sy) / math.cos(sy)\n b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)\n c = math.sin(rot - sy) / math.cos(sy)\n d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)\n\n # Center Translation\n cx, cy = center\n tx, ty = translate\n\n # Inverted rotation matrix with scale and shear\n # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1\n matrix = [d, -b, 0.0, -c, a, 0.0]\n matrix = [x / scale for x in matrix]\n # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1\n matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)\n matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)\n # Apply center translation: C * RSS^-1 * C^-1 * T^-1\n matrix[2] += cx\n matrix[5] += cy\n\n return matrix\n\n\ndef affine(img,\n angle,\n translate,\n scale,\n shear,\n interpolation=\"nearest\",\n fill=0,\n center=None):\n \"\"\"Apply affine transformation on the image.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): Image to be affined.\n angle (int|float): The angle of the random rotation in clockwise order.\n translate (list[float]): Maximum absolute fraction for horizontal and vertical translations.\n scale (float): Scale factor for the image, scale should be positive.\n shear (list[float]): Shear angle values which are parallel to the x-axis and y-axis in clockwise order.\n interpolation (str, optional): Interpolation method. If omitted, or if the \n image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST \n according the backend. \n When use pil backend, support method are as following: \n - \"nearest\": Image.NEAREST, \n - \"bilinear\": Image.BILINEAR, \n - \"bicubic\": Image.BICUBIC\n When use cv2 backend, support method are as following: \n - \"nearest\": cv2.INTER_NEAREST, \n - \"bilinear\": cv2.INTER_LINEAR, \n - \"bicubic\": cv2.INTER_CUBIC\n fill (int|list|tuple, optional): Pixel fill value for the area outside the transformed\n image. If given a number, the value is used for all bands respectively.\n center (2-tuple, optional): Optional center of rotation, (x, y).\n Origin is the upper left corner.\n Default is the center of the image.\n\n Returns:\n PIL.Image|np.array|paddle.Tensor: Affine Transformed image.\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.vision.transforms import functional as F\n\n fake_img = paddle.randn((3, 256, 300)).astype(paddle.float32)\n\n affined_img = F.affine(fake_img, 45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 10])\n print(affined_img.shape)\n \"\"\"\n\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. 
Got {}'.\n format(type(img)))\n\n if not isinstance(angle, (int, float)):\n raise TypeError(\"Argument angle should be int or float\")\n\n if not isinstance(translate, (list, tuple)):\n raise TypeError(\"Argument translate should be a sequence\")\n\n if len(translate) != 2:\n raise ValueError(\"Argument translate should be a sequence of length 2\")\n\n if scale <= 0.0:\n raise ValueError(\"Argument scale should be positive\")\n\n if not isinstance(shear, (numbers.Number, (list, tuple))):\n raise TypeError(\n \"Shear should be either a single value or a sequence of two values\")\n\n if not isinstance(interpolation, str):\n raise TypeError(\"Argument interpolation should be a string\")\n\n if isinstance(angle, int):\n angle = float(angle)\n\n if isinstance(translate, tuple):\n translate = list(translate)\n\n if isinstance(shear, numbers.Number):\n shear = [shear, 0.0]\n\n if isinstance(shear, tuple):\n shear = list(shear)\n\n if len(shear) == 1:\n shear = [shear[0], shear[0]]\n\n if len(shear) != 2:\n raise ValueError(\n f\"Shear should be a sequence containing two values. Got {shear}\")\n\n if center is not None and not isinstance(center, (list, tuple)):\n raise TypeError(\"Argument center should be a sequence\")\n\n if _is_pil_image(img):\n width, height = img.size\n # center = (width * 0.5 + 0.5, height * 0.5 + 0.5)\n # it is visually better to estimate the center without 0.5 offset\n # otherwise image rotated by 90 degrees is shifted vs output image of F_t.affine\n if center is None:\n center = [width * 0.5, height * 0.5]\n matrix = _get_affine_matrix(center, angle, translate, scale, shear)\n return F_pil.affine(img, matrix, interpolation, fill)\n\n if _is_numpy_image(img):\n # get affine_matrix in F_cv2.affine() using cv2's functions\n width, height = img.shape[0:2]\n # center = (width * 0.5 + 0.5, height * 0.5 + 0.5)\n # it is visually better to estimate the center without 0.5 offset\n # otherwise image rotated by 90 degrees is shifted vs output image of F_t.affine\n if center is None:\n center = (width * 0.5, height * 0.5)\n return F_cv2.affine(img, angle, translate, scale, shear, interpolation,\n fill, center)\n\n if _is_tensor_image(img):\n center_f = [0.0, 0.0]\n if center is not None:\n height, width = img.shape[-1], img.shape[-2]\n # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.\n center_f = [\n 1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])\n ]\n translate_f = [1.0 * t for t in translate]\n matrix = _get_affine_matrix(center_f, angle, translate_f, scale, shear)\n return F_t.affine(img, matrix, interpolation, fill)\n\n\ndef rotate(img,\n angle,\n interpolation=\"nearest\",\n expand=False,\n center=None,\n fill=0):\n \"\"\"Rotates the image by angle.\n\n\n Args:\n img (PIL.Image|np.array): Image to be rotated.\n angle (float or int): In degrees degrees counter clockwise order.\n interpolation (str, optional): Interpolation method. If omitted, or if the \n image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST \n according the backend. 
when use pil backend, support method are as following: \n - \"nearest\": Image.NEAREST, \n - \"bilinear\": Image.BILINEAR, \n - \"bicubic\": Image.BICUBIC\n when use cv2 backend, support method are as following: \n - \"nearest\": cv2.INTER_NEAREST, \n - \"bilinear\": cv2.INTER_LINEAR, \n - \"bicubic\": cv2.INTER_CUBIC\n expand (bool, optional): Optional expansion flag.\n If true, expands the output image to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-list|2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n fill (3-list|3-tuple or int): RGB pixel fill value for area outside the rotated image.\n If int, it is used for all channels respectively.\n\n\n Returns:\n PIL.Image or np.array: Rotated image.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n rotated_img = F.rotate(fake_img, 90)\n print(rotated_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if isinstance(center, list):\n center = tuple(center)\n if isinstance(fill, list):\n fill = tuple(fill)\n\n if _is_pil_image(img):\n return F_pil.rotate(img, angle, interpolation, expand, center, fill)\n elif _is_tensor_image(img):\n return F_t.rotate(img, angle, interpolation, expand, center, fill)\n else:\n return F_cv2.rotate(img, angle, interpolation, expand, center, fill)\n\n\ndef _get_perspective_coeffs(startpoints, endpoints):\n \"\"\"\n get coefficients (a, b, c, d, e, f, g, h) of the perspective transforms.\n\n In Perspective Transform each pixel (x, y) in the original image gets transformed as,\n (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )\n\n Args:\n startpoints (list[list[int]]): [top-left, top-right, bottom-right, bottom-left] of the original image,\n endpoints (list[list[int]]): [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n\n Returns:\n output (list): octuple (a, b, c, d, e, f, g, h) for transforming each pixel.\n \"\"\"\n a_matrix = np.zeros((2 * len(startpoints), 8))\n\n for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):\n a_matrix[2 * i, :] = [\n p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]\n ]\n a_matrix[2 * i + 1, :] = [\n 0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]\n ]\n\n b_matrix = np.array(startpoints).reshape([8])\n res = np.linalg.lstsq(a_matrix, b_matrix)[0]\n\n output = list(res)\n return output\n\n\ndef perspective(img, startpoints, endpoints, interpolation='nearest', fill=0):\n \"\"\"Perform perspective transform of the given image.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): Image to be transformed.\n startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners\n ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.\n endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners\n ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.\n interpolation 
(str, optional): Interpolation method. If omitted, or if the \n image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST \n according the backend. \n When use pil backend, support method are as following: \n - \"nearest\": Image.NEAREST, \n - \"bilinear\": Image.BILINEAR, \n - \"bicubic\": Image.BICUBIC\n When use cv2 backend, support method are as following: \n - \"nearest\": cv2.INTER_NEAREST, \n - \"bilinear\": cv2.INTER_LINEAR, \n - \"bicubic\": cv2.INTER_CUBIC\n fill (int|list|tuple, optional): Pixel fill value for the area outside the transformed\n image. If given a number, the value is used for all bands respectively.\n\n Returns:\n PIL.Image|np.array|paddle.Tensor: transformed Image.\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.vision.transforms import functional as F\n\n fake_img = paddle.randn((3, 256, 300)).astype(paddle.float32)\n\n startpoints = [[0, 0], [33, 0], [33, 25], [0, 25]]\n endpoints = [[3, 2], [32, 3], [30, 24], [2, 25]]\n\n perspectived_img = F.perspective(fake_img, startpoints, endpoints)\n print(perspectived_img.shape)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n coeffs = _get_perspective_coeffs(startpoints, endpoints)\n return F_pil.perspective(img, coeffs, interpolation, fill)\n elif _is_tensor_image(img):\n coeffs = _get_perspective_coeffs(startpoints, endpoints)\n return F_t.perspective(img, coeffs, interpolation, fill)\n else:\n return F_cv2.perspective(img, startpoints, endpoints, interpolation,\n fill)\n\n\ndef to_grayscale(img, num_output_channels=1):\n \"\"\"Converts image to grayscale version of image.\n\n Args:\n img (PIL.Image|np.array): Image to be converted to grayscale.\n\n Returns:\n PIL.Image or np.array: Grayscale version of the image.\n if num_output_channels = 1 : returned image is single channel\n\n if num_output_channels = 3 : returned image is 3 channel with r = g = b\n \n Examples:\n .. code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n gray_img = F.to_grayscale(fake_img)\n print(gray_img.size)\n\n \"\"\"\n if not (_is_pil_image(img) or _is_numpy_image(img) or\n _is_tensor_image(img)):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.\n format(type(img)))\n\n if _is_pil_image(img):\n return F_pil.to_grayscale(img, num_output_channels)\n elif _is_tensor_image(img):\n return F_t.to_grayscale(img, num_output_channels)\n else:\n return F_cv2.to_grayscale(img, num_output_channels)\n\n\ndef normalize(img, mean, std, data_format='CHW', to_rgb=False):\n \"\"\"Normalizes a tensor or image with mean and standard deviation.\n\n Args:\n img (PIL.Image|np.array|paddle.Tensor): input data to be normalized.\n mean (list|tuple): Sequence of means for each channel.\n std (list|tuple): Sequence of standard deviations for each channel.\n data_format (str, optional): Data format of input img, should be 'HWC' or \n 'CHW'. Default: 'CHW'.\n to_rgb (bool, optional): Whether to convert to rgb. If input is tensor, \n this option will be igored. Default: False.\n\n Returns:\n np.ndarray or Tensor: Normalized mage. Data format is same as input img.\n \n Examples:\n .. 
code-block:: python\n\n import numpy as np\n from PIL import Image\n from paddle.vision.transforms import functional as F\n\n fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')\n\n fake_img = Image.fromarray(fake_img)\n\n mean = [127.5, 127.5, 127.5]\n std = [127.5, 127.5, 127.5]\n\n normalized_img = F.normalize(fake_img, mean, std, data_format='HWC')\n print(normalized_img.max(), normalized_img.min())\n\n \"\"\"\n\n if _is_tensor_image(img):\n return F_t.normalize(img, mean, std, data_format)\n else:\n if _is_pil_image(img):\n img = np.array(img).astype(np.float32)\n\n return F_cv2.normalize(img, mean, std, data_format, to_rgb)\n\n\ndef erase(img, i, j, h, w, v, inplace=False):\n \"\"\"Erase the pixels of selected area in input image with given value.\n \n Args:\n img (paddle.Tensor | np.array | PIL.Image): input Tensor image. \n For Tensor input, the shape should be (C, H, W). For np.array input, \n the shape should be (H, W, C).\n i (int): y coordinate of the top-left point of erased region.\n j (int): x coordinate of the top-left point of erased region.\n h (int): Height of the erased region.\n w (int): Width of the erased region.\n v (paddle.Tensor | np.array): value used to replace the pixels in erased region. It \n should be np.array when img is np.array or PIL.Image.\n inplace (bool, optional): Whether this transform is inplace. Default: False.\n\n Returns:\n paddle.Tensor | np.array | PIL.Image: Erased image. The type is same with input image.\n\n Examples:\n .. code-block:: python\n\n import paddle\n \n fake_img = paddle.randn((3, 2, 4)).astype(paddle.float32)\n print(fake_img)\n\n #Tensor(shape=[3, 2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,\n # [[[ 0.02169025, -0.97859967, -1.39175487, -1.07478464],\n # [ 0.20654772, 1.74624777, 0.32268861, -0.13857445]],\n #\n # [[-0.14993843, 1.10793507, -0.40056887, -1.94395220],\n # [ 0.41686651, 0.44551995, -0.09356714, -0.60898107]],\n #\n # [[-0.24998808, -1.47699273, -0.88838995, 0.42629015],\n # [ 0.56948012, -0.96200180, 0.53355658, 3.20450878]]])\n\n values = paddle.zeros((1,1,1), dtype=paddle.float32)\n result = paddle.vision.transforms.erase(fake_img, 0, 1, 1, 2, values)\n \n print(result)\n\n #Tensor(shape=[3, 2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,\n # [[[ 0.02169025, 0. , 0. , -1.07478464],\n # [ 0.20654772, 1.74624777, 0.32268861, -0.13857445]],\n #\n # [[-0.14993843, 0. , 0. , -1.94395220],\n # [ 0.41686651, 0.44551995, -0.09356714, -0.60898107]],\n #\n # [[-0.24998808, 0. , 0. , 0.42629015],\n # [ 0.56948012, -0.96200180, 0.53355658, 3.20450878]]])\n\n \"\"\"\n if _is_tensor_image(img):\n return F_t.erase(img, i, j, h, w, v, inplace=inplace)\n elif _is_pil_image(img):\n return F_pil.erase(img, i, j, h, w, v, inplace=inplace)\n else:\n return F_cv2.erase(img, i, j, h, w, v, inplace=inplace)\n"
] | [
[
"numpy.random.random"
],
[
"numpy.linalg.lstsq",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siddharthteotia/arrow | [
"b33dfd9c6bd800308bb1619b237dbf24dea159be"
] | [
"python/pyarrow/tests/test_convert_pandas.py"
] | [
"# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\n\nfrom datetime import date, datetime, time, timedelta\nimport decimal\nimport json\n\nimport pytest\n\nimport numpy as np\nimport numpy.testing as npt\n\nimport pandas as pd\nimport pandas.util.testing as tm\n\nfrom pyarrow.compat import u, PY2\nimport pyarrow as pa\nimport pyarrow.types as patypes\n\nfrom .pandas_examples import dataframe_with_arrays, dataframe_with_lists\n\n\ndef _alltypes_example(size=100):\n return pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Pandas only support ns resolution, Arrow supports s, ms,\n # us, ns\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n\n\ndef _check_pandas_roundtrip(df, expected=None, nthreads=1,\n expected_schema=None,\n check_dtype=True, schema=None,\n preserve_index=False,\n as_batch=False):\n klass = pa.RecordBatch if as_batch else pa.Table\n table = klass.from_pandas(df, schema=schema,\n preserve_index=preserve_index,\n nthreads=nthreads)\n\n result = table.to_pandas(nthreads=nthreads)\n if expected_schema:\n assert table.schema.equals(expected_schema)\n if expected is None:\n expected = df\n tm.assert_frame_equal(result, expected, check_dtype=check_dtype,\n check_index_type=('equiv' if preserve_index\n else False))\n\n\ndef _check_series_roundtrip(s, type_=None):\n arr = pa.array(s, from_pandas=True, type=type_)\n\n result = pd.Series(arr.to_pandas(), name=s.name)\n if patypes.is_timestamp(arr.type) and arr.type.tz is not None:\n result = (result.dt.tz_localize('utc')\n .dt.tz_convert(arr.type.tz))\n\n tm.assert_series_equal(s, result)\n\n\ndef _check_array_roundtrip(values, expected=None, mask=None,\n type=None):\n arr = pa.array(values, from_pandas=True, mask=mask, type=type)\n result = arr.to_pandas()\n\n values_nulls = pd.isnull(values)\n if mask is None:\n assert arr.null_count == values_nulls.sum()\n else:\n assert arr.null_count == (mask | values_nulls).sum()\n\n if mask is None:\n tm.assert_series_equal(pd.Series(result), pd.Series(values),\n check_names=False)\n else:\n expected = pd.Series(np.ma.masked_array(values, mask=mask))\n 
tm.assert_series_equal(pd.Series(result), expected,\n check_names=False)\n\n\ndef _check_array_from_pandas_roundtrip(np_array):\n arr = pa.array(np_array, from_pandas=True)\n result = arr.to_pandas()\n npt.assert_array_equal(result, np_array)\n\n\nclass TestConvertMetadata(object):\n \"\"\"\n Conversion tests for Pandas metadata & indices.\n \"\"\"\n\n def test_non_string_columns(self):\n df = pd.DataFrame({0: [1, 2, 3]})\n table = pa.Table.from_pandas(df)\n assert table.column(0).name == '0'\n\n def test_column_index_names_are_preserved(self):\n df = pd.DataFrame({'data': [1, 2, 3]})\n df.columns.names = ['a']\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns(self):\n columns = pd.MultiIndex.from_arrays([\n ['one', 'two'], ['X', 'Y']\n ])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_with_dtypes(self):\n columns = pd.MultiIndex.from_arrays(\n [\n ['one', 'two'],\n pd.DatetimeIndex(['2017-08-01', '2017-08-02']),\n ],\n names=['level_1', 'level_2'],\n )\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_unicode(self):\n columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_integer_index_column(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_index_metadata_field_name(self):\n # test None case, and strangely named non-index columns\n df = pd.DataFrame(\n [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],\n index=pd.MultiIndex.from_arrays(\n [['c', 'b', 'a'], [3, 2, 1]],\n names=[None, 'foo']\n ),\n columns=['a', None, '__index_level_0__'],\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n col1, col2, col3, idx0, foo = js['columns']\n\n assert col1['name'] == 'a'\n assert col1['name'] == col1['field_name']\n\n assert col2['name'] is None\n assert col2['field_name'] == 'None'\n\n assert col3['name'] == '__index_level_0__'\n assert col3['name'] == col3['field_name']\n\n idx0_name, foo_name = js['index_columns']\n assert idx0_name == '__index_level_0__'\n assert idx0['field_name'] == idx0_name\n assert idx0['name'] is None\n\n assert foo_name == 'foo'\n assert foo['field_name'] == foo_name\n assert foo['name'] == foo_name\n\n def test_categorical_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), dtype='category')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'categorical'\n assert column_indexes['numpy_type'] == 'int8'\n\n md = column_indexes['metadata']\n assert md['num_categories'] == 3\n assert md['ordered'] is False\n\n def test_string_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), name='stringz')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n column_indexes, = 
js['column_indexes']\n assert column_indexes['name'] == 'stringz'\n assert column_indexes['name'] == column_indexes['field_name']\n assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')\n assert column_indexes['numpy_type'] == 'object'\n\n md = column_indexes['metadata']\n\n if not PY2:\n assert len(md) == 1\n assert md['encoding'] == 'UTF-8'\n else:\n assert md is None or 'encoding' not in md\n\n def test_datetimetz_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'datetimetz'\n assert column_indexes['numpy_type'] == 'datetime64[ns]'\n\n md = column_indexes['metadata']\n assert md['timezone'] == 'America/New_York'\n\n def test_datetimetz_row_index(self):\n df = pd.DataFrame({\n 'a': pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n })\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_categorical_row_index(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})\n df['a'] = df.a.astype('category')\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_duplicate_column_names_does_not_crash(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df)\n\n def test_dictionary_indices_boundscheck(self):\n # ARROW-1658. No validation of indices leads to segfaults in pandas\n indices = [[0, 1], [0, -1]]\n\n for inds in indices:\n arr = pa.DictionaryArray.from_arrays(inds, ['a'])\n batch = pa.RecordBatch.from_arrays([arr], ['foo'])\n table = pa.Table.from_batches([batch, batch, batch])\n\n with pytest.raises(pa.ArrowException):\n arr.to_pandas()\n\n with pytest.raises(pa.ArrowException):\n table.to_pandas()\n\n def test_unicode_with_unicode_column_and_index(self):\n df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_mixed_unicode_column_names(self):\n df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])\n\n # TODO(phillipc): Should this raise?\n with pytest.raises(AssertionError):\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_binary_column_name(self):\n column_data = [u'い']\n data = {u'あ'.encode('utf8'): column_data}\n df = pd.DataFrame(data)\n\n # we can't use _check_pandas_roundtrip here because our metdata\n # is always decoded as utf8: even if binary goes in, utf8 comes out\n t = pa.Table.from_pandas(df, preserve_index=True)\n df2 = t.to_pandas()\n assert df.values[0] == df2.values[0]\n assert df.index.values[0] == df2.index.values[0]\n assert df.columns[0] == df2.columns[0].encode('utf8')\n\n def test_multiindex_duplicate_values(self):\n num_rows = 3\n numbers = list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n\n table = pa.Table.from_pandas(df)\n result_df = table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n def test_metadata_with_mixed_types(self):\n df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})\n table = pa.Table.from_pandas(df)\n metadata = 
table.schema.metadata\n assert b'mixed' not in metadata[b'pandas']\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'bytes'\n assert data_column['numpy_type'] == 'object'\n\n def test_list_metadata(self):\n df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})\n schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])\n table = pa.Table.from_pandas(df, schema=schema)\n metadata = table.schema.metadata\n assert b'mixed' not in metadata[b'pandas']\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'list[int64]'\n assert data_column['numpy_type'] == 'object'\n\n def test_decimal_metadata(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('394092382910493.12341234678'),\n -decimal.Decimal('314292388910493.12343437128'),\n ]\n })\n table = pa.Table.from_pandas(expected)\n metadata = table.schema.metadata\n assert b'mixed' not in metadata[b'pandas']\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'decimal'\n assert data_column['numpy_type'] == 'object'\n assert data_column['metadata'] == {'precision': 26, 'scale': 11}\n\n def test_table_column_subset_metadata(self):\n # ARROW-1883\n df = pd.DataFrame({\n 'a': [1, 2, 3],\n 'b': pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')})\n table = pa.Table.from_pandas(df)\n\n table_subset = table.remove_column(1)\n result = table_subset.to_pandas()\n tm.assert_frame_equal(result, df[['a']])\n\n table_subset2 = table_subset.remove_column(1)\n result = table_subset2.to_pandas()\n tm.assert_frame_equal(result, df[['a']])\n\n # non-default index\n for index in [\n pd.Index(['a', 'b', 'c'], name='index'),\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')]:\n df = pd.DataFrame({'a': [1, 2, 3],\n 'b': [.1, .2, .3]}, index=index)\n table = pa.Table.from_pandas(df)\n\n table_subset = table.remove_column(1)\n result = table_subset.to_pandas()\n tm.assert_frame_equal(result, df[['a']])\n\n table_subset2 = table_subset.remove_column(1)\n result = table_subset2.to_pandas()\n tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))\n\n def test_empty_list_metadata(self):\n # Create table with array of empty lists, forced to have type\n # list(string) in pyarrow\n c1 = [[\"test\"], [\"a\", \"b\"], None]\n c2 = [[], [], []]\n arrays = OrderedDict([\n ('c1', pa.array(c1, type=pa.list_(pa.string()))),\n ('c2', pa.array(c2, type=pa.list_(pa.string()))),\n ])\n rb = pa.RecordBatch.from_arrays(\n list(arrays.values()),\n list(arrays.keys())\n )\n tbl = pa.Table.from_batches([rb])\n\n # First roundtrip changes schema, because pandas cannot preserve the\n # type of empty lists\n df = tbl.to_pandas()\n tbl2 = pa.Table.from_pandas(df, preserve_index=True)\n md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))\n\n # Second roundtrip\n df2 = tbl2.to_pandas()\n expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))\n\n tm.assert_frame_equal(df2, expected)\n\n assert md2['columns'] == [\n {\n 'name': 'c1',\n 'field_name': 'c1',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[unicode]',\n },\n {\n 'name': 'c2',\n 'field_name': 'c2',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[empty]',\n },\n {\n 'name': None,\n 'field_name': '__index_level_0__',\n 'metadata': None,\n 'numpy_type': 'int64',\n 'pandas_type': 'int64',\n }\n 
]\n\n\nclass TestConvertPrimitiveTypes(object):\n \"\"\"\n Conversion tests for primitive (e.g. numeric) types.\n \"\"\"\n\n def test_float_no_nulls(self):\n data = {}\n fields = []\n dtypes = [('f4', pa.float32()), ('f8', pa.float64())]\n num_values = 100\n\n for numpy_dtype, arrow_dtype in dtypes:\n values = np.random.randn(num_values)\n data[numpy_dtype] = values.astype(numpy_dtype)\n fields.append(pa.field(numpy_dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_float_nulls(self):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n dtypes = [('f4', pa.float32()), ('f8', pa.float64())]\n names = ['f4', 'f8']\n expected_cols = []\n\n arrays = []\n fields = []\n for name, arrow_dtype in dtypes:\n values = np.random.randn(num_values).astype(name)\n\n arr = pa.array(values, from_pandas=True, mask=null_mask)\n arrays.append(arr)\n fields.append(pa.field(name, arrow_dtype))\n values[null_mask] = np.nan\n\n expected_cols.append(values)\n\n ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),\n columns=names)\n\n table = pa.Table.from_arrays(arrays, names)\n assert table.schema.equals(pa.schema(fields))\n result = table.to_pandas()\n tm.assert_frame_equal(result, ex_frame)\n\n def test_integer_no_nulls(self):\n data = OrderedDict()\n fields = []\n\n numpy_dtypes = [\n ('i1', pa.int8()), ('i2', pa.int16()),\n ('i4', pa.int32()), ('i8', pa.int64()),\n ('u1', pa.uint8()), ('u2', pa.uint16()),\n ('u4', pa.uint32()), ('u8', pa.uint64()),\n ('longlong', pa.int64()), ('ulonglong', pa.uint64())\n ]\n num_values = 100\n\n for dtype, arrow_dtype in numpy_dtypes:\n info = np.iinfo(dtype)\n values = np.random.randint(max(info.min, np.iinfo(np.int_).min),\n min(info.max, np.iinfo(np.int_).max),\n size=num_values)\n data[dtype] = values.astype(dtype)\n fields.append(pa.field(dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_integer_with_nulls(self):\n # pandas requires upcast to float dtype\n\n int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n\n expected_cols = []\n arrays = []\n for name in int_dtypes:\n values = np.random.randint(0, 100, size=num_values)\n\n arr = pa.array(values, mask=null_mask)\n arrays.append(arr)\n\n expected = values.astype('f8')\n expected[null_mask] = np.nan\n\n expected_cols.append(expected)\n\n ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),\n columns=int_dtypes)\n\n table = pa.Table.from_arrays(arrays, int_dtypes)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, ex_frame)\n\n def test_array_from_pandas_type_cast(self):\n arr = np.arange(10, dtype='int64')\n\n target_type = pa.int8()\n\n result = pa.array(arr, type=target_type)\n expected = pa.array(arr.astype('int8'))\n assert result.equals(expected)\n\n def test_boolean_no_nulls(self):\n num_values = 100\n\n np.random.seed(0)\n\n df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_boolean_nulls(self):\n # pandas requires upcast to object dtype\n num_values = 100\n np.random.seed(0)\n\n mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 10, size=num_values) < 5\n\n arr = pa.array(values, mask=mask)\n\n expected 
= values.astype(object)\n expected[mask] = None\n\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n ex_frame = pd.DataFrame({'bools': expected})\n\n table = pa.Table.from_arrays([arr], ['bools'])\n assert table.schema.equals(schema)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, ex_frame)\n\n def test_float_object_nulls(self):\n arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)\n df = pd.DataFrame({'floats': arr})\n expected = pd.DataFrame({'floats': pd.to_numeric(arr)})\n field = pa.field('floats', pa.float64())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected=expected,\n expected_schema=schema)\n\n def test_int_object_nulls(self):\n arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)\n df = pd.DataFrame({'ints': arr})\n expected = pd.DataFrame({'ints': pd.to_numeric(arr)})\n field = pa.field('ints', pa.int64())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected=expected,\n expected_schema=schema)\n\n def test_boolean_object_nulls(self):\n arr = np.array([False, None, True] * 100, dtype=object)\n df = pd.DataFrame({'bools': arr})\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_all_nulls_cast_numeric(self):\n arr = np.array([None], dtype=object)\n\n def _check_type(t):\n a2 = pa.array(arr, type=t)\n assert a2.type == t\n assert a2[0].as_py() is None\n\n _check_type(pa.int32())\n _check_type(pa.float64())\n\n\nclass TestConvertDateTimeLikeTypes(object):\n \"\"\"\n Conversion tests for datetime- and timestamp-like types (date64, etc.).\n \"\"\"\n\n def test_timestamps_notimezone_no_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_notimezone_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_with_timezone(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123',\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n })\n df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')\n .to_frame())\n _check_pandas_roundtrip(df)\n\n _check_series_roundtrip(df['datetime64'])\n\n # drop-in a null and ns instead of ms\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')\n .to_frame())\n\n _check_pandas_roundtrip(df)\n\n def test_python_datetime(self):\n # ARROW-2106\n date_array = [datetime.today() + timedelta(days=x) for x in range(10)]\n df = pd.DataFrame({\n 'datetime': pd.Series(date_array, dtype=object)\n })\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].data.chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\n 'datetime': date_array\n })\n tm.assert_frame_equal(expected_df, result)\n\n def 
test_datetime64_to_date32(self):\n # ARROW-1718\n arr = pa.array([date(2017, 10, 23), None])\n c = pa.Column.from_array(\"d\", arr)\n s = c.to_pandas()\n\n arr2 = pa.Array.from_pandas(s, type=pa.date32())\n\n assert arr2.equals(arr.cast('date32'))\n\n def test_date_infer(self):\n df = pd.DataFrame({\n 'date': [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]})\n table = pa.Table.from_pandas(df, preserve_index=False)\n field = pa.field('date', pa.date32())\n schema = pa.schema([field])\n assert table.schema.equals(schema)\n result = table.to_pandas()\n expected = df.copy()\n expected['date'] = pd.to_datetime(df['date'])\n tm.assert_frame_equal(result, expected)\n\n def test_date_mask(self):\n arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],\n dtype='datetime64[D]')\n mask = [True, False]\n result = pa.array(arr, mask=np.array(mask))\n expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')\n expected = pa.array(expected, from_pandas=True)\n assert expected.equals(result)\n\n def test_date_objects_typed(self):\n arr = np.array([\n date(2017, 4, 3),\n None,\n date(2017, 4, 4),\n date(2017, 4, 5)], dtype=object)\n\n arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')\n arr_i8 = arr_i4.astype('int64') * 86400000\n mask = np.array([False, True, False, False])\n\n t32 = pa.date32()\n t64 = pa.date64()\n\n a32 = pa.array(arr, type=t32)\n a64 = pa.array(arr, type=t64)\n\n a32_expected = pa.array(arr_i4, mask=mask, type=t32)\n a64_expected = pa.array(arr_i8, mask=mask, type=t64)\n\n assert a32.equals(a32_expected)\n assert a64.equals(a64_expected)\n\n # Test converting back to pandas\n colnames = ['date32', 'date64']\n table = pa.Table.from_arrays([a32, a64], colnames)\n table_pandas = table.to_pandas()\n\n ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',\n '2017-04-05'],\n dtype='datetime64[D]')\n .astype('datetime64[ns]'))\n ex_values[1] = pd.NaT.value\n expected_pandas = pd.DataFrame({'date32': ex_values,\n 'date64': ex_values},\n columns=colnames)\n tm.assert_frame_equal(table_pandas, expected_pandas)\n\n def test_dates_from_integers(self):\n t1 = pa.date32()\n t2 = pa.date64()\n\n arr = np.array([17259, 17260, 17261], dtype='int32')\n arr2 = arr.astype('int64') * 86400000\n\n a1 = pa.array(arr, type=t1)\n a2 = pa.array(arr2, type=t2)\n\n expected = date(2017, 4, 3)\n assert a1[0].as_py() == expected\n assert a2[0].as_py() == expected\n\n @pytest.mark.xfail(reason=\"not supported ATM\",\n raises=NotImplementedError)\n def test_timedelta(self):\n # TODO(jreback): Pandas only support ns resolution\n # Arrow supports ??? 
for resolution\n df = pd.DataFrame({\n 'timedelta': np.arange(start=0, stop=3 * 86400000,\n step=86400000,\n dtype='timedelta64[ms]')\n })\n pa.Table.from_pandas(df)\n\n def test_pytime_from_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356)]\n\n # microseconds\n t1 = pa.time64('us')\n\n aobjs = np.array(pytimes + [None], dtype=object)\n parr = pa.array(aobjs)\n assert parr.type == t1\n assert parr[0].as_py() == pytimes[0]\n assert parr[1].as_py() == pytimes[1]\n assert parr[2] is pa.NA\n\n # DataFrame\n df = pd.DataFrame({'times': aobjs})\n batch = pa.RecordBatch.from_pandas(df)\n assert batch[0].equals(parr)\n\n # Test ndarray of int64 values\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n a1 = pa.array(arr, type=pa.time64('us'))\n assert a1[0].as_py() == pytimes[0]\n\n a2 = pa.array(arr * 1000, type=pa.time64('ns'))\n assert a2[0].as_py() == pytimes[0]\n\n a3 = pa.array((arr / 1000).astype('i4'),\n type=pa.time32('ms'))\n assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)\n\n a4 = pa.array((arr / 1000000).astype('i4'),\n type=pa.time32('s'))\n assert a4[0].as_py() == pytimes[0].replace(microsecond=0)\n\n def test_arrow_time_to_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356),\n time(0, 0, 0)]\n\n expected = np.array(pytimes[:2] + [None])\n expected_ms = np.array([x.replace(microsecond=1000)\n for x in pytimes[:2]] +\n [None])\n expected_s = np.array([x.replace(microsecond=0)\n for x in pytimes[:2]] +\n [None])\n\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n null_mask = np.array([False, False, True], dtype=bool)\n\n a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))\n a2 = pa.array(arr * 1000, mask=null_mask,\n type=pa.time64('ns'))\n\n a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,\n type=pa.time32('ms'))\n a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,\n type=pa.time32('s'))\n\n names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']\n batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)\n arr = a1.to_pandas()\n assert (arr == expected).all()\n\n arr = a2.to_pandas()\n assert (arr == expected).all()\n\n arr = a3.to_pandas()\n assert (arr == expected_ms).all()\n\n arr = a4.to_pandas()\n assert (arr == expected_s).all()\n\n df = batch.to_pandas()\n expected_df = pd.DataFrame({'time64[us]': expected,\n 'time64[ns]': expected,\n 'time32[ms]': expected_ms,\n 'time32[s]': expected_s},\n columns=names)\n\n tm.assert_frame_equal(df, expected_df)\n\n def test_numpy_datetime64_columns(self):\n datetime64_ns = np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n _check_array_from_pandas_roundtrip(datetime64_ns)\n\n datetime64_us = np.array([\n '2007-07-13T01:23:34.123456',\n None,\n '2006-01-13T12:34:56.432539',\n '2010-08-13T05:46:57.437699'],\n dtype='datetime64[us]')\n _check_array_from_pandas_roundtrip(datetime64_us)\n\n datetime64_ms = np.array([\n '2007-07-13T01:23:34.123',\n None,\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n _check_array_from_pandas_roundtrip(datetime64_ms)\n\n datetime64_s = np.array([\n '2007-07-13T01:23:34',\n None,\n '2006-01-13T12:34:56',\n '2010-08-13T05:46:57'],\n dtype='datetime64[s]')\n _check_array_from_pandas_roundtrip(datetime64_s)\n\n def test_numpy_datetime64_day_unit(self):\n datetime64_d = 
np.array([\n '2007-07-13',\n None,\n '2006-01-15',\n '2010-08-19'],\n dtype='datetime64[D]')\n _check_array_from_pandas_roundtrip(datetime64_d)\n\n def test_array_from_pandas_date_with_mask(self):\n m = np.array([True, False, True])\n data = pd.Series([\n date(1990, 1, 1),\n date(1991, 1, 1),\n date(1992, 1, 1)\n ])\n\n result = pa.Array.from_pandas(data, mask=m)\n\n expected = pd.Series([None, date(1991, 1, 1), None])\n assert pa.Array.from_pandas(expected).equals(result)\n\n\nclass TestConvertStringLikeTypes(object):\n \"\"\"\n Conversion tests for string and binary types.\n \"\"\"\n\n def test_unicode(self):\n repeats = 1000\n values = [u'foo', None, u'bar', u'mañana', np.nan]\n df = pd.DataFrame({'strings': values * repeats})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_bytes_to_binary(self):\n values = [u('qux'), b'foo', None, 'bar', 'qux', np.nan]\n df = pd.DataFrame({'strings': values})\n\n table = pa.Table.from_pandas(df)\n assert table[0].type == pa.binary()\n\n values2 = [b'qux', b'foo', None, b'bar', b'qux', np.nan]\n expected = pd.DataFrame({'strings': values2})\n _check_pandas_roundtrip(df, expected)\n\n @pytest.mark.large_memory\n def test_bytes_exceed_2gb(self):\n val = 'x' * (1 << 20)\n df = pd.DataFrame({\n 'strings': np.array([val] * 4000, dtype=object)\n })\n arr = pa.array(df['strings'])\n assert isinstance(arr, pa.ChunkedArray)\n assert arr.num_chunks == 2\n arr = None\n\n table = pa.Table.from_pandas(df)\n assert table[0].data.num_chunks == 2\n\n def test_fixed_size_bytes(self):\n values = [b'foo', None, b'bar', None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema[0].type == schema[0].type\n assert table.schema[0].name == schema[0].name\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n def test_fixed_size_bytes_does_not_accept_varying_lengths(self):\n values = [b'foo', None, b'ba', None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n with pytest.raises(pa.ArrowInvalid):\n pa.Table.from_pandas(df, schema=schema)\n\n def test_table_empty_str(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result1 = table.to_pandas(strings_to_categorical=False)\n expected1 = pd.DataFrame({'strings': values})\n tm.assert_frame_equal(result1, expected1, check_dtype=True)\n\n result2 = table.to_pandas(strings_to_categorical=True)\n expected2 = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result2, expected2, check_dtype=True)\n\n def test_table_str_to_categorical_without_na(self):\n values = ['a', 'a', 'b', 'b', 'c']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n def test_table_str_to_categorical_with_na(self):\n values = [None, 'a', 'b', np.nan]\n df = pd.DataFrame({'strings': 
values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n\nclass TestConvertDecimalTypes(object):\n \"\"\"\n Conversion test for decimal types.\n \"\"\"\n\n def test_decimal_32_from_pandas(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('-1234.123'),\n decimal.Decimal('1234.439'),\n ]\n })\n converted = pa.Table.from_pandas(expected, preserve_index=False)\n field = pa.field('decimals', pa.decimal128(7, 3))\n schema = pa.schema([field])\n assert converted.schema.equals(schema)\n\n def test_decimal_32_to_pandas(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('-1234.123'),\n decimal.Decimal('1234.439'),\n ]\n })\n converted = pa.Table.from_pandas(expected)\n df = converted.to_pandas()\n tm.assert_frame_equal(df, expected)\n\n def test_decimal_64_from_pandas(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('-129934.123331'),\n decimal.Decimal('129534.123731'),\n ]\n })\n converted = pa.Table.from_pandas(expected, preserve_index=False)\n field = pa.field('decimals', pa.decimal128(12, 6))\n schema = pa.schema([field])\n assert converted.schema.equals(schema)\n\n def test_decimal_64_to_pandas(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('-129934.123331'),\n decimal.Decimal('129534.123731'),\n ]\n })\n converted = pa.Table.from_pandas(expected)\n df = converted.to_pandas()\n tm.assert_frame_equal(df, expected)\n\n def test_decimal_128_from_pandas(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('394092382910493.12341234678'),\n -decimal.Decimal('314292388910493.12343437128'),\n ]\n })\n converted = pa.Table.from_pandas(expected, preserve_index=False)\n field = pa.field('decimals', pa.decimal128(26, 11))\n schema = pa.schema([field])\n assert converted.schema.equals(schema)\n\n def test_decimal_128_to_pandas(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('394092382910493.12341234678'),\n -decimal.Decimal('314292388910493.12343437128'),\n ]\n })\n converted = pa.Table.from_pandas(expected)\n df = converted.to_pandas()\n tm.assert_frame_equal(df, expected)\n\n\nclass TestListTypes(object):\n \"\"\"\n Conversion tests for list<> types.\n \"\"\"\n\n def test_column_of_arrays(self):\n df, schema = dataframe_with_arrays()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n assert table.schema.equals(schema)\n\n for column in df.columns:\n field = schema.field_by_name(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_arrays_to_py(self):\n # Test regression in ARROW-1199 not caught in above test\n dtype = 'i1'\n arr = np.array([\n np.arange(10, dtype=dtype),\n np.arange(5, dtype=dtype),\n None,\n np.arange(1, dtype=dtype)\n ])\n type_ = pa.list_(pa.int8())\n parr = pa.array(arr, type=type_)\n\n assert parr[0].as_py() == list(range(10))\n assert parr[1].as_py() == list(range(5))\n assert parr[2].as_py() is None\n assert parr[3].as_py() == [0]\n\n def test_column_of_lists(self):\n df, schema = dataframe_with_lists()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = 
pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n assert table.schema.equals(schema)\n\n for column in df.columns:\n field = schema.field_by_name(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_lists_first_empty(self):\n # ARROW-2124\n num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]\n series = pd.Series([np.array(s, dtype=float) for s in num_lists])\n arr = pa.array(series)\n result = pd.Series(arr.to_pandas())\n tm.assert_series_equal(result, series)\n\n def test_column_of_lists_chunked(self):\n # ARROW-1357\n df = pd.DataFrame({\n 'lists': np.array([\n [1, 2],\n None,\n [2, 3],\n [4, 5],\n [6, 7],\n [8, 9]\n ], dtype=object)\n })\n\n schema = pa.schema([\n pa.field('lists', pa.list_(pa.int64()))\n ])\n\n t1 = pa.Table.from_pandas(df[:2], schema=schema)\n t2 = pa.Table.from_pandas(df[2:], schema=schema)\n\n table = pa.concat_tables([t1, t2])\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_column_of_lists_chunked2(self):\n data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],\n [12, 13], [14, 15], [16, 17]]\n data2 = [[8, 9], [18, 19]]\n\n a1 = pa.array(data1)\n a2 = pa.array(data2)\n\n t1 = pa.Table.from_arrays([a1], names=['a'])\n t2 = pa.Table.from_arrays([a2], names=['a'])\n\n concatenated = pa.concat_tables([t1, t2])\n\n result = concatenated.to_pandas()\n expected = pd.DataFrame({'a': data1 + data2})\n\n tm.assert_frame_equal(result, expected)\n\n def test_column_of_lists_strided(self):\n df, schema = dataframe_with_lists()\n df = pd.concat([df] * 6, ignore_index=True)\n\n arr = df['int64'].values[::3]\n assert arr.strides[0] != 8\n\n _check_array_roundtrip(arr)\n\n def test_nested_lists_all_none(self):\n data = np.array([[None, None], None], dtype=object)\n\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n data2 = np.array([None, None, [None, None],\n np.array([None, None], dtype=object)],\n dtype=object)\n arr = pa.array(data2)\n expected = pa.array([None, None, [None, None], [None, None]])\n assert arr.equals(expected)\n\n def test_nested_lists_all_empty(self):\n # ARROW-2128\n data = pd.Series([[], [], []])\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n def test_infer_lists(self):\n data = OrderedDict([\n ('nan_ints', [[None, 1], [2, 3]]),\n ('ints', [[0, 1], [2, 3]]),\n ('strs', [[None, u'b'], [u'c', u'd']]),\n ('nested_strs', [[[None, u'b'], [u'c', u'd']], None])\n ])\n df = pd.DataFrame(data)\n\n expected_schema = pa.schema([\n pa.field('nan_ints', pa.list_(pa.int64())),\n pa.field('ints', pa.list_(pa.int64())),\n pa.field('strs', pa.list_(pa.string())),\n pa.field('nested_strs', pa.list_(pa.list_(pa.string())))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n def test_infer_numpy_array(self):\n data = OrderedDict([\n ('ints', [\n np.array([0, 1], dtype=np.int64),\n np.array([2, 3], dtype=np.int64)\n ])\n ])\n df = pd.DataFrame(data)\n expected_schema = pa.schema([\n pa.field('ints', pa.list_(pa.int64()))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n @pytest.mark.parametrize('t,data,expected', [\n (\n pa.int64,\n [[1, 2], [3], None],\n [None, [3], None]\n ),\n (\n pa.string,\n [[u'aaa', u'bb'], [u'c'], None],\n [None, [u'c'], None]\n ),\n (\n pa.null,\n [[None, None], [None], None],\n [None, [None], None]\n )\n ])\n def test_array_from_pandas_typed_array_with_mask(self, t, data, 
expected):\n m = np.array([True, False, True])\n\n s = pd.Series(data)\n result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))\n\n assert pa.Array.from_pandas(expected,\n type=pa.list_(t())).equals(result)\n\n def test_empty_list_roundtrip(self):\n empty_list_array = np.empty((3,), dtype=object)\n empty_list_array.fill([])\n\n df = pd.DataFrame({'a': np.array(['1', '2', '3']),\n 'b': empty_list_array})\n tbl = pa.Table.from_pandas(df)\n\n result = tbl.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_array_from_nested_arrays(self):\n df, schema = dataframe_with_arrays()\n for field in schema:\n arr = df[field.name].values\n expected = pa.array(list(arr), type=field.type)\n result = pa.array(arr)\n assert result.type == field.type # == list<scalar>\n assert result.equals(expected)\n\n\nclass TestConvertStructTypes(object):\n \"\"\"\n Conversion tests for struct types.\n \"\"\"\n\n def test_structarray(self):\n ints = pa.array([None, 2, 3], type=pa.int64())\n strs = pa.array([u'a', None, u'c'], type=pa.string())\n bools = pa.array([True, False, None], type=pa.bool_())\n arr = pa.StructArray.from_arrays(\n [ints, strs, bools],\n ['ints', 'strs', 'bools'])\n\n expected = pd.Series([\n {'ints': None, 'strs': u'a', 'bools': True},\n {'ints': 2, 'strs': None, 'bools': False},\n {'ints': 3, 'strs': u'c', 'bools': None},\n ])\n\n series = pd.Series(arr.to_pandas())\n tm.assert_series_equal(series, expected)\n\n\nclass TestZeroCopyConversion(object):\n \"\"\"\n Tests that zero-copy conversion works with some types.\n \"\"\"\n\n def test_zero_copy_success(self):\n result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, [0, 1, 2])\n\n def test_zero_copy_dictionaries(self):\n arr = pa.DictionaryArray.from_arrays(\n np.array([0, 0]),\n np.array([5]))\n\n result = arr.to_pandas(zero_copy_only=True)\n values = pd.Categorical([5, 5])\n\n tm.assert_series_equal(pd.Series(result), pd.Series(values),\n check_names=False)\n\n def test_zero_copy_failure_on_object_types(self):\n with pytest.raises(pa.ArrowException):\n pa.array(['A', 'B', 'C']).to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_with_int_when_nulls(self):\n with pytest.raises(pa.ArrowException):\n pa.array([0, 1, None]).to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_with_float_when_nulls(self):\n with pytest.raises(pa.ArrowException):\n pa.array([0.0, 1.0, None]).to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_on_bool_types(self):\n with pytest.raises(pa.ArrowException):\n pa.array([True, False]).to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_on_list_types(self):\n arr = np.array([[1, 2], [8, 9]], dtype=object)\n\n with pytest.raises(pa.ArrowException):\n pa.array(arr).to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_on_timestamp_types(self):\n arr = np.array(['2007-07-13'], dtype='datetime64[ns]')\n\n with pytest.raises(pa.ArrowException):\n pa.array(arr).to_pandas(zero_copy_only=True)\n\n\nclass TestConvertMisc(object):\n \"\"\"\n Miscellaneous conversion tests.\n \"\"\"\n\n type_pairs = [\n (np.int8, pa.int8()),\n (np.int16, pa.int16()),\n (np.int32, pa.int32()),\n (np.int64, pa.int64()),\n (np.uint8, pa.uint8()),\n (np.uint16, pa.uint16()),\n (np.uint32, pa.uint32()),\n (np.uint64, pa.uint64()),\n # (np.float16, pa.float16()), # XXX unsupported\n (np.float32, pa.float32()),\n (np.float64, pa.float64()),\n # XXX unsupported\n # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),\n (np.object, 
pa.string()),\n # (np.object, pa.binary()), # XXX unsupported\n (np.object, pa.binary(10)),\n (np.object, pa.list_(pa.int64())),\n ]\n\n def test_all_none_objects(self):\n df = pd.DataFrame({'a': [None, None, None]})\n _check_pandas_roundtrip(df)\n\n def test_all_none_category(self):\n df = pd.DataFrame({'a': [None, None, None]})\n df['a'] = df['a'].astype('category')\n _check_pandas_roundtrip(df)\n\n def test_empty_arrays(self):\n for dtype, pa_type in self.type_pairs:\n arr = np.array([], dtype=dtype)\n _check_array_roundtrip(arr, type=pa_type)\n\n def test_threaded_conversion(self):\n df = _alltypes_example()\n _check_pandas_roundtrip(df, nthreads=2)\n _check_pandas_roundtrip(df, nthreads=2, as_batch=True)\n\n def test_category(self):\n repeats = 5\n v1 = ['foo', None, 'bar', 'qux', np.nan]\n v2 = [4, 5, 6, 7, 8]\n v3 = [b'foo', None, b'bar', b'qux', np.nan]\n df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats),\n 'cat_ints': pd.Categorical(v2 * repeats),\n 'cat_binary': pd.Categorical(v3 * repeats),\n 'cat_strings_ordered': pd.Categorical(\n v1 * repeats, categories=['bar', 'qux', 'foo'],\n ordered=True),\n 'ints': v2 * repeats,\n 'ints2': v2 * repeats,\n 'strings': v1 * repeats,\n 'strings2': v1 * repeats,\n 'strings3': v3 * repeats})\n _check_pandas_roundtrip(df)\n\n arrays = [\n pd.Categorical(v1 * repeats),\n pd.Categorical(v2 * repeats),\n pd.Categorical(v3 * repeats)\n ]\n for values in arrays:\n _check_array_roundtrip(values)\n\n def test_mixed_types_fails(self):\n data = pd.DataFrame({'a': ['a', 1, 2.0]})\n with pytest.raises(pa.ArrowException):\n pa.Table.from_pandas(data)\n\n data = pd.DataFrame({'a': [1, True]})\n with pytest.raises(pa.ArrowException):\n pa.Table.from_pandas(data)\n\n def test_strided_data_import(self):\n cases = []\n\n columns = ['a', 'b', 'c']\n N, K = 100, 3\n random_numbers = np.random.randn(N, K).copy() * 100\n\n numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'f4', 'f8']\n\n for type_name in numeric_dtypes:\n cases.append(random_numbers.astype(type_name))\n\n # strings\n cases.append(np.array([tm.rands(10) for i in range(N * K)],\n dtype=object)\n .reshape(N, K).copy())\n\n # booleans\n boolean_objects = (np.array([True, False, True] * N, dtype=object)\n .reshape(N, K).copy())\n\n # add some nulls, so dtype comes back as objects\n boolean_objects[5] = None\n cases.append(boolean_objects)\n\n cases.append(np.arange(\"2016-01-01T00:00:00.001\", N * K,\n dtype='datetime64[ms]')\n .reshape(N, K).copy())\n\n strided_mask = (random_numbers > 0).astype(bool)[:, 0]\n\n for case in cases:\n df = pd.DataFrame(case, columns=columns)\n col = df['a']\n\n _check_pandas_roundtrip(df)\n _check_array_roundtrip(col)\n _check_array_roundtrip(col, mask=strided_mask)\n\n def test_all_nones(self):\n def _check_series(s):\n converted = pa.array(s)\n assert isinstance(converted, pa.NullArray)\n assert len(converted) == 3\n assert converted.null_count == 3\n assert converted[0] is pa.NA\n\n _check_series(pd.Series([None] * 3, dtype=object))\n _check_series(pd.Series([np.nan] * 3, dtype=object))\n _check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object))\n\n def test_partial_schema(self):\n data = OrderedDict([\n ('a', [0, 1, 2, 3, 4]),\n ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),\n ('c', [-10, -5, 0, 5, 10])\n ])\n df = pd.DataFrame(data)\n\n partial_schema = pa.schema([\n pa.field('a', pa.int64()),\n pa.field('b', pa.int32())\n ])\n\n expected_schema = pa.schema([\n pa.field('a', pa.int64()),\n pa.field('b', pa.int32()),\n 
pa.field('c', pa.int64())\n ])\n\n _check_pandas_roundtrip(df, schema=partial_schema,\n expected_schema=expected_schema)\n\n def test_table_batch_empty_dataframe(self):\n df = pd.DataFrame({})\n _check_pandas_roundtrip(df)\n _check_pandas_roundtrip(df, as_batch=True)\n\n df2 = pd.DataFrame({}, index=[0, 1, 2])\n _check_pandas_roundtrip(df2, preserve_index=True)\n _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)\n\n def test_convert_empty_table(self):\n arr = pa.array([], type=pa.int64())\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64))\n arr = pa.array([], type=pa.string())\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))\n arr = pa.array([], type=pa.list_(pa.int64()))\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))\n arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))\n\n\ndef _fully_loaded_dataframe_example():\n from distutils.version import LooseVersion\n\n index = pd.MultiIndex.from_arrays([\n pd.date_range('2000-01-01', periods=5).repeat(2),\n np.tile(np.array(['foo', 'bar'], dtype=object), 5)\n ])\n\n c1 = pd.date_range('2000-01-01', periods=10)\n data = {\n 0: c1,\n 1: c1.tz_localize('utc'),\n 2: c1.tz_localize('US/Eastern'),\n 3: c1[::2].tz_localize('utc').repeat(2).astype('category'),\n 4: ['foo', 'bar'] * 5,\n 5: pd.Series(['foo', 'bar'] * 5).astype('category').values,\n 6: [True, False] * 5,\n 7: np.random.randn(10),\n 8: np.random.randint(0, 100, size=10),\n 9: pd.period_range('2013', periods=10, freq='M')\n }\n\n if LooseVersion(pd.__version__) >= '0.21':\n # There is an issue with pickling IntervalIndex in pandas 0.20.x\n data[10] = pd.interval_range(start=1, freq=1, periods=10)\n\n return pd.DataFrame(data, index=index)\n\n\ndef _check_serialize_components_roundtrip(df):\n ctx = pa.default_serialization_context()\n\n components = ctx.serialize(df).to_components()\n deserialized = ctx.deserialize_components(components)\n\n tm.assert_frame_equal(df, deserialized)\n\n\ndef test_serialize_deserialize_pandas():\n # ARROW-1784, serialize and deserialize DataFrame by decomposing\n # BlockManager\n df = _fully_loaded_dataframe_example()\n _check_serialize_components_roundtrip(df)\n\n\ndef _pytime_from_micros(val):\n microseconds = val % 1000000\n val //= 1000000\n seconds = val % 60\n val //= 60\n minutes = val % 60\n hours = val // 60\n return time(hours, minutes, seconds, microseconds)\n\n\ndef _pytime_to_micros(pytime):\n return (pytime.hour * 3600000000 +\n pytime.minute * 60000000 +\n pytime.second * 1000000 +\n pytime.microsecond)\n"
] | [
[
"pandas.to_datetime",
"pandas.Series",
"numpy.sqrt",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"numpy.iinfo",
"numpy.random.randint",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.to_numeric",
"pandas.concat",
"pandas.interval_range",
"pandas.Categorical",
"numpy.int64",
"pandas.date_range",
"numpy.array",
"pandas.isnull",
"numpy.random.seed",
"pandas.period_range",
"pandas.MultiIndex.from_arrays",
"numpy.testing.assert_array_equal",
"pandas.util.testing.rands",
"numpy.float64",
"numpy.ma.masked_array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
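The pyarrow conversion tests embedded in the row above repeatedly exercise one pattern: build a pandas DataFrame, convert it with pa.Table.from_pandas, convert back with to_pandas, and assert equality. Below is a minimal, self-contained sketch of that roundtrip, assuming only that pyarrow and pandas are installed; the column names and values are illustrative and not taken from the dataset row.

import numpy as np
import pandas as pd
import pyarrow as pa

# Illustrative frame: integer, float (with a NaN), and nullable string columns.
df = pd.DataFrame({
    'i64': np.array([-10, -5, 0, 5, 10], dtype=np.int64),
    'f64': np.array([0.0, 1.5, -2.5, 3.25, np.nan], dtype=np.float64),
    'strs': ['a', 'b', None, 'd', 'e'],
})

# Convert to an Arrow table (dropping the pandas index) and back again.
table = pa.Table.from_pandas(df, preserve_index=False)
result = table.to_pandas()

# The roundtrip should reproduce the original frame exactly.
pd.testing.assert_frame_equal(result, df)

The embedded tests layer many variations on this core loop (partial schemas, list and struct columns, chunked tables, zero-copy conversions), but each ultimately reduces to the same convert-and-compare check sketched here.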
pattonw/diluvian | [
"3df1e0666f6e65c7719f703e629239b7f0493f86",
"3df1e0666f6e65c7719f703e629239b7f0493f86",
"3df1e0666f6e65c7719f703e629239b7f0493f86"
] | [
"diluvian/volumes.py",
"diluvian/__main__.py",
"diluvian/training.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Volumes of raw image and labeled object data.\"\"\"\n\n\nfrom __future__ import division\n\nfrom collections import namedtuple\nimport csv\nimport logging\nimport os\nimport re\n\nimport h5py\nimport math\nimport numpy as np\nfrom PIL import Image\nimport pytoml as toml\nimport requests\nfrom scipy import ndimage\nimport six\nfrom six.moves import range as xrange\nimport pyn5\n\nfrom .config import CONFIG\nfrom .octrees import OctreeVolume\nfrom .util import get_nonzero_aabb\n\n\nDimOrder = namedtuple('DimOrder', ('X', 'Y', 'Z'))\n\n\ndef partition_volumes(volumes, downsample=True):\n \"\"\"Paritition volumes into training and validation based on configuration.\n\n Uses the regexes mapping partition sizes and indices in\n diluvian.config.TrainingConfig by applying them to matching volumes based\n on name.\n\n Parameters\n ----------\n volumes : dict\n Dictionary mapping volume name to diluvian.volumes.Volume.\n downsample : bool, optional\n Whether to downsample partitions automatically.\n\n Returns\n -------\n training_volumes, validation_volumes : dict\n Dictionary mapping volume name to partitioned, downsampled volumes.\n \"\"\"\n def apply_partitioning(volumes, partitioning):\n partitioned = {}\n for name, vol in six.iteritems(volumes):\n partitions = [p for rgx, p in CONFIG.training.partitions.items() if re.match(rgx, name)]\n partition_index = [idx for rgx, idx in partitioning.items() if re.match(rgx, name)]\n if len(partitions) > 1 or len(partition_index) > 1:\n raise ValueError('Volume \"{}\" matches more than one partition specifier'.format(name))\n elif len(partitions) == 1 and len(partition_index) == 1:\n v = vol.partition(partitions[0], partition_index[0])\n if downsample:\n v = v.downsample(CONFIG.volume.resolution)\n partitioned[name] = v\n\n return partitioned\n\n training_volumes = apply_partitioning(volumes, CONFIG.training.training_partition)\n validation_volumes = apply_partitioning(volumes, CONFIG.training.validation_partition)\n\n return training_volumes, validation_volumes\n\n\nclass SubvolumeBounds(object):\n \"\"\"Sufficient parameters to extract a subvolume from a volume.\"\"\"\n __slots__ = ('start', 'stop', 'seed', 'label_id', 'label_margin',)\n\n def __init__(self, start=None, stop=None, seed=None, label_id=None, label_margin=None):\n assert (start is not None and stop is not None) or seed is not None, \"Bounds or seed must be provided\"\n self.start = start\n self.stop = stop\n self.seed = seed\n self.label_id = label_id\n if label_margin is None:\n label_margin = np.zeros(3, dtype=np.int64)\n self.label_margin = label_margin\n\n @classmethod\n def iterable_from_csv(cls, filename):\n bounds = []\n with open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n for k, v in six.iteritems(row):\n if not v:\n row[k] = None\n elif v[0] == '[':\n row[k] = np.fromstring(v[1:-1], sep=' ', dtype=np.int64)\n else:\n row[k] = int(v)\n bounds.append(cls(**row))\n\n return bounds\n\n @classmethod\n def iterable_to_csv(cls, bounds, filename):\n with open(filename, 'w') as csvfile:\n fieldnames = cls.__slots__\n writer = csv.writer(csvfile)\n writer.writerow(fieldnames)\n for bound in bounds:\n writer.writerow([getattr(bound, f) for f in fieldnames])\n\n\nclass Subvolume(object):\n \"\"\"A subvolume of image data and an optional ground truth object mask.\"\"\"\n __slots__ = ('image', 'label_mask', 'seed', 'label_id',)\n\n def __init__(self, image, label_mask, seed, label_id):\n self.image = image\n self.label_mask 
= label_mask\n self.seed = seed\n self.label_id = label_id\n\n def f_a(self):\n \"\"\"Calculate the mask filling fraction of this subvolume.\n\n Returns\n -------\n float\n Fraction of the subvolume voxels in the object mask.\n \"\"\"\n return np.count_nonzero(self.label_mask) / float(self.label_mask.size)\n\n def has_seed_in_mask(self):\n ctr = self.seed - (np.asarray(self.image.shape) - np.asarray(self.label_mask.shape)) // 2\n return self.label_mask[tuple(ctr)]\n\n def has_uniform_seed_margin(self, seed_margin=20.0):\n \"\"\"Test if a subvolume has a margin of uniform label around its seed.\n\n Parameters\n ----------\n seed_margin : float, optional\n The minimum acceptable margin of uniform target label around the seed\n voxel (in nm, default 20.0).\n\n Returns\n -------\n bool\n True if the rectangular margin around the seed position is uniform.\n \"\"\"\n margin = np.ceil(np.reciprocal(np.array(CONFIG.volume.resolution),\n dtype=np.float64) * seed_margin).astype(np.int64)\n\n mask_target = self.label_mask\n # If data is unlabeled, can not test so always succeed.\n if mask_target is None:\n return True\n # Seed location in the mask accounting for offset of label from image.\n ctr = self.seed - (np.asarray(self.image.shape) - np.asarray(mask_target.shape)) // 2\n seed_fov = (ctr - margin, ctr + margin + 1)\n seed_region = mask_target[seed_fov[0][0]:seed_fov[1][0],\n seed_fov[0][1]:seed_fov[1][1],\n seed_fov[0][2]:seed_fov[1][2]]\n return np.all(seed_region)\n\n\nclass SubvolumeGenerator(six.Iterator):\n \"\"\"Combines a volume and a subvolume bounds generator into a generator.\n\n Parameters\n ----------\n volume : Volume\n bounds_generator : SubvolumeBoundsGenerator\n \"\"\"\n def __init__(self, volume, bounds_generator):\n self.volume = volume\n self.bounds_generator = bounds_generator\n\n @property\n def shape(self):\n return self.bounds_generator.shape\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.bounds_generator.reset()\n\n def __next__(self):\n return self.volume.get_subvolume(six.next(self.bounds_generator))\n\n\nclass ErodedMaskGenerator(six.Iterator):\n def __init__(self, subvolume_generator, erosion_px):\n self.subvolume_generator = subvolume_generator\n self.sel = np.ones(erosion_px * 2 + 1)\n\n @property\n def shape(self):\n return self.subvolume_generator.shape\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.subvolume_generator.reset()\n\n def __next__(self):\n while True:\n subv = six.next(self.subvolume_generator)\n\n subv.label_mask = ndimage.binary_erosion(subv.label_mask, structure=self.sel, border_value=1)\n\n if subv.has_seed_in_mask():\n return subv\n\n\nclass RelabelSeedComponentGenerator(six.Iterator):\n def __init__(self, subvolume_generator):\n self.subvolume_generator = subvolume_generator\n\n @property\n def shape(self):\n return self.subvolume_generator.shape\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.subvolume_generator.reset()\n\n def __next__(self):\n subv = six.next(self.subvolume_generator)\n\n label_im, _ = ndimage.label(subv.label_mask)\n label_axis_margin = (np.array(subv.image.shape) - np.array(subv.label_mask.shape)) // 2\n seed_label = label_im[tuple(subv.seed - label_axis_margin)]\n\n subv.label_mask = label_im == seed_label\n\n return subv\n\n\nclass SubvolumeAugmentGenerator(six.Iterator):\n \"\"\"Base class for subvolume generator augmenters.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented 
volume in sequence.\n If false, return either with equal probability.\n \"\"\"\n def __init__(self, subvolume_generator, return_both):\n self.subvolume_generator = subvolume_generator\n self.return_both = return_both\n self.return_single_p = 0.5\n self.subvolume = None\n\n @property\n def shape(self):\n return self.subvolume_generator.shape\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.subvolume = None\n self.subvolume_generator.reset()\n\n def __next__(self):\n if self.return_both:\n if self.subvolume is None:\n self.subvolume = six.next(self.subvolume_generator)\n return self.subvolume\n else:\n subv = self.augment_subvolume()\n self.subvolume = None\n if subv is None:\n return six.next(self)\n else:\n return subv\n else:\n self.subvolume = six.next(self.subvolume_generator)\n if np.random.sample() < self.return_single_p:\n return self.subvolume\n else:\n subv = self.augment_subvolume()\n if subv is None:\n return self.subvolume\n else:\n return subv\n\n def augment_subvolume(self):\n raise NotImplementedError('Subclasses must implement this method.')\n\n\nclass ClipSubvolumeImageGenerator(six.Iterator):\n \"\"\"Clip subvolume image range (default between zero and one).\n\n Useful to apply after a sequence of augmentations.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n min_val, max_val : float, optional\n \"\"\"\n def __init__(self, subvolume_generator, min_val=0.0, max_val=1.0):\n self.subvolume_generator = subvolume_generator\n self.min_val = min_val\n self.max_val = max_val\n\n @property\n def shape(self):\n return self.subvolume_generator.shape\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.subvolume_generator.reset()\n\n def __next__(self):\n subv = six.next(self.subvolume_generator)\n return Subvolume(np.clip(subv.image, self.min_val, self.max_val),\n subv.label_mask,\n subv.seed,\n subv.label_id)\n\n\nclass MirrorAugmentGenerator(SubvolumeAugmentGenerator):\n \"\"\"Repeats subvolumes from a subvolume generator mirrored along an axis.\n\n For each subvolume in the original generator, this generator will yield two\n subvolumes: the original subvolume and the subvolume with the image,\n label mask, and seed mirrored along a given axis.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented volume in sequence.\n If false, return either with equal probability.\n axis : int\n \"\"\"\n def __init__(self, subvolume_generator, return_both, axis):\n super(MirrorAugmentGenerator, self).__init__(subvolume_generator, return_both)\n self.axis = axis\n\n def augment_subvolume(self):\n subv = self.subvolume\n shape = subv.image.shape[self.axis]\n seed = subv.seed.copy()\n seed[self.axis] = shape - subv.seed[self.axis] - 1\n subv = Subvolume(np.flip(subv.image, self.axis),\n np.flip(subv.label_mask, self.axis) if subv.label_mask is not None else None,\n seed,\n subv.label_id)\n return subv\n\n\nclass PermuteAxesAugmentGenerator(SubvolumeAugmentGenerator):\n \"\"\"Repeats subvolumes from a subvolume generator with an axes permutation.\n\n For each subvolume in the original generator, this generator will yield two\n subvolumes: the original subvolume and the subvolume with the image,\n label mask, and seed axes permuted according to a given axes order.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented volume in sequence.\n If false, return either with equal 
probability.\n axes : sequence of int\n \"\"\"\n def __init__(self, subvolume_generator, return_both, axes):\n super(PermuteAxesAugmentGenerator, self).__init__(subvolume_generator, return_both)\n self.axes = list(axes)\n\n def augment_subvolume(self):\n subv = self.subvolume\n subv = Subvolume(np.transpose(subv.image, self.axes),\n np.transpose(subv.label_mask, self.axes) if subv.label_mask is not None else None,\n subv.seed[self.axes],\n self.subvolume.label_id)\n return subv\n\n\nclass MissingDataAugmentGenerator(SubvolumeAugmentGenerator):\n \"\"\"Repeats subvolumes from a subvolume generator with missing data planes.\n\n For each subvolume in the original generator, this generator will yield the\n original subvolume and may yield a subvolume with missing planes of image\n and/or label mask data.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented volume in sequence.\n If false, return either with equal probability.\n axis : int\n probability : float\n Independent probability that each plane of data along axis is missing.\n remove_label : bool\n Whether to also remove label mask data.\n \"\"\"\n def __init__(self, subvolume_generator, return_both, axis, probability, remove_label=False):\n super(MissingDataAugmentGenerator, self).__init__(subvolume_generator, return_both)\n self.axis = axis\n self.probability = probability\n self.remove_label = remove_label\n\n def augment_subvolume(self):\n rolls = np.random.sample(self.shape[self.axis])\n # Remove the seed plane from possibilities.\n rolls[self.subvolume.seed[self.axis]] = 1.1\n missing_sections = np.where(rolls < self.probability)\n\n if missing_sections and missing_sections[0].size:\n subv = self.subvolume\n mask = subv.label_mask.copy() if subv.label_mask is not None and self.remove_label else subv.label_mask\n subv = Subvolume(subv.image.copy(),\n mask,\n subv.seed,\n subv.label_id)\n slices = [slice(None), slice(None), slice(None)]\n slices[self.axis] = missing_sections\n subv.image[slices] = 0\n if self.remove_label:\n label_axis_margin = (subv.image.shape[self.axis] - subv.label_mask.shape[self.axis]) // 2\n label_sections = missing_sections[0] - label_axis_margin\n label_sections = label_sections[(label_sections >= 0) &\n (label_sections < subv.label_mask.shape[self.axis])]\n slices[self.axis] = (label_sections,)\n subv.label_mask[slices] = False\n return subv\n else:\n # No augmentations to be made. Superclass will automatically return\n # next subvolume.\n return None\n\n\nclass GaussianNoiseAugmentGenerator(SubvolumeAugmentGenerator):\n \"\"\"Repeats subvolumes from a subvolume generator with Gaussian noise.\n\n For each subvolume in the original generator, this generator will yield two\n subvolumes: the original subvolume and the subvolume with multiplicative\n and additive Gaussian noise applied to the image data.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented volume in sequence.\n If false, return either with equal probability.\n axis : int\n Axis along which noise will be applied independently. For example,\n 0 will apply different noise to each z-section. 
-1 will apply\n uniform noise to the entire subvolume.\n multiplicative : float\n Standard deviation for 1-mean Gaussian multiplicative noise.\n multiplicative : float\n Standard deviation for 0-mean Gaussian additive noise.\n \"\"\"\n def __init__(self, subvolume_generator, return_both, axis, multiplicative, additive):\n super(GaussianNoiseAugmentGenerator, self).__init__(subvolume_generator, return_both)\n self.axis = axis\n self.multiplicative = multiplicative\n self.additive = additive\n\n def augment_subvolume(self):\n subv = self.subvolume\n\n # Generate a transformed shape that will apply vector addition\n # and multiplication along to correct axis.\n shape_xform = np.ones((1, 3), dtype=np.int32).ravel()\n shape_xform[self.axis] = -1\n\n dim_size = 1 if self.axis == -1 else self.shape[self.axis]\n mul_noise = np.random.normal(1.0, self.multiplicative, dim_size).astype(subv.image.dtype)\n add_noise = np.random.normal(0.0, self.additive, dim_size).astype(subv.image.dtype)\n\n subv = Subvolume(subv.image * mul_noise.reshape(shape_xform) + add_noise.reshape(shape_xform),\n subv.label_mask,\n subv.seed,\n subv.label_id)\n return subv\n\n\nclass ContrastAugmentGenerator(SubvolumeAugmentGenerator):\n \"\"\"Repeats subvolumes from a subvolume generator with altered contrast.\n\n For each subvolume in the original generator, this generator will yield the\n original subvolume and may yield a subvolume with image intensity contrast.\n\n Currently this augmentation performs simple rescaling of intensity values,\n not histogram based methods. This simple approach still yields results\n resembling TEM artifacts. A single rescaling is chosen for all selected\n sections in each subvolume, not independently per selected section.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented volume in sequence.\n If false, return either with equal probability.\n axis : int\n Axis along which contrast may be altered. 
For example, 0 will alter\n contrast by z-sections.\n probability : float\n Independent probability that each plane of data along axis is altered.\n scaling_mean, scaling_std, center_mean, center_std : float\n Normal distribution parameters for the rescaling of intensity values.\n \"\"\"\n def __init__(self, subvolume_generator, return_both, axis, probability,\n scaling_mean, scaling_std, center_mean, center_std):\n super(ContrastAugmentGenerator, self).__init__(subvolume_generator, return_both)\n self.axis = axis\n self.probability = probability\n self.scaling_mean = scaling_mean\n self.scaling_std = scaling_std\n self.center_mean = center_mean\n self.center_std = center_std\n\n def augment_subvolume(self):\n rolls = np.random.sample(self.shape[self.axis])\n sections = np.where(rolls < self.probability)\n\n if sections and sections[0].size:\n subv = self.subvolume\n subv = Subvolume(subv.image.copy(),\n subv.label_mask,\n subv.seed,\n subv.label_id)\n slices = [slice(None), slice(None), slice(None)]\n slices[self.axis] = sections\n data = subv.image[slices]\n old_min = data.min()\n old_max = data.max()\n scaling = np.random.normal(self.scaling_mean, self.scaling_std)\n center = np.random.normal(self.center_mean, self.center_std)\n data = scaling*(data - old_min) + 0.5*scaling*center*(old_max - old_min) + old_min\n subv.image[slices] = data\n return subv\n else:\n return None\n\n\nclass MaskedArtifactAugmentGenerator(SubvolumeAugmentGenerator):\n \"\"\"Repeats subvolumes from a subvolume generator with artifact data added.\n\n For each subvolume in the original generator, this generator will yield the\n original subvolume and may yield a subvolume with planes of image mixed\n with artifact data from a separate volume.\n\n Parameters\n ----------\n subvolume_generator : SubvolumeGenerator\n return_both : bool\n If true, return both the original and augmented volume in sequence.\n If false, return either with equal probability.\n axis : int\n probability : float\n Independent probability that each plane of data along axis has\n artifacts.\n artifact_volume_file : string\n Filename of an TOML descriptor of an HDF5 dataset with image and mask\n data channels. Only the dataset named 'Artifacts' from this descriptor\n will be used. 
Mask data should be a float that will be interpreted\n as an alpha for blending image data from this artifact file with\n the original subvolume image data.\n \"\"\"\n def __init__(self, subvolume_generator, return_both, axis, probability, artifact_volume_file, cache):\n super(MaskedArtifactAugmentGenerator, self).__init__(subvolume_generator, return_both)\n self.axis = axis\n self.probability = probability\n if 'artifacts' not in cache:\n vol = HDF5Volume.from_toml(artifact_volume_file)['Artifacts']\n cache['mask'] = NdarrayVolume(\n vol.world_coord_to_local(vol.resolution),\n image_data=vol.world_mat_to_local(vol.mask_data[:]))\n vol.mask_data = None\n cache['artifacts'] = vol.to_memory_volume()\n self.mask = cache['mask']\n self.artifacts = cache['artifacts']\n artifact_shape = self.shape.copy()\n artifact_shape[self.axis] = 1\n self.art_bounds_gen = self.artifacts.subvolume_bounds_generator(shape=artifact_shape)\n\n def augment_subvolume(self):\n rolls = np.random.sample(self.shape[self.axis])\n artifact_sections = np.where(rolls < self.probability)\n\n if artifact_sections and artifact_sections[0].size:\n subv = self.subvolume\n subv = Subvolume(subv.image.copy(),\n subv.label_mask,\n subv.seed,\n subv.label_id)\n slices = [slice(None), slice(None), slice(None)]\n for z in artifact_sections[0]:\n slices[self.axis] = z\n mask_found = False\n # Since artifact data is usually sparse, reject patches\n # that have all zero mask.\n while not mask_found:\n art_bounds = six.next(self.art_bounds_gen)\n mask = self.mask.get_subvolume(art_bounds).image\n if mask.max() == 0.0:\n continue\n mask_found = True\n art = self.artifacts.get_subvolume(art_bounds).image\n raw = subv.image[slices]\n subv.image[slices] = raw * (1.0 - mask) + art * mask\n return subv\n else:\n return None\n\n\nclass Volume(object):\n DIM = DimOrder(Z=0, Y=1, X=2)\n\n def __init__(self, resolution, image_data=None, label_data=None, mask_data=None):\n self.resolution = resolution\n self.image_data = image_data\n self.label_data = label_data\n self.mask_data = mask_data\n self._mask_bounds = None\n\n def local_coord_to_world(self, a):\n return a\n\n def world_coord_to_local(self, a):\n return a\n\n def world_mat_to_local(self, m):\n return m\n\n @property\n def mask_bounds(self):\n if self._mask_bounds is not None:\n return self._mask_bounds\n if self.mask_data is None:\n return None\n\n # Explicitly copy the channel to memory. 3x speedup for np ops.\n mask_data = self.mask_data[:]\n\n self._mask_bounds = get_nonzero_aabb(mask_data)\n\n return self._mask_bounds\n\n @property\n def shape(self):\n return tuple(self.world_coord_to_local(np.array(self.image_data.shape)))\n\n def _get_downsample_from_resolution(self, resolution):\n resolution = np.asarray(resolution)\n downsample = np.log2(np.true_divide(resolution, self.resolution))\n if np.any(downsample < 0):\n raise ValueError('Requested resolution ({}) is higher than volume resolution ({}). '\n 'Upsampling is not supported.'.format(resolution, self.resolution))\n if not np.all(np.equal(np.mod(downsample, 1), 0)):\n raise ValueError('Requested resolution ({}) is not a power-of-2 downsample of '\n 'volume resolution ({}). 
'\n 'This is currently unsupported.'.format(resolution, self.resolution))\n return downsample.astype(np.int64)\n\n def downsample(self, resolution):\n downsample = self._get_downsample_from_resolution(resolution)\n if np.all(np.equal(downsample, 0)):\n return self\n return DownsampledVolume(self, downsample)\n\n def partition(self, partitioning, partition_index):\n if np.array_equal(partitioning, np.ones(3)) and np.array_equal(partition_index, np.zeros(3)):\n return self\n return PartitionedVolume(self, partitioning, partition_index)\n\n def sparse_wrapper(self, *args):\n return SparseWrappedVolume(self, *args)\n\n def subvolume_bounds_generator(self, shape=None, label_margin=None):\n return self.SubvolumeBoundsGenerator(self, shape, label_margin)\n\n def subvolume_generator(self, bounds_generator=None, **kwargs):\n if bounds_generator is None:\n if not kwargs:\n raise ValueError('Bounds generator arguments must be provided if no bounds generator is provided.')\n bounds_generator = self.subvolume_bounds_generator(**kwargs)\n return SubvolumeGenerator(self, bounds_generator)\n\n def get_subvolume(self, bounds):\n if bounds.start is None or bounds.stop is None:\n raise ValueError('This volume does not support sparse subvolume access.')\n\n image_subvol = self.image_data[\n bounds.start[0]:bounds.stop[0],\n bounds.start[1]:bounds.stop[1],\n bounds.start[2]:bounds.stop[2]]\n\n image_subvol = self.world_mat_to_local(image_subvol)\n if np.issubdtype(image_subvol.dtype, np.integer):\n image_subvol = image_subvol.astype(np.float32) / 256.0\n\n seed = bounds.seed\n if seed is None:\n seed = np.array(image_subvol.shape, dtype=np.int64) // 2\n\n if self.label_data is not None:\n label_start = bounds.start + bounds.label_margin\n label_stop = bounds.stop - bounds.label_margin\n\n label_subvol = self.label_data[\n label_start[0]:label_stop[0],\n label_start[1]:label_stop[1],\n label_start[2]:label_stop[2]]\n\n label_subvol = self.world_mat_to_local(label_subvol)\n\n label_id = bounds.label_id\n if label_id is None:\n label_id = label_subvol[tuple(seed - bounds.label_margin)]\n label_mask = label_subvol == label_id\n else:\n label_mask = None\n label_id = None\n\n return Subvolume(image_subvol, label_mask, seed, label_id)\n\n class SubvolumeBoundsGenerator(six.Iterator):\n def __init__(self, volume, shape, label_margin=None):\n self.volume = volume\n self.shape = shape\n self.margin = np.floor_divide(self.shape, 2).astype(np.int64)\n if label_margin is None:\n label_margin = np.zeros(3, dtype=np.int64)\n self.label_margin = label_margin\n self.skip_blank_sections = True\n self.ctr_min = self.margin\n self.ctr_max = (np.array(self.volume.shape) - self.margin - 1).astype(np.int64)\n self.random = np.random.RandomState(CONFIG.random_seed)\n\n # If the volume has a mask channel, further limit ctr_min and\n # ctr_max to lie inside a margin in the AABB of the mask.\n if self.volume.mask_data is not None:\n mask_min, mask_max = self.volume.mask_bounds\n\n mask_min = self.volume.world_coord_to_local(mask_min)\n mask_max = self.volume.world_coord_to_local(mask_max)\n\n self.ctr_min = np.maximum(self.ctr_min, mask_min + self.label_margin)\n self.ctr_max = np.minimum(self.ctr_max, mask_max - self.label_margin - 1)\n\n if np.any(self.ctr_min >= self.ctr_max):\n raise ValueError('Cannot generate subvolume bounds: bounds ({}, {}) too small for shape ({})'.format(\n np.array_str(self.ctr_min), np.array_str(self.ctr_max), np.array_str(self.shape)))\n\n def __iter__(self):\n return self\n\n def reset(self):\n 
self.random.seed(0)\n\n def __next__(self):\n while True:\n ctr = np.array([self.random.randint(self.ctr_min[n], self.ctr_max[n])\n for n in range(3)]).astype(np.int64)\n start = ctr - self.margin\n stop = ctr + self.margin + np.mod(self.shape, 2).astype(np.int64)\n\n # If the volume has a mask channel, only accept subvolumes\n # entirely contained in it.\n if self.volume.mask_data is not None:\n start_local = self.volume.world_coord_to_local(start + self.label_margin)\n stop_local = self.volume.world_coord_to_local(stop - self.label_margin)\n mask = self.volume.mask_data[\n start_local[0]:stop_local[0],\n start_local[1]:stop_local[1],\n start_local[2]:stop_local[2]]\n if not mask.all():\n logging.debug('Skipping subvolume not entirely in mask.')\n continue\n\n # Skip subvolumes with seeds in blank sections.\n if self.skip_blank_sections and self.volume.image_data is not None:\n if self.volume.image_data[tuple(self.volume.local_coord_to_world(ctr))] == 0:\n logging.debug('Skipping subvolume with seed in blank section.')\n continue\n\n # Only accept subvolumes where the central seed voxel will be\n # of a uniform label after downsampling. For more stringent\n # seed region uniformity filtering, see has_uniform_seed_margin.\n if self.volume.label_data is None:\n label_id = None\n break\n seed_min = self.volume.local_coord_to_world(ctr)\n seed_max = self.volume.local_coord_to_world(ctr + 1)\n label_ids = self.volume.label_data[\n seed_min[0]:seed_max[0],\n seed_min[1]:seed_max[1],\n seed_min[2]:seed_max[2]]\n if (label_ids == label_ids.item(0)).all():\n label_id = label_ids.item(0)\n break\n\n return SubvolumeBounds(start, stop, label_id=label_id, label_margin=self.label_margin)\n\n\nclass NdarrayVolume(Volume):\n \"\"\"A NumPy ndarray-backed volume.\n\n Since all volumes assume image and label data are ndarray-like, this class\n exists mostly as a bookkeeping convenience to make actual ndarray volumes\n explicit.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(NdarrayVolume, self).__init__(*args, **kwargs)\n self.image_data.flags.writeable = False\n if self.label_data is not None:\n self.label_data.flags.writeable = False\n\n\nclass VolumeView(Volume):\n def __init__(self, parent, *args, **kwargs):\n super(VolumeView, self).__init__(*args, **kwargs)\n self.parent = parent\n\n def local_to_parent(self, a):\n return a\n\n def local_coord_to_world(self, a):\n return self.parent.local_coord_to_world(self.local_to_parent(a))\n\n def parent_to_local(self, a):\n return a\n\n def world_coord_to_local(self, a):\n return self.parent_to_local(self.parent.world_coord_to_local(a))\n\n def world_mat_to_local(self, m):\n return self.parent.world_mat_to_local(m)\n\n @property\n def mask_bounds(self):\n return self.parent.mask_bounds\n\n @property\n def shape(self):\n return self.parent.shape\n\n def get_subvolume(self, bounds):\n # assumes bounds given are in local coordinates\n parent_start = self.local_to_parent(bounds.start) if bounds.start is not None else None\n parent_stop = self.local_to_parent(bounds.stop) if bounds.stop is not None else None\n parent_seed = self.local_to_parent(bounds.seed) if bounds.seed is not None else None\n parent_bounds = SubvolumeBounds(start=parent_start,\n stop=parent_stop,\n seed=parent_seed,\n label_id=bounds.label_id,\n label_margin=bounds.label_margin)\n return self.parent.get_subvolume(parent_bounds)\n\n\nclass PartitionedVolume(VolumeView):\n \"\"\"Wrap an existing volume for partitioned access.\n\n Subvolume accesses to this volume will be offset and clipped 
to a partition\n of the wrapped volume.\n\n Parameters\n ----------\n parent : Volume\n The volume to wrap.\n partitioning : iterable of int\n Number of partitions along each axis. Only one axis should be greater\n than 1.\n partition_index : iterable of int\n Index of the partition which this volume will represent.\n \"\"\"\n def __init__(self, parent, partitioning, partition_index):\n super(PartitionedVolume, self).__init__(\n parent,\n parent.resolution,\n image_data=parent.image_data,\n label_data=parent.label_data,\n mask_data=parent.mask_data)\n self.partitioning = np.asarray(partitioning)\n self.partition_index = np.asarray(partition_index)\n partition_shape = np.floor_divide(np.array(self.parent.shape), self.partitioning)\n self.bounds = ((np.multiply(partition_shape, self.partition_index)).astype(np.int64),\n (np.multiply(partition_shape, self.partition_index + 1)).astype(np.int64))\n\n def local_to_parent(self, a):\n return a + self.bounds[0]\n\n def parent_to_local(self, a):\n return a - self.bounds[0]\n\n @property\n def mask_bounds(self):\n if self.parent.mask_bounds is None:\n return None\n else:\n bound_min = np.maximum(self.parent.mask_bounds[0], self.bounds[0])\n bound_max = np.minimum(self.parent.mask_bounds[1], self.bounds[1])\n return bound_min, bound_max\n\n @property\n def shape(self):\n return tuple(self.bounds[1] - self.bounds[0])\n\n\nclass DownsampledVolume(VolumeView):\n \"\"\"Wrap an existing volume for downsampled access.\n\n Subvolume accesses to this volume will be downsampled, but continue to use\n the wrapped volume and its data at the original resolution.\n\n Parameters\n ----------\n parent : Volume\n The volume to wrap.\n downsample : iterable of int\n Integral zoom levels to downsample the wrapped volume.\n \"\"\"\n def __init__(self, parent, downsample):\n self.scale = np.exp2(downsample).astype(np.int64)\n super(DownsampledVolume, self).__init__(\n parent,\n np.multiply(parent.resolution, self.scale),\n image_data=parent.image_data,\n label_data=parent.label_data,\n mask_data=parent.mask_data)\n\n def local_to_parent(self, a):\n return np.multiply(a, self.scale)\n\n def parent_to_local(self, a):\n return np.floor_divide(a, self.scale)\n\n @property\n def shape(self):\n return tuple(np.floor_divide(np.array(self.parent.shape), self.scale))\n\n def get_subvolume(self, bounds):\n subvol_shape = bounds.stop - bounds.start\n label_shape = subvol_shape - 2 * bounds.label_margin\n parent_bounds = SubvolumeBounds(self.local_to_parent(bounds.start),\n self.local_to_parent(bounds.stop),\n label_margin=self.local_to_parent(bounds.label_margin))\n subvol = self.parent.get_subvolume(parent_bounds)\n subvol.image = subvol.image.reshape(\n [subvol_shape[0], self.scale[0],\n subvol_shape[1], self.scale[1],\n subvol_shape[2], self.scale[2]]).mean(5).mean(3).mean(1)\n\n if subvol.label_mask is not None:\n # Downsample body mask by considering blocks where the majority\n # of voxels are in the body to be in the body. 
Alternatives are:\n # - Conjunction (tends to introduce false splits)\n # - Disjunction (tends to overdilate and merge)\n # - Mode label (computationally expensive)\n if CONFIG.volume.label_downsampling == 'conjunction':\n subvol.label_mask = subvol.label_mask.reshape(\n [label_shape[0], self.scale[0],\n label_shape[1], self.scale[1],\n label_shape[2], self.scale[2]]).all(5).all(3).all(1)\n else:\n subvol.label_mask = subvol.label_mask.reshape(\n [label_shape[0], self.scale[0],\n label_shape[1], self.scale[1],\n label_shape[2], self.scale[2]]).mean(5).mean(3).mean(1) > 0.5\n\n # Note that this is not a coordinate xform to parent in the typical\n # sense, just a rescaling of the coordinate in the subvolume-local\n # coordinates. Hence no similar call in VolumeView.get_subvolume.\n subvol.seed = self.parent_to_local(subvol.seed)\n\n return subvol\n\n\nclass SparseWrappedVolume(VolumeView):\n \"\"\"Wrap a existing volume for memory cached block sparse access.\"\"\"\n def __init__(self, parent, image_leaf_shape=None, label_leaf_shape=None):\n if image_leaf_shape is None:\n image_leaf_shape = list(CONFIG.model.input_fov_shape)\n if label_leaf_shape is None:\n label_leaf_shape = list(CONFIG.model.input_fov_shape)\n\n image_data = OctreeVolume(image_leaf_shape,\n (np.zeros(3), parent.image_data.shape),\n parent.image_data.dtype,\n populator=self.image_populator)\n label_data = OctreeVolume(label_leaf_shape,\n (np.zeros(3), parent.label_data.shape),\n parent.label_data.dtype,\n populator=self.label_populator)\n\n super(SparseWrappedVolume, self).__init__(\n parent,\n parent.resolution,\n image_data=image_data,\n label_data=label_data)\n\n def image_populator(self, bounds):\n return self.parent.image_data[\n bounds[0][0]:bounds[1][0],\n bounds[0][1]:bounds[1][1],\n bounds[0][2]:bounds[1][2]]\n\n def label_populator(self, bounds):\n return self.parent.label_data[\n bounds[0][0]:bounds[1][0],\n bounds[0][1]:bounds[1][1],\n bounds[0][2]:bounds[1][2]]\n\n\nclass HDF5Volume(Volume):\n \"\"\"A volume backed by data views to HDF5 file arrays.\n\n Parameters\n ----------\n orig_file : str\n Filename of the HDF5 file to load.\n image_dataaset : str\n Full dataset path including groups to the raw image data array.\n label_dataset : str\n Full dataset path including groups to the object label data array.\n \"\"\"\n @staticmethod\n def from_toml(filename):\n from keras.utils.data_utils import get_file\n\n volumes = {}\n with open(filename, 'rb') as fin:\n datasets = toml.load(fin).get('dataset', [])\n for dataset in datasets:\n hdf5_file = dataset['hdf5_file']\n if dataset.get('use_keras_cache', False):\n hdf5_file = get_file(hdf5_file, dataset['download_url'], md5_hash=dataset.get('download_md5', None))\n image_dataset = dataset.get('image_dataset', None)\n label_dataset = dataset.get('label_dataset', None)\n mask_dataset = dataset.get('mask_dataset', None)\n mask_bounds = dataset.get('mask_bounds', None)\n resolution = dataset.get('resolution', None)\n hdf5_pathed_file = os.path.join(os.path.dirname(filename), hdf5_file)\n volume = HDF5Volume(hdf5_pathed_file,\n image_dataset,\n label_dataset,\n mask_dataset,\n mask_bounds=mask_bounds)\n # If the volume configuration specifies an explicit resolution,\n # override any provided in the HDF5 itself.\n if resolution:\n logging.info('Overriding resolution for volume \"%s\"', dataset['name'])\n volume.resolution = np.array(resolution)\n volumes[dataset['name']] = volume\n\n return volumes\n\n @staticmethod\n def write_file(filename, resolution, **kwargs):\n h5file 
= h5py.File(filename, 'w')\n config = {'hdf5_file': os.path.basename(filename)}\n channels = ['image', 'label', 'mask']\n default_datasets = {\n 'image': 'volumes/raw',\n 'label': 'volumes/labels/neuron_ids',\n 'mask': 'volumes/labels/mask',\n }\n for channel in channels:\n data = kwargs.get('{}_data'.format(channel), None)\n dataset_name = kwargs.get('{}_dataset'.format(channel), default_datasets[channel])\n if data is not None:\n dataset = h5file.create_dataset(dataset_name, data=data, dtype=data.dtype)\n dataset.attrs['resolution'] = resolution\n config['{}_dataset'.format(channel)] = dataset_name\n\n h5file.close()\n\n return config\n\n def __init__(self, orig_file, image_dataset, label_dataset, mask_dataset, mask_bounds=None):\n logging.debug('Loading HDF5 file \"{}\"'.format(orig_file))\n self.file = h5py.File(orig_file, 'r')\n self.resolution = None\n self._mask_bounds = tuple(map(np.asarray, mask_bounds)) if mask_bounds is not None else None\n\n if image_dataset is None and label_dataset is None:\n raise ValueError('HDF5 volume must have either an image or label dataset: {}'.format(orig_file))\n\n if image_dataset is not None:\n self.image_data = self.file[image_dataset]\n if 'resolution' in self.file[image_dataset].attrs:\n self.resolution = np.array(self.file[image_dataset].attrs['resolution'])\n\n if label_dataset is not None:\n self.label_data = self.file[label_dataset]\n if 'resolution' in self.file[label_dataset].attrs:\n resolution = np.array(self.file[label_dataset].attrs['resolution'])\n if self.resolution is not None and not np.array_equal(self.resolution, resolution):\n logging.warning('HDF5 image and label dataset resolutions differ in %s: %s, %s',\n orig_file, self.resolution, resolution)\n else:\n self.resolution = resolution\n else:\n self.label_data = None\n\n if mask_dataset is not None:\n self.mask_data = self.file[mask_dataset]\n else:\n self.mask_data = None\n\n if image_dataset is None:\n self.image_data = np.full_like(self.label_data, np.NaN, dtype=np.float32)\n\n if self.resolution is None:\n self.resolution = np.ones(3)\n\n def to_memory_volume(self):\n data = ['image_data', 'label_data', 'mask_data']\n data = {\n k: self.world_mat_to_local(getattr(self, k)[:])\n for k in data if getattr(self, k) is not None}\n return NdarrayVolume(self.world_coord_to_local(self.resolution), **data)\n\n\nclass ImageStackVolume(Volume):\n \"\"\"A volume for block sparse access to image pyramids over HTTP.\n\n Coordinate Systems\n ----------\n Real: Physical coordinates, generally measured in nanometers\n World: pixel coordinates, starts at (0,0,0) and accounts for pixel resolution\n often (4x4x40) nanometers per pixel\n Local: Downsampled pixel space\n\n Parameters\n ----------\n bounds : iterable of int\n Shape of the stack at zoom level 0 in pixels.\n resolution : iterable of float\n Resolution of the stack at zoom level 0 in nm.\n tile_width, tile_height : int\n Size of tiles in pixels\n format_url : str\n Format string for building tile URLs from tile parameters.\n zoom_level : int, optional\n Zoom level to use for this volume.\n missing_z : iterable of int, optional\n Voxel z-indices where data is not available.\n image_leaf_shape : tuple of int or ndarray, optional\n Shape of image octree leaves in voxels. Defaults to 10 stacked tiles.\n label_leaf_shape : tuple of int or ndarray, optional\n Shape of label octree leaves in voxels. 
Defaults to FFN model FOV.\n \"\"\"\n @staticmethod\n def from_catmaid_stack(stack_info, tile_source_parameters):\n # See https://catmaid.readthedocs.io/en/stable/tile_sources.html\n format_url = {\n 1: '{source_base_url}{{z}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}',\n 4: '{source_base_url}{{z}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}',\n 5: '{source_base_url}{{zoom_level}}/{{z}}/{{row}}/{{col}}.{file_extension}',\n 7: '{source_base_url}largeDataTileSource/{tile_width}/{tile_height}/'\n '{{zoom_level}}/{{z}}/{{row}}/{{col}}.{file_extension}',\n 9: '{source_base_url}{{z}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}',\n }[tile_source_parameters['tile_source_type']].format(**tile_source_parameters)\n bounds = np.flipud(np.array(stack_info['bounds'], dtype=np.int64))\n resolution = np.flipud(np.array(stack_info['resolution']))\n translation = np.flipud(np.array(stack_info['translation']))\n tile_width = int(tile_source_parameters['tile_width'])\n tile_height = int(tile_source_parameters['tile_height'])\n return ImageStackVolume(bounds, resolution, translation, tile_width, tile_height,\n format_url, missing_z=stack_info.get(\"broken_slices\", None))\n\n def from_toml(filename):\n volumes = {}\n with open(filename, \"rb\") as fin:\n datasets = toml.load(fin).get(\"ImageStack\", [])\n for dataset in datasets:\n # stack info\n si = [\n \"bounds\",\n \"resolution\",\n \"translation\",\n \"broken_slices\",\n ]\n # tile stack parameters\n tsp = [\n \"source_base_url\",\n \"file_extension\",\n \"tile_width\",\n \"tile_height\",\n \"tile_source_type\",\n ]\n volume = ImageStackVolume.from_catmaid_stack(\n {si[key]: dataset[key] for key in si},\n {tsp[key]: dataset[key] for key in tsp},\n )\n volumes[dataset[\"title\"]] = volume\n\n return volumes\n\n def __init__(self, bounds, orig_resolution, translation, tile_width, tile_height,\n tile_format_url, zoom_level=0, missing_z=None, image_leaf_shape=None):\n self.orig_bounds = bounds\n self.orig_resolution = orig_resolution\n self.translation = translation\n self.tile_width = tile_width\n self.tile_height = tile_height\n self.tile_format_url = tile_format_url\n self.mask_data = None\n\n self.zoom_level = int(zoom_level)\n if missing_z is None:\n missing_z = []\n self.missing_z = frozenset(missing_z)\n if image_leaf_shape is None:\n image_leaf_shape = [10, tile_height, tile_width]\n\n self.scale = np.exp2(np.array([0, self.zoom_level, self.zoom_level])).astype(np.int64)\n\n data_shape = (np.zeros(3), np.divide(bounds, self.scale).astype(np.int64))\n self.image_data = OctreeVolume(image_leaf_shape,\n data_shape,\n 'float32',\n populator=self.image_populator)\n\n self.label_data = None\n\n def local_coord_to_world(self, a):\n return np.multiply(a, self.scale)\n\n def world_coord_to_local(self, a):\n return np.floor_divide(a, self.scale)\n\n def real_coord_to_world(self, a):\n return np.floor_divide(a - self.translation, self.orig_resolution)\n\n def world_coord_to_real(self, a):\n return np.multiply(a, self.orig_resolution) + self.translation\n\n @property\n def resolution(self):\n return self.orig_resolution * np.exp2([0, self.zoom_level, self.zoom_level])\n\n def downsample(self, resolution):\n downsample = self._get_downsample_from_resolution(resolution)\n zoom_level = np.min(downsample[[self.DIM.X, self.DIM.Y]])\n if zoom_level > 0:\n return ImageStackVolume(\n self.orig_bounds,\n self.orig_resolution,\n self.translation,\n self.tile_width,\n self.tile_height,\n self.tile_format_url,\n zoom_level=self.zoom_level + zoom_level,\n 
missing_z=self.missing_z,\n image_leaf_shape=self.image_data.leaf_shape).downsample(resolution)\n if np.all(np.equal(downsample, 0)):\n return self\n return DownsampledVolume(self, downsample)\n\n def subvolume_bounds_generator(self, sparse_margin=None, **kwargs):\n if sparse_margin is not None:\n if kwargs:\n raise ValueError('sparse_margin can not be combined with other arguments.')\n return self.SparseSubvolumeBoundsGenerator(self, sparse_margin)\n return super(ImageStackVolume, self).subvolume_bounds_generator(**kwargs)\n\n def get_subvolume(self, bounds):\n if bounds.start is None or bounds.stop is None:\n image_subvol = self.image_data\n label_subvol = self.label_data\n else:\n image_subvol = self.image_data[\n bounds.start[0]:bounds.stop[0],\n bounds.start[1]:bounds.stop[1],\n bounds.start[2]:bounds.stop[2]]\n label_subvol = None\n\n if np.issubdtype(image_subvol.dtype, np.integer):\n raise ValueError('Sparse volume access does not support image data coercion.')\n\n seed = bounds.seed\n if seed is None:\n seed = np.array(image_subvol.shape, dtype=np.int64) // 2\n\n return Subvolume(image_subvol, label_subvol, seed, bounds.label_id)\n\n def image_populator(self, bounds):\n image_subvol = np.zeros(tuple(bounds[1] - bounds[0]), dtype=np.float32)\n col_range = list(map(int, (math.floor(bounds[0][self.DIM.X] / self.tile_width),\n math.ceil(bounds[1][self.DIM.X] / self.tile_width))))\n row_range = list(map(int, (math.floor(bounds[0][self.DIM.Y] / self.tile_height),\n math.ceil(bounds[1][self.DIM.Y] / self.tile_height))))\n tile_size = np.array([1, self.tile_height, self.tile_width]).astype(np.int64)\n for z in xrange(bounds[0][self.DIM.Z], bounds[1][self.DIM.Z]):\n if z in self.missing_z:\n image_subvol[int(z - bounds[0][self.DIM.Z]), :, :] = 0\n continue\n for r in xrange(*row_range):\n for c in xrange(*col_range):\n url = self.tile_format_url.format(zoom_level=self.zoom_level, z=z, row=r, col=c)\n try:\n im = np.array(Image.open(requests.get(url, stream=True).raw))\n # If the image is multichannel, throw our hands up and\n # just use the first channel.\n if im.ndim > 2:\n im = im[:, :, 0].squeeze()\n im = im / 256.0\n except IOError:\n logging.debug('Failed to load tile: %s', url)\n im = np.full((self.tile_height, self.tile_width), 0, dtype=np.float32)\n tile_coord = np.array([z, r, c]).astype(np.int64)\n tile_loc = np.multiply(tile_coord, tile_size)\n\n subvol = (np.maximum(np.zeros(3), tile_loc - bounds[0]).astype(np.int64),\n np.minimum(np.array(image_subvol.shape),\n tile_loc + tile_size - bounds[0]).astype(np.int64))\n tile_sub = (np.maximum(np.zeros(3), bounds[0] - tile_loc).astype(np.int64),\n np.minimum(tile_size, bounds[1] - tile_loc).astype(np.int64))\n\n image_subvol[subvol[0][self.DIM.Z],\n subvol[0][self.DIM.Y]:subvol[1][self.DIM.Y],\n subvol[0][self.DIM.X]:subvol[1][self.DIM.X]] = \\\n im[tile_sub[0][self.DIM.Y]:tile_sub[1][self.DIM.Y],\n tile_sub[0][self.DIM.X]:tile_sub[1][self.DIM.X]]\n\n return image_subvol\n\n class SparseSubvolumeBoundsGenerator(six.Iterator):\n def __init__(self, volume, margin):\n self.volume = volume\n self.margin = np.asarray(margin).astype(np.int64)\n self.ctr_min = self.margin\n self.ctr_max = (np.array(self.volume.shape) - self.margin - 1).astype(np.int64)\n self.random = np.random.RandomState(CONFIG.random_seed)\n\n @property\n def shape(self):\n return self.volume.shape\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.random.seed(0)\n\n def __next__(self):\n ctr = np.array([self.random.randint(self.ctr_min[n], 
self.ctr_max[n])\n for n in range(3)]).astype(np.int64)\n return SubvolumeBounds(seed=ctr)\n\n\nclass N5Volume(Volume):\n \"\"\"A Volume for using an N5 filesystem for image retrieval\n\n Parameters\n ----------\n root_path : string\n /absolute/path/to/data.n5\n dataset : dict of dicts (dataset name to dataset config)\n possible keys: (\"mask\",\"labels\",\"image\")\n values: {\"path\": path, \"dtype\": dtype, \"read_only\": read_only}\n resolution : iterable of float\n Resolution of the pixels at zoom level 0 in nm.\n translation : iterable of float\n Translational offset in nm s.t. for given coordinate\n a in pixel space, a*resolution+translation = b where\n b is in the desired nm coordinates\n bounds: iterable of int, optional\n Shape of the stack at zoom level 0 in pixels.\n necessary if the volume is missing an attributes file\n tile_width, tile_height : int, optional\n Size of tiles in pixels\n necessary if the volume is missing an attributes file\n \"\"\"\n\n def from_toml(filename):\n volumes = {}\n with open(filename, \"rb\") as fin:\n volume_configs = toml.load(fin).get(\"N5Volume\", [])\n for volume_config in volume_configs:\n root_path = volume_config[\"root_path\"]\n datasets = volume_config[\"datasets\"]\n resolution = volume_config.get(\"resolution\", None)\n translation = volume_config.get[\"translation\", None]\n bounds = volume_config.get(\"bounds\", None)\n volume = N5Volume(\n root_path,\n datasets,\n bounds,\n resolution,\n translation,\n )\n volumes[volume_config[\"title\"]] = volume\n\n return volumes\n\n def __init__(\n self,\n root_path,\n datasets,\n bounds=None,\n resolution=None,\n translation=None,\n ):\n\n self._dtype_map = {\n \"UINT8\": np.uint8,\n \"UINT16\": np.uint16,\n \"UINT32\": np.uint32,\n \"UINT64\": np.uint64,\n \"INT8\": np.int8,\n \"INT16\": np.int16,\n \"INT32\": np.int32,\n \"INT64\": np.int64,\n \"FLOAT32\": np.float32,\n \"FLOAT64\": np.float64,\n }\n self.bounds = bounds\n self.resolution = resolution\n self.translation = translation\n\n self.scale = np.exp2(np.array([0, 0, 0])).astype(np.int64)\n self.data_shape = (np.array([0, 0, 0]), self.bounds / self.scale)\n\n # Initialization of data sources done in setter methods\n self.root_path = root_path\n self.image_config = datasets.get(\"image\", None)\n self.mask_config = datasets.get(\"mask\", None)\n self.label_config = datasets.get(\"label\", None)\n\n @property\n def dtype_map(self):\n return self._dtype_map\n\n def local_coord_to_world(self, a):\n return np.multiply(a, self.scale)\n\n def world_coord_to_local(self, a):\n return np.floor_divide(a, self.scale)\n\n def real_coord_to_world(self, a):\n return np.floor_divide(a - self.translation, self.orig_resolution)\n\n def world_coord_to_real(self, a):\n return np.multiply(a, self.orig_resolution) + self.translation\n\n @property\n def octree_leaf_shape(self):\n return np.array([10, 10, 10])\n\n @property\n def image_config(self):\n return self._image_config\n\n @image_config.setter\n def image_config(self, dataset):\n self._image_config = dataset\n if dataset is not None:\n self._image_data = OctreeVolume(\n self.octree_leaf_shape,\n self.data_shape,\n self.dtype_map[dataset.get(\"dtype\", \"FLOAT32\")],\n populator=self.image_populator,\n )\n else:\n self._image_data = None\n\n @property\n def image_data(self):\n return self._image_data\n\n @property\n def mask_config(self):\n return self._mask_config\n\n @mask_config.setter\n def mask_config(self, dataset):\n self._mask_config = dataset\n if dataset is not None:\n self._mask_data = 
OctreeVolume(\n self.octree_leaf_shape,\n self.data_shape,\n self.dtype_map[dataset.get(\"dtype\", \"FLOAT32\")],\n populator=self.mask_populator,\n )\n else:\n self._mask_data = None\n\n @property\n def mask_data(self):\n return self._mask_data\n\n @property\n def label_config(self):\n return self._label_config\n\n @label_config.setter\n def label_config(self, dataset):\n self._label_config = dataset\n if dataset is not None:\n self._label_data = OctreeVolume(\n self.octree_leaf_shape,\n self.data_shape,\n self.dtype_map[dataset.get(\"dtype\", \"FLOAT32\")],\n populator=self.label_populator,\n )\n else:\n self._label_data = None\n\n @property\n def label_data(self):\n return self._label_data\n\n @property\n def image_n5(self):\n \"\"\"\n Create a new pyn5.Dataset every time you ask for image_n5.\n This is necessary to accomadate parrallel reads since multiple\n threads can't use the same reader.\n \"\"\"\n if self.image_config is not None:\n return pyn5.open(\n self.root_path,\n self.image_config.get(\"path\"),\n self.image_config.get(\"dtype\", \"UINT8\"),\n self.image_config.get(\"read_only\", True),\n )\n else:\n return None\n\n def image_populator(self, bounds):\n return pyn5.read(self.image_n5, (bounds[0], bounds[1]))\n\n @property\n def mask_n5(self):\n if self.mask_config is not None:\n return pyn5.open(\n self.root_path,\n self.mask_config.get(\"path\"),\n self.mask_config.get(\"dtype\", \"UINT8\"),\n self.mask_config.get(\"read_only\", True),\n )\n else:\n return None\n\n def mask_populator(self, bounds):\n return pyn5.read(self.mask_n5, (bounds[0], bounds[1]))\n\n @property\n def label_n5(self):\n if self.label_config is not None:\n return pyn5.open(\n self.root_path,\n self.label_config.get(\"path\"),\n self.label_config.get(\"dtype\", \"UINT8\"),\n self.label_config.get(\"read_only\", True),\n )\n else:\n return None\n\n def label_populator(self, bounds):\n return pyn5.read(self.label_n5, bounds)\n",
"# -*- coding: utf-8 -*-\n\"\"\"Command line interface for diluvian.\"\"\"\n\n\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\nimport random\nimport re\n\nimport six\n\nfrom .config import CONFIG\n\n\ndef _make_main_parser():\n \"\"\"Construct the argparse parser for the main CLI.\n\n This exists as a separate function so the parser can be used to\n auto-generate CLI documentation in Sphinx.\n\n Returns\n -------\n argparse.ArgumentParser\n Parser for the main CLI and all subcommands.\n \"\"\"\n common_parser = argparse.ArgumentParser(add_help=False)\n\n common_parser.add_argument(\n '-c', '--config-file', action='append', dest='config_files', default=[],\n help='Configuration files to use. For defaults, see `diluvian/conf/default.toml`. '\n 'Values are overwritten in the order provided.')\n common_parser.add_argument(\n '-cd', action='append_const', dest='config_files',\n const=os.path.join(os.path.dirname(__file__), 'conf', 'default.toml'),\n help='Add default configuration file to chain of configuration files.')\n common_parser.add_argument(\n '-m', '--model-file', dest='model_file', default=None,\n help='Existing network model file to use for prediction or continued training.')\n common_parser.add_argument(\n '-v', '--volume-file', action='append', dest='volume_files', default=[],\n help='Volume configuration files. For example, see `diluvian/conf/cremi_datasets.toml`.'\n 'Values are overwritten in the order provided.')\n common_parser.add_argument(\n '--no-in-memory', action='store_false', dest='in_memory', default=True,\n help='Do not preload entire volumes into memory.')\n common_parser.add_argument(\n '-rs', '--random-seed', action='store', dest='random_seed', type=int,\n help='Seed for initializing the Python and NumPy random generators. '\n 'Overrides any seed specified in configuration files.')\n common_parser.add_argument(\n '-l', '--log', dest='log_level',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Set the logging level.')\n\n parser = argparse.ArgumentParser(description='Train or run flood-filling networks on EM data.')\n\n commandparsers = parser.add_subparsers(help='Commands', dest='command')\n\n train_parser = commandparsers.add_parser(\n 'train', parents=[common_parser],\n help='Train a network from labeled volumes.')\n train_parser.add_argument(\n '-mo', '--model-output-filebase', dest='model_output_filebase', default=None,\n help='Base filename for the best trained model and other output artifacts, '\n 'such as metric plots and configuration state.')\n train_parser.add_argument(\n '-mc', '--model-checkpoint-file', dest='model_checkpoint_file', default=None,\n help='Filename for model checkpoints at every epoch. 
'\n 'This is different than the model output file; if provided, this HDF5 model '\n 'file is saved every epoch regardless of validation performance.'\n 'Can use Keras format arguments: https://keras.io/callbacks/#modelcheckpoint')\n train_parser.add_argument(\n '--early-restart', action='store_true', dest='early_restart', default=False,\n help='If training is aborted early because an early abort metric '\n 'criteria, restart training with a new random seed.')\n train_parser.add_argument(\n '--tensorboard', action='store_true', dest='tensorboard', default=False,\n help='Output tensorboard log files while training (limited to network graph).')\n train_parser.add_argument(\n '--viewer', action='store_true', dest='viewer', default=False,\n help='Create a neuroglancer viewer for a training sample at the end of training.')\n train_parser.add_argument(\n '--metric-plot', action='store_true', dest='metric_plot', default=False,\n help='Plot metric history at the end of training. '\n 'Will be saved as a PNG with the model output base filename.')\n\n fill_common_parser = argparse.ArgumentParser(add_help=False)\n fill_common_parser.add_argument(\n '--partition-volumes', action='store_true', dest='partition_volumes', default=False,\n help='Partition volumes and only fill the validation partition.')\n fill_common_parser.add_argument(\n '--no-bias', action='store_false', dest='bias', default=True,\n help='Overwrite prediction mask at the end of each field of view inference '\n 'rather than using the anti-merge bias update.')\n fill_common_parser.add_argument(\n '--move-batch-size', dest='move_batch_size', default=1, type=int,\n help='Maximum number of fill moves to process in each prediction batch.')\n fill_common_parser.add_argument(\n '--max-moves', dest='max_moves', default=None, type=int,\n help='Cancel filling after this many moves.')\n fill_common_parser.add_argument(\n '--remask-interval', dest='remask_interval', default=None, type=int,\n help='Interval in moves to reset filling region mask based on '\n 'the seeded connected component.')\n\n fill_parser = commandparsers.add_parser(\n 'fill', parents=[common_parser, fill_common_parser],\n help='Use a trained network to densely segment a volume.')\n fill_parser.add_argument(\n '--seed-generator', dest='seed_generator', default='sobel', nargs='?',\n # Would be nice to pull these from .preprocessing.SEED_GENERATORS,\n # but want to avoid importing so that CLI is responsive.\n choices=['grid', 'sobel'],\n help='Method to generate seed locations for flood filling.')\n fill_parser.add_argument(\n '--ordered-seeds', action='store_false', dest='shuffle_seeds', default=True,\n help='Do not shuffle order in which seeds are processed.')\n fill_parser.add_argument(\n '--ignore-mask', dest='ignore_mask', default=False,\n help='Ignore the mask channel when generating seeds.')\n fill_parser.add_argument(\n '--background-label-id', dest='background_label_id', default=0, type=int,\n help='Label ID to output for voxels not belonging to any filled body.')\n fill_parser.add_argument(\n '--viewer', action='store_true', dest='viewer', default=False,\n help='Create a neuroglancer viewer for a each volume after filling.')\n fill_parser.add_argument(\n '--max-bodies', dest='max_bodies', default=None, type=int,\n help='Cancel filling after this many bodies (only useful for '\n 'diagnostics).')\n fill_parser.add_argument(\n '--reject-early-termination', action='store_true',\n dest='reject_early_termination', default=False,\n help='Reject seeds that terminate early, e.g., due 
to maximum '\n 'move limits.')\n fill_parser.add_argument(\n '--resume-file', dest='resume_filename', default=None,\n help='Filename for the TOML configuration file of a segmented '\n 'label volume from which to resume filling. The configuration '\n 'should only contain one dataset.')\n fill_parser.add_argument(\n 'segmentation_output_file', default=None,\n help='Filename for the HDF5 segmentation output, without '\n 'extension. Should contain \"{volume}\", which will be '\n 'substituted with the volume name for each respective '\n 'volume\\'s bounds.')\n\n bounds_common_parser = argparse.ArgumentParser(add_help=False)\n bounds_common_parser.add_argument(\n '--bounds-num-moves', dest='bounds_num_moves', default=None, nargs=3, type=int,\n help='Number of moves in direction to size the subvolume bounds.')\n\n sparse_fill_parser = commandparsers.add_parser(\n 'sparse-fill', parents=[common_parser, fill_common_parser, bounds_common_parser],\n help='Use a trained network to fill random regions in a volume.')\n sparse_fill_parser.add_argument(\n '--augment', action='store_true', dest='augment', default=False,\n help='Apply training augmentations to subvolumes before filling.')\n sparse_fill_parser.add_argument(\n '-bi', '--bounds-input-file', dest='bounds_input_file', default=None,\n help='Filename for bounds CSV input. Should contain \"{volume}\", which will be '\n 'substituted with the volume name for each respective volume\\'s bounds.')\n\n validate_parser = commandparsers.add_parser( # noqa\n 'validate', parents=[common_parser],\n help='Run a model on validation data.')\n\n evaluate_parser = commandparsers.add_parser(\n 'evaluate', parents=[common_parser],\n help='Evaluate a filling result versus a ground truth.')\n evaluate_parser.add_argument(\n '--border-threshold', dest='border_threshold', default=25, type=float,\n help='Region border threshold (in nm) to ignore. Official CREMI '\n 'default is 25nm.')\n evaluate_parser.add_argument(\n '--partition-volumes', action='store_true', dest='partition_volumes', default=False,\n help='Partition volumes and only evaluate the validation partitions.')\n evaluate_parser.add_argument(\n 'ground_truth_name', default=None,\n help='Name of the ground truth volume.')\n evaluate_parser.add_argument(\n 'prediction_name', default=None,\n help='Name of the prediction volume.')\n\n view_parser = commandparsers.add_parser(\n 'view', parents=[common_parser],\n help='View a set of co-registered volumes in neuroglancer.')\n view_parser.add_argument(\n '--partition-volumes', action='store_true', dest='partition_volumes', default=False,\n help='Partition volumes and view centered at the validation '\n 'partitions.')\n view_parser.add_argument(\n 'volume_name_regex', default='.', nargs='?',\n help='Regex to filter which volumes of those defined in the '\n 'volume configuration should be loaded.')\n\n check_config_parser = commandparsers.add_parser(\n 'check-config', parents=[common_parser],\n help='Check a configuration value.')\n check_config_parser.add_argument(\n 'config_property', default=None, nargs='?',\n help='Name of the property to show, e.g., `training.batch_size`.')\n\n gen_subv_bounds_parser = commandparsers.add_parser(\n 'gen-subv-bounds', parents=[common_parser, bounds_common_parser],\n help='Generate subvolume bounds.')\n gen_subv_bounds_parser.add_argument(\n 'bounds_output_file', default=None,\n help='Filename for the CSV output. 
Should contain \"{volume}\", which will be '\n 'substituted with the volume name for each respective volume\\'s bounds.')\n gen_subv_bounds_parser.add_argument(\n 'num_bounds', default=None, type=int,\n help='Number of bounds to generate.')\n\n return parser\n\n\ndef main():\n \"\"\"Entry point for the diluvian command line interface.\"\"\"\n parser = _make_main_parser()\n\n args = parser.parse_args()\n\n if args.log_level:\n logging.basicConfig(level=logging.getLevelName(args.log_level))\n\n if args.config_files:\n CONFIG.from_toml(*args.config_files)\n\n if args.random_seed:\n CONFIG.random_seed = args.random_seed\n\n def init_seeds():\n random.seed(CONFIG.random_seed)\n import numpy as np\n np.random.seed(CONFIG.random_seed)\n import tensorflow as tf\n tf.set_random_seed(CONFIG.random_seed)\n\n if args.command == 'train':\n # Late import to prevent loading large modules for short CLI commands.\n init_seeds()\n from .training import EarlyAbortException, train_network\n\n volumes = load_volumes(args.volume_files, args.in_memory)\n while True:\n try:\n train_network(model_file=args.model_file,\n volumes=volumes,\n model_output_filebase=args.model_output_filebase,\n model_checkpoint_file=args.model_checkpoint_file,\n tensorboard=args.tensorboard,\n viewer=args.viewer,\n metric_plot=args.metric_plot)\n except EarlyAbortException as inst:\n if args.early_restart:\n import numpy as np\n new_seed = CONFIG.random_seed\n while new_seed == CONFIG.random_seed:\n new_seed = np.random.randint(int(1e8))\n CONFIG.random_seed = new_seed\n logging.warning(str(inst))\n logging.warning('Training aborted, restarting with random seed %s', new_seed)\n init_seeds()\n continue\n else:\n logging.critical(str(inst))\n break\n break\n\n elif args.command == 'fill':\n # Late import to prevent loading large modules for short CLI commands.\n init_seeds()\n from .diluvian import fill_volumes_with_model\n\n volumes = load_volumes(args.volume_files, args.in_memory)\n fill_volumes_with_model(args.model_file,\n volumes,\n args.segmentation_output_file,\n resume_filename=args.resume_filename,\n partition=args.partition_volumes,\n viewer=args.viewer,\n seed_generator=args.seed_generator,\n background_label_id=args.background_label_id,\n bias=args.bias,\n move_batch_size=args.move_batch_size,\n max_moves=args.max_moves,\n max_bodies=args.max_bodies,\n filter_seeds_by_mask=not args.ignore_mask,\n reject_early_termination=args.reject_early_termination,\n remask_interval=args.remask_interval,\n shuffle_seeds=args.shuffle_seeds)\n\n elif args.command == 'sparse-fill':\n # Late import to prevent loading large modules for short CLI commands.\n init_seeds()\n from .diluvian import fill_region_with_model\n\n volumes = load_volumes(args.volume_files, args.in_memory)\n fill_region_with_model(args.model_file,\n volumes=volumes,\n partition=args.partition_volumes,\n augment=args.augment,\n bounds_input_file=args.bounds_input_file,\n bias=args.bias,\n move_batch_size=args.move_batch_size,\n max_moves=args.max_moves,\n remask_interval=args.remask_interval,\n moves=args.bounds_num_moves)\n\n elif args.command == 'validate':\n # Late import to prevent loading large modules for short CLI commands.\n init_seeds()\n from .training import validate_model\n\n volumes = load_volumes(args.volume_files, args.in_memory)\n validate_model(args.model_file, volumes)\n\n elif args.command == 'evaluate':\n from .diluvian import evaluate_volume\n\n volumes = load_volumes(args.volume_files, args.in_memory)\n evaluate_volume(volumes,\n 
args.ground_truth_name,\n args.prediction_name,\n partition=args.partition_volumes,\n border_threshold=args.border_threshold)\n\n elif args.command == 'view':\n # Late import to prevent loading large modules for short CLI commands.\n from .diluvian import view_volumes\n\n volumes = load_volumes(args.volume_files, args.in_memory, name_regex=args.volume_name_regex)\n view_volumes(volumes, partition=args.partition_volumes)\n\n elif args.command == 'check-config':\n prop = CONFIG\n if args.config_property is not None:\n properties = args.config_property.split('.')\n for p in properties:\n prop = getattr(prop, p)\n print(prop)\n\n elif args.command == 'gen-subv-bounds':\n # Late import to prevent loading large modules for short CLI commands.\n init_seeds()\n from .diluvian import generate_subvolume_bounds\n\n volumes = load_volumes(args.volume_files, args.in_memory)\n generate_subvolume_bounds(args.bounds_output_file,\n volumes,\n args.num_bounds,\n moves=args.bounds_num_moves)\n\n\ndef load_volumes(volume_files, in_memory, name_regex=None):\n \"\"\"Load HDF5 volumes specified in a TOML description file.\n\n Parameters\n ----------\n volume_file : list of str\n Filenames of the TOML volume descriptions to load.\n in_memory : bool\n If true, the entire dataset is read into an in-memory volume.\n\n Returns\n -------\n diluvian.volumes.Volume\n \"\"\"\n # Late import to prevent loading large modules for short CLI commands.\n from .volumes import HDF5Volume\n\n print('Loading volumes...')\n if volume_files:\n volumes = {}\n for volume_file in volume_files:\n volumes.update(HDF5Volume.from_toml(volume_file))\n else:\n volumes = HDF5Volume.from_toml(os.path.join(os.path.dirname(__file__), 'conf', 'cremi_datasets.toml'))\n\n if name_regex is not None:\n name_regex = re.compile(name_regex)\n volumes = {k: v for k, v in six.iteritems(volumes) if name_regex.match(k)}\n\n if in_memory:\n print('Copying volumes to memory...')\n volumes = {k: v.to_memory_volume() for k, v in six.iteritems(volumes)}\n\n print('Done.')\n return volumes\n\n\nif __name__ == \"__main__\":\n main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"Functions for generating training data and training networks.\"\"\"\n\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport itertools\nimport logging\nimport random\n\nimport matplotlib as mpl\n# Use the 'Agg' backend to allow the generation of plots even if no X server\n# is available. The matplotlib backend must be set before importing pyplot.\nmpl.use('Agg') # noqa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport six\nfrom six.moves import range as xrange\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nimport keras.backend as K\nfrom keras.callbacks import (\n Callback,\n EarlyStopping,\n ModelCheckpoint,\n TensorBoard,\n )\n\nfrom .config import CONFIG\nfrom .network import compile_network, load_model, make_parallel\nfrom .util import (\n get_color_shader,\n get_function,\n pad_dims,\n Roundrobin,\n WrappedViewer,\n write_keras_history_to_csv,\n )\nfrom .volumes import (\n ClipSubvolumeImageGenerator,\n ContrastAugmentGenerator,\n ErodedMaskGenerator,\n GaussianNoiseAugmentGenerator,\n MaskedArtifactAugmentGenerator,\n MirrorAugmentGenerator,\n MissingDataAugmentGenerator,\n partition_volumes,\n PermuteAxesAugmentGenerator,\n RelabelSeedComponentGenerator,\n )\nfrom .regions import (\n Region,\n )\n\n\ndef plot_history(history):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(history.history['loss'])\n ax.plot(history.history['val_loss'])\n ax.plot(history.history['val_subv_metric'])\n fig.suptitle('model loss')\n ax.set_ylabel('loss')\n ax.set_xlabel('epoch')\n ax.legend(['train', 'validation', 'val subvolumes'], loc='upper right')\n\n return fig\n\n\ndef patch_prediction_copy(model):\n \"\"\"Patch a Keras model to copy outputs to a kludge during training.\n\n This is necessary for mask updates to a region during training.\n\n Parameters\n ----------\n model : keras.engine.Model\n \"\"\"\n model.train_function = None\n model.test_function = None\n\n model._orig_train_on_batch = model.train_on_batch\n\n def train_on_batch(self, x, y, **kwargs):\n kludge = x.pop('kludge', None)\n outputs = self._orig_train_on_batch(x, y, **kwargs)\n kludge['outputs'] = outputs.pop()\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n model.train_on_batch = six.create_bound_method(train_on_batch, model)\n\n model._orig_test_on_batch = model.test_on_batch\n\n def test_on_batch(self, x, y, **kwargs):\n kludge = x.pop('kludge', None)\n outputs = self._orig_test_on_batch(x, y, **kwargs)\n kludge['outputs'] = outputs.pop()\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n model.test_on_batch = six.create_bound_method(test_on_batch, model)\n\n # Below is copied and modified from Keras Model._make_train_function.\n # The only change is the addition of `self.outputs` to the train function.\n def _make_train_function(self):\n if not hasattr(self, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n if self.train_function is None:\n inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n\n with K.name_scope('training'):\n with K.name_scope(self.optimizer.__class__.__name__):\n training_updates = self.optimizer.get_updates(\n params=self._collected_trainable_weights,\n loss=self.total_loss)\n updates = self.updates + training_updates\n # Gets loss and metrics. 
Updates weights at each call.\n self.train_function = K.function(inputs,\n [self.total_loss] + self.metrics_tensors + self.outputs,\n updates=updates,\n name='train_function',\n **self._function_kwargs)\n\n model._make_train_function = six.create_bound_method(_make_train_function, model)\n\n def _make_test_function(self):\n if not hasattr(self, 'test_function'):\n raise RuntimeError('You must compile your model before using it.')\n if self.test_function is None:\n inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n # Return loss and metrics, no gradient updates.\n # Does update the network states.\n self.test_function = K.function(inputs,\n [self.total_loss] + self.metrics_tensors + self.outputs,\n updates=self.state_updates,\n name='test_function',\n **self._function_kwargs)\n\n model._make_test_function = six.create_bound_method(_make_test_function, model)\n\n\nclass GeneratorReset(Callback):\n \"\"\"Keras epoch end callback to reset prediction copy kludges.\n \"\"\"\n def __init__(self, gens):\n self.gens = gens\n\n def on_epoch_end(self, epoch, logs=None):\n for gen in self.gens:\n gen.reset()\n\n\nclass GeneratorSubvolumeMetric(Callback):\n \"\"\"Add a data generator's subvolume metric to Keras' metric logs.\n\n Parameters\n ----------\n gens : iterable of diluvian.training.MovingTrainingGenerator\n metric_name : string\n \"\"\"\n def __init__(self, gens, metric_name):\n self.gens = gens\n self.metric_name = metric_name\n\n def on_epoch_end(self, epoch, logs=None):\n if self.metric_name not in self.params['metrics']:\n self.params['metrics'].append(self.metric_name)\n if logs:\n metric = np.mean([np.mean(gen.get_epoch_metric()) for gen in self.gens])\n logs[self.metric_name] = metric\n\n\nclass EarlyAbortException(Exception):\n pass\n\n\nclass EarlyAbort(Callback):\n \"\"\"Keras epoch end callback that aborts if a metric is above a threshold.\n\n This is useful when convergence is sensitive to initial conditions and\n models are obviously not useful to continue training after only a few\n epochs. 
Unlike the early stopping callback, this is considered an\n abnormal termination and throws an exception so that behaviors like\n restarting with a new random seed are possible.\n \"\"\"\n def __init__(self, monitor='val_loss', threshold_epoch=None, threshold_value=None):\n if threshold_epoch is None or threshold_value is None:\n raise ValueError('Epoch and value to enforce threshold must be provided.')\n\n self.monitor = monitor\n self.threshold_epoch = threshold_epoch - 1\n self.threshold_value = threshold_value\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch == self.threshold_epoch:\n current = logs.get(self.monitor)\n if current >= self.threshold_value:\n raise EarlyAbortException('Aborted after epoch {} because {} was {} >= {}'.format(\n self.threshold_epoch, self.monitor, current, self.threshold_value))\n\n\ndef preprocess_subvolume_generator(subvolume_generator):\n \"\"\"Apply non-augmentation preprocessing to a subvolume generator.\n\n Parameters\n ----------\n subvolume_generator : diluvian.volumes.SubvolumeGenerator\n\n Returns\n -------\n diluvian.volumes.SubvolumeGenerator\n \"\"\"\n gen = subvolume_generator\n if np.any(CONFIG.training.label_erosion):\n gen = ErodedMaskGenerator(gen, CONFIG.training.label_erosion)\n if CONFIG.training.relabel_seed_component:\n gen = RelabelSeedComponentGenerator(gen)\n\n return gen\n\n\ndef augment_subvolume_generator(subvolume_generator):\n \"\"\"Apply data augmentations to a subvolume generator.\n\n Parameters\n ----------\n subvolume_generator : diluvian.volumes.SubvolumeGenerator\n\n Returns\n -------\n diluvian.volumes.SubvolumeGenerator\n \"\"\"\n gen = subvolume_generator\n for axes in CONFIG.training.augment_permute_axes:\n gen = PermuteAxesAugmentGenerator(gen, CONFIG.training.augment_use_both, axes)\n for axis in CONFIG.training.augment_mirrors:\n gen = MirrorAugmentGenerator(gen, CONFIG.training.augment_use_both, axis)\n for v in CONFIG.training.augment_noise:\n gen = GaussianNoiseAugmentGenerator(gen, CONFIG.training.augment_use_both, v['axis'], v['mul'], v['add'])\n for v in CONFIG.training.augment_artifacts:\n if 'cache' not in v:\n v['cache'] = {}\n gen = MaskedArtifactAugmentGenerator(gen, CONFIG.training.augment_use_both,\n v['axis'], v['prob'], v['volume_file'], v['cache'])\n for v in CONFIG.training.augment_missing_data:\n gen = MissingDataAugmentGenerator(gen, CONFIG.training.augment_use_both, v['axis'], v['prob'])\n for v in CONFIG.training.augment_contrast:\n gen = ContrastAugmentGenerator(gen, CONFIG.training.augment_use_both, v['axis'], v['prob'],\n v['scaling_mean'], v['scaling_std'],\n v['center_mean'], v['center_std'])\n gen = ClipSubvolumeImageGenerator(gen)\n\n return gen\n\n\nclass MovingTrainingGenerator(six.Iterator):\n \"\"\"Generate Keras moving FOV training tuples from a subvolume generator.\n\n This generator expects a subvolume generator that will provide subvolumes\n larger than the network FOV, and will allow the output of training at one\n batch to generate moves within these subvolumes to produce training data\n for the subsequent batch.\n\n Parameters\n ----------\n subvolumes : generator of Subvolume\n batch_size : int\n kludge : dict\n A kludge object to allow this generator to provide inputs and receive\n outputs from the network.\n See ``diluvian.training.patch_prediction_copy``.\n f_a_bins : sequence of float, optional\n Bin boundaries for filling fractions. 
If provided, sample loss will be\n weighted to increase loss contribution from less-frequent f_a bins.\n Otherwise all samples are weighted equally.\n reset_generators : bool\n Whether to reset subvolume generators when this generator is reset.\n If true subvolumes will be sampled in the same order each epoch.\n subv_per_epoch : int, optional\n If specified, the generator will only return moves from this many\n subvolumes before being reset. Once this number of subvolumes is\n exceeded, the generator will yield garbage batches (this is\n necessary because Keras currently uses a fixed number of batches\n per epoch). If specified, once each subvolume is complete its\n total loss will be calculated.\n subv_metric_fn : function, option\n Metric function to run on subvolumes when `subv_per_epoch` is set.\n subv_metric_threshold : bool, optional\n Whether to threshold subvolume masks for metrics.\n subv_metric_args : dict, optional\n Keyword arguments that will be passed to the subvolume metric.\n \"\"\"\n def __init__(self, subvolumes, batch_size, kludge,\n f_a_bins=None, reset_generators=True, subv_per_epoch=None,\n subv_metric_fn=None, subv_metric_threshold=False, subv_metric_args=None):\n self.subvolumes = subvolumes\n self.batch_size = batch_size\n self.kludge = kludge\n self.reset_generators = reset_generators\n self.subv_per_epoch = subv_per_epoch\n self.subv_metric_fn = subv_metric_fn\n self.subv_metric_threshold = subv_metric_threshold\n self.subv_metric_args = subv_metric_args\n if self.subv_metric_args is None:\n self.subv_metric_args = {}\n\n self.regions = [None] * batch_size\n self.region_pos = [None] * batch_size\n self.move_counts = [0] * batch_size\n self.epoch_move_counts = []\n self.epoch_subv_metrics = []\n self.epoch_subvolumes = 0\n self.batch_image_input = [None] * batch_size\n\n self.f_a_bins = f_a_bins\n self.f_a_init = False\n if f_a_bins is not None:\n self.f_a_init = True\n self.f_a_counts = np.ones_like(f_a_bins, dtype=np.int64)\n self.f_as = np.zeros(batch_size)\n\n self.fake_block = None\n self.fake_mask = [False] * batch_size\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.f_a_init = False\n if self.reset_generators:\n self.subvolumes.reset()\n self.regions = [None] * self.batch_size\n self.kludge['inputs'] = None\n self.kludge['outputs'] = None\n if len(self.epoch_move_counts):\n logging.info(' Average moves (%s): %s',\n self.subvolumes.name,\n sum(self.epoch_move_counts)/float(len(self.epoch_move_counts)))\n self.epoch_move_counts = []\n self.epoch_subvolumes = 0\n self.epoch_subv_metrics = []\n self.fake_mask = [False] * self.batch_size\n\n def get_epoch_metric(self):\n assert len(self.epoch_subv_metrics) == self.subv_per_epoch, \\\n 'Not all validation subvs completed: {}/{} (Finished moves: {}, ongoing: {})'.format(\n len(self.epoch_subv_metrics), self.subv_per_epoch, self.epoch_move_counts, self.move_counts)\n return self.epoch_subv_metrics\n\n def __next__(self):\n # If in the fixed-subvolumes-per-epoch mode and completed, yield fake\n # data quickly.\n if all(self.fake_mask):\n inputs = collections.OrderedDict({\n 'image_input': np.repeat(pad_dims(self.fake_block['image']),\n CONFIG.training.num_gpus, axis=0),\n 'mask_input': np.repeat(pad_dims(self.fake_block['mask']),\n CONFIG.training.num_gpus, axis=0)\n })\n inputs['kludge'] = self.kludge\n outputs = np.repeat(pad_dims(self.fake_block['target']), CONFIG.training.num_gpus, axis=0)\n return (inputs, outputs)\n\n # Before clearing last batches, reuse them to predict mask outputs\n # for 
move training. Add mask outputs to regions.\n active_regions = [n for n, region in enumerate(self.regions) if region is not None]\n if active_regions and self.kludge['outputs'] is not None and self.kludge['inputs'] is not None:\n for n in active_regions:\n assert np.array_equal(self.kludge['inputs'][n, :],\n self.batch_image_input[n, 0, 0, :, 0])\n self.regions[n].add_mask(self.kludge['outputs'][n, :, :, :, 0], self.region_pos[n])\n\n self.batch_image_input = [None] * self.batch_size\n batch_mask_input = [None] * self.batch_size\n batch_mask_target = [None] * self.batch_size\n\n for r, region in enumerate(self.regions):\n block_data = region.get_next_block() if region is not None else None\n if block_data is None:\n if self.subv_per_epoch:\n if region is not None:\n metric = region.prediction_metric(\n self.subv_metric_fn,\n threshold=self.subv_metric_threshold,\n **self.subv_metric_args)\n self.epoch_subv_metrics.append(metric)\n self.regions[r] = None\n if self.epoch_subvolumes >= self.subv_per_epoch:\n block_data = self.fake_block\n self.fake_mask[r] = True\n while block_data is None:\n subvolume = six.next(self.subvolumes)\n self.epoch_subvolumes += 1\n self.f_as[r] = subvolume.f_a()\n\n self.regions[r] = Region.from_subvolume(subvolume)\n if region is not None:\n self.epoch_move_counts.append(self.move_counts[r])\n region = self.regions[r]\n self.move_counts[r] = 0\n block_data = region.get_next_block()\n else:\n self.move_counts[r] += 1\n\n if self.subv_per_epoch and self.fake_block is None:\n assert block_data is not None\n self.fake_block = copy.deepcopy(block_data)\n\n self.batch_image_input[r] = pad_dims(block_data['image'])\n batch_mask_input[r] = pad_dims(block_data['mask'])\n batch_mask_target[r] = pad_dims(block_data['target'])\n self.region_pos[r] = block_data['position']\n\n self.batch_image_input = np.concatenate(self.batch_image_input)\n batch_mask_input = np.concatenate(batch_mask_input)\n batch_mask_target = np.concatenate(batch_mask_target)\n\n inputs = collections.OrderedDict({'image_input': self.batch_image_input,\n 'mask_input': batch_mask_input})\n inputs['kludge'] = self.kludge\n # These inputs are only necessary for assurance the correct FOV is updated.\n self.kludge['inputs'] = self.batch_image_input[:, 0, 0, :, 0].copy()\n self.kludge['outputs'] = None\n\n if self.f_a_bins is None:\n return (inputs,\n [batch_mask_target])\n else:\n f_a_inds = np.digitize(self.f_as, self.f_a_bins) - 1\n inds, counts = np.unique(f_a_inds, return_counts=True)\n if self.f_a_init:\n self.f_a_counts[inds] += counts.astype(np.int64)\n sample_weights = np.ones(self.f_as.size, dtype=np.float64)\n else:\n sample_weights = np.reciprocal(self.f_a_counts[f_a_inds], dtype=np.float64) * float(self.f_as.size)\n return (inputs,\n [batch_mask_target],\n sample_weights)\n\n\nDataGenerator = collections.namedtuple('DataGenerator', ['data', 'gens', 'callbacks', 'steps_per_epoch'])\n\n\ndef get_output_margin(model_config):\n return np.floor_divide(model_config.input_fov_shape - model_config.output_fov_shape, 2)\n\n\ndef build_validation_gen(validation_volumes):\n output_margin = get_output_margin(CONFIG.model)\n\n # If there is only one volume, duplicate since more than one is needed\n # for Keras queuing.\n if len(validation_volumes) == 1:\n single_vol = six.next(six.itervalues(validation_volumes))\n validation_volumes = {'dupe {}'.format(n): single_vol for n in range(CONFIG.training.num_workers)}\n\n validation_gens = [\n preprocess_subvolume_generator(\n 
v.subvolume_generator(shape=CONFIG.model.validation_subv_shape,\n label_margin=output_margin))\n for v in six.itervalues(validation_volumes)]\n if CONFIG.training.augment_validation:\n validation_gens = list(map(augment_subvolume_generator, validation_gens))\n\n # Divide training generators up for workers.\n validation_worker_gens = [\n validation_gens[i::CONFIG.training.num_workers]\n for i in xrange(CONFIG.training.num_workers)]\n\n # Some workers may not receive any generators.\n validation_worker_gens = [g for g in validation_worker_gens if len(g) > 0]\n subv_per_worker = CONFIG.training.validation_size // len(validation_worker_gens)\n logging.debug('# of validation workers: %s', len(validation_worker_gens))\n\n validation_metric = get_function(CONFIG.training.validation_metric['metric'])\n validation_kludges = [{'inputs': None, 'outputs': None} for _ in range(CONFIG.training.num_workers)]\n validation_data = [MovingTrainingGenerator(\n Roundrobin(*gen, name='validation {}'.format(i)),\n CONFIG.training.batch_size,\n kludge,\n f_a_bins=CONFIG.training.fill_factor_bins,\n reset_generators=True,\n subv_per_epoch=subv_per_worker,\n subv_metric_fn=validation_metric,\n subv_metric_threshold=CONFIG.training.validation_metric['threshold'],\n subv_metric_args=CONFIG.training.validation_metric['args'])\n for i, (gen, kludge) in enumerate(zip(validation_worker_gens, validation_kludges))]\n\n callbacks = []\n callbacks.append(GeneratorSubvolumeMetric(validation_data, 'val_subv_metric'))\n callbacks.append(GeneratorReset(validation_data))\n\n VALIDATION_STEPS = np.ceil(CONFIG.training.validation_size / CONFIG.training.batch_size)\n # Number of all-move sequences must be a multiple of number of worker gens.\n VALIDATION_STEPS = np.ceil(VALIDATION_STEPS / len(validation_worker_gens)) * len(validation_worker_gens)\n VALIDATION_STEPS = VALIDATION_STEPS * CONFIG.model.validation_subv_moves + len(validation_worker_gens)\n VALIDATION_STEPS = VALIDATION_STEPS.astype(np.int64)\n\n return DataGenerator(\n data=validation_data,\n gens=validation_worker_gens,\n callbacks=callbacks,\n steps_per_epoch=VALIDATION_STEPS)\n\n\ndef build_training_gen(training_volumes):\n output_margin = get_output_margin(CONFIG.model)\n\n # If there is only one volume, duplicate since more than one is needed\n # for Keras queuing.\n if len(training_volumes) == 1:\n single_vol = six.next(six.itervalues(training_volumes))\n training_volumes = {'dupe {}'.format(n): single_vol for n in range(CONFIG.training.num_workers)}\n\n training_gens = [\n augment_subvolume_generator(\n preprocess_subvolume_generator(\n v.subvolume_generator(shape=CONFIG.model.training_subv_shape,\n label_margin=output_margin)))\n for v in six.itervalues(training_volumes)]\n random.shuffle(training_gens)\n\n # Divide training generators up for workers.\n worker_gens = [\n training_gens[i::CONFIG.training.num_workers]\n for i in xrange(CONFIG.training.num_workers)]\n\n # Some workers may not receive any generators.\n worker_gens = [g for g in worker_gens if len(g) > 0]\n logging.debug('# of training workers: %s', len(worker_gens))\n\n kludges = [{'inputs': None, 'outputs': None} for _ in range(CONFIG.training.num_workers)]\n # Create a training data generator for each worker.\n training_data = [MovingTrainingGenerator(\n Roundrobin(*gen, name='training {}'.format(i)),\n CONFIG.training.batch_size,\n kludge,\n f_a_bins=CONFIG.training.fill_factor_bins,\n reset_generators=CONFIG.training.reset_generators)\n for i, (gen, kludge) in enumerate(zip(worker_gens, 
kludges))]\n training_reset_callback = GeneratorReset(training_data)\n callbacks = [training_reset_callback]\n\n TRAINING_STEPS_PER_EPOCH = CONFIG.training.training_size // CONFIG.training.batch_size\n\n return DataGenerator(\n data=training_data,\n gens=worker_gens,\n callbacks=callbacks,\n steps_per_epoch=TRAINING_STEPS_PER_EPOCH)\n\n\ndef train_network(\n model_file=None,\n volumes=None,\n model_output_filebase=None,\n model_checkpoint_file=None,\n tensorboard=False,\n viewer=False,\n metric_plot=False):\n random.seed(CONFIG.random_seed)\n\n tf_device = 'cpu:0' if CONFIG.training.num_gpus > 1 else 'gpu:0'\n\n if model_file is None:\n factory = get_function(CONFIG.network.factory)\n with tf.device(tf_device):\n ffn = factory(CONFIG.model.input_fov_shape,\n CONFIG.model.output_fov_shape,\n CONFIG.network)\n else:\n with tf.device(tf_device):\n ffn = load_model(model_file, CONFIG.network)\n\n # Multi-GPU models are saved as a single-GPU model prior to compilation,\n # so if loading from such a model file it will need to be recompiled.\n if not hasattr(ffn, 'optimizer'):\n if CONFIG.training.num_gpus > 1:\n ffn = make_parallel(ffn, CONFIG.training.num_gpus)\n compile_network(ffn, CONFIG.optimizer)\n\n patch_prediction_copy(ffn)\n\n if model_output_filebase is None:\n model_output_filebase = 'model_output'\n\n if volumes is None:\n raise ValueError('Volumes must be provided.')\n\n CONFIG.to_toml(model_output_filebase + '.toml')\n\n training_volumes, validation_volumes = partition_volumes(volumes)\n\n num_training = len(training_volumes)\n num_validation = len(validation_volumes)\n\n logging.info('Using {} volumes for training, {} for validation.'.format(num_training, num_validation))\n\n validation = build_validation_gen(validation_volumes)\n training = build_training_gen(training_volumes)\n\n callbacks = []\n callbacks.extend(validation.callbacks)\n callbacks.extend(training.callbacks)\n\n validation_mode = CONFIG.training.validation_metric['mode']\n\n if CONFIG.training.early_abort_epoch is not None and \\\n CONFIG.training.early_abort_loss is not None:\n callbacks.append(EarlyAbort(threshold_epoch=CONFIG.training.early_abort_epoch,\n threshold_value=CONFIG.training.early_abort_loss))\n\n callbacks.append(ModelCheckpoint(model_output_filebase + '.hdf5',\n monitor='val_subv_metric',\n save_best_only=True,\n mode=validation_mode))\n if model_checkpoint_file:\n callbacks.append(ModelCheckpoint(model_checkpoint_file))\n callbacks.append(EarlyStopping(monitor='val_subv_metric',\n patience=CONFIG.training.patience,\n mode=validation_mode))\n # Activation histograms and weight images for TensorBoard will not work\n # because the Keras callback does not currently support validation data\n # generators.\n if tensorboard:\n callbacks.append(TensorBoard())\n\n history = ffn.fit_generator(\n Roundrobin(*training.data, name='training outer'),\n steps_per_epoch=training.steps_per_epoch,\n epochs=CONFIG.training.total_epochs,\n max_queue_size=len(training.gens) - 1,\n workers=1,\n callbacks=callbacks,\n validation_data=Roundrobin(*validation.data, name='validation outer'),\n validation_steps=validation.steps_per_epoch)\n\n write_keras_history_to_csv(history, model_output_filebase + '.csv')\n\n if viewer:\n viz_ex = itertools.islice(validation.data[0], 1)\n\n for inputs, targets in viz_ex:\n viewer = WrappedViewer(voxel_size=list(np.flipud(CONFIG.volume.resolution)))\n output_offset = np.array(inputs['image_input'].shape[1:4]) - np.array(targets[0].shape[1:4])\n output_offset = np.flipud(output_offset // 
2)\n viewer.add(inputs['image_input'][0, :, :, :, 0],\n name='Image')\n viewer.add(inputs['mask_input'][0, :, :, :, 0],\n name='Mask Input',\n shader=get_color_shader(2))\n viewer.add(targets[0][0, :, :, :, 0],\n name='Mask Target',\n shader=get_color_shader(0),\n voxel_offset=output_offset)\n output = ffn.predict_on_batch(inputs)\n viewer.add(output[0, :, :, :, 0],\n name='Mask Output',\n shader=get_color_shader(1),\n voxel_offset=output_offset)\n\n viewer.print_view_prompt()\n\n if metric_plot:\n fig = plot_history(history)\n fig.savefig(model_output_filebase + '.png')\n\n return history\n\n\ndef validate_model(model_file, volumes):\n from .network import load_model\n\n _, volumes = partition_volumes(volumes)\n\n validation = build_validation_gen(volumes)\n\n tf_device = 'cpu:0' if CONFIG.training.num_gpus > 1 else 'gpu:0'\n with tf.device(tf_device):\n model = load_model(model_file, CONFIG.network)\n\n # Multi-GPU models are saved as a single-GPU model prior to compilation,\n # so if loading from such a model file it will need to be recompiled.\n if not hasattr(model, 'optimizer'):\n if CONFIG.training.num_gpus > 1:\n model = make_parallel(model, CONFIG.training.num_gpus)\n compile_network(model, CONFIG.optimizer)\n\n patch_prediction_copy(model)\n\n pbar = tqdm(desc='Validation batches', total=validation.steps_per_epoch)\n finished = [False] * len(validation.gens)\n\n for n, data in itertools.cycle(enumerate(validation.data)):\n if all(finished):\n break\n\n pbar.update(1)\n\n if all(data.fake_mask):\n finished[n] = True\n continue\n\n batch = six.next(data)\n model.test_on_batch(*batch)\n\n pbar.close()\n\n metrics = []\n for gen in validation.data:\n metrics.extend(gen.get_epoch_metric())\n\n print('Metric: ', np.mean(metrics))\n print('All: ', metrics)\n"
] | [
[
"numpy.true_divide",
"scipy.ndimage.binary_erosion",
"numpy.minimum",
"numpy.exp2",
"numpy.asarray",
"numpy.issubdtype",
"numpy.all",
"numpy.any",
"numpy.random.sample",
"numpy.where",
"numpy.divide",
"numpy.clip",
"numpy.full",
"numpy.array_str",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.multiply",
"numpy.min",
"numpy.floor_divide",
"numpy.full_like",
"scipy.ndimage.label",
"numpy.equal",
"numpy.transpose",
"numpy.array",
"numpy.flip",
"numpy.random.RandomState",
"numpy.maximum",
"numpy.array_equal",
"numpy.ones",
"numpy.random.normal",
"numpy.fromstring",
"numpy.mod"
],
[
"tensorflow.set_random_seed",
"numpy.random.seed"
],
[
"tensorflow.device",
"numpy.ones_like",
"numpy.array_equal",
"numpy.unique",
"numpy.reciprocal",
"matplotlib.use",
"numpy.floor_divide",
"numpy.flipud",
"numpy.ones",
"numpy.concatenate",
"numpy.ceil",
"numpy.mean",
"numpy.any",
"numpy.digitize",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
DavidNaizheZhou/stanpy | [
"257072bd52154e9e4d68be957fd12eee1ad3dc56"
] | [
"tests/src/stanpy/test_reduction.py"
] | [
"import stanpy as stp\nimport numpy as np\nimport os\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef test_multiple_w0():\n\n EI = 32000 # kN/m2\n l = 3 # m\n\n hinged_support = {\"w\": 0, \"M\": 0}\n roller_support = {\"w\": 0, \"M\": 0, \"H\": 0}\n fixed_support = {\"w\": 0, \"phi\": 0}\n\n s1 = {\"EI\": EI, \"l\": l, \"bc_i\": hinged_support, \"bc_k\": {\"w\": 0}, \"q\": 10}\n s2 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"q\": 10}\n # s2 = {\"EI\": EI, \"l\": l, \"q\": 10}\n s3 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"q\": 10}\n s4 = {\"EI\": EI, \"l\": l, \"bc_k\": roller_support}\n\n s = [s1, s2, s3, s4]\n\n x = np.linspace(0,4*l,5)\n Zi, Zk = stp.tr_solver(*s)\n Fx = stp.tr(*s, x=x)\n Zx = Fx.dot(Zi)\n\n path = os.path.join(dir_path, \"reduction_method_npz\", \"test_multiple_w0.npz\")\n # np.savez_compressed(path, Fx=Fx, Zx=Zx)\n npz = np.load(path)\n Zx_test = npz[\"Zx\"]\n Fx_test = npz[\"Fx\"]\n\n np.testing.assert_allclose(Fx, Fx_test,rtol=1e-5)\n np.testing.assert_allclose(Zx.round(10), Zx_test,rtol=1e-5)\n\ndef test_multiple_w0_M0():\n\n import numpy as np\n np.set_printoptions(precision=6, threshold=5000)\n import matplotlib.pyplot as plt\n import stanpy as stp\n\n EI = 32000 # kN/m2\n l = 3 # m\n\n hinged_support = {\"w\": 0, \"M\": 0}\n roller_support = {\"w\": 0, \"M\": 0, \"H\": 0}\n fixed_support = {\"w\": 0, \"phi\": 0}\n\n s1 = {\"EI\": EI, \"l\": l, \"bc_i\": hinged_support, \"bc_k\": {\"w\": 0}, \"q\": 10}\n s2 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"M\": 0}, \"q\": 8}\n # s2 = {\"EI\": EI, \"l\": l, \"q\": 10}\n s3 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"q\": 6}\n s4 = {\"EI\": EI, \"l\": l, \"bc_k\": roller_support}\n\n s = [s1, s2, s3, s4]\n\n\n x = np.sort(np.append(np.linspace(0,4*l,4000), [l,2*l, 3*l, 4*l]))\n\n Zi, Zk = stp.tr_solver(*s)\n Fx = stp.tr(*s, x=x)\n Zx = Fx.dot(Zi).round(10)\n \n path = os.path.join(dir_path, \"reduction_method_npz\", \"test_multiple_w0_M0.npz\")\n # np.savez_compressed(path, Fx=Fx, Zx=Zx)\n npz = np.load(path)\n Zx_test = npz[\"Zx\"]\n Fx_test = npz[\"Fx\"]\n\n np.testing.assert_allclose(Fx, Fx_test,rtol=1e-5)\n np.testing.assert_allclose(Zx, Zx_test,rtol=1e-5)\n\ndef test_multiple_w0_combination():\n \"\"\"testet einwertige Bindungen mit zusammengesetzten Stäben\n \"\"\"\n\n import numpy as np\n np.set_printoptions(precision=6, threshold=5000)\n import matplotlib.pyplot as plt\n import stanpy as stp\n\n EI = 32000 # kN/m2\n l = 3 # m\n\n hinged_support = {\"w\": 0, \"M\": 0}\n roller_support = {\"w\": 0, \"M\": 0, \"H\": 0}\n fixed_support = {\"w\": 0, \"phi\": 0}\n\n s1 = {\"EI\": EI, \"l\": l, \"bc_i\": hinged_support, \"bc_k\": {\"w\": 0}, \"q\": 10}\n # s2 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"M\": 0}, \"q\": 10}\n s2 = {\"EI\": EI, \"l\": l, \"q\": 8}\n s3 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"q\": 6}\n s4 = {\"EI\": EI, \"l\": l, \"bc_k\": roller_support}\n\n s = [s1, s2, s3, s4]\n\n x = np.sort(np.append(np.linspace(0,4*l,4000), [l,2*l, 3*l, 4*l]))\n\n Zi, Zk = stp.tr_solver(*s)\n Fx = stp.tr(*s, x=x)\n Zx = Fx.dot(Zi).round(10)\n \n path = os.path.join(dir_path, \"reduction_method_npz\", \"test_multiple_w0_combination.npz\")\n # np.savez_compressed(path, Fx=Fx, Zx=Zx)\n\n npz = np.load(path)\n Zx_test = npz[\"Zx\"]\n Fx_test = npz[\"Fx\"]\n\n np.testing.assert_allclose(Fx, Fx_test,rtol=1e-5)\n np.testing.assert_allclose(Zx, Zx_test,rtol=1e-5)\n\ndef test_large_system():\n import numpy as np\n import sympy as sym\n import stanpy as stp\n import matplotlib.pyplot as plt\n\n EI = 
32000 # kN/m2\n P = 5 # kN\n q = 4 # kN/m\n l = 3 # m\n\n roller_support = {\"w\": 0, \"M\": 0, \"H\": 0}\n fixed_support = {\"w\": 0, \"phi\": 0}\n hinge = {\"M\": 0}\n\n s0 = {\"EI\": EI, \"l\": l, \"bc_i\": fixed_support, \"bc_k\": {\"w\": 0}}\n s1 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"q\": q}\n s2 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}}\n s3 = {\"EI\": EI, \"l\": l, \"bc_k\": hinge, \"q\": q, \"P\": (P, l)}\n s4 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}}\n s5 = {\"EI\": EI, \"l\": l, \"bc_k\": hinge}\n s6 = {\"EI\": EI, \"l\": l, \"bc_k\": roller_support, \"P\": (P, l / 2), }\n\n s = [s0, s1, s2, s3, s4, s5, s6]\n\n # fig, ax = plt.subplots(figsize=(12, 5))\n # stp.plot_system(ax, s=22, *s)\n # stp.plot_load(ax, *s, P_scale=0.5, q_scale=0.5)\n # ax.set_ylim(-0.5, 1)\n # plt.show()\n\n x = np.sort(np.append(np.linspace(0,7*l,70),[l,2*l, 3*l, 4*l, 5*l, 6*l]))\n Zi, Zk = stp.tr_solver(*s)\n Fx = stp.tr(*s, x=x)\n Zx = Fx.dot(Zi)\n\n path = os.path.join(dir_path, \"reduction_method_npz\", \"test_large_system.npz\")\n # np.savez_compressed(path, Fx=Fx, Zx=Zx)\n\n npz = np.load(path)\n Zx_test = npz[\"Zx\"]\n Fx_test = npz[\"Fx\"]\n\n np.testing.assert_allclose(Fx, Fx_test,rtol=1e-5)\n np.testing.assert_allclose(Zx, Zx_test,rtol=1e-5)\n\n # scale = 0.5\n # fig, ax = plt.subplots()\n # stp.plot_system(ax, *s, watermark=False)\n # stp.plot_M(\n # ax,\n # x=x,\n # Mx=Zx[:, 2],\n # annotate_x=[l,2*l, 3*l, 4*l, 5*l, 6*l],\n # fill_p=\"red\",\n # fill_n=\"blue\",\n # scale=scale,\n # alpha=0.2,\n # )\n\n # ax.set_ylim(-1, 1)\n # ax.axis('off')\n # plt.show()\n\ndef test_large_system_II():\n import numpy as np\n import sympy as sym\n import stanpy as stp\n import matplotlib.pyplot as plt\n\n EI = 32000 # kN/m2\n P = 5 # kN\n q = 4 # kN/m\n l = 3 # m\n\n roller_support = {\"w\": 0, \"M\": 0, \"H\": 0}\n fixed_support = {\"w\": 0, \"phi\": 0}\n hinge = {\"M\": 0}\n\n s0 = {\"EI\": EI, \"l\": l, \"bc_i\": fixed_support, \"bc_k\": {\"w\": 0}, \"N\": -1000}\n s1 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"q\": q, \"N\": -1000}\n s2 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"N\": -1000}\n s3 = {\"EI\": EI, \"l\": l, \"bc_k\": hinge, \"q\": q, \"P\": (P, l), \"N\": -1000}\n s4 = {\"EI\": EI, \"l\": l, \"bc_k\": {\"w\": 0}, \"N\": -1000}\n s5 = {\"EI\": EI, \"l\": l, \"bc_k\": hinge, \"N\": -1000}\n s6 = {\"EI\": EI, \"l\": l, \"bc_k\": roller_support, \"P\": (P, l / 2), \"N\": -1000}\n\n s = [s0, s1, s2, s3, s4, s5, s6]\n\n # fig, ax = plt.subplots(figsize=(12, 5))\n # stp.plot_system(ax, s=22, *s)\n # stp.plot_load(ax, *s, P_scale=0.5, q_scale=0.5)\n # ax.set_ylim(-0.5, 1)\n # plt.show()\n\n x = np.sort(np.append(np.linspace(0,7*l,70),[l,2*l, 3*l, 4*l, 5*l, 6*l]))\n Zi, Zk = stp.tr_solver(*s)\n Fx = stp.tr(*s, x=x)\n Zx = Fx.dot(Zi)\n\n path = os.path.join(dir_path, \"reduction_method_npz\", \"test_large_system_II.npz\")\n # np.savez_compressed(path, Fx=Fx, Zx=Zx)\n\n npz = np.load(path)\n Zx_test = npz[\"Zx\"]\n Fx_test = npz[\"Fx\"]\n\n np.testing.assert_allclose(Fx, Fx_test,rtol=1e-5)\n np.testing.assert_allclose(Zx, Zx_test,rtol=1e-5)\n\n # scale = 0.5\n # fig, ax = plt.subplots()\n # stp.plot_system(ax, *s, watermark=False)\n # stp.plot_M(\n # ax,\n # x=x,\n # Mx=Zx[:, 2],\n # annotate_x=[l,2*l, 3*l, 4*l, 5*l, 6*l],\n # fill_p=\"red\",\n # fill_n=\"blue\",\n # scale=scale,\n # alpha=0.2,\n # )\n\n # ax.set_ylim(-1, 1)\n # ax.axis('off')\n # plt.show()\n\ndef empty():\n import matplotlib.pyplot as plt\n import numpy as np\n\n EI = 32000 # kNm²\n GA = 20000 # kN\n l = 
6 # m\n q = 4 # kN/m\n P = 1500 # kN\n H = 10 # kN\n\n fixed = {\"w\":0, \"phi\":0}\n hinged = {\"w\":0, \"M\":0, \"H\":0}\n\n s = {\"EI\":EI, \"GA\":GA, \"l\":l, \"q\":q, \"w_0\":0.03,\"N\":(-P ),\"P1\":(H, l/2),\"P2\":(H, l/3),\"P3\":(H, 2*l/3), \"bc_i\":fixed, \"bc_k\":hinged}\n fig, ax = plt.subplots()\n stp.plot_system(ax, s)\n stp.plot_load(ax, s)\n stp.plot_w_0(ax, s, scale=0.4, dy=-0.2)\n ax.set_ylim(-1,1)\n plt.show()\n x = np\n\n Zi, Zk = stp.tr_solver(s)\n Fxx = stp.tr(s)\n Zx = Fxx.dot(Zi)\n\n print(Zx)\n\nif __name__==\"__main__\":\n # test_large_system_II()\n empty()"
] | [
[
"numpy.linspace",
"numpy.set_printoptions",
"matplotlib.pyplot.subplots",
"numpy.testing.assert_allclose",
"numpy.load",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lleiou/causalml | [
"2d3cacacad5ed3b0e57b593803a33c61c554f3b2"
] | [
"causalml/inference/tree/models.py"
] | [
"\"\"\"\nForest of trees-based ensemble methods for Uplift modeling on Classification\nProblem. Those methods include random forests and extremely randomized trees.\n\nThe module structure is the following:\n- The ``UpliftRandomForestClassifier`` base class implements different\n variants of uplift models based on random forest, with 'fit' and 'predict'\n method.\n- The ``UpliftTreeClassifier`` base class implements the uplift trees (without\n Bootstrapping for random forest), this class is called within\n ``UpliftRandomForestClassifier`` for constructing random forest.\n\"\"\"\n\n# Authors: Zhenyu Zhao <[email protected]>\n# Totte Harinen <[email protected]>\n\nfrom collections import defaultdict\nfrom joblib import Parallel, delayed\nimport multiprocessing as mp\nimport numpy as np\nfrom packaging import version\nimport pandas as pd\nimport scipy.stats as stats\nimport sklearn\nif version.parse(sklearn.__version__) >= version.parse('0.22.0'):\n from sklearn.utils._testing import ignore_warnings\nelse:\n from sklearn.utils.testing import ignore_warnings\n\n\nclass DecisionTree:\n \"\"\" Tree Node Class\n\n Tree node class to contain all the statistics of the tree node.\n\n Parameters\n ----------\n\n col : int, optional (default = -1)\n The column index for splitting the tree node to children nodes.\n\n value : float, optional (default = None)\n The value of the feature column to split the tree node to children nodes.\n\n trueBranch : object of DecisionTree\n The true branch tree node (feature > value).\n\n falseBranch : object of DecisionTree\n The false branch tree node (feature > value).\n\n results : dictionary\n The classification probability Pr(1) for each experiment group in the tree node.\n\n summary : dictionary\n Summary statistics of the tree nodes, including impurity, sample size, uplift score, etc.\n\n maxDiffTreatment : string\n The treatment name generating the maximum difference between treatment and control group.\n\n maxDiffSign : float\n The sign of the maximum difference (1. or -1.).\n\n nodeSummary : dictionary\n Summary statistics of the tree nodes {treatment: [y_mean, n]}, where y_mean stands for the target metric mean\n and n is the sample size.\n\n backupResults : dictionary\n The conversion probabilities in each treatment in the parent node {treatment: y_mean}. 
The parent node\n information is served as a backup for the children node, in case no valid statistics can be calculated from the\n children node, the parent node information will be used in certain cases.\n\n bestTreatment : string\n The treatment name providing the best uplift (treatment effect).\n\n upliftScore : list\n The uplift score of this node: [max_Diff, p_value], where max_Diff stands for the maximum treatment effect, and\n p_value stands for the p_value of the treatment effect.\n\n matchScore : float\n The uplift score by filling a trained tree with validation dataset or testing dataset.\n\n \"\"\"\n\n def __init__(self, col=-1, value=None, trueBranch=None, falseBranch=None,\n results=None, summary=None, maxDiffTreatment=None,\n maxDiffSign=1., nodeSummary=None, backupResults=None,\n bestTreatment=None, upliftScore=None, matchScore=None):\n self.col = col\n self.value = value\n self.trueBranch = trueBranch\n self.falseBranch = falseBranch\n self.results = results # None for nodes, not None for leaves\n self.summary = summary\n # the treatment with max( |p(y|treatment) - p(y|control)| )\n self.maxDiffTreatment = maxDiffTreatment\n # the sign for p(y|maxDiffTreatment) - p(y|control)\n self.maxDiffSign = maxDiffSign\n self.nodeSummary = nodeSummary\n self.backupResults = backupResults\n self.bestTreatment = bestTreatment\n self.upliftScore = upliftScore\n # match actual treatment for validation and testing\n self.matchScore = matchScore\n\n\n# Uplift Tree Classifier\nclass UpliftTreeClassifier:\n \"\"\" Uplift Tree Classifier for Classification Task.\n\n A uplift tree classifier estimates the individual treatment effect by modifying the loss function in the\n classification trees.\n\n The uplift tree classifier is used in uplift random forest to construct the trees in the forest.\n\n Parameters\n ----------\n\n evaluationFunction : string\n Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.\n\n max_features: int, optional (default=None)\n The number of features to consider when looking for the best split.\n\n max_depth: int, optional (default=3)\n The maximum depth of the tree.\n\n min_samples_leaf: int, optional (default=100)\n The minimum number of samples required to be split at a leaf node.\n\n min_samples_treatment: int, optional (default=10)\n The minimum number of samples required of the experiment group to be split at a leaf node.\n\n n_reg: int, optional (default=100)\n The regularization parameter defined in Rzepakowski et al. 2012, the weight (in terms of sample size) of the\n parent node influence on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.\n\n control_name: string\n The name of the control group (other experiment groups will be regarded as treatment groups)\n\n normalization: boolean, optional (default=True)\n The normalization factor defined in Rzepakowski et al. 
2012, correcting for tests with large number of splits\n and imbalanced treatment and control splits\n\n \"\"\"\n def __init__(self, max_features=None, max_depth=3, min_samples_leaf=100,\n min_samples_treatment=10, n_reg=100, evaluationFunction='KL',\n control_name=None, normalization=True):\n self.max_depth = max_depth\n self.min_samples_leaf = min_samples_leaf\n self.min_samples_treatment = min_samples_treatment\n self.n_reg = n_reg\n self.max_features = max_features\n if evaluationFunction == 'KL':\n self.evaluationFunction = self.evaluate_KL\n elif evaluationFunction == 'ED':\n self.evaluationFunction = self.evaluate_ED\n elif evaluationFunction == 'Chi':\n self.evaluationFunction = self.evaluate_Chi\n else:\n self.evaluationFunction = self.evaluate_CTS\n self.fitted_uplift_tree = None\n self.control_name = control_name\n self.normalization = normalization\n\n def fit(self, X, treatment, y):\n \"\"\" Fit the uplift model.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n\n Returns\n -------\n self : object\n \"\"\"\n assert len(X) == len(y) and len(X) == len(treatment), 'Data length must be equal for X, treatment, and y.'\n\n self.treatment_group = list(set(treatment))\n self.feature_imp_dict = defaultdict(float)\n\n self.fitted_uplift_tree = self.growDecisionTreeFrom(\n X, treatment, y, evaluationFunction=self.evaluationFunction,\n max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf,\n depth=1, min_samples_treatment=self.min_samples_treatment,\n n_reg=self.n_reg, parentNodeSummary=None\n )\n\n self.feature_importances_ = np.zeros(X.shape[1])\n for col, imp in self.feature_imp_dict.items():\n self.feature_importances_[col] = imp\n self.feature_importances_ /= self.feature_importances_.sum() # normalize to add to 1\n\n # Prune Trees\n def prune(self, X, treatment, y, minGain=0.0001, rule='maxAbsDiff'):\n \"\"\" Prune the uplift model.\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n minGain : float, optional (default = 0.0001)\n The minimum gain required to make a tree node split. The children\n tree branches are trimmed if the actual split gain is less than\n the minimum gain.\n rule : string, optional (default = 'maxAbsDiff')\n The prune rules. 
Supported values are 'maxAbsDiff' for optimizing\n the maximum absolute difference, and 'bestUplift' for optimizing\n the node-size weighted treatment effect.\n Returns\n -------\n self : object\n \"\"\"\n assert len(X) == len(y) and len(X) == len(treatment), 'Data length must be equal for X, treatment, and y.'\n\n self.pruneTree(X, treatment, y,\n tree=self.fitted_uplift_tree,\n rule=rule,\n minGain=minGain,\n evaluationFunction=self.evaluationFunction,\n notify=False,\n n_reg=self.n_reg,\n parentNodeSummary=None)\n return self\n\n def pruneTree(self, X, treatment, y, tree, rule='maxAbsDiff', minGain=0.,\n evaluationFunction=None, notify=False, n_reg=0,\n parentNodeSummary=None):\n \"\"\"Prune one single tree node in the uplift model.\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n rule : string, optional (default = 'maxAbsDiff')\n The prune rules. Supported values are 'maxAbsDiff' for optimizing the maximum absolute difference, and\n 'bestUplift' for optimizing the node-size weighted treatment effect.\n minGain : float, optional (default = 0.)\n The minimum gain required to make a tree node split. The children tree branches are trimmed if the actual\n split gain is less than the minimum gain.\n evaluationFunction : string, optional (default = None)\n Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.\n notify: bool, optional (default = False)\n n_reg: int, optional (default=0)\n The regularization parameter defined in Rzepakowski et al. 2012, the weight (in terms of sample size) of the\n parent node influence on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.\n parentNodeSummary : dictionary, optional (default = None)\n Node summary statistics of the parent tree node.\n Returns\n -------\n self : object\n \"\"\"\n # Current Node Summary for Validation Data Set\n currentNodeSummary = self.tree_node_summary(\n treatment, y, min_samples_treatment=self.min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=parentNodeSummary\n )\n tree.nodeSummary = currentNodeSummary\n # Divide sets for child nodes\n X_l, X_r, w_l, w_r, y_l, y_r = self.divideSet(X, treatment, y, tree.col, tree.value)\n\n # recursive call for each branch\n if tree.trueBranch.results is None:\n self.pruneTree(X_l, w_l, y_l, tree.trueBranch, rule, minGain,\n evaluationFunction, notify, n_reg,\n parentNodeSummary=currentNodeSummary)\n if tree.falseBranch.results is None:\n self.pruneTree(X_r, w_r, y_r, tree.falseBranch, rule, minGain,\n evaluationFunction, notify, n_reg,\n parentNodeSummary=currentNodeSummary)\n\n # merge leaves (potentially)\n if (tree.trueBranch.results is not None and\n tree.falseBranch.results is not None):\n if rule == 'maxAbsDiff':\n # Current D\n if (tree.maxDiffTreatment in currentNodeSummary and\n self.control_name in currentNodeSummary):\n currentScoreD = tree.maxDiffSign * (currentNodeSummary[tree.maxDiffTreatment][0]\n - currentNodeSummary[self.control_name][0])\n else:\n currentScoreD = 0\n\n # trueBranch D\n trueNodeSummary = self.tree_node_summary(\n w_l, y_l, min_samples_treatment=self.min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=currentNodeSummary\n )\n if (tree.trueBranch.maxDiffTreatment in trueNodeSummary and\n self.control_name in trueNodeSummary):\n trueScoreD = 
tree.trueBranch.maxDiffSign * (trueNodeSummary[tree.trueBranch.maxDiffTreatment][0]\n - trueNodeSummary[self.control_name][0])\n trueScoreD = (\n trueScoreD\n * (trueNodeSummary[tree.trueBranch.maxDiffTreatment][1]\n + trueNodeSummary[self.control_name][1])\n / (currentNodeSummary[tree.trueBranch.maxDiffTreatment][1]\n + currentNodeSummary[self.control_name][1])\n )\n else:\n trueScoreD = 0\n\n # falseBranch D\n falseNodeSummary = self.tree_node_summary(\n w_r, y_r, min_samples_treatment=self.min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=currentNodeSummary\n )\n if (tree.falseBranch.maxDiffTreatment in falseNodeSummary and\n self.control_name in falseNodeSummary):\n falseScoreD = (\n tree.falseBranch.maxDiffSign *\n (falseNodeSummary[tree.falseBranch.maxDiffTreatment][0]\n - falseNodeSummary[self.control_name][0])\n )\n\n falseScoreD = (\n falseScoreD *\n (falseNodeSummary[tree.falseBranch.maxDiffTreatment][1]\n + falseNodeSummary[self.control_name][1])\n / (currentNodeSummary[tree.falseBranch.maxDiffTreatment][1]\n + currentNodeSummary[self.control_name][1])\n )\n else:\n falseScoreD = 0\n\n if ((trueScoreD + falseScoreD) - currentScoreD <= minGain or\n (trueScoreD + falseScoreD < 0.)):\n tree.trueBranch, tree.falseBranch = None, None\n tree.results = tree.backupResults\n\n elif rule == 'bestUplift':\n # Current D\n if (tree.bestTreatment in currentNodeSummary and\n self.control_name in currentNodeSummary):\n currentScoreD = (\n currentNodeSummary[tree.bestTreatment][0]\n - currentNodeSummary[self.control_name][0]\n )\n else:\n currentScoreD = 0\n\n # trueBranch D\n trueNodeSummary = self.tree_node_summary(\n w_l, y_l, min_samples_treatment=self.min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=currentNodeSummary\n )\n if (tree.trueBranch.bestTreatment in trueNodeSummary and\n self.control_name in trueNodeSummary):\n trueScoreD = (\n trueNodeSummary[tree.trueBranch.bestTreatment][0]\n - trueNodeSummary[self.control_name][0]\n )\n else:\n trueScoreD = 0\n\n # falseBranch D\n falseNodeSummary = self.tree_node_summary(\n w_r, y_r, min_samples_treatment=self.min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=currentNodeSummary\n )\n if (tree.falseBranch.bestTreatment in falseNodeSummary and\n self.control_name in falseNodeSummary):\n falseScoreD = (\n falseNodeSummary[tree.falseBranch.bestTreatment][0]\n - falseNodeSummary[self.control_name][0]\n )\n else:\n falseScoreD = 0\n gain = ((1. * len(y_l) / len(y) * trueScoreD\n + 1. 
* len(y_r) / len(y) * falseScoreD)\n - currentScoreD)\n if gain <= minGain or (trueScoreD + falseScoreD < 0.):\n tree.trueBranch, tree.falseBranch = None, None\n tree.results = tree.backupResults\n return self\n\n def fill(self, X, treatment, y):\n \"\"\" Fill the data into an existing tree.\n This is a higher-level function to transform the original data inputs\n into lower level data inputs (list of list and tree).\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n\n Returns\n -------\n self : object\n \"\"\"\n assert len(X) == len(y) and len(X) == len(treatment), 'Data length must be equal for X, treatment, and y.'\n\n self.fillTree(X, treatment, y, tree=self.fitted_uplift_tree)\n return self\n\n def fillTree(self, X, treatment, y, tree):\n \"\"\" Fill the data into an existing tree.\n This is a lower-level function to execute on the tree filling task.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n tree : object\n object of DecisionTree class\n\n Returns\n -------\n self : object\n \"\"\"\n # Current Node Summary for Validation Data Set\n currentNodeSummary = self.tree_node_summary(treatment, y,\n min_samples_treatment=0,\n n_reg=0,\n parentNodeSummary=None)\n tree.nodeSummary = currentNodeSummary\n # Divide sets for child nodes\n X_l, X_r, w_l, w_r, y_l, y_r = self.divideSet(X, treatment, y, tree.col, tree.value)\n\n # recursive call for each branch\n if tree.trueBranch is not None:\n self.fillTree(X_l, w_l, y_l, tree.trueBranch)\n if tree.falseBranch is not None:\n self.fillTree(X_r, w_r, y_r, tree.falseBranch)\n\n # Update Information\n\n # matchScore\n matchScore = (currentNodeSummary[tree.bestTreatment][0] - currentNodeSummary[self.control_name][0])\n tree.matchScore = round(matchScore, 4)\n tree.summary['matchScore'] = round(matchScore, 4)\n\n # Samples, Group_size\n tree.summary['samples'] = len(y)\n tree.summary['group_size'] = ''\n for treatment_group in currentNodeSummary:\n tree.summary['group_size'] += ' ' + treatment_group + ': ' + str(currentNodeSummary[treatment_group][1])\n # classProb\n if tree.results is not None:\n tree.results = self.uplift_classification_results(treatment, y)\n return self\n\n def predict(self, X, full_output=False):\n '''\n Returns the recommended treatment group and predicted optimal\n probability conditional on using the recommended treatment group.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n\n full_output : bool, optional (default=False)\n Whether the UpliftTree algorithm returns upliftScores, pred_nodes\n alongside the recommended treatment group and p_hat in the treatment group.\n\n Returns\n -------\n df_res : DataFrame, shape = [num_samples, (num_treatments + 1)]\n A DataFrame containing the predicted delta in each treatment group,\n the best treatment group and the maximum delta.\n\n '''\n\n p_hat_optimal = []\n treatment_optimal = []\n pred_nodes = {}\n upliftScores = []\n for xi in 
range(len(X)):\n pred_leaf, upliftScore = self.classify(X[xi], self.fitted_uplift_tree, dataMissing=False)\n # Predict under uplift optimal treatment\n opt_treat = max(pred_leaf, key=pred_leaf.get)\n p_hat_optimal.append(pred_leaf[opt_treat])\n treatment_optimal.append(opt_treat)\n if full_output:\n if xi == 0:\n for key_i in pred_leaf:\n pred_nodes[key_i] = [pred_leaf[key_i]]\n else:\n for key_i in pred_leaf:\n pred_nodes[key_i].append(pred_leaf[key_i])\n upliftScores.append(upliftScore)\n if full_output:\n return treatment_optimal, p_hat_optimal, upliftScores, pred_nodes\n else:\n return treatment_optimal, p_hat_optimal\n\n @staticmethod\n def divideSet(X, treatment, y, column, value):\n '''\n Tree node split.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n column : int\n The column used to split the data.\n value : float or int\n The value in the column for splitting the data.\n\n Returns\n -------\n (X_l, X_r, treatment_l, treatment_r, y_l, y_r) : list of ndarray\n The covariates, treatments and outcomes of left node and the right node.\n '''\n # for int and float values\n if isinstance(value, int) or isinstance(value, float):\n filt = X[:, column] >= value\n else: # for strings\n filt = X[:, column] == value\n\n return X[filt], X[~filt], treatment[filt], treatment[~filt], y[filt], y[~filt]\n\n def group_uniqueCounts(self, treatment, y):\n '''\n Count sample size by experiment group.\n\n Args\n ----\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n\n Returns\n -------\n results : dictionary\n The control and treatment sample size.\n '''\n results = {}\n for t in self.treatment_group:\n filt = treatment == t\n n_t = y[filt].sum()\n results[t] = (filt.sum() - n_t, n_t)\n\n return results\n\n @staticmethod\n def kl_divergence(pk, qk):\n '''\n Calculate KL Divergence for binary classification.\n\n sum(np.array(pk) * np.log(np.array(pk) / np.array(qk)))\n\n Args\n ----\n pk : float\n The probability of 1 in one distribution.\n qk : float\n The probability of 1 in the other distribution.\n\n Returns\n -------\n S : float\n The KL divergence.\n '''\n\n eps = 1e-6\n qk = np.clip(qk, eps, 1 - eps)\n\n if pk == 0:\n S = -np.log(1 - qk)\n elif pk == 1:\n S = -np.log(qk)\n else:\n S = pk * np.log(pk / qk) + (1 - pk) * np.log((1 - pk) / (1 - qk))\n\n return S\n\n def evaluate_KL(self, nodeSummary, control_name):\n '''\n Calculate KL Divergence as split evaluation criterion for a given node.\n\n Args\n ----\n nodeSummary : dictionary\n The tree node summary statistics, produced by tree_node_summary()\n method.\n\n control_name : string\n The control group name.\n\n Returns\n -------\n d_res : KL Divergence\n '''\n if control_name not in nodeSummary:\n return 0\n pc = nodeSummary[control_name][0]\n d_res = 0\n for treatment_group in nodeSummary:\n if treatment_group != control_name:\n d_res += self.kl_divergence(nodeSummary[treatment_group][0], pc)\n return d_res\n\n @staticmethod\n def evaluate_ED(nodeSummary, control_name):\n '''\n Calculate Euclidean Distance as split evaluation criterion for a given node.\n\n Args\n ----\n nodeSummary : dictionary\n The 
tree node summary statistics, produced by tree_node_summary()\n method.\n\n control_name : string\n The control group name.\n\n Returns\n -------\n d_res : Euclidean Distance\n '''\n if control_name not in nodeSummary:\n return 0\n pc = nodeSummary[control_name][0]\n d_res = 0\n for treatment_group in nodeSummary:\n if treatment_group != control_name:\n d_res += 2*(nodeSummary[treatment_group][0] - pc)**2\n return d_res\n\n @staticmethod\n def evaluate_Chi(nodeSummary, control_name):\n '''\n Calculate Chi-Square statistic as split evaluation criterion for a given node.\n\n Args\n ----\n nodeSummary : dictionary\n The tree node summary statistics, produced by tree_node_summary() method.\n\n control_name : string\n The control group name.\n\n Returns\n -------\n d_res : Chi-Square\n '''\n if control_name not in nodeSummary:\n return 0\n pc = nodeSummary[control_name][0]\n d_res = 0\n for treatment_group in nodeSummary:\n if treatment_group != control_name:\n d_res += ((nodeSummary[treatment_group][0] - pc) ** 2 / max(0.1 ** 6, pc)\n + (nodeSummary[treatment_group][0] - pc) ** 2 / max(0.1 ** 6, 1 - pc))\n return d_res\n\n @staticmethod\n def evaluate_CTS(currentNodeSummary):\n '''\n Calculate CTS (conditional treatment selection) as split evaluation criterion for a given node.\n\n Args\n ----\n nodeSummary : dictionary\n The tree node summary statistics, produced by tree_node_summary() method.\n\n control_name : string\n The control group name.\n\n Returns\n -------\n d_res : Chi-Square\n '''\n mu = 0.0\n # iterate treatment group\n for r in currentNodeSummary:\n mu = max(mu, currentNodeSummary[r][0])\n return -mu\n\n @staticmethod\n def entropyH(p, q=None):\n '''\n Entropy\n\n Entropy calculation for normalization.\n\n Args\n ----\n p : float\n The probability used in the entropy calculation.\n\n q : float, optional, (default = None)\n The second probability used in the entropy calculation.\n\n Returns\n -------\n entropy : float\n '''\n if q is None and p > 0:\n return -p * np.log(p)\n elif q > 0:\n return -p * np.log(q)\n else:\n return 0\n\n def normI(self, currentNodeSummary, leftNodeSummary, rightNodeSummary, control_name, alpha=0.9):\n '''\n Normalization factor.\n\n Args\n ----\n currentNodeSummary : dictionary\n The summary statistics of the current tree node.\n\n leftNodeSummary : dictionary\n The summary statistics of the left tree node.\n\n rightNodeSummary : dictionary\n The summary statistics of the right tree node.\n\n control_name : string\n The control group name.\n\n alpha : float\n The weight used to balance different normalization parts.\n\n Returns\n -------\n norm_res : float\n Normalization factor.\n '''\n norm_res = 0\n # n_t, n_c: sample size for all treatment, and control\n # pt_a, pc_a: % of treatment is in left node, % of control is in left node\n n_c = currentNodeSummary[control_name][1]\n n_c_left = leftNodeSummary[control_name][1]\n n_t = []\n n_t_left = []\n for treatment_group in currentNodeSummary:\n if treatment_group != control_name:\n n_t.append(currentNodeSummary[treatment_group][1])\n if treatment_group in leftNodeSummary:\n n_t_left.append(leftNodeSummary[treatment_group][1])\n else:\n n_t_left.append(0)\n pt_a = 1. * np.sum(n_t_left) / (np.sum(n_t) + 0.1)\n pc_a = 1. * n_c_left / (n_c + 0.1)\n # Normalization Part 1\n norm_res += (\n alpha * self.entropyH(1. * np.sum(n_t) / (np.sum(n_t) + n_c), 1. * n_c / (np.sum(n_t) + n_c))\n * self.kl_divergence(pt_a, pc_a)\n )\n # Normalization Part 2 & 3\n for i in range(len(n_t)):\n pt_a_i = 1. 
* n_t_left[i] / (n_t[i] + 0.1)\n norm_res += (\n (1 - alpha) * self.entropyH(1. * n_t[i] / (n_t[i] + n_c), 1. * n_c / (n_t[i] + n_c))\n * self.kl_divergence(1. * pt_a_i, pc_a)\n )\n norm_res += (1. * n_t[i] / (np.sum(n_t) + n_c) * self.entropyH(pt_a_i))\n # Normalization Part 4\n norm_res += 1. * n_c/(np.sum(n_t) + n_c) * self.entropyH(pc_a)\n\n # Normalization Part 5\n norm_res += 0.5\n return norm_res\n\n def tree_node_summary(self, treatment, y, min_samples_treatment=10, n_reg=100, parentNodeSummary=None):\n '''\n Tree node summary statistics.\n\n Args\n ----\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n min_samples_treatment: int, optional (default=10)\n The minimum number of samples required of the experiment group t be split at a leaf node.\n n_reg : int, optional (default=10)\n The regularization parameter defined in Rzepakowski et al. 2012,\n the weight (in terms of sample size) of the parent node influence\n on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.\n parentNodeSummary : dictionary\n Node summary statistics of the parent tree node.\n\n Returns\n -------\n nodeSummary : dictionary\n The node summary of the current tree node.\n '''\n # returns {treatment_group: p(1)}\n results = self.group_uniqueCounts(treatment, y)\n # node Summary: {treatment_group: [p(1), size]}\n nodeSummary = {}\n # iterate treatment group\n for r in results:\n n1 = results[r][1]\n ntot = results[r][0] + n1\n if parentNodeSummary is None:\n y_mean = n1 / ntot\n elif ntot > min_samples_treatment:\n y_mean = (n1 + parentNodeSummary[r][0] * n_reg) / (ntot + n_reg)\n else:\n y_mean = parentNodeSummary[r][0]\n\n nodeSummary[r] = [y_mean, ntot]\n\n return nodeSummary\n\n def uplift_classification_results(self, treatment, y):\n '''\n Classification probability for each treatment in the tree node.\n\n Args\n ----\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n\n Returns\n -------\n res : dictionary\n The probability of 1 in each treatment in the tree node.\n '''\n results = self.group_uniqueCounts(treatment, y)\n res = {}\n for r in results:\n p = float(results[r][1]) / (results[r][0] + results[r][1])\n res[r] = round(p, 6)\n return res\n\n def growDecisionTreeFrom(self, X, treatment, y, evaluationFunction, max_depth=10,\n min_samples_leaf=100, depth=1,\n min_samples_treatment=10, n_reg=100,\n parentNodeSummary=None):\n '''\n Train the uplift decision tree.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n evaluationFunction : string\n Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.\n max_depth: int, optional (default=10)\n The maximum depth of the tree.\n min_samples_leaf: int, optional (default=100)\n The minimum number of samples required to be split at a leaf node.\n depth : int, optional (default = 1)\n The current depth.\n min_samples_treatment: int, optional (default=10)\n The minimum number of samples required of the experiment group to be split at a leaf node.\n n_reg: int, 
optional (default=10)\n The regularization parameter defined in Rzepakowski et al. 2012,\n the weight (in terms of sample size) of the parent node influence\n on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.\n parentNodeSummary : dictionary, optional (default = None)\n Node summary statistics of the parent tree node.\n\n Returns\n -------\n object of DecisionTree class\n '''\n\n if len(X) == 0:\n return DecisionTree()\n\n # Current Node Info and Summary\n currentNodeSummary = self.tree_node_summary(treatment, y,\n min_samples_treatment=min_samples_treatment,\n n_reg=n_reg,\n parentNodeSummary=parentNodeSummary)\n if evaluationFunction == self.evaluate_CTS:\n currentScore = evaluationFunction(currentNodeSummary)\n else:\n currentScore = evaluationFunction(currentNodeSummary, control_name=self.control_name)\n\n # Prune Stats\n maxAbsDiff = 0\n maxDiff = -1.\n bestTreatment = self.control_name\n suboptTreatment = self.control_name\n maxDiffTreatment = self.control_name\n maxDiffSign = 0\n for treatment_group in currentNodeSummary:\n if treatment_group != self.control_name:\n diff = (currentNodeSummary[treatment_group][0]\n - currentNodeSummary[self.control_name][0])\n if abs(diff) >= maxAbsDiff:\n maxDiffTreatment = treatment_group\n maxDiffSign = np.sign(diff)\n maxAbsDiff = abs(diff)\n if diff >= maxDiff:\n maxDiff = diff\n suboptTreatment = treatment_group\n if diff > 0:\n bestTreatment = treatment_group\n if maxDiff > 0:\n pt = currentNodeSummary[bestTreatment][0]\n nt = currentNodeSummary[bestTreatment][1]\n pc = currentNodeSummary[self.control_name][0]\n nc = currentNodeSummary[self.control_name][1]\n p_value = (1. - stats.norm.cdf((pt - pc) / np.sqrt(pt * (1 - pt) / nt + pc * (1 - pc) / nc))) * 2\n else:\n pt = currentNodeSummary[suboptTreatment][0]\n nt = currentNodeSummary[suboptTreatment][1]\n pc = currentNodeSummary[self.control_name][0]\n nc = currentNodeSummary[self.control_name][1]\n p_value = (1. 
- stats.norm.cdf((pc - pt) / np.sqrt(pt * (1 - pt) / nt + pc * (1 - pc) / nc))) * 2\n upliftScore = [maxDiff, p_value]\n\n bestGain = 0.0\n bestAttribute = None\n\n # last column is the result/target column, 2nd to the last is the treatment group\n columnCount = X.shape[1]\n if (self.max_features and self.max_features > 0 and self.max_features <= columnCount):\n max_features = self.max_features\n else:\n max_features = columnCount\n\n for col in list(np.random.choice(a=range(columnCount), size=max_features, replace=False)):\n columnValues = X[:, col]\n # unique values\n lsUnique = np.unique(columnValues)\n\n if (isinstance(lsUnique[0], int) or\n isinstance(lsUnique[0], float)):\n if len(lsUnique) > 10:\n lspercentile = np.percentile(columnValues, [3, 5, 10, 20, 30, 50, 70, 80, 90, 95, 97])\n else:\n lspercentile = np.percentile(lsUnique, [10, 50, 90])\n lsUnique = np.unique(lspercentile)\n\n for value in lsUnique:\n X_l, X_r, w_l, w_r, y_l, y_r = self.divideSet(X, treatment, y, col, value)\n # check the split validity on min_samples_leaf 372\n if (len(X_l) < min_samples_leaf or len(X_r) < min_samples_leaf):\n continue\n # summarize notes\n # Gain -- Entropy or Gini\n p = float(len(X_l)) / len(X)\n leftNodeSummary = self.tree_node_summary(w_l, y_l,\n min_samples_treatment=min_samples_treatment,\n n_reg=n_reg,\n parentNodeSummary=currentNodeSummary)\n\n rightNodeSummary = self.tree_node_summary(w_r, y_r,\n min_samples_treatment=min_samples_treatment,\n n_reg=n_reg,\n parentNodeSummary=currentNodeSummary)\n\n # check the split validity on min_samples_treatment\n if set(leftNodeSummary.keys()) != set(rightNodeSummary.keys()):\n continue\n node_mst = 10**8\n for ti in leftNodeSummary:\n node_mst = np.min([node_mst, leftNodeSummary[ti][1]])\n node_mst = np.min([node_mst, rightNodeSummary[ti][1]])\n if node_mst < min_samples_treatment:\n continue\n # evaluate the split\n\n if evaluationFunction == self.evaluate_CTS:\n leftScore1 = evaluationFunction(leftNodeSummary)\n rightScore2 = evaluationFunction(rightNodeSummary)\n gain = (currentScore - p * leftScore1 - (1 - p) * rightScore2)\n gain_for_imp = (len(X) * currentScore - len(X_l) * leftScore1 - len(X_r) * rightScore2)\n else:\n if (self.control_name in leftNodeSummary and\n self.control_name in rightNodeSummary):\n leftScore1 = evaluationFunction(leftNodeSummary, control_name=self.control_name)\n rightScore2 = evaluationFunction(rightNodeSummary, control_name=self.control_name)\n gain = (p * leftScore1 + (1 - p) * rightScore2 - currentScore)\n gain_for_imp = (len(X_l) * leftScore1 + len(X_r) * rightScore2 - len(X) * currentScore)\n if self.normalization:\n norm_factor = self.normI(currentNodeSummary,\n leftNodeSummary,\n rightNodeSummary,\n self.control_name,\n alpha=0.9)\n else:\n norm_factor = 1\n gain = gain / norm_factor\n else:\n gain = 0\n if (gain > bestGain and len(X_l) > min_samples_leaf and len(X_r) > min_samples_leaf):\n bestGain = gain\n bestAttribute = (col, value)\n best_set_left = [X_l, w_l, y_l]\n best_set_right = [X_r, w_r, y_r]\n self.feature_imp_dict[bestAttribute[0]] += gain_for_imp\n\n dcY = {'impurity': '%.3f' % currentScore, 'samples': '%d' % len(X)}\n # Add treatment size\n dcY['group_size'] = ''\n for treatment_group in currentNodeSummary:\n dcY['group_size'] += ' ' + treatment_group + ': ' + str(currentNodeSummary[treatment_group][1])\n dcY['upliftScore'] = [round(upliftScore[0], 4), round(upliftScore[1], 4)]\n dcY['matchScore'] = round(upliftScore[0], 4)\n\n if bestGain > 0 and depth < max_depth:\n trueBranch = 
self.growDecisionTreeFrom(\n *best_set_left, evaluationFunction, max_depth, min_samples_leaf,\n depth + 1, min_samples_treatment=min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=currentNodeSummary\n )\n falseBranch = self.growDecisionTreeFrom(\n *best_set_right, evaluationFunction, max_depth, min_samples_leaf,\n depth + 1, min_samples_treatment=min_samples_treatment,\n n_reg=n_reg, parentNodeSummary=currentNodeSummary\n )\n\n return DecisionTree(\n col=bestAttribute[0], value=bestAttribute[1],\n trueBranch=trueBranch, falseBranch=falseBranch, summary=dcY,\n maxDiffTreatment=maxDiffTreatment, maxDiffSign=maxDiffSign,\n nodeSummary=currentNodeSummary,\n backupResults=self.uplift_classification_results(treatment, y),\n bestTreatment=bestTreatment, upliftScore=upliftScore\n )\n else:\n if evaluationFunction == self.evaluate_CTS:\n return DecisionTree(\n results=self.uplift_classification_results(treatment, y),\n summary=dcY, nodeSummary=currentNodeSummary,\n bestTreatment=bestTreatment, upliftScore=upliftScore\n )\n else:\n return DecisionTree(\n results=self.uplift_classification_results(treatment, y),\n summary=dcY, maxDiffTreatment=maxDiffTreatment,\n maxDiffSign=maxDiffSign, nodeSummary=currentNodeSummary,\n bestTreatment=bestTreatment, upliftScore=upliftScore\n )\n\n @staticmethod\n def classify(observations, tree, dataMissing=False):\n '''\n Classifies (prediction) the observations according to the tree.\n\n Args\n ----\n observations : list of list\n The internal data format for the training data (combining X, Y, treatment).\n\n dataMissing: boolean, optional (default = False)\n An indicator for if data are missing or not.\n\n Returns\n -------\n tree.results, tree.upliftScore :\n The results in the leaf node.\n '''\n\n def classifyWithoutMissingData(observations, tree):\n '''\n Classifies (prediction) the observations according to the tree, assuming without missing data.\n\n Args\n ----\n observations : list of list\n The internal data format for the training data (combining X, Y, treatment).\n\n Returns\n -------\n tree.results, tree.upliftScore :\n The results in the leaf node.\n '''\n if tree.results is not None: # leaf\n return tree.results, tree.upliftScore\n else:\n v = observations[tree.col]\n branch = None\n if isinstance(v, int) or isinstance(v, float):\n if v >= tree.value:\n branch = tree.trueBranch\n else:\n branch = tree.falseBranch\n else:\n if v == tree.value:\n branch = tree.trueBranch\n else:\n branch = tree.falseBranch\n return classifyWithoutMissingData(observations, branch)\n\n def classifyWithMissingData(observations, tree):\n '''\n Classifies (prediction) the observations according to the tree, assuming with missing data.\n\n Args\n ----\n observations : list of list\n The internal data format for the training data (combining X, Y, treatment).\n\n Returns\n -------\n tree.results, tree.upliftScore :\n The results in the leaf node.\n '''\n if tree.results is not None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n if v is None:\n tr = classifyWithMissingData(observations, tree.trueBranch)\n fr = classifyWithMissingData(observations, tree.falseBranch)\n tcount = sum(tr.values())\n fcount = sum(fr.values())\n tw = float(tcount) / (tcount + fcount)\n fw = float(fcount) / (tcount + fcount)\n\n # Problem description: http://blog.ludovf.net/python-collections-defaultdict/\n result = defaultdict(int)\n for k, v in tr.items():\n result[k] += v * tw\n for k, v in fr.items():\n result[k] += v * fw\n return dict(result)\n else:\n branch = None\n if 
isinstance(v, int) or isinstance(v, float):\n if v >= tree.value:\n branch = tree.trueBranch\n else:\n branch = tree.falseBranch\n else:\n if v == tree.value:\n branch = tree.trueBranch\n else:\n branch = tree.falseBranch\n return classifyWithMissingData(observations, branch)\n\n # function body\n if dataMissing:\n return classifyWithMissingData(observations, tree)\n else:\n return classifyWithoutMissingData(observations, tree)\n\n\n# Uplift Random Forests\nclass UpliftRandomForestClassifier:\n \"\"\" Uplift Random Forest for Classification Task.\n\n Parameters\n ----------\n n_estimators : integer, optional (default=10)\n The number of trees in the uplift random forest.\n\n evaluationFunction : string\n Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.\n\n max_features: int, optional (default=10)\n The number of features to consider when looking for the best split.\n\n random_state: int, optional (default=2019)\n The seed used by the random number generator.\n\n max_depth: int, optional (default=5)\n The maximum depth of the tree.\n\n min_samples_leaf: int, optional (default=100)\n The minimum number of samples required to be split at a leaf node.\n\n min_samples_treatment: int, optional (default=10)\n The minimum number of samples required of the experiment group to be split at a leaf node.\n\n n_reg: int, optional (default=10)\n The regularization parameter defined in Rzepakowski et al. 2012, the\n weight (in terms of sample size) of the parent node influence on the\n child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.\n\n control_name: string\n The name of the control group (other experiment groups will be regarded as treatment groups)\n\n normalization: boolean, optional (default=True)\n The normalization factor defined in Rzepakowski et al. 2012,\n correcting for tests with large number of splits and imbalanced\n treatment and control splits\n \n n_jobs: int, optional (default=-1)\n The parallelization parameter to define how many parallel jobs need to be created. 
\n This is passed on to joblib library for parallelizing uplift-tree creation.\n\n Outputs\n ----------\n df_res: pandas dataframe\n A user-level results dataframe containing the estimated individual treatment effect.\n \"\"\"\n def __init__(self,\n n_estimators=10,\n max_features=10,\n random_state=2019,\n max_depth=5,\n min_samples_leaf=100,\n min_samples_treatment=10,\n n_reg=10,\n evaluationFunction=None,\n control_name=None,\n normalization=True,\n n_jobs=-1):\n \"\"\"\n Initialize the UpliftRandomForestClassifier class.\n \"\"\"\n self.classes_ = {}\n self.n_estimators = n_estimators\n self.max_features = max_features\n self.random_state = random_state\n self.max_depth = max_depth\n self.min_samples_leaf = min_samples_leaf\n self.min_samples_treatment = min_samples_treatment\n self.n_reg = n_reg\n self.evaluationFunction = evaluationFunction\n self.control_name = control_name\n self.n_jobs = n_jobs\n\n # Create forest\n self.uplift_forest = []\n for _ in range(n_estimators):\n uplift_tree = UpliftTreeClassifier(\n max_features=self.max_features, max_depth=self.max_depth,\n min_samples_leaf=self.min_samples_leaf,\n min_samples_treatment=self.min_samples_treatment,\n n_reg=self.n_reg,\n evaluationFunction=self.evaluationFunction,\n control_name=self.control_name,\n normalization=normalization)\n\n self.uplift_forest.append(uplift_tree)\n\n if self.n_jobs == -1:\n self.n_jobs = mp.cpu_count()\n\n def fit(self, X, treatment, y):\n \"\"\"\n Fit the UpliftRandomForestClassifier.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n\n treatment : array-like, shape = [num_samples]\n An array containing the treatment group for each unit.\n\n y : array-like, shape = [num_samples]\n An array containing the outcome of interest for each unit.\n \"\"\"\n np.random.seed(self.random_state)\n\n # Get treatment group keys\n treatment_group_keys = list(set(treatment))\n treatment_group_keys.remove(self.control_name)\n treatment_group_keys.sort()\n self.classes_ = {}\n for i, treatment_group_key in enumerate(treatment_group_keys):\n self.classes_[treatment_group_key] = i\n\n self.uplift_forest = (\n Parallel(n_jobs=self.n_jobs)\n (delayed(self.bootstrap)(X, treatment, y, tree) for tree in self.uplift_forest)\n )\n\n all_importances = [tree.feature_importances_ for tree in self.uplift_forest]\n self.feature_importances_ = np.mean(all_importances, axis=0)\n self.feature_importances_ /= self.feature_importances_.sum() # normalize to add to 1\n\n @staticmethod\n def bootstrap(X, treatment, y, tree):\n bt_index = np.random.choice(len(X), len(X))\n x_train_bt = X[bt_index]\n y_train_bt = y[bt_index]\n treatment_train_bt = treatment[bt_index]\n tree.fit(X=x_train_bt, treatment=treatment_train_bt, y=y_train_bt)\n return tree\n\n @ignore_warnings(category=FutureWarning)\n def predict(self, X, full_output=False):\n '''\n Returns the recommended treatment group and predicted optimal\n probability conditional on using the recommended treatment group.\n\n Args\n ----\n X : ndarray, shape = [num_samples, num_features]\n An ndarray of the covariates used to train the uplift model.\n\n full_output : bool, optional (default=False)\n Whether the UpliftTree algorithm returns upliftScores, pred_nodes\n alongside the recommended treatment group and p_hat in the treatment group.\n\n Returns\n -------\n y_pred_list : ndarray, shape = (num_samples, num_treatments])\n An ndarray containing the predicted delta in each treatment group,\n the best 
treatment group and the maximum delta.\n \n df_res : DataFrame, shape = [num_samples, (num_treatments + 1)]\n If full_output, a DataFrame containing the predicted delta in each treatment group,\n the best treatment group and the maximum delta.\n\n '''\n df_res = pd.DataFrame()\n y_pred_ensemble = dict()\n y_pred_list = np.zeros((X.shape[0], len(self.classes_)))\n\n # Make prediction by each tree\n for tree_i in range(len(self.uplift_forest)):\n\n _, _, _, y_pred_full = self.uplift_forest[tree_i].predict(X=X, full_output=True)\n\n if tree_i == 0:\n for treatment_group in y_pred_full:\n y_pred_ensemble[treatment_group] = (\n np.array(y_pred_full[treatment_group]) / len(self.uplift_forest)\n )\n else:\n for treatment_group in y_pred_full:\n y_pred_ensemble[treatment_group] = (\n np.array(y_pred_ensemble[treatment_group])\n + np.array(y_pred_full[treatment_group]) / len(self.uplift_forest)\n )\n\n # Summarize results into dataframe\n for treatment_group in y_pred_ensemble:\n df_res[treatment_group] = y_pred_ensemble[treatment_group]\n\n df_res['recommended_treatment'] = df_res.apply(np.argmax, axis=1)\n\n # Calculate delta\n delta_cols = []\n for treatment_group in y_pred_ensemble:\n if treatment_group != self.control_name:\n delta_cols.append('delta_%s' % (treatment_group))\n df_res['delta_%s' % (treatment_group)] = df_res[treatment_group] - df_res[self.control_name]\n # Add deltas to results list\n y_pred_list[:, self.classes_[treatment_group]] = df_res['delta_%s' % (treatment_group)].values\n df_res['max_delta'] = df_res[delta_cols].max(axis=1)\n\n if full_output:\n return df_res\n else:\n return y_pred_list\n"
] | [
[
"numpy.log",
"numpy.sqrt",
"numpy.random.seed",
"numpy.clip",
"numpy.unique",
"numpy.min",
"pandas.DataFrame",
"numpy.percentile",
"numpy.sign",
"numpy.mean",
"sklearn.utils.testing.ignore_warnings",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
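The uplift-tree record above ranks candidate splits with distribution-divergence criteria ('KL', 'ED', 'Chi', 'CTS') computed from per-group conversion rates. Below is a minimal, standalone sketch of the binary KL-divergence split gain described in its docstrings, assuming a simple two-group node summary of the form {group: conversion_rate}; the helper names and example numbers are illustrative, not taken from the repository.

    import numpy as np

    def kl_divergence(pk, qk, eps=1e-6):
        # Binary KL divergence between two conversion rates,
        # pk*log(pk/qk) + (1-pk)*log((1-pk)/(1-qk)), clipped for stability.
        pk = np.clip(pk, eps, 1 - eps)
        qk = np.clip(qk, eps, 1 - eps)
        return pk * np.log(pk / qk) + (1 - pk) * np.log((1 - pk) / (1 - qk))

    def node_score(summary, control='control'):
        # Sum of divergences between each treatment group and the control group.
        return sum(kl_divergence(p, summary[control])
                   for group, p in summary.items() if group != control)

    def kl_split_gain(parent, left, right, n_left, n_right):
        # gain = size-weighted child scores minus parent score, as in the split search.
        p = n_left / (n_left + n_right)
        return p * node_score(left) + (1 - p) * node_score(right) - node_score(parent)

    parent = {'control': 0.10, 'treatment': 0.12}
    left   = {'control': 0.10, 'treatment': 0.20}   # split isolates strong responders
    right  = {'control': 0.10, 'treatment': 0.05}
    print(kl_split_gain(parent, left, right, n_left=500, n_right=500))  # positive gain

The full classifier in the record additionally divides this gain by a normalization factor (normI) and shrinks small child summaries toward the parent via n_reg; those parts are omitted from the sketch.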
Open-Speech-EkStep/speech_music_classification | [
"0e394ec0f0d0ac7a177171f7ac7c254b1db38c2a"
] | [
"check.py"
] | [
"import torch\nfrom torch.utils.data import DataLoader\n\nfrom configs.train_config import SpectConfig\nfrom loader.data_loader import SpectrogramDataset, collate_fn\nfrom models.model import Conformer, get_conv_output_sizes\n\nif __name__ == \"__main__\":\n spect_cfg = SpectConfig()\n songs_dset = SpectrogramDataset('/home/soma/song_speech/speech', 1, spect_cfg)\n print(len(songs_dset))\n feat, label = songs_dset[1000]\n print(feat.shape, label) # [257, T]\n\n model = Conformer()\n batch = feat.unsqueeze(0)\n print(batch.shape)\n\n # out = model(batch)\n # print(\"out shape: \", out.shape)\n\n lengths = get_conv_output_sizes([feat.shape[1]])\n print('conv out lengths: ', lengths)\n\n loader = DataLoader(songs_dset, batch_size=10, collate_fn=collate_fn)\n print('data loader len: ', len(loader))\n\n mini_batch = iter(loader).next()\n out = model(mini_batch[0], mini_batch[1], mini_batch[2])\n print('mini batch output ', out.shape)\n "
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
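The check script in the record above batches variable-length spectrograms (shape [257, T]) through a DataLoader with a custom collate_fn and then calls the model with three batch elements. That collate_fn is not included in this record; the sketch below shows one plausible zero-padding collate for [freq, time] features, and the (padded, lengths, labels) batch layout is an assumption inferred from the three-argument model call, not the repository's actual code.

    import torch

    def collate_fn(batch):
        # batch: list of (feat, label) pairs with feat shaped [freq, T] and varying T.
        feats, labels = zip(*batch)
        lengths = torch.tensor([f.shape[1] for f in feats], dtype=torch.long)
        padded = torch.zeros(len(feats), feats[0].shape[0], int(lengths.max()))
        for i, f in enumerate(feats):
            padded[i, :, : f.shape[1]] = f        # right-pad the time axis with zeros
        return padded, lengths, torch.tensor(labels, dtype=torch.long)

As a usage note, next(iter(loader)) is the version-independent way to pull a single mini-batch, whereas the script relies on the iterator's .next() method.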
buzem/inzpeech | [
"9e03b876bb3fd1956774c84683cd02661d650c81",
"9e03b876bb3fd1956774c84683cd02661d650c81"
] | [
"models/model_keras_params.py",
"preprocessing/preprocessed_feature_extraction.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.layers import Dropout, GlobalMaxPooling2D\nfrom tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling1D, BatchNormalization ,Reshape\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.layers import Activation, Layer\nfrom tensorflow.keras.initializers import GlorotUniform\nimport tensorflow.keras.backend as K\n\nclass SelfAttention(Layer):\n def __init__(self, \n n_hop,\n hidden_dim,\n nc=256,\n penalty=1.0,\n return_attention=False,\n kernel_initializer=GlorotUniform(),\n kernel_regularizer=None,\n kernel_constraint=None,\n **kwargs):\n self.n_hop = n_hop\n self.hidden_dim = hidden_dim\n self.nc=nc\n self.penalty = penalty\n self.kernel_initializer = GlorotUniform() # tf.keras.initializers.get(kernel_initializer)\n self.kernel_regularizer = None #tf.keras.regularizers.get(kernel_regularizer)\n self.kernel_constraint = None #tf.keras.constraints.get(kernel_constraint)\n self.return_attention = return_attention\n super(SelfAttention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # input_shape: (None, Sequence_size, Sequence_hidden_dim)\n assert len(input_shape) >= 3\n batch_size, T, nh = input_shape\n \n self.Ws1 = self.add_weight(shape=(self.hidden_dim, self.nc),\n initializer=self.kernel_initializer,\n name='SelfAttention-Ws1',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n \n self.Ws2 = self.add_weight(shape=(self.nc, self.n_hop), \n initializer=self.kernel_initializer,\n name='SelfAttention-Ws2',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n \n super(SelfAttention, self).build(input_shape)\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 3\n assert input_shape[-1]\n batch_size, sequence_size, sequence_hidden_dim = input_shape\n output_shape = tuple([batch_size, self.n_hop, sequence_hidden_dim])\n \n if self.return_attention:\n attention_shape = tuple([batch_size, self.n_hop, sequence_size])\n return [output_shape, attention_shape]\n else: return output_shape\n\n\n\n \n def _frobenius_norm(self, inputs):\n outputs = K.sqrt(K.sum(K.square(inputs)))\n return outputs \n\n def call(self, inputs):\n shape=inputs.shape\n H=inputs\n x = K.tanh(tf.matmul(H,self.Ws1))\n x = tf.matmul(x,self.Ws2)\n A = K.softmax(x,axis=0) # A = softmax(dot(Ws2, d1))\n At=K.permute_dimensions(A,(0,2,1))\n E = tf.matmul(At,H)\n \n return E\n \n def get_config(self):\n\n config = super().get_config().copy()\n config.update({\n 'n_hop': self.n_hop,\n 'hidden_dim': self.hidden_dim,\n 'nc': self.nc,\n 'penalty': self.penalty,\n 'kernel_initializer': self.kernel_initializer,\n 'kernel_regularizer': self.kernel_regularizer,\n 'kernel_constraint': self.kernel_constraint,\n 'return_attention': self.return_attention,\n })\n return config\n\n\ndef vgg_att(n_class):\n inputs = Input(shape=(300,40,1))\n x=Conv2D(64, (3, 3), padding='same', name='block1_conv1',activation='relu')(inputs)\n x=Conv2D(64, (3, 3), padding='same', name='block1_conv2',activation='relu')(x)\n x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)\n x=BatchNormalization()(x)\n x=Dropout(0.2)(x)\n\n\n print(x.shape)\n\n x=Conv2D(128, (3, 3), padding='same', name='block2_conv1',activation='relu')(x)\n x=Conv2D(128, (3, 3), padding='same', 
name='block2_conv2',activation='relu')(x)\n x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)\n x=BatchNormalization()(x)\n x=Dropout(0.2)(x)\n print(x.shape)\n\n\n x=Conv2D(256, (3, 3), padding='same', name='block3_conv1',activation='relu')(x)\n x=Conv2D(256, (3, 3), padding='same', name='block3_conv2',activation='relu')(x)\n x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2),padding=\"same\")(x)\n x=BatchNormalization()(x)\n x=Dropout(0.2)(x)\n print(x.shape)\n\n x=Conv2D(512, (3, 3), padding='same', name='block4_conv1',activation='relu')(x)\n x=Conv2D(512, (3, 3), padding='same', name='block4_conv2',activation='relu')(x)\n x=MaxPooling2D(pool_size = (2, 2), strides = (2, 2),padding=\"same\")(x)\n x=BatchNormalization()(x)\n x=Dropout(0.2)(x)\n print(x.shape)\n\n att=SelfAttention(n_hop=4,hidden_dim=1536)\n x=Reshape((x.shape[1], x.shape[2]*x.shape[3]))(x)\n print(\"after reshape\")\n print(x.shape)\n x=att(x)\n print(\"after attention\")\n print(x.shape)\n x=AveragePooling1D(pool_size=4,data_format=\"channels_last\")(x)\n #x = GlobalMaxPooling2D()(x)\n print(\"after avgpool\")\n print(x.shape)\n x = Flatten()(x)\n x = Dense(256, activation = 'relu')(x)\n x=Dropout(0.4)(x)\n output = Dense(n_class,activation = 'softmax')(x)\n model = Model(inputs=inputs, outputs=output)\n\n model.compile(loss='categorical_crossentropy',optimizer ='adam')#need hyperparam-tuning \n model.summary()\n return model\n\n\n\n\n\n\n\n\n",
"import os\nimport glob\nimport torch\nimport pickle\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import dct\nfrom torch.utils.data import random_split, Dataset, DataLoader\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\ndef display_spectrogram(spectrogram):\n librosa.display.specshow(spectrogram.transpose(), hop_length=220.5,y_axis='mel', fmax=8000, x_axis='s')\n #getting 7 second in time axis, it should be 3, why???\n plt.title('Mel Spectrogram')\n plt.colorbar(format='%+2.0f dB')\n plt.show()\n\ndef logmel_filterbanks(filename,pre_emphasis=0.97,frame_size = 0.025,frame_stride = 0.01,nfilt=40,normalize=True):\n target_len = 66150\n \n signal, sample_rate = librosa.load(filename,duration=3)\n\n while(signal.shape[0] != target_len):\n signal = np.append(signal, signal[:target_len - signal.shape[0]])\n \n #Pre-Emphasis step\n emphasized_signal = np.empty(shape=len(signal)+1)\n emphasized_signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])\n \n #Framing\n \n frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate # Convert from seconds to samples\n signal_length = len(emphasized_signal)\n frame_length = int(round(frame_length))\n frame_step = int(round(frame_step))\n num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step)) + 1 # Make sure that we have at least 1 frame\n\n pad_signal_length = num_frames * frame_step + frame_length\n z = np.zeros((pad_signal_length - signal_length))\n pad_signal = np.append(emphasized_signal, z) # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal\n\n indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T\n frames = pad_signal[indices.astype(np.int32, copy=False)]\n \n #Hamming-Window\n frames *= np.hamming(frame_length)\n \n #FFT\n NFFT = 512\n mag_frames = np.absolute(np.fft.rfft(frames, NFFT)) # Magnitude of the FFT\n pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))\n \n #Filter-Bank\n low_freq_mel = 0\n high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700)) # Convert Hz to Mel\n mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale\n hz_points = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz\n bin = np.floor((NFFT + 1) * hz_points / sample_rate)\n\n fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))\n for m in range(1, nfilt + 1):\n f_m_minus = int(bin[m - 1]) # left\n f_m = int(bin[m]) # center\n f_m_plus = int(bin[m + 1]) # right\n\n for k in range(f_m_minus, f_m):\n fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])\n for k in range(f_m, f_m_plus):\n fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])\n filter_banks = np.dot(pow_frames, fbank.T)\n filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # Numerical Stability\n filter_banks = 20 * np.log10(filter_banks) # dB\n\n if normalize==True:\n filter_banks = (filter_banks - filter_banks.mean()) / (filter_banks.max() - filter_banks.min())\n \n return filter_banks\n\ndef mfcc(filter_banks,num_ceps=13):\n return dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1 : (num_ceps + 1)]\n\ndataset_dir = '/home/bbekci/datasets/vctk/wav48_silence_trimmed'\ndata = []\nc2i, i2c = {}, {}\nfor indx, cla 
in enumerate(os.listdir(dataset_dir)):\n main_path = dataset_dir + '/' + cla + '/*.flac'\n for file_path in glob.glob(main_path):\n data.append((file_path, cla))\n c2i[cla] = indx\n i2c[indx] = cla\n\n\nwith open('preprocessed_vctk.pkl', 'wb') as pickle_file:\n result=[]\n for i in range(0,len(data)):\n sample = []\n sound_path, class_name = data[i]\n sound_data = logmel_filterbanks(sound_path)\n label = c2i[class_name]\n\n sample = [label, sound_data]\n\n result.append((sample))\n\n pickle.dump(result, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)\n f.close()\n\nclass PreprocessedDataset(Dataset):\n def __init__(self, file_dir):\n self.file_dir = file_dir\n self.lst = 0\n with open(file_dir, 'rb') as pickle_load:\n self.lst = pickle.load(pickle_load)\n\n def __len__(self):\n return len(self.lst)\n\n def n_class(self):\n return self.lst[-1][0]\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n sound_data = self.lst[idx][1]\n label = self.lst[idx][0]\n\n sample = (sound_data, label)\n\n return sample\n \ndataset_dir = '/home/bbekci/inzpeech/preprocessed_vctk.pkl'\noffset_dict = {}\nmax_epochs = 25\nbatch_size = 256\n\nsound_data = PreprocessedDataset(file_dir=dataset_dir)\n\nn_classes = sound_data.n_class()\n\n\ntrain_data, test_data = random_split(sound_data,\n [int(len(sound_data) * 0.8),\n len(sound_data) - int(len(sound_data) * 0.8)]\n )\n\ntrain_dataset_loader = torch.utils.data.DataLoader(train_data,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4)\n\ntest_dataset_loader = torch.utils.data.DataLoader(test_data,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4)\n\n\n"
] | [
[
"tensorflow.keras.layers.AveragePooling1D",
"tensorflow.matmul",
"tensorflow.keras.backend.softmax",
"tensorflow.keras.backend.permute_dimensions",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.backend.square",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.initializers.GlorotUniform",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
],
[
"numpy.dot",
"numpy.linspace",
"torch.utils.data.DataLoader",
"numpy.hamming",
"torch.cuda.is_available",
"torch.device",
"scipy.fftpack.dct",
"numpy.arange",
"numpy.finfo",
"numpy.zeros",
"matplotlib.pyplot.title",
"torch.is_tensor",
"numpy.append",
"numpy.log10",
"numpy.floor",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.fft.rfft",
"matplotlib.pyplot.colorbar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.21",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
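The SelfAttention layer in the first file of the record above implements structured self-attentive pooling: attention weights A = softmax(tanh(H Ws1) Ws2) are computed per frame, and the n_hop weighted summaries E = A^T H are then average-pooled by vgg_att into one embedding. Below is a minimal NumPy sketch of that forward pass for a single utterance; the softmax here is taken over the time axis (the standard formulation), and the 19 x 1536 shape matches the Reshape step in vgg_att for its 300 x 40 x 1 input.

    import numpy as np

    def self_attentive_pooling(H, Ws1, Ws2):
        # H: [T, d] frame features; Ws1: [d, nc]; Ws2: [nc, n_hop].
        scores = np.tanh(H @ Ws1) @ Ws2                      # [T, n_hop]
        A = np.exp(scores - scores.max(axis=0, keepdims=True))
        A /= A.sum(axis=0, keepdims=True)                    # softmax over time
        E = A.T @ H                                          # [n_hop, d] pooled summaries
        return E, A

    rng = np.random.default_rng(0)
    T, d, nc, n_hop = 19, 1536, 256, 4
    H = rng.standard_normal((T, d))
    E, A = self_attentive_pooling(H,
                                  0.01 * rng.standard_normal((d, nc)),
                                  0.01 * rng.standard_normal((nc, n_hop)))
    print(E.shape, A.sum(axis=0))   # (4, 1536); each hop's weights sum to 1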
latticetower/dr-derks-mutants | [
"5c3ab86137ecb478a3013985172d160568a86b13"
] | [
"experiments/train_v1.py"
] | [
"\"\"\"Basic 'network'.\n\nThis code is based on example\nhttp://docs.gym.derkgame.com/#neural-network-example\n\"\"\"\nimport gym\nfrom gym_derk.envs import DerkEnv\nfrom gym_derk import ObservationKeys\nimport math\nimport numpy as np\nimport os.path\n\nfrom models.network_v1 import Network\n\nSEED = 137\nnp.random.seed(SEED)\n\nNPZ_FILENAME = \"weights/model_v2.npz\"\nREWARD_FUNCTION = {\n \"damageEnemyStatue\": 4,\n \"damageEnemyUnit\": 2,\n \"killEnemyStatue\": 4,\n \"killEnemyUnit\": 2,\n \"healFriendlyStatue\": 1,\n \"healTeammate1\": 2,\n \"healTeammate2\": 2,\n \"timeSpentHomeBase\": 0,\n \"timeSpentHomeTerritory\": 0,\n \"timeSpentAwayTerritory\": 0,\n \"timeSpentAwayBase\": 0,\n \"damageTaken\": -1,\n \"friendlyFire\": -1,\n \"healEnemy\": -1,\n \"fallDamageTaken\": -10,\n \"statueDamageTaken\": 0,\n \"manualBonus\": 0,\n \"victory\": 100,\n \"loss\": -100,\n \"tie\": 0,\n \"teamSpirit\": 0.5,\n \"timeScaling\": 0.8,\n}\n\n\nenv = DerkEnv(\n mode=\"train\",\n turbo_mode=True,\n home_team=[\n {'primaryColor': '#ff00ff'},\n {'primaryColor': '#00ff00', 'slots': ['Talons', None, None]},\n {'primaryColor': '#ff0000', 'rewardFunction': {'healTeammate1': 1}}\n ],\n away_team=[\n {'primaryColor': '#c0c0c0'},\n {'primaryColor': 'navy', 'slots': ['Talons', None, None]},\n {'primaryColor': 'red', 'rewardFunction': {'healTeammate1': 1}}\n ],\n session_args = {\n \"reward_function\": REWARD_FUNCTION\n }\n)\n\n\nif os.path.exists(NPZ_FILENAME):\n with np.load(NPZ_FILENAME) as data:\n weights = np.asarray(data['weights']).copy()\n biases = np.asarray(data['biases']).copy()\nelse:\n weights = None\n biases = None\n\n\nnetworks = [\n Network(weights, biases) for i in range(env.n_agents)\n]\n\nfor e in range(1): # 20\n observation_n = env.reset()\n while True:\n action_n = [\n networks[i].forward(observation_n[i])\n for i in range(env.n_agents)\n ]\n observation_n, reward_n, done_n, info = env.step(action_n)\n if all(done_n):\n print(\"Episode finished\")\n break\n if env.mode == 'train':\n reward_n = env.total_reward\n print(reward_n)\n top_network_i = np.argmax(reward_n)\n top_network = networks[top_network_i].clone()\n for network in networks:\n network.copy_and_mutate(top_network)\n print(f'Round {e} top reward', reward_n[top_network_i])\n np.savez_compressed(\n NPZ_FILENAME,\n weights=top_network.weights,\n biases=top_network.biases\n )\nenv.close()\n"
] | [
[
"numpy.random.seed",
"numpy.asarray",
"numpy.savez_compressed",
"numpy.argmax",
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
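The training script in the record above is a simple evolutionary loop over the Derk environment: every agent plays an episode, the network with the highest total reward is picked with np.argmax, cloned, and all other networks copy_and_mutate from it before the weights are saved. The Network class itself (models.network_v1) is not part of this record; the sketch below shows one common way such clone / copy_and_mutate methods are written for a single linear policy. The Gaussian-noise mutation, the mutation_rate name, and the input/output sizes are assumptions, not the repository's implementation.

    import numpy as np

    class Network:
        def __init__(self, weights=None, biases=None, n_inputs=64, n_actions=5):
            self.weights = weights if weights is not None else 0.1 * np.random.randn(n_inputs, n_actions)
            self.biases = biases if biases is not None else np.zeros(n_actions)

        def forward(self, observation):
            # Linear policy: map one observation vector to one action vector.
            return np.asarray(observation) @ self.weights + self.biases

        def clone(self):
            return Network(self.weights.copy(), self.biases.copy())

        def copy_and_mutate(self, other, mutation_rate=0.1):
            # Copy the best network's parameters, then perturb them with Gaussian noise.
            self.weights = other.weights + mutation_rate * np.random.randn(*other.weights.shape)
            self.biases = other.biases + mutation_rate * np.random.randn(*other.biases.shape)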
TingFree/AI_Conference_Timeline | [
"788dd5a4a8cd5009d8de3a1306ddb38f044cc7c5"
] | [
"codes/nlper/models/text_clf.py"
] | [
"r\"\"\"\n各种文本分类模型的实现\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.optim import AdamW\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoModel\nfrom transformers.models.bert.modeling_bert import BertModel\nfrom transformers import DataCollatorWithPadding, get_linear_schedule_with_warmup\nfrom codes.nlper.modules import MLP\nfrom codes.nlper.utils import DatasetCLF, Dict2Obj\nfrom codes.nlper.utils import load_nlp_data, save_data\nfrom codes.nlper import mini_pytorch_lightning as mpl\n\n\nclass LightningCLF(mpl.StandardModel):\n def __init__(self, model, tokenizer, configs: Dict2Obj, metrics, convert_fn):\n super(LightningCLF, self).__init__(configs, metrics)\n self.configs = configs\n self.aux_configs = Dict2Obj()\n self.metrics = metrics\n self.model = model\n self.tokenizer = tokenizer\n self.convert_fn = convert_fn\n\n def training_step(self, batch, batch_idx):\n labels = batch['labels']\n logits = self.model(**batch)\n loss = F.cross_entropy(logits.view(-1, self.configs.num_class),\n labels.view(-1))\n return loss,\n\n def validation_step(self, batch, batch_idx):\n labels = batch['labels']\n logits = self.model(**batch)\n loss = F.cross_entropy(logits.view(-1, self.configs.num_class),\n labels.view(-1))\n batch_preds = logits.argmax(1).cpu().tolist()\n batch_golds = labels.cpu().tolist()\n return loss, batch_preds, batch_golds\n\n def validation_epoch_end(self, outputs):\n epoch_preds, epoch_golds = [], []\n for (batch_loss, batch_preds, batch_golds) in outputs:\n epoch_preds += batch_preds\n epoch_golds += batch_golds\n self.metrics.scores(epoch_golds, epoch_preds)\n self.metrics.print_values()\n return self.metrics.return_target_score()\n\n def test_step(self, batch, batch_idx):\n logits = self.model(**batch)\n # prob, pred\n return F.softmax(logits, dim=-1).cpu().tolist(),\\\n logits.argmax(1).cpu().tolist()\n\n def test_epoch_end(self, outputs):\n probs, preds = [], []\n for (batch_probs, batch_preds) in outputs:\n probs += [' '.join([str(p) for p in prob]) for prob in batch_probs]\n preds += batch_preds\n save_data(probs, os.path.join(self.configs.out_dir, 'test_pred.probs.txt'))\n save_data(preds, os.path.join(self.configs.out_dir, 'test_pred.txt'))\n\n def configure_optimizers(self):\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': self.configs.weight_decay},\n {'params': [p for n, p in self.named_parameters()if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters,\n lr=self.configs.lr)\n scheduler = get_linear_schedule_with_warmup(optimizer,\n self.configs.warmup_steps,\n self.configs.trainer_args.max_epochs * self.aux_configs.num_train_batch)\n return optimizer, scheduler\n\n def prepare_data(self) -> None:\n \"\"\" check & load data, the format of each line is 'text label', separated by tab and 'label'\n must be int, such as 0~num_labels-1\n \"\"\"\n train_file = self.configs.train_file\n val_file = self.configs.val_file\n test_file = self.configs.test_file\n self.collate_fn = DataCollatorWithPadding(tokenizer=self.tokenizer)\n if self.convert_fn:\n self._train_data = self.convert_fn(train_file, load_label=True)\n self._val_data = self.convert_fn(val_file, load_label=True)\n self._test_data = self.convert_fn(test_file, load_label=self.configs.is_eval_test)\n else:\n self._train_data = 
load_nlp_data(train_file, task_name=self.configs.task_name)\n self._val_data = load_nlp_data(val_file, task_name=self.configs.task_name)\n self._test_data = load_nlp_data(test_file, task_name=self.configs.task_name)\n\n def train_dataloader(self):\n self.train_data = DatasetCLF(self._train_data,\n self.tokenizer,\n self.configs.max_len,\n load_label=True)\n return DataLoader(self.train_data,\n batch_size=self.configs.train_batch_size,\n collate_fn=self.collate_fn,\n shuffle=True,\n num_workers=16)\n\n def val_dataloader(self):\n self.val_data = DatasetCLF(self._val_data,\n self.tokenizer,\n self.configs.max_len,\n load_label=True)\n return DataLoader(self.val_data,\n batch_size=self.configs.val_batch_size,\n collate_fn=self.collate_fn,\n num_workers=16)\n\n def test_dataloader(self):\n self.test_data = DatasetCLF(self._test_data,\n self.tokenizer,\n self.configs.max_len,\n load_label=self.configs.is_eval_test)\n return DataLoader(self.test_data,\n batch_size=self.configs.val_batch_size,\n collate_fn=self.collate_fn,\n num_workers=16)\n\n\nclass BertCLF(nn.Module):\n def __init__(self, args):\n super(BertCLF, self).__init__()\n self.bert = AutoModel.from_pretrained(args.pretrained_model)\n self.dropout = nn.Dropout(self.bert.config.hidden_dropout_prob)\n self.clf = MLP([self.bert.config.hidden_size, args.num_class],\n 'tanh',\n dropout=args.dropout)\n\n def forward(self, input_ids, attention_mask, token_type_ids, return_pooler_output=False, **kwargs):\n \"\"\"\n\n :param input_ids:\n :param attention_mask:\n :param token_type_ids:\n :param return_pooler_output: 是否返回最后用于分类的句子表示\n :return:\n \"\"\"\n outputs = self.bert(input_ids, attention_mask, token_type_ids)\n logits = self.clf(outputs[1])\n if return_pooler_output:\n return logits, outputs[1]\n return logits\n"
] | [
[
"torch.nn.Dropout",
"torch.optim.AdamW",
"torch.utils.data.DataLoader",
"torch.nn.functional.softmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shadiakiki1986/garage | [
"095bb5d25b32df1d44b47e99a78a9b01796941d9",
"095bb5d25b32df1d44b47e99a78a9b01796941d9",
"095bb5d25b32df1d44b47e99a78a9b01796941d9",
"095bb5d25b32df1d44b47e99a78a9b01796941d9"
] | [
"garage/replay_buffer/simple_replay_buffer.py",
"examples/tf/example_tensorboard_logger.py",
"garage/tf/regressors/categorical_mlp_regressor.py",
"garage/tf/policies/categorical_gru_policy.py"
] | [
"\"\"\"This module implements a simple replay buffer.\"\"\"\nimport numpy as np\n\nfrom garage.misc.overrides import overrides\nfrom garage.replay_buffer.base import ReplayBuffer\n\n\nclass SimpleReplayBuffer(ReplayBuffer):\n \"\"\"\n This class implements SimpleReplayBuffer.\n\n It uses random batch sample to minimize correlations between samples.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize the data used in SimpleReplayBuffer.\"\"\"\n super(SimpleReplayBuffer, self).__init__(**kwargs)\n\n @overrides\n def sample(self, batch_size):\n \"\"\"Sample a transition of batch_size.\"\"\"\n assert self._n_transitions_stored > batch_size\n buffer = {}\n for key in self._buffer.keys():\n buffer[key] = self._buffer[key][:self._current_size]\n\n # Select which episodes to use\n time_horizon = buffer[\"action\"].shape[1]\n rollout_batch_size = buffer[\"action\"].shape[0]\n episode_idxs = np.random.randint(rollout_batch_size, size=batch_size)\n # Select time steps to use\n t_samples = np.random.randint(time_horizon, size=batch_size)\n\n transitions = {}\n for key in buffer.keys():\n samples = buffer[key][episode_idxs, t_samples].copy()\n transitions[key] = samples.reshape(batch_size, *samples.shape[1:])\n\n assert (transitions[\"action\"].shape[0] == batch_size)\n return transitions\n",
"import tensorflow as tf\n\nfrom garage.misc import logger\n\nlogger.set_tensorboard_dir(\"data/local/histogram_example\")\nN = 400\nfor i in range(N):\n sess = tf.Session()\n sess.__enter__()\n k_val = i / float(N)\n logger.record_histogram_by_type('gamma', key='gamma', alpha=k_val)\n logger.record_histogram_by_type(\n 'normal', key='normal', mean=5 * k_val, stddev=1.0)\n logger.record_histogram_by_type('poisson', key='poisson', lam=k_val)\n logger.record_histogram_by_type(\n 'uniform', key='uniform', maxval=k_val * 10)\n logger.record_tabular(\"app\", k_val)\n logger.record_histogram(\"gass\", k_val)\n logger.dump_tensorboard(step=i)\n",
"import numpy as np\nimport tensorflow as tf\n\nfrom garage.core import Serializable\nfrom garage.misc import logger\nfrom garage.tf.core import LayersPowered\nfrom garage.tf.core import Parameterized\nimport garage.tf.core.layers as L\nfrom garage.tf.core.network import MLP\nfrom garage.tf.distributions import Categorical\nfrom garage.tf.misc import tensor_utils\nfrom garage.tf.optimizers import ConjugateGradientOptimizer\nfrom garage.tf.optimizers import LbfgsOptimizer\n\nNONE = list()\n\n\nclass CategoricalMLPRegressor(LayersPowered, Serializable, Parameterized):\n \"\"\"\n A class for performing regression (or classification, really) by fitting a\n categorical distribution to the outputs. Assumes that the outputs will be\n always a one hot vector.\n \"\"\"\n\n def __init__(\n self,\n input_shape,\n output_dim,\n name=\"CategoricalMLPRegressor\",\n prob_network=None,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=tf.nn.tanh,\n optimizer=None,\n tr_optimizer=None,\n use_trust_region=True,\n step_size=0.01,\n normalize_inputs=True,\n no_initial_trust_region=True,\n ):\n \"\"\"\n :param input_shape: Shape of the input data.\n :param output_dim: Dimension of output.\n :param hidden_sizes: Number of hidden units of each layer of the mean\n network.\n :param hidden_nonlinearity: Non-linearity used for each layer of the\n mean network.\n :param optimizer: Optimizer for minimizing the negative log-likelihood.\n :param use_trust_region: Whether to use trust region constraint.\n :param step_size: KL divergence constraint for each iteration\n \"\"\"\n Parameterized.__init__(self)\n Serializable.quick_init(self, locals())\n\n with tf.variable_scope(name, \"CategoricalMLPRegressor\"):\n if optimizer is None:\n optimizer = LbfgsOptimizer()\n if tr_optimizer is None:\n tr_optimizer = ConjugateGradientOptimizer()\n\n self.output_dim = output_dim\n self.optimizer = optimizer\n self.tr_optimizer = tr_optimizer\n\n self._prob_network_name = \"prob_network\"\n if prob_network is None:\n prob_network = MLP(\n input_shape=input_shape,\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=tf.nn.softmax,\n name=self._prob_network_name)\n\n l_prob = prob_network.output_layer\n\n LayersPowered.__init__(self, [l_prob])\n\n xs_var = prob_network.input_layer.input_var\n ys_var = tf.placeholder(\n dtype=tf.float32, shape=[None, output_dim], name=\"ys\")\n old_prob_var = tf.placeholder(\n dtype=tf.float32, shape=[None, output_dim], name=\"old_prob\")\n\n x_mean_var = tf.get_variable(\n name=\"x_mean\",\n shape=(1, ) + input_shape,\n initializer=tf.constant_initializer(0., dtype=tf.float32))\n x_std_var = tf.get_variable(\n name=\"x_std\",\n shape=(1, ) + input_shape,\n initializer=tf.constant_initializer(1., dtype=tf.float32))\n\n normalized_xs_var = (xs_var - x_mean_var) / x_std_var\n\n with tf.name_scope(\n self._prob_network_name, values=[normalized_xs_var]):\n prob_var = L.get_output(\n l_prob, {prob_network.input_layer: normalized_xs_var})\n\n old_info_vars = dict(prob=old_prob_var)\n info_vars = dict(prob=prob_var)\n\n dist = self._dist = Categorical(output_dim)\n\n mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))\n\n loss = -tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars))\n\n predicted = tensor_utils.to_onehot_sym(\n tf.argmax(prob_var, axis=1), output_dim)\n\n self.prob_network = prob_network\n self.f_predict = tensor_utils.compile_function([xs_var], predicted)\n self.f_prob = tensor_utils.compile_function([xs_var], 
prob_var)\n self.l_prob = l_prob\n\n self.optimizer.update_opt(\n loss=loss,\n target=self,\n network_outputs=[prob_var],\n inputs=[xs_var, ys_var])\n self.tr_optimizer.update_opt(\n loss=loss,\n target=self,\n network_outputs=[prob_var],\n inputs=[xs_var, ys_var, old_prob_var],\n leq_constraint=(mean_kl, step_size))\n\n self.use_trust_region = use_trust_region\n self.name = name\n\n self.normalize_inputs = normalize_inputs\n self.x_mean_var = x_mean_var\n self.x_std_var = x_std_var\n self.first_optimized = not no_initial_trust_region\n\n def fit(self, xs, ys):\n if self.normalize_inputs:\n # recompute normalizing constants for inputs\n new_mean = np.mean(xs, axis=0, keepdims=True)\n new_std = np.std(xs, axis=0, keepdims=True) + 1e-8\n tf.get_default_session().run(\n tf.group(\n tf.assign(self.x_mean_var, new_mean),\n tf.assign(self.x_std_var, new_std),\n ))\n if self.use_trust_region and self.first_optimized:\n old_prob = self.f_prob(xs)\n inputs = [xs, ys, old_prob]\n optimizer = self.tr_optimizer\n else:\n inputs = [xs, ys]\n optimizer = self.optimizer\n loss_before = optimizer.loss(inputs)\n if self.name:\n prefix = self.name + \"/\"\n else:\n prefix = \"\"\n logger.record_tabular(prefix + 'LossBefore', loss_before)\n optimizer.optimize(inputs)\n loss_after = optimizer.loss(inputs)\n logger.record_tabular(prefix + 'LossAfter', loss_after)\n logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)\n self.first_optimized = True\n\n def predict(self, xs):\n return self.f_predict(np.asarray(xs))\n\n def predict_log_likelihood(self, xs, ys):\n prob = self.f_prob(np.asarray(xs))\n return self._dist.log_likelihood(np.asarray(ys), dict(prob=prob))\n\n def dist_info_sym(self, x_var, name=None):\n with tf.name_scope(name, \"dist_info_sym\", [x_var]):\n normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var\n with tf.name_scope(\n self._prob_network_name, values=[normalized_xs_var]):\n prob = L.get_output(\n self.l_prob,\n {self.prob_network.input_layer: normalized_xs_var})\n return dict(prob=prob)\n\n def log_likelihood_sym(self, x_var, y_var, name=None):\n with tf.name_scope(name, \"log_likelihood_sym\", [x_var, y_var]):\n normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var\n with tf.name_scope(\n self._prob_network_name, values=[normalized_xs_var]):\n prob = L.get_output(\n self.l_prob,\n {self.prob_network.input_layer: normalized_xs_var})\n return self._dist.log_likelihood_sym(y_var, dict(prob=prob))\n",
"import numpy as np\nimport tensorflow as tf\n\nfrom garage.core import Serializable\nfrom garage.misc import special\nfrom garage.misc.overrides import overrides\nfrom garage.tf.core import LayersPowered\nimport garage.tf.core.layers as L\nfrom garage.tf.core.network import GRUNetwork\nfrom garage.tf.distributions import RecurrentCategorical\nfrom garage.tf.misc import tensor_utils\nfrom garage.tf.policies import StochasticPolicy\nfrom garage.tf.spaces import Discrete\n\n\nclass CategoricalGRUPolicy(StochasticPolicy, LayersPowered, Serializable):\n def __init__(\n self,\n env_spec,\n name=\"CategoricalGRUPolicy\",\n hidden_dim=32,\n feature_network=None,\n state_include_action=True,\n hidden_nonlinearity=tf.tanh,\n gru_layer_cls=L.GRULayer,\n ):\n \"\"\"\n :param env_spec: A spec for the env.\n :param hidden_dim: dimension of hidden layer\n :param hidden_nonlinearity: nonlinearity used for each hidden layer\n :return:\n \"\"\"\n assert isinstance(env_spec.action_space, Discrete)\n\n self._prob_network_name = \"prob_network\"\n with tf.variable_scope(name, \"CategoricalGRUPolicy\"):\n Serializable.quick_init(self, locals())\n super(CategoricalGRUPolicy, self).__init__(env_spec)\n\n obs_dim = env_spec.observation_space.flat_dim\n action_dim = env_spec.action_space.flat_dim\n\n if state_include_action:\n input_dim = obs_dim + action_dim\n else:\n input_dim = obs_dim\n\n l_input = L.InputLayer(shape=(None, None, input_dim), name=\"input\")\n\n if feature_network is None:\n feature_dim = input_dim\n l_flat_feature = None\n l_feature = l_input\n else:\n feature_dim = feature_network.output_layer.output_shape[-1]\n l_flat_feature = feature_network.output_layer\n l_feature = L.OpLayer(\n l_flat_feature,\n extras=[l_input],\n name=\"reshape_feature\",\n op=lambda flat_feature, input: tf.reshape(\n flat_feature,\n tf.stack([\n tf.shape(input)[0],\n tf.shape(input)[1], feature_dim\n ])),\n shape_op=lambda _, input_shape: (\n input_shape[0], input_shape[1], feature_dim))\n\n prob_network = GRUNetwork(\n input_shape=(feature_dim, ),\n input_layer=l_feature,\n output_dim=env_spec.action_space.n,\n hidden_dim=hidden_dim,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=tf.nn.softmax,\n gru_layer_cls=gru_layer_cls,\n name=self._prob_network_name)\n\n self.prob_network = prob_network\n self.feature_network = feature_network\n self.l_input = l_input\n self.state_include_action = state_include_action\n\n flat_input_var = tf.placeholder(\n dtype=tf.float32, shape=(None, input_dim), name=\"flat_input\")\n if feature_network is None:\n feature_var = flat_input_var\n else:\n with tf.name_scope(\"feature_network\", values=[flat_input_var]):\n feature_var = L.get_output(\n l_flat_feature,\n {feature_network.input_layer: flat_input_var})\n\n with tf.name_scope(self._prob_network_name, values=[feature_var]):\n out_prob_step, out_prob_hidden = L.get_output(\n [\n prob_network.step_output_layer,\n prob_network.step_hidden_layer\n ], {prob_network.step_input_layer: feature_var})\n out_prob_step = tf.identity(out_prob_step, \"prob_step_output\")\n out_prob_hidden = tf.identity(out_prob_hidden,\n \"prob_step_hidden\")\n\n self.f_step_prob = tensor_utils.compile_function(\n [flat_input_var, prob_network.step_prev_state_layer.input_var],\n [out_prob_step, out_prob_hidden])\n\n self.input_dim = input_dim\n self.action_dim = action_dim\n self.hidden_dim = hidden_dim\n self.name = name\n\n self.prev_actions = None\n self.prev_hiddens = None\n self.dist = RecurrentCategorical(env_spec.action_space.n)\n\n 
out_layers = [prob_network.output_layer]\n if feature_network is not None:\n out_layers.append(feature_network.output_layer)\n\n LayersPowered.__init__(self, out_layers)\n\n @overrides\n def dist_info_sym(self, obs_var, state_info_vars, name=None):\n with tf.name_scope(name, \"dist_info_sym\", [obs_var, state_info_vars]):\n n_batches = tf.shape(obs_var)[0]\n n_steps = tf.shape(obs_var)[1]\n obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))\n obs_var = tf.cast(obs_var, tf.float32)\n if self.state_include_action:\n prev_action_var = tf.cast(state_info_vars[\"prev_action\"],\n tf.float32)\n all_input_var = tf.concat(\n axis=2, values=[obs_var, prev_action_var])\n else:\n all_input_var = obs_var\n if self.feature_network is None:\n with tf.name_scope(\n self._prob_network_name, values=[all_input_var]):\n prob = L.get_output(self.prob_network.output_layer,\n {self.l_input: all_input_var})\n return dict(prob=prob)\n else:\n flat_input_var = tf.reshape(all_input_var,\n (-1, self.input_dim))\n with tf.name_scope(\n self._prob_network_name,\n values=[all_input_var, flat_input_var]):\n prob = L.get_output(\n self.prob_network.output_layer, {\n self.l_input: all_input_var,\n self.feature_network.input_layer: flat_input_var\n })\n return dict(prob=prob)\n\n @property\n def vectorized(self):\n return True\n\n def reset(self, dones=None):\n if dones is None:\n dones = [True]\n dones = np.asarray(dones)\n if self.prev_actions is None or len(dones) != len(self.prev_actions):\n self.prev_actions = np.zeros((len(dones),\n self.action_space.flat_dim))\n self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))\n\n self.prev_actions[dones] = 0.\n self.prev_hiddens[\n dones] = self.prob_network.hid_init_param.eval() # get_value()\n\n # The return value is a pair. The first item is a matrix (N, A), where each\n # entry corresponds to the action value taken. The second item is a vector\n # of length N, where each entry is the density value for that action, under\n # the current policy\n @overrides\n def get_action(self, observation):\n actions, agent_infos = self.get_actions([observation])\n return actions[0], {k: v[0] for k, v in agent_infos.items()}\n\n @overrides\n def get_actions(self, observations):\n flat_obs = self.observation_space.flatten_n(observations)\n if self.state_include_action:\n assert self.prev_actions is not None\n all_input = np.concatenate([flat_obs, self.prev_actions], axis=-1)\n else:\n all_input = flat_obs\n probs, hidden_vec = self.f_step_prob(all_input, self.prev_hiddens)\n actions = special.weighted_sample_n(probs,\n np.arange(self.action_space.n))\n prev_actions = self.prev_actions\n self.prev_actions = self.action_space.flatten_n(actions)\n self.prev_hiddens = hidden_vec\n agent_info = dict(prob=probs)\n if self.state_include_action:\n agent_info[\"prev_action\"] = np.copy(prev_actions)\n return actions, agent_info\n\n @property\n @overrides\n def recurrent(self):\n return True\n\n @property\n def distribution(self):\n return self.dist\n\n @property\n def state_info_specs(self):\n if self.state_include_action:\n return [\n (\"prev_action\", (self.action_dim, )),\n ]\n else:\n return []\n"
] | [
[
"numpy.random.randint"
],
[
"tensorflow.Session"
],
[
"tensorflow.get_default_session",
"numpy.asarray",
"tensorflow.assign",
"tensorflow.placeholder",
"tensorflow.constant_initializer",
"numpy.std",
"numpy.mean",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.argmax"
],
[
"tensorflow.concat",
"tensorflow.shape",
"numpy.asarray",
"numpy.arange",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.reshape",
"numpy.concatenate",
"numpy.copy",
"tensorflow.name_scope",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Jee-King/STNet | [
"221ab60c4fccfce5a03e8878fb168e0baa7152f4",
"221ab60c4fccfce5a03e8878fb168e0baa7152f4",
"221ab60c4fccfce5a03e8878fb168e0baa7152f4"
] | [
"videoanalyst/model/backbone/backbone_impl/snn3.py",
"videoanalyst/model/backbone/backbone_impl/alexnet_bn.py",
"videoanalyst/data/sampler/sampler_base.py"
] | [
"# -*- coding: utf-8 -*\n# --------------------------------------------------------\n# SNNformer Feature Extractor (SFE) - SNN branch\n# --------------------------------------------------------\n\nimport torch.nn as nn\nimport torch\n\nfrom videoanalyst.model.backbone.backbone_base import (TRACK_BACKBONES,\n VOS_BACKBONES)\nfrom videoanalyst.model.common_opr.common_block import conv_bn_relu\nfrom videoanalyst.model.module_base import ModuleBase\n\nthresh_bais = 0.3\n# thresh = 0.3 # neuronal threshold\nlens = 0.5 # hyper-parameters of approximate function\ndecay = 0.2 # decay constants\nglobal thresh\n\nclass SpatialGroupEnhance(nn.Module):\n \"\"\" Dynamic Spiking Threshold from spatial features\"\"\"\n def __init__(self):\n super(SpatialGroupEnhance, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.weight = nn.Parameter(torch.zeros(1, 1, 1, 1))\n self.bias = nn.Parameter(torch.ones(1, 1, 1, 1))\n self.sig = nn.Sigmoid()\n\n def forward(self, x): # (b, c, h, w)\n b, c, h, w = x.size()\n xn = x * self.avg_pool(x)\n xn = xn.mean(dim=1, keepdim=True)\n entro = torch.mean(xn, dim=0).squeeze()\n h,w = entro.size()\n entro = entro.view(-1)\n max = torch.max(entro)\n min = torch.min(entro)\n entro = (entro - min) / (max-min) * 255\n his = torch.histc(entro, bins=256, min=0, max=255) / (h*w)\n entro_final = torch.sum(his * -torch.log(his + 0.00000001))\n entro_final = entro_final / torch.count_nonzero(his)\n x = self.sig(xn)\n x = torch.mean(x)\n return x + entro_final*10\n\nclass ActFun(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n ctx.save_for_backward(input)\n return input.gt(thresh).float()\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n temp = abs(input - thresh) < lens\n return grad_input * temp.float()\n\nact_fun = ActFun.apply\n\n\n# membrane potential update\ndef mem_update(ops, x, mem, spike):\n mem = mem * decay * (1. 
- spike) + ops(x)\n spike = act_fun(mem) # act_fun : approximation firing function\n return mem, spike\n\ncfg_cnn = [(6, 64, 2, 0, 11),\n (64, 128, 2, 0, 9),\n (128, 256, 2, 0, 5),\n (64, 128, 1, 1, 3),\n (128, 256, 1, 1, 3)]\n# kernel size\ncfg_kernel = [147, 70, 33, 31, 31]\ncfg_kernel_first = [59, 26, 11, 15, 15]\n# fc layer\ncfg_fc = [128, 10]\n\n@VOS_BACKBONES.register\n@TRACK_BACKBONES.register\n\n\nclass SNN3(ModuleBase):\n r\"\"\"\n SNN branch\n\n Hyper-parameters\n ----------------\n pretrain_model_path: string\n Path to pretrained backbone parameter file,\n Parameter to be loaded in _update_params_\n \"\"\"\n default_hyper_params = {\"pretrain_model_path\": \"\"}\n\n def __init__(self):\n super(SNN3, self).__init__()\n\n cfg_cnn = [(3, 64, 2, 0, 11),\n (64, 128, 2, 0, 9),\n (128, 256, 2, 0, 5),\n (64, 128, 1, 1, 3),\n (128, 256, 1, 1, 3)]\n # kernel size\n cfg_kernel = [147, 70, 33, 31, 31]\n cfg_kernel_first = [59, 26, 11, 15, 15]\n\n in_planes, out_planes, stride, padding, kernel_size = cfg_cnn[0]\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding)\n in_planes, out_planes, stride, padding, kernel_size = cfg_cnn[1]\n self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding)\n in_planes, out_planes, stride, padding, kernel_size = cfg_cnn[2]\n self.conv3 = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding)\n self.bn_tem = nn.BatchNorm2d(256)\n self.relu_tem = nn.ReLU()\n\n self.fuse_snn_transfor = nn.Conv2d(out_planes*2, out_planes, kernel_size=1, stride=1, padding=0)\n self.thre_w = SpatialGroupEnhance()\n self.conv33_11 = nn.Conv2d(256, 256, kernel_size=13, stride=2, padding=0)\n self.bn_spa = nn.BatchNorm2d(256)\n self.relu_spa = nn.ReLU()\n\n def forward(self, input_pos, input_neg, trans_snn, transformer_sig, transformer_fea, first_seq):\n global thresh\n if transformer_fea is None:\n thresh = 0.3\n else:\n thresh = self.thre_w(transformer_fea) * thresh_bais\n if first_seq:\n time_window = len(input_pos)\n tem_c3m = 0\n for step in range(time_window):\n x_pos = input_pos[step]\n x_neg = input_neg[step]\n x = torch.where(x_pos > x_neg, x_pos, x_neg)\n c1_mem, c1_spike = mem_update(self.conv1, x.float(), trans_snn[0], trans_snn[1])\n c2_mem, c2_spike = mem_update(self.conv2, c1_spike, trans_snn[2], trans_snn[3])\n c3_mem, c3_spike = mem_update(self.conv3, c2_spike, trans_snn[4], trans_snn[5])\n trans_snn = [c1_mem, c1_spike, c2_mem, c2_spike, c3_mem, c3_spike]\n tem_c3m = tem_c3m + c3_mem\n tem_fea = tem_c3m / time_window\n tem_fea = self.relu_tem(self.bn_tem(tem_fea))\n spa_fea = self.relu_spa(self.bn_spa(self.conv33_11(transformer_fea)))\n return tem_fea, spa_fea, trans_snn\n else:\n time_window = len(input_pos)\n tem_c3m = 0\n for step in range(time_window):\n x_pos = input_pos[step]\n x_neg = input_neg[step]\n x = torch.where(x_pos > x_neg, x_pos, x_neg)\n c1_mem, c1_spike = mem_update(self.conv1, x.float(), trans_snn[0], trans_snn[1])\n c2_mem, c2_spike = mem_update(self.conv2, c1_spike, trans_snn[2], trans_snn[3])\n c3_mem, c3_spike = mem_update(self.conv3, c2_spike, trans_snn[4], trans_snn[5])\n trans_snn = [c1_mem, c1_spike, c2_mem, c2_spike, c3_mem, c3_spike]\n tem_c3m = tem_c3m + c3_mem\n tem_fea = tem_c3m / time_window\n tem_fea = self.relu_tem(self.bn_tem(tem_fea))\n spa_fea = transformer_fea\n return tem_fea, spa_fea, trans_snn\n",
"# -*- coding: utf-8 -*\n\nimport torch.nn as nn\n\nfrom videoanalyst.model.backbone.backbone_base import (TRACK_BACKBONES,\n VOS_BACKBONES)\nfrom videoanalyst.model.common_opr.common_block import conv_bn_relu\nfrom videoanalyst.model.module_base import ModuleBase\n\n\n@VOS_BACKBONES.register\n@TRACK_BACKBONES.register\n\nclass AlexNet(ModuleBase):\n r\"\"\"\n AlexNet\n\n Hyper-parameters\n ----------------\n pretrain_model_path: string\n Path to pretrained backbone parameter file,\n Parameter to be loaded in _update_params_\n \"\"\"\n default_hyper_params = {\"pretrain_model_path\": \"\"}\n\n def __init__(self):\n super(AlexNet, self).__init__()\n self.conv1 = conv_bn_relu(3, 96, stride=2, kszie=11, pad=0)\n self.pool1 = nn.MaxPool2d(3, 2, 0, ceil_mode=True)\n self.conv2 = conv_bn_relu(96, 256, 1, 5, 0)\n self.pool2 = nn.MaxPool2d(3, 2, 0, ceil_mode=True)\n self.conv3 = conv_bn_relu(256, 384, 1, 3, 0)\n self.conv4 = conv_bn_relu(384, 384, 1, 3, 0)\n self.conv5 = conv_bn_relu(384, 256, 1, 3, 0, has_relu=False)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n return x\n",
"# -*- coding: utf-8 -*-\n\nfrom abc import ABCMeta\nfrom typing import Dict, List\n\nimport numpy as np\nfrom loguru import logger\n\nfrom videoanalyst.utils import Registry\n\nfrom ..dataset.dataset_base import DatasetBase\n\nTRACK_SAMPLERS = Registry('TRACK_SAMPLERS')\nVOS_SAMPLERS = Registry('VOS_SAMPLERS')\n\nTASK_SAMPLERS = dict(\n track=TRACK_SAMPLERS,\n vos=VOS_SAMPLERS,\n)\n\n\nclass SamplerBase:\n __metaclass__ = ABCMeta\n r\"\"\"\n base class for Sampler. Reponsible for sampling from multiple datasets and forming training pair / sequence.\n\n Define your hyper-parameters here in your sub-class.\n \"\"\"\n default_hyper_params = dict()\n\n def __init__(self, datasets: List[DatasetBase] = [], seed: int = 0) -> None:\n r\"\"\"\n Dataset Sampler, reponsible for sampling from different dataset\n\n Arguments\n ---------\n cfg: CfgNode\n data config, including cfg for datasset / sampler\n datasets: List[DatasetBase]\n collections of datasets\n seed: int\n seed to initialize random number generator\n important while using multi-worker data loader\n \"\"\"\n self._hyper_params = self.default_hyper_params\n self._state = dict()\n self._state[\"rng\"] = np.random.RandomState(seed)\n self.datasets = datasets\n for d in datasets:\n dataset_name = type(d).__name__\n logger.info(\"Sampler's underlying datasets: {}, length {}\".format(\n dataset_name, len(d)))\n\n def get_hps(self) -> Dict:\n r\"\"\"\n Getter function for hyper-parameters\n\n Returns\n -------\n dict\n hyper-parameters\n \"\"\"\n return self._hyper_params\n\n def set_hps(self, hps: Dict) -> None:\n r\"\"\"\n Set hyper-parameters\n\n Arguments\n ---------\n hps: dict\n dict of hyper-parameters, the keys must in self.__hyper_params__\n \"\"\"\n for key in hps:\n if key not in self._hyper_params:\n raise KeyError\n self._hyper_params[key] = hps[key]\n\n def update_params(self) -> None:\n r\"\"\"\n an interface for update params\n \"\"\"\n def __getitem__(self, item) -> Dict:\n r\"\"\"\n An interface to sample data\n \"\"\"\n"
] | [
[
"torch.mean",
"torch.ones",
"torch.max",
"torch.histc",
"torch.zeros",
"torch.min",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.log",
"torch.where",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.count_nonzero"
],
[
"torch.nn.MaxPool2d"
],
[
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tobsen2code/pyleecan | [
"1faedde4b24acc6361fa1fdd4e980eaec4ca3a62",
"4d7f0cbabf0311006963e7a2f435db2ecd901118",
"4d7f0cbabf0311006963e7a2f435db2ecd901118",
"1faedde4b24acc6361fa1fdd4e980eaec4ca3a62",
"5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9",
"1faedde4b24acc6361fa1fdd4e980eaec4ca3a62",
"4d7f0cbabf0311006963e7a2f435db2ecd901118"
] | [
"Tests/Plot/LamWind/test_Slot_12_plot.py",
"pyleecan/Methods/Simulation/InputVoltage/set_Ud_Uq.py",
"Tests/Methods/Simulation/test_magelmer.py",
"pyleecan/Methods/Geometry/Line/comp_normal.py",
"pyleecan/Methods/Machine/Conductor/comp_skin_effect_round_wire.py",
"Tests/Methods/Machine/test_get_polar_eq.py",
"Tests/Plot/LamWind/test_Slot_LSRPM_plot.py"
] | [
"# -*- coding: utf-8 -*-\nfrom os.path import join\n\nimport matplotlib.pyplot as plt\nfrom numpy import array, pi, zeros\n\nfrom pyleecan.Classes.Frame import Frame\nfrom pyleecan.Classes.LamSlotWind import LamSlotWind\nfrom pyleecan.Classes.LamSquirrelCage import LamSquirrelCage\nfrom pyleecan.Classes.MachineDFIM import MachineDFIM\nfrom pyleecan.Classes.Shaft import Shaft\nfrom pyleecan.Classes.VentilationCirc import VentilationCirc\nfrom pyleecan.Classes.VentilationPolar import VentilationPolar\nfrom pyleecan.Classes.VentilationTrap import VentilationTrap\nfrom pyleecan.Classes.Winding import Winding\nfrom pyleecan.Classes.WindingUD import WindingUD\nfrom pyleecan.Classes.MatMagnetics import MatMagnetics\nfrom pyleecan.Classes.SlotW12 import SlotW12\n\nfrom Tests import save_plot_path as save_path\nfrom Tests.Plot.LamWind import wind_mat\n\nimport pytest\n\n\n\"\"\"pytest for Lamination with winding plot\"\"\"\n\n\nclass Test_Slot_12_plot(object):\n def test_Lam_Wind_12_wind_22(self):\n \"\"\"Test machine plot with Slot 12 and winding rad=2, tan=2\"\"\"\n print(\"\\nTest plot Slot 12\")\n plt.close(\"all\")\n test_obj = MachineDFIM()\n test_obj.rotor = LamSlotWind(\n Rint=0.2,\n Rext=0.5,\n is_internal=True,\n is_stator=False,\n L1=0.9,\n Nrvd=2,\n Wrvd=0.05,\n )\n test_obj.rotor.axial_vent = [\n VentilationPolar(Zh=6, Alpha0=pi / 6, W1=pi / 6, D0=100e-3, H0=0.3)\n ]\n test_obj.rotor.slot = SlotW12(Zs=6, R2=35e-3, H0=20e-3, R1=17e-3, H1=130e-3)\n test_obj.rotor.winding = WindingUD(wind_mat=wind_mat, qs=4, p=4, Lewout=60e-3)\n test_obj.rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)\n test_obj.shaft = Shaft(Drsh=test_obj.rotor.Rint * 2, Lshaft=1)\n\n test_obj.stator = LamSlotWind(\n Rint=0.51,\n Rext=0.8,\n is_internal=False,\n is_stator=True,\n L1=0.9,\n Nrvd=2,\n Wrvd=0.05,\n )\n test_obj.stator.slot = SlotW12(Zs=18, R2=25e-3, H0=30e-3, R1=0, H1=150e-3)\n test_obj.stator.winding.Lewout = 60e-3\n test_obj.stator.winding = Winding(qs=3, p=3, Nlayer=2, coil_pitch=2)\n test_obj.stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)\n test_obj.frame = Frame(Rint=0.8, Rext=0.9, Lfra=1)\n\n test_obj.plot(is_show_fig=False)\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_Lam_Wind_s12_1-Machine.png\"))\n # Rotor + Stator + 2 for frame + 1 for Shaft\n assert len(fig.axes[0].patches) == 73\n\n test_obj.rotor.plot(is_show_fig=False)\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_Lam_Wind_s12_2-Rotor.png\"))\n # 2 for lam + Zs*4 for wind + 6 vents\n assert len(fig.axes[0].patches) == 32\n\n test_obj.stator.plot(is_show_fig=False)\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_Lam_Wind_s12_3-Stator.png\"))\n # 2 for lam + Zs*2 for wind\n assert len(fig.axes[0].patches) == 38\n\n tooth = test_obj.rotor.slot.get_surface_tooth()\n tooth.plot(color=\"r\", is_show_fig=False)\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_Lam_Wind_s12_Tooth_in.png\"))\n\n tooth = test_obj.stator.slot.get_surface_tooth()\n tooth.plot(color=\"r\", is_show_fig=False)\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_Lam_Wind_s12_Tooth_out.png\"))\n",
"from numpy import cos, sin\n\n\ndef set_Ud_Uq(self, U0, Phi0):\n \"\"\"Set Ud_ref and Uq_ref according to U0, Phi0\n\n Parameters\n ----------\n self : InputVoltage\n An InputVoltage object\n U0 : float\n Voltage amplitude [Arms]\n Phi0 : float\n Voltage phase [rad]\n \"\"\"\n\n self.OP.Ud_ref = U0 * cos(Phi0)\n self.OP.Uq_ref = U0 * sin(Phi0)\n if abs(self.OP.Ud_ref) < 1e-10:\n self.OP.Ud_ref = 0\n if abs(self.OP.Uq_ref) < 1e-10:\n self.OP.Uq_ref = 0\n",
"from os import makedirs\nfrom os.path import join, isdir\nimport pytest\nfrom numpy import array, linspace, ones, pi, zeros, sqrt, cos\nfrom Tests import save_plot_path\nfrom pyleecan.Classes.InputCurrent import InputCurrent\nfrom pyleecan.Classes.MagElmer import MagElmer\nfrom pyleecan.Classes.SlotM10 import SlotM10\nfrom pyleecan.Classes.Simu1 import Simu1\nfrom pyleecan.Classes.OPdq import OPdq\nfrom pyleecan.Classes.Output import Output\nfrom pyleecan.Functions.load import load\nfrom pyleecan.Functions.Plot import dict_2D\nfrom pyleecan.definitions import DATA_DIR\n\n# Gather results in the same folder\nsave_path = join(save_plot_path, \"Elmer\")\nif not isdir(save_path):\n makedirs(save_path)\n\n\nmesh_dict = {\n \"Lamination_Rotor_Bore_Radius_Ext\": 180,\n \"surface_line_0\": 5,\n \"surface_line_1\": 10,\n \"surface_line_2\": 5,\n \"surface_line_3\": 5,\n \"surface_line_4\": 10,\n \"surface_line_5\": 5,\n \"Lamination_Stator_Bore_Radius_Int\": 10,\n \"Lamination_Stator_Yoke_Side_Right\": 30,\n \"Lamination_Stator_Yoke_Side_Left\": 30,\n \"int_airgap_arc\": 120,\n \"int_sb_arc\": 120,\n \"ext_airgap_arc\": 120,\n \"ext_sb_arc\": 120,\n \"airbox_line_1\": 10,\n \"airbox_line_2\": 10,\n \"airbox_arc\": 20,\n}\n\n\[email protected]\[email protected]_5s\[email protected]\[email protected]\[email protected]\ndef test_ipm_Elmer():\n\n Toyota_Prius = load(join(DATA_DIR, \"Machine\", \"Toyota_Prius.json\"))\n Toyota_Prius.stator.slot.H1 = 1e-3\n simu = Simu1(name=\"test_ipm_Elmer\", machine=Toyota_Prius)\n\n # Definition of the enforced output of the electrical module\n # N0 = 1500\n # Is = ImportMatrixVal(value=array([[20, -10, -10],[20, -10, -10],[20, -10, -10]]))\n # Ir = ImportMatrixVal(value=zeros((1, 28)))\n # Nt_tot = 3\n # Na_tot = 4096\n # simu.input = InputCurrent(\n # Is=Is,\n # Ir=Ir, # zero current for the rotor\n # N0=N0,\n # Nt_tot=Nt_tot,\n # Na_tot=Na_tot,\n # angle_rotor_initial=0.2244,\n # )\n\n # Definition of a sinusoidal current\n simu.input = InputCurrent()\n # simu.input.Id_ref = 0 # [A]\n # simu.input.Iq_ref = 250 # [A]\n # simu.input.Nt_tot = 32 * 8 # Number of time step\n # simu.input.Na_tot = 2048 # Spatial discretization\n simu.input.OP = OPdq(N0=2000) # Rotor speed [rpm]\n p = Toyota_Prius.stator.winding.p\n time = linspace(0, 60 / simu.input.OP.N0, num=32 * p, endpoint=False)\n simu.input.time = time\n simu.input.angle = linspace(0, 2 * pi, num=2048, endpoint=False)\n I0 = 250\n felec = p * simu.input.OP.N0 / 60\n rot_dir = simu.machine.stator.comp_mmf_dir()\n Phi0 = 140 * pi / 180\n Ia = I0 * cos(2 * pi * felec * time + 0 * rot_dir * 2 * pi / 3 + Phi0)\n Ib = I0 * cos(2 * pi * felec * time + 1 * rot_dir * 2 * pi / 3 + Phi0)\n Ic = I0 * cos(2 * pi * felec * time + 2 * rot_dir * 2 * pi / 3 + Phi0)\n # simu.input.set_Id_Iq(I0=250/sqrt(2), Phi0=140*pi/180)\n simu.input.Is = array([Ia, Ib, Ic]).transpose()\n\n # Definition of the magnetic simulation\n # 2 sym + antiperiodicity = 1/4 Lamination\n simu.mag = MagElmer(\n type_BH_stator=0,\n type_BH_rotor=0,\n is_periodicity_a=True,\n is_periodicity_t=True,\n FEA_dict=mesh_dict,\n is_get_mesh=True,\n is_save_FEA=True,\n )\n # Stop after magnetic computation\n simu.force = None\n simu.struct = None\n # Run simulation\n outp = Output(simu=simu)\n simu.run()\n # outp.mag.Tem.plot_2D_Data(\"time\", **dict_2D)\n # outp.elec.get_Is().plot_2D_Data(\"time\", \"phase\", **dict_2D)\n # outp.mag.Tem.plot_2D_Data(\"time[smallestperiod]\", **dict_2D)\n # outp.mag.meshsolution.plot_contour(label=\"B\")\n # 
outp.mag.meshsolution.plot_contour(label=\"A\")\n # outp.mag.meshsolution.plot_contour(label=\"J\")\n return outp\n\n\[email protected]\[email protected]_5s\[email protected]\[email protected]\[email protected]\ndef test_spm_Elmer():\n # Import the machine from a script\n PMSM_A = load(join(DATA_DIR, \"Machine\", \"SPMSM_001.json\"))\n PMSM_A.rotor.slot = SlotM10(Wmag=15e-3, Hmag=3e-3, H0=0.0, W0=15e-3, Zs=8)\n # PMSM_A.rotor.slot = SlotMFlat(H0=0.0, W0=15e-3, Zs=8)\n # PMSM_A.rotor.slot.magnet = [MagnetType10(Wmag=15e-3, Hmag=3e-3)]\n mesh_dict[\"Lamination_Rotor_Bore_Radius_Ext\"] = 20\n\n # Create the Simulation\n simu = Simu1(name=\"test_spm_Elmer\", machine=PMSM_A)\n\n # Definition of a sinusoidal current\n simu.input = InputCurrent()\n # simu.input.Id_ref = 0 # [A]\n # simu.input.Iq_ref = 250 # [A]\n # simu.input.Nt_tot = 32 * 8 # Number of time step\n # simu.input.Na_tot = 2048 # Spatial discretization\n simu.input.OP = OPdq(N0=2000) # Rotor speed [rpm]\n p = PMSM_A.stator.winding.p\n time = linspace(0, 60 / simu.input.OP.N0, num=32 * p, endpoint=False)\n simu.input.time = time\n simu.input.angle = linspace(0, 2 * pi, num=2048, endpoint=False)\n I0 = 150\n felec = p * simu.input.OP.N0 / 60\n rot_dir = simu.machine.stator.comp_mmf_dir()\n Phi0 = 140 * pi / 180\n Ia = I0 * cos(2 * pi * felec * time + 0 * rot_dir * 2 * pi / 3 + Phi0)\n Ib = I0 * cos(2 * pi * felec * time + 1 * rot_dir * 2 * pi / 3 + Phi0)\n Ic = I0 * cos(2 * pi * felec * time + 2 * rot_dir * 2 * pi / 3 + Phi0)\n # simu.input.set_Id_Iq(I0=250/sqrt(2), Phi0=140*pi/180)\n simu.input.Is = array([Ia, Ib, Ic]).transpose()\n\n # Definition of the magnetic simulation\n # 2 sym + antiperiodicity = 1/4 Lamination\n simu.mag = MagElmer(\n type_BH_stator=0,\n type_BH_rotor=0,\n is_periodicity_a=True,\n is_periodicity_t=True,\n FEA_dict=mesh_dict,\n is_get_mesh=False,\n is_save_FEA=False,\n )\n # Stop after magnetic computation\n simu.force = None\n simu.struct = None\n # Run simulation\n outp = Output(simu=simu)\n simu.run()\n outp.mag.Tem.plot_2D_Data(\"time\", **dict_2D)\n # outp.elec.get_Is().plot_2D_Data(\"time\", \"phase\", **dict_2D)\n # outp.mag.Tem.plot_2D_Data(\"time[smallestperiod]\", **dict_2D)\n # outp.mag.meshsolution.plot_contour(label=\"B\")\n # outp.mag.meshsolution.plot_contour(label=\"A\")\n # outp.mag.meshsolution.plot_contour(label=\"J\")\n\n return outp\n\n\nif __name__ == \"__main__\":\n out = test_ipm_Elmer()\n out = test_spm_Elmer()\n",
"from numpy import angle, exp\n\n\ndef comp_normal(self):\n \"\"\"Compute the normal direction of the Line\n Normal point is \"on top\" (begin=O and end on Ox)\n\n Parameters\n ----------\n self : Line\n a Line object\n\n Returns\n -------\n normal: float\n Angle of the vector between (Zbegin+Zend)/2 (even for arc) and the normal point [rad]\n \"\"\"\n\n Zb = self.get_begin()\n Ze = self.get_end()\n Zm = (Zb + Ze) / 2 # \"Middle\" of the line\n L = self.comp_length() / 3\n\n # In ref begin=O and end on Ox\n Zm2 = (Zm - Zb) * exp(-1j * angle(Ze - Zb))\n Zn2 = Zm2 + 1j * L\n\n Zn = Zn2 * exp(1j * angle(Ze - Zb)) + Zb\n\n return angle(Zn - Zm)\n",
"# -*- coding: utf-8 -*-\nimport numpy as np\n\n\ndef comp_skin_effect_round_wire(self, f, rho=None, mu=None):\n\n \"\"\"Compute skin effect factor for round wires\n\n\n Parameters\n ----------\n Inputs:\n f: float\n Frequency (Hz)\n rho: float\n Resistivity of wire material (Ohm meter)\n mu: float\n Relative permeability of wire material\n self : Conductor\n an Conductor object\n\n Outputs:\n K_R: float\n Skin effect resistance factor of round wires\n\n K_I: float\n Skin effect inductance factor of round wires\n \"\"\"\n\n # Resistivity of wire material (Ohm meter)\n if rho is None:\n rho = self.cond_mat.elec.get_resistivity()\n # Wire diameter\n d_w = self.Wwire\n # Vaccum or air permeability\n mu0 = 4 * np.pi * 1e-7\n # Wire material magnetic permeability (mu*mu0)\n if mu is None:\n mu = self.cond_mat.mag.mur_lin * mu0\n else:\n mu = mu * mu0\n # Thickness of skin\n delta = np.sqrt(rho / (np.pi * f * mu))\n\n \"\"\"\n cf. SKIN EFFECT, PROXIMITY EFFECT AND THE RESISTANCE OF CIRCULAR AND RECTANGULAR\n CONDUCTORS, page 6\n\n \"\"\"\n # # factor of skin effect (R_AC=R_DC*K)\n # K_R = 0.25*(d_w)**2/(d_w*delta-delta**2)\n\n \"\"\"\n cf. A simple derivation for the skin effect in a round wire, page 8-9 eqa 30-31\n\n \"\"\"\n # Radius of wire\n r_w = d_w / 2\n\n # Resistance factor of skin effect (R_AC=R_DC*K)\n K_R = 1 + 1 / 48 * (r_w / delta) ** 4\n # Inductance factor of skin effect (I_AC=I_DC*K)\n K_I = 1 - 1 / 96 * (r_w / delta) ** 4\n\n return K_R, K_I\n",
"# -*- coding: utf-8 -*-\nfrom os.path import join\nimport pytest\n\nimport matplotlib.pyplot as plt\nfrom numpy import pi\n\nfrom Tests import save_plot_path as save_path\nfrom pyleecan.Functions.load import load\nfrom pyleecan.definitions import DATA_DIR\n\n\nclass Test_get_polar_eq(object):\n \"\"\"unittest to convert machine to polar and plot them\"\"\"\n\n def test_get_polar_eq_SCIM(self):\n \"\"\"Test that you can create polar equivalent of SCIM machine\"\"\"\n SCIM_001 = load(join(DATA_DIR, \"Machine\", \"SCIM_001.json\"))\n polar_eq = SCIM_001.get_polar_eq()\n\n plt.close(\"all\")\n SCIM_001.plot(comp_machine=polar_eq, is_show_fig=False)\n\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_get_polar_eq_SCIM_001.png\"))\n assert len(fig.axes[0].patches) == 219\n",
"# -*- coding: utf-8 -*-\nfrom os.path import join\nimport pytest\n\nimport matplotlib.pyplot as plt\nfrom numpy import array, pi, zeros\n\nfrom pyleecan.Classes.Frame import Frame\nfrom pyleecan.Classes.LamSlotWind import LamSlotWind\nfrom pyleecan.Classes.LamSquirrelCage import LamSquirrelCage\nfrom pyleecan.Classes.MachineDFIM import MachineDFIM\nfrom pyleecan.Classes.Shaft import Shaft\nfrom pyleecan.Classes.VentilationCirc import VentilationCirc\nfrom pyleecan.Classes.VentilationPolar import VentilationPolar\nfrom pyleecan.Classes.VentilationTrap import VentilationTrap\nfrom pyleecan.Classes.Winding import Winding\nfrom pyleecan.Classes.WindingUD import WindingUD\nfrom pyleecan.Classes.SlotWLSRPM import SlotWLSRPM\n\nfrom Tests import save_plot_path as save_path\n\n# from Tests.Plot.LamWind import wind_mat, wind_mat2\n\n\n\"\"\"unittest for Lamination with winding plot\"\"\"\n\n\[email protected]\ndef machine():\n \"\"\"Run at the begining of every test to setup the machine\"\"\"\n plt.close(\"all\")\n test_obj = LamSlotWind(\n Rint=50.7e-3,\n Rext=72.5e-3,\n is_internal=False,\n is_stator=True,\n L1=0.95,\n Nrvd=0,\n Wrvd=0,\n )\n test_obj.slot = SlotWLSRPM(\n Zs=12, W1=8e-3, W3=11.6e-3, H2=14.8e-3, R1=0.75e-3, H3=2e-3\n )\n\n return test_obj\n\n\n# wind_mat = zeros((2, 2, 6, 4)) # Nrad, Ntan, Zs, qs\n# wind_mat[0, 0, :, :] = array(\n# [[1, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, -1, -1, 0], [0, -1, 0, 0, 0, 1]]\n# ).T\n\n# wind_mat[1, 0, :, :] = array(\n# [[0, 0, 0, 0, 0, 0], [-1, 0, -1, 0, 0, -1], [0, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0]]\n# ).T\n\n# wind_mat[0, 1, :, :] = array(\n# [[-1, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, -1, 0, 0, -1]]\n# ).T\n\n# wind_mat[1, 1, :, :] = array(\n# [[0, 0, 0, -1, -1, 0], [1, 0, 0, 0, 0, 1], [0, -1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]]\n# ).T\n###\nwind_mat_LSRPM = zeros((2, 2, 12, 6)) # Nrad, Ntan, Zs, qs\nwind_mat_LSRPM[0, 0, :, :] = array(\n [\n [-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0],\n [0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0],\n [0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n).T\n\nwind_mat_LSRPM[1, 0, :, :] = array(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0],\n [0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0],\n [0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1],\n ]\n).T\n\nwind_mat_LSRPM[0, 1, :, :] = array(\n [\n [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],\n [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n).T\n\nwind_mat_LSRPM[1, 1, :, :] = array(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],\n [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0],\n ]\n).T\n\n\ndef test_Lam_Wind_LSRPM_wind_tan(machine):\n \"\"\"Test machine plot with Slot LSRPM and winding rad=1, tan=2\"\"\"\n machine.winding = WindingUD(wind_mat=wind_mat_LSRPM, qs=6, p=4, Lewout=0)\n machine.plot(is_show_fig=False)\n fig = plt.gcf()\n fig.savefig(join(save_path, \"test_Lam_Wind_sLSRPM_2-tan-wind.png\"))\n # 2 for lam + Zs*2 for wind\n # assert len(fig.axes[0].patches) == 26\n\n\ndef test_stator_slot_angle_opening(machine):\n \"\"\"Test 
calculate the angle opening\"\"\"\n machine.slot.comp_angle_opening()\n\n\ndef test_stator_slot_height_damper(machine):\n \"\"\"Test calculate the damper height\"\"\"\n machine.slot.comp_height_damper()\n\n\ndef test_stator_slot_height_wind(machine):\n \"\"\"Test calculate the winding height\"\"\"\n machine.slot.comp_height_wind()\n\n\ndef test_stator_slot_height(machine):\n \"\"\"Test calculate the total height\"\"\"\n machine.slot.comp_height()\n"
] | [
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.gcf"
],
[
"numpy.cos",
"numpy.sin"
],
[
"numpy.array",
"numpy.cos",
"numpy.linspace"
],
[
"numpy.angle"
],
[
"numpy.sqrt"
],
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.gcf"
],
[
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.close",
"matplotlib.pyplot.gcf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brianhie/icml18-jtnn | [
"fabede920d7def1d248c3157dd31f7cc5a2132e0"
] | [
"jtnn/datautils.py"
] | [
"from torch.utils.data import Dataset\nfrom .mol_tree import MolTree\nimport numpy as np\n\nclass MoleculeDataset(Dataset):\n\n def __init__(self, data_file):\n with open(data_file) as f:\n self.data = [line.strip(\"\\r\\n \").split()[0] for line in f]\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n smiles = self.data[idx]\n mol_tree = MolTree(smiles)\n mol_tree.recover()\n mol_tree.assemble()\n return mol_tree\n\nclass PropDataset(Dataset):\n\n def __init__(self, data_file, prop_file):\n self.prop_data = np.loadtxt(prop_file)\n with open(data_file) as f:\n self.data = [line.strip(\"\\r\\n \").split()[0] for line in f]\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n smiles = self.data[idx]\n mol_tree = MolTree(smiles)\n mol_tree.recover()\n mol_tree.assemble()\n return mol_tree, self.prop_data[idx]\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Takezo87/torchtools | [
"4230305d9063dabee3614f0dcd8557739b90f817"
] | [
"torchtools/models.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: 01_models.ipynb (unless otherwise specified).\n\n__all__ = ['noop', 'shortcut', 'Inception', 'InceptionBlock', 'InceptionTime', 'Squeeze', 'Unsqueeze', 'Add', 'Concat',\n 'Permute', 'Transpose', 'View', 'Reshape', 'Max', 'LastStep', 'Noop', 'TransformerModel',\n 'ScaledDotProductAttention', 'MultiHeadAttention', 'TSTEncoderLayer', 'TSTEncoder', 'TST', 'Sigmoid',\n 'InceptionTimeSgmOld', 'InceptionTimeSgm', 'TransformerSgm', 'TransformerSgmD', 'InceptionTimeD',\n 'InceptionTime_NH', 'InceptionTimeD_Mixed', 'InceptionTime_Mixed', 'TabNetTT', 'InceptionTimeVar',\n 'nll_regression', 'nll_leaky_loss', 'qd_loss', 'InceptionTimeBounds']\n\n# Cell\nfrom .core import *\n\n# Cell\nimport torch.nn as nn\nimport torch as torch\nimport torch.nn.functional as F\n\nfrom functools import partial\n\nfrom fastai.layers import SigmoidRange\nfrom fastai.torch_basics import *\n# from ..imports import *\n# from .layers import *\n# from .utils import *\nfrom torch.nn.modules.transformer import TransformerEncoder, TransformerEncoderLayer\n\n# Cell\n# This is an unofficial PyTorch implementation by Ignacio Oguiza - [email protected] based on:\n\n# Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J., ... & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.\n# Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime\n\n\ndef noop(x):\n return x\n\ndef shortcut(c_in, c_out):\n return nn.Sequential(*[nn.Conv1d(c_in, c_out, kernel_size=1),\n nn.BatchNorm1d(c_out)])\n\nclass Inception(nn.Module):\n def __init__(self, c_in, bottleneck=32, ks=40, nb_filters=32):\n\n super().__init__()\n self.bottleneck = nn.Conv1d(c_in, bottleneck, 1) if bottleneck and c_in > 1 else noop\n mts_feat = bottleneck or c_in\n conv_layers = []\n kss = [ks // (2**i) for i in range(3)]\n # ensure odd kss until nn.Conv1d with padding='same' is available in pytorch 1.3\n kss = [ksi if ksi % 2 != 0 else ksi - 1 for ksi in kss]\n for i in range(len(kss)):\n conv_layers.append(\n nn.Conv1d(mts_feat, nb_filters, kernel_size=kss[i], padding=kss[i] // 2))\n self.conv_layers = nn.ModuleList(conv_layers)\n self.maxpool = nn.MaxPool1d(3, stride=1, padding=1)\n self.conv = nn.Conv1d(c_in, nb_filters, kernel_size=1)\n self.bn = nn.BatchNorm1d(nb_filters * 4)\n self.act = nn.ReLU()\n\n def forward(self, x):\n input_tensor = x.to(torch.float)\n x = self.bottleneck(input_tensor)\n for i in range(3):\n out_ = self.conv_layers[i](x)\n if i == 0: out = out_\n else: out = torch.cat((out, out_), 1)\n mp = self.conv(self.maxpool(input_tensor))\n inc_out = torch.cat((out, mp), 1)\n return self.act(self.bn(inc_out))\n\n\nclass InceptionBlock(nn.Module):\n def __init__(self,c_in,bottleneck=32,ks=40,nb_filters=32,residual=True,depth=6):\n\n super().__init__()\n\n self.residual = residual\n self.depth = depth\n\n #inception & residual layers\n inc_mods = []\n res_layers = []\n res = 0\n for d in range(depth):\n inc_mods.append(\n Inception(c_in if d == 0 else nb_filters * 4, bottleneck=bottleneck if d > 0 else 0,ks=ks,\n nb_filters=nb_filters))\n if self.residual and d % 3 == 2:\n res_layers.append(shortcut(c_in if res == 0 else nb_filters * 4, nb_filters * 4))\n res += 1\n else: res_layer = res_layers.append(None)\n self.inc_mods = nn.ModuleList(inc_mods)\n self.res_layers = nn.ModuleList(res_layers)\n self.act = nn.ReLU()\n\n def forward(self, x):\n res = x\n for d, l in 
enumerate(range(self.depth)):\n x = self.inc_mods[d](x)\n if self.residual and d % 3 == 2:\n res = self.res_layers[d](res)\n x += res\n res = x\n x = self.act(x)\n return x\n\n# Cell\nclass InceptionTime(nn.Module):\n def __init__(self,c_in,c_out,bottleneck=32,ks=40,nb_filters=32,residual=True,depth=6):\n super().__init__()\n self.block = InceptionBlock(c_in,bottleneck=bottleneck,ks=ks,nb_filters=nb_filters,\n residual=residual,depth=depth)\n self.gap = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(nb_filters * 4, c_out)\n\n def forward(self, *x):\n x = torch.cat(x, dim=-2)\n x = self.block(x)\n x = self.gap(x).squeeze(-1)\n x = self.fc(x)\n return x\n\n# Cell\nclass Squeeze(Module):\n def __init__(self, dim=-1): self.dim = dim\n def forward(self, x): return x.squeeze(dim=self.dim)\n def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'\n\n\nclass Unsqueeze(Module):\n def __init__(self, dim=-1): self.dim = dim\n def forward(self, x): return x.unsqueeze(dim=self.dim)\n def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'\n\n\nclass Add(Module):\n def forward(self, x, y): return x.add(y)\n def __repr__(self): return f'{self.__class__.__name__}'\n\n\nclass Concat(Module):\n def __init__(self, dim=1): self.dim = dim\n def forward(self, *x): return torch.cat(*x, dim=self.dim)\n def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'\n\n\nclass Permute(Module):\n def __init__(self, *dims): self.dims = dims\n def forward(self, x): return x.permute(self.dims)\n def __repr__(self): return f\"{self.__class__.__name__}(dims={', '.join([str(d) for d in self.dims])})\"\n\n\nclass Transpose(Module):\n def __init__(self, *dims, contiguous=False): self.dims, self.contiguous = dims, contiguous\n def forward(self, x):\n if self.contiguous: return x.transpose(*self.dims).contiguous()\n else: return x.transpose(*self.dims)\n def __repr__(self):\n if self.contiguous: return f\"{self.__class__.__name__}(dims={', '.join([str(d) for d in self.dims])}).contiguous()\"\n else: return f\"{self.__class__.__name__}({', '.join([str(d) for d in self.dims])})\"\n\n\nclass View(Module):\n def __init__(self, *shape): self.shape = shape\n def forward(self, x): return x.view(x.shape[0], *self.shape)\n def __repr__(self): return f\"{self.__class__.__name__}({', '.join(['bs'] + [str(s) for s in self.shape])})\"\n\n\nclass Reshape(Module):\n def __init__(self, *shape): self.shape = shape\n def forward(self, x): return x.reshape(x.shape[0], *self.shape)\n def __repr__(self): return f\"{self.__class__.__name__}({', '.join(['bs'] + [str(s) for s in self.shape])})\"\n\n\nclass Max(Module):\n def __init__(self, dim=None, keepdim=False): self.dim, self.keepdim = dim, keepdim\n def forward(self, x): return x.max(self.dim, keepdim=self.keepdim)[0]\n def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim}, keepdim={self.keepdim})'\n\n\nclass LastStep(Module):\n def forward(self, x): return x[..., -1]\n def __repr__(self): return f'{self.__class__.__name__}()'\n\n\nNoop = nn.Sequential()\n\n# Cell\nclass TransformerModel(Module):\n def __init__(self, c_in, c_out, d_model=64, n_head=1, d_ffn=128, dropout=0.1, activation=\"relu\", n_layers=1):\n \"\"\"\n Args:\n c_in: the number of features (aka variables, dimensions, channels) in the time series dataset\n c_out: the number of target classes\n d_model: total dimension of the model.\n nhead: parallel attention heads.\n d_ffn: the dimension of the feedforward network model.\n dropout: a Dropout layer on attn_output_weights.\n 
activation: the activation function of intermediate layer, relu or gelu.\n num_layers: the number of sub-encoder-layers in the encoder.\n Input shape:\n bs (batch size) x nvars (aka variables, dimensions, channels) x seq_len (aka time steps)\n \"\"\"\n self.permute = Permute(2, 0, 1)\n self.inlinear = nn.Linear(c_in, d_model)\n self.relu = nn.ReLU()\n encoder_layer = TransformerEncoderLayer(d_model, n_head, dim_feedforward=d_ffn, dropout=dropout, activation=activation)\n encoder_norm = nn.LayerNorm(d_model)\n self.transformer_encoder = TransformerEncoder(encoder_layer, n_layers, norm=encoder_norm)\n self.transpose = Transpose(1, 0)\n self.max = Max(1)\n self.outlinear = nn.Linear(d_model, c_out)\n\n def forward(self,x):\n x = self.permute(x) # bs x nvars x seq_len -> seq_len x bs x nvars\n x = self.inlinear(x) # seq_len x bs x nvars -> seq_len x bs x d_model\n x = self.relu(x)\n x = self.transformer_encoder(x)\n x = self.transpose(x) # seq_len x bs x d_model -> bs x seq_len x d_model\n x = self.max(x)\n x = self.relu(x)\n x = self.outlinear(x)\n return x\n\n# Cell\nclass ScaledDotProductAttention(Module):\n def __init__(self, d_k:int): self.d_k = d_k\n def forward(self, q:Tensor, k:Tensor, v:Tensor, mask:Optional[Tensor]=None):\n\n # MatMul (q, k) - similarity scores for all pairs of positions in an input sequence\n scores = torch.matmul(q, k) # scores : [bs x n_heads x q_len x q_len]\n\n # Scale\n scores = scores / (self.d_k ** 0.5)\n\n # Mask (optional)\n if mask is not None: scores.masked_fill_(mask, -1e9)\n\n # SoftMax\n attn = F.softmax(scores, dim=-1) # attn : [bs x n_heads x q_len x q_len]\n\n # MatMul (attn, v)\n context = torch.matmul(attn, v) # context: [bs x n_heads x q_len x d_v]\n\n return context, attn\n\n# Cell\nclass MultiHeadAttention(Module):\n def __init__(self, d_model:int, n_heads:int, d_k:int, d_v:int):\n r\"\"\"\n Input shape: Q, K, V:[batch_size (bs) x q_len x d_model], mask:[q_len x q_len]\n \"\"\"\n self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v\n\n self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)\n self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)\n self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)\n\n self.W_O = nn.Linear(n_heads * d_v, d_model, bias=False)\n\n def forward(self, Q:Tensor, K:Tensor, V:Tensor, mask:Optional[Tensor]=None):\n\n bs = Q.size(0)\n\n # Linear (+ split in multiple heads)\n q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1,2) # q_s : [bs x n_heads x q_len x d_k]\n k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0,2,3,1) # k_s : [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3)\n v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1,2) # v_s : [bs x n_heads x q_len x d_v]\n\n # Scaled Dot-Product Attention (multiple heads)\n context, attn = ScaledDotProductAttention(self.d_k)(q_s, k_s, v_s) # context: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len]\n\n # Concat\n context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v) # context: [bs x q_len x n_heads * d_v]\n\n # Linear\n output = self.W_O(context) # context: [bs x q_len x d_model]\n\n return output, attn\n\n# Cell\nclass TSTEncoderLayer(Module):\n def __init__(self, d_model:int, n_heads:int, d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256, res_dropout:float=0.1, activation:str=\"gelu\"):\n\n assert d_model // n_heads, f\"d_model ({d_model}) must be divisible by n_heads ({n_heads})\"\n d_k = ifnone(d_k, d_model // n_heads)\n d_v = ifnone(d_v, 
d_model // n_heads)\n\n # Multi-Head attention\n self.self_attn = MultiHeadAttention(d_model, n_heads, d_k, d_v)\n\n # Add & Norm\n self.dropout_attn = nn.Dropout(res_dropout)\n self.batchnorm_attn = nn.BatchNorm1d(d_model)\n\n # Position-wise Feed-Forward\n self.ff = nn.Sequential(nn.Linear(d_model, d_ff), self._get_activation_fn(activation), nn.Linear(d_ff, d_model))\n\n # Add & Norm\n self.dropout_ffn = nn.Dropout(res_dropout)\n self.batchnorm_ffn = nn.BatchNorm1d(d_model)\n\n def forward(self, src:Tensor, mask:Optional[Tensor]=None) -> Tensor:\n\n # Multi-Head attention sublayer\n ## Multi-Head attention\n src2, attn = self.self_attn(src, src, src, mask=mask)\n ## Add & Norm\n src = src + self.dropout_attn(src2) # Add: residual connection with residual dropout\n src = self.batchnorm_attn(src.permute(1,2,0)).permute(2,0,1) # Norm: batchnorm (requires d_model features to be in dim 1)\n\n # Feed-forward sublayer\n ## Position-wise Feed-Forward\n src2 = self.ff(src)\n ## Add & Norm\n src = src + self.dropout_ffn(src2) # Add: residual connection with residual dropout\n src = self.batchnorm_ffn(src.permute(1,2,0)).permute(2,0,1) # Norm: batchnorm (requires d_model features to be in dim 1)\n\n return src\n\n def _get_activation_fn(self, activation):\n if activation == \"relu\": return nn.ReLU()\n elif activation == \"gelu\": return nn.GELU()\n raise ValueError(f'{activation} is not available. You can use \"relu\" or \"gelu\"')\n\n# Cell\nclass TSTEncoder(Module):\n def __init__(self, encoder_layer, n_layers):\n self.layers = nn.ModuleList([deepcopy(encoder_layer) for i in range(n_layers)])\n\n def forward(self, src:Tensor, mask:Optional[Tensor]=None) -> Tensor:\n output = src\n for mod in self.layers: output = mod(output, mask=mask)\n return output\n\n\n# Cell\nclass TST(Module):\n def __init__(self, c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=None,\n n_layers:int=3, d_model:int=128, n_heads:int=16, d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, res_dropout:float=0.1, activation:str=\"gelu\", fc_dropout:float=0.,\n y_range:Optional[tuple]=None, verbose:bool=False, **kwargs):\n r\"\"\"TST (Time Series Transformer) is a Transformer that takes continuous time series as inputs.\n As mentioned in the paper, the input must be standardized by_var based on the entire training set.\n Args:\n c_in: the number of features (aka variables, dimensions, channels) in the time series dataset.\n c_out: the number of target classes.\n seq_len: number of time steps in the time series.\n max_seq_len: useful to control the temporal resolution in long time series to avoid memory issues.\n d_model: total dimension of the model (number of features created by the model)\n n_heads: parallel attention heads.\n d_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\n d_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\n d_ff: the dimension of the feedforward network model.\n res_dropout: amount of residual dropout applied in the encoder.\n activation: the activation function of intermediate layer, relu or gelu.\n num_layers: the number of sub-encoder-layers in the encoder.\n fc_dropout: dropout applied to the final fully connected layer.\n y_range: range of possible y values (used in regression tasks).\n kwargs: nn.Conv1d kwargs. 
If not {}, a nn.Conv1d with those kwargs will be applied to original time series.\n Input shape:\n bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps)\n \"\"\"\n self.c_out, self.seq_len = c_out, seq_len\n\n # Input encoding\n q_len = seq_len\n self.new_q_len = False\n if max_seq_len is not None and seq_len > max_seq_len: # Control temporal resolution\n self.new_q_len = True\n q_len = max_seq_len\n tr_factor = math.ceil(seq_len / q_len)\n total_padding = (tr_factor * q_len - seq_len)\n padding = (total_padding // 2, total_padding - total_padding // 2)\n self.W_P = nn.Sequential(Pad1d(padding), Conv1d(c_in, d_model, kernel_size=tr_factor, stride=tr_factor))\n pv(f'temporal resolution modified: {seq_len} --> {q_len} time steps: kernel_size={tr_factor}, stride={tr_factor}, padding={padding}.\\n', verbose)\n elif kwargs:\n self.new_q_len = True\n t = torch.rand(1, 1, seq_len)\n q_len = nn.Conv1d(1, 1, **kwargs)(t).shape[-1]\n self.W_P = nn.Conv1d(c_in, d_model, **kwargs) # Eq 2\n pv(f'Conv1d with kwargs={kwargs} applied to input to create input encodings\\n', verbose)\n else:\n self.W_P = nn.Linear(c_in, d_model) # Eq 1: projection of feature vectors onto a d-dim vector space\n\n # Positional encoding\n W_pos = torch.normal(0, .1, (q_len, d_model), device=default_device())\n self.W_pos = nn.Parameter(W_pos, requires_grad=True)\n\n # Residual dropout\n self.res_dropout = nn.Dropout(res_dropout)\n\n # Encoder\n encoder_layer = TSTEncoderLayer(d_model, n_heads, d_k=d_k, d_v=d_v, d_ff=d_ff, res_dropout=res_dropout, activation=activation)\n self.encoder = TSTEncoder(encoder_layer, n_layers)\n self.flatten = Flatten()\n\n # Head\n self.head_nf = q_len * d_model\n self.head = self.create_head(self.head_nf, c_out, fc_dropout=fc_dropout, y_range=y_range)\n\n def create_head(self, nf, c_out, fc_dropout=0., y_range=None, **kwargs):\n layers = [nn.Dropout(fc_dropout)] if fc_dropout else []\n layers += [nn.Linear(nf, c_out)]\n if y_range: layers += [SigmoidRange(*y_range)]\n return nn.Sequential(*layers)\n\n\n def forward(self, x:Tensor, mask:Optional[Tensor]=None) -> Tensor: # x: [bs x nvars x q_len]\n\n # Input encoding\n if self.new_q_len: u = self.W_P(x).transpose(2,1) # Eq 2 # u: [bs x d_model x q_len] transposed to [bs x q_len x d_model]\n else: u = self.W_P(x.transpose(2,1)) # Eq 1 # u: [bs x q_len x d_model] transposed to [bs x q_len x d_model]\n\n # Positional encoding\n u = self.res_dropout(u + self.W_pos)\n\n # Encoder\n z = self.encoder(u) # z: [bs x q_len x d_model]\n if self.flatten is not None: z = self.flatten(z) # z: [bs x q_len * d_model]\n else: z = z.transpose(2,1).contiguous() # z: [bs x d_model x q_len]\n\n # Classification/ Regression head\n return self.head(z) # output: [bs x c_out]\n\n# Cell\nclass Sigmoid(nn.Module):\n '''\n sigmoid layer\n '''\n def __init__(self, low, high):\n super().__init__()\n self.high, self.low = high, low\n\n def forward(self, x):\n return torch.sigmoid(x)*(self.high-self.low)+self.low\n\n# Cell\nclass InceptionTimeSgmOld(nn.Module):\n '''\n add a sigmoid layer to InceptionTime to get the ouput in a certain range\n '''\n\n def __init__(self, n_in, n_out):\n super().__init__()\n nn.Sequential()\n self.inc = InceptionTime(n_in, n_out)\n self.low, self.high = -1., 1.\n\n def forward(self, x):\n return torch.sigmoid(self.inc(x)) * (self.high - self.low) + self.low\n\n\n# Cell\nclass InceptionTimeSgm(nn.Module):\n '''\n add a sigmoid layer to InceptionTime to get the ouput in a certain range\n '''\n\n def 
__init__(self, n_in, n_out, range=(-1,1)):\n super().__init__()\n self.mod = nn.Sequential(InceptionTime(n_in, n_out), SigmoidRange(*range))\n\n def forward(self, x):\n x = x.float()\n return self.mod(x)\n\n\n# Cell\nclass TransformerSgm(nn.Module):\n '''\n add a sigmoid layer to Transformer to get the ouput in a certain range\n '''\n\n def __init__(self, n_in, n_out, seq_len=10, range=(-1,1), **kwargs):\n super().__init__()\n self.mod = nn.Sequential(TST(n_in, n_out, seq_len, **kwargs), SigmoidRange(*range))\n\n def forward(self, x):\n x = x.float()\n return self.mod(x)\n\n\n# Cell\nclass TransformerSgmD(nn.Module):\n '''\n add a sigmoid layer to Transformer to get the ouput in a certain range\n discrete input channels\n '''\n\n def __init__(self, n_in, n_out, seq_len=10, range=(-1,1), **kwargs):\n super().__init__()\n self.mod = nn.Sequential(TST(n_in, n_out, seq_len, **kwargs), SigmoidRange(*range))\n\n def forward(self, xc, xd):\n xc, xd = TensorBase(xc), TensorBase(xd)\n x = torch.cat([xc.float(), xd.float()], dim=-2)\n x = x.float()\n return self.mod(x)\n\n# Cell\nclass InceptionTimeD(nn.Module):\n '''\n add a sigmoid layer to InceptionTime to get the ouput in a certain range\n '''\n\n def __init__(self, n_in, n_out):\n super().__init__()\n self.mod = nn.Sequential(InceptionTime(n_in, n_out), Sigmoid(-1., 1.))\n\n def forward(self, xc, xd):\n #cast to TensorBase for pytorch 1.7 compatibility\n xc, xd = TensorBase(xc), TensorBase(xd)\n x = torch.cat([xc.float(), xd.float()], dim=-2)\n x = x.float()\n# print(f'InceptionTimeSgm dtype {x.dtype}')\n return self.mod(x)\n\n# Cell\nclass InceptionTime_NH(nn.Module):\n '''inceptiontime, no final layer'''\n def __init__(self,c_in,c_out,bottleneck=32,ks=40,nb_filters=32,residual=True,depth=6):\n super().__init__()\n self.block = InceptionBlock(c_in,bottleneck=bottleneck,ks=ks,nb_filters=nb_filters,\n residual=residual,depth=depth)\n self.gap = nn.AdaptiveAvgPool1d(1)\n# self.fc = nn.Linear(nb_filters * 4, c_out)\n\n def forward(self, x):\n x = self.block(x)\n# print(x.shape)\n x = self.gap(x).squeeze(-1)\n# x = self.fc(x)\n return x\n\n# Cell\ndef _map_xs(xs, xs_mask):\n '''\n xs: i-tuple of tensors\n xs_mask: length j>=i mask\n xs_id: lenght j>=i string list of x identifiers\n '''\n assert np.array(xs_mask).sum()==len(xs)\n res = np.array([None]*len(xs_mask))\n res[np.where(xs_mask)[0]]=xs\n return res\n\n# Cell\nclass InceptionTimeD_Mixed(nn.Module):\n '''\n mixed model for timeseries and tabular data\n ts_mod: InceptionTime model without final fully connected lay\n tab_mod: MLP or TabNet, currently both cont and cat is required\n outputs are concatenated, then put through a fully connected layer, then sigmoid range\n '''\n\n def __init__(self, n_c, n_d, n_out, n_cont, emb_szs=None):\n super().__init__()\n self.n_c, self.n_d, self.n_cont, self.emb_szs = n_c, n_d, n_out, emb_szs\n assert n_c>0, 'at least one continuous channel required'\n self.ts_mod = InceptionTime_NH(n_c+n_d, n_out) #128\n self.sgm = Sigmoid(-1,1)\n# self.mod = nn.Sequential(InceptionTime(n_in, n_out), Sigmoid(-1., 1.))\n# self.tab_mod = nn.Sequential(nn.Linear(2,100), nn.ReLU(), nn.Linear(100,64))\n self.tab_mod = TabNetModel(emb_szs=emb_szs, n_cont=n_cont, out_sz=64)\n self.fc = nn.Linear(192,n_out)\n\n# def forward(self, xc, xd, xt, xcat=None):\n def forward(self, *xs):\n\n\n xs_mask = [self.n_c>0, self.n_d>0, self.n_cont>0, len(self.emb_szs)>0]\n# x_type_idxs = [i for i in range(4) if has_x[i]]\n xc,xd,xt,xcat = map_xs(xs, xs_mask)\n\n x_ts=xc.float()\n if xd is not 
None: x_ts = torch.cat([x_ts, xd.float()], dim=-2)\n\n# x_ts=torch.cat([xs[0].float(), xd.float()], dim=-2) if self.n_d>0 else x_ts\n\n\n# x = t\n# x = x.float()\n# print(f'InceptionTimeSgm dtype {x.dtype}')\n# print(self.ts_mod(x).shape, self.tab_mod(xt.float().squeeze(-2)).shape )\n xcat=xcat.long() if xcat is not None else None\n xt=xt.float() if xt is not None else None\n x_all = torch.cat([self.ts_mod(x_ts), self.tab_mod(xcat, xt)], dim=-1)\n return self.sgm(self.fc(x_all))\n\n# Cell\nclass InceptionTime_Mixed(nn.Module):\n '''\n mixed model for timeseries and tabular data\n ts_mod: InceptionTime model without final fully connected lay\n tab_mod: MLP or TabNet, currently both cont and cat is required\n outputs are concatenated, then put through a fully connected layer, no sigmoid for classification\n '''\n\n def __init__(self, n_c, n_d, n_out, n_cont, emb_szs=None):\n super().__init__()\n self.n_c, self.n_d, self.n_cont, self.emb_szs = n_c, n_d, n_out, emb_szs\n assert n_c>0, 'at least one continuous channel required'\n self.ts_mod = InceptionTime_NH(n_c+n_d, n_out) #128\n# self.mod = nn.Sequential(InceptionTime(n_in, n_out), Sigmoid(-1., 1.))\n# self.tab_mod = nn.Sequential(nn.Linear(2,100), nn.ReLU(), nn.Linear(100,64))\n self.tab_mod = TabNetModel(emb_szs=emb_szs, n_cont=n_cont, out_sz=64)\n self.fc = nn.Linear(192,n_out)\n\n# def forward(self, xc, xd, xt, xcat=None):\n def forward(self, *xs):\n\n\n xs_mask = [self.n_c>0, self.n_d>0, self.n_cont>0, len(self.emb_szs)>0]\n# x_type_idxs = [i for i in range(4) if has_x[i]]\n xc,xd,xt,xcat = map_xs(xs, xs_mask)\n\n x_ts=xc.float()\n if xd is not None: x_ts = torch.cat([x_ts, xd.float()], dim=-2)\n\n# x_ts=torch.cat([xs[0].float(), xd.float()], dim=-2) if self.n_d>0 else x_ts\n\n\n# x = t\n# x = x.float()\n# print(f'InceptionTimeSgm dtype {x.dtype}')\n# print(self.ts_mod(x).shape, self.tab_mod(xt.float().squeeze(-2)).shape )\n xcat=xcat.long() if xcat is not None else None\n xt=xt.float() if xt is not None else None\n x_all = torch.cat([self.ts_mod(x_ts), self.tab_mod(xcat, xt)], dim=-1)\n return self.fc(x_all)\n\n# Cell\nclass TabNetTT(nn.Module):\n '''\n convenience wrapper for pure TabNetModel models\n '''\n def __init__(self, emb_szs, n_cont, out_sz, **kwargs):\n super().__init__()\n self.tab = TabNetModel(emb_szs, n_cont, out_sz, **kwargs)\n\n def forward(self, xt, xcat):\n xcat=xcat.long() if xcat is not None else None\n xt=xt.float() if xt is not None else None\n return self.tab(xcat, xt)\n\n# Cell\nclass InceptionTimeVar(nn.Module):\n '''\n output mean and variance\n regression model, sigmoid for the mean output optional\n '''\n\n def __init__(self, n_in, n_out, meanrange=None):\n super().__init__()\n models = [InceptionTime(n_in, n_out+1)]\n if meanrange:\n self.sigmoid = Sigmoid(*meanrange)\n self.mod = nn.Sequential(*models)\n\n def forward(self, x):\n x = x.float()\n output = self.mod(x)\n ## enforce positivity of sigma^2\n ##output_sig_pos = tf.log(1 + tf.exp(output_sig)) + 1e-06\n# output[:,-1] = (output[:,-1].exp()+1).log_() + 1e-06\n output[:,-1] = F.softplus(output[:,-1].clone())\n\n if getattr(self, 'sigmoid', None): output[:,:-1] = self.sigmoid(output[:,:-1])\n return output\n\n\n# Cell\ndef nll_regression(preds, y_true, c=5):\n '''\n negative log likelihood loss for regression, both mu and sigma are predicted\n\n Simple and Scalable Predictive UncertaintyEstimation using Deep Ensembles\n Balaji Lakshminarayanan, Alexander Pritzel, Charles Blundell, DeepMind\n\n '''\n\n s1 = 0.5*preds[:,1].log()\n s2 = 
0.5*(yb.squeeze()-preds[:,0]).pow(2).div(preds[:,1])\n loss = (s1+s2).mean() + c\n return loss\n\n# Cell\ndef nll_leaky_loss(preds, y_true, c=5, alpha=0.5):\n '''\n leaky_loss with variance\n\n Simple and Scalable Predictive UncertaintyEstimation using Deep Ensembles\n Balaji Lakshminarayanan, Alexander Pritzel, Charles Blundell, DeepMind\n\n '''\n\n s1 = 0.5*preds[:,1].log()\n l1 = -F.leaky_relu(preds[:,0], alpha)*y_true.float().squeeze()\n s2 = 0.5*(l1.div(preds[:,1]+1)) ## +1 to prevent optimizing for variance, maybe just an artifical problem\n loss = (s1+s2).mean() + c\n return loss\n\n# Cell\ndef qd_loss(preds, y_true, alpha=0.4, l=0.01, s=0.01, add=False, slope=1.):\n '''\n qd loss implementation adapted for \"leaky loss problems\"\n preds: predictions for both lower and upper bounds\n alpha: confidence intervall parameter, different from alpha in leaky_loss\n s: smoothing factor for sigmoid\n l: agrangian controlling width vs coverage (default in the paper impl. is 0.01 which seems lowI)\n '''\n ll = lambda x: F.leaky_relu(x, negative_slope=slope)\n\n y_lower = preds[:,0].clone()\n y_upper = preds[:,1].clone() if not add else y_lower+preds[:,1]\n\n# if not add:\n# y_lower, y_upper = preds[:, 0].clone(), preds[:, 1].clone()\n# else:\n# y_lower, y_upper = preds[:, 0].clone(), preds[:,0].clone()+preds[:, 1].clone()\n# # hard counts, how many of the predictions have the right sign?\n khu = (torch.sign(y_upper*y_true) > 0).int()\n khl = (torch.sign(y_lower*y_true) > 0).int()\n\n# return preds.mean()\n # soft counts, sign step function replaced by a smoother sigmoid\n\n ksu = torch.sigmoid((ll(y_upper)*y_true)*s)\n ksl = torch.sigmoid((y_true*ll(y_lower))*s)\n kh,ks = khu*khl, ksu*ksl\n# print(kh)\n# print(kh.sum(), ks.sum())\n\n #mpiw: mean predicted interval width\n f = 1/(kh.sum()+1e-6)\n# print((y_upper-y_lower))\n mpiw = ((y_upper-y_lower)*kh).sum()*f\n\n #picp: predicted interval coverage probability\n picp_s = ks.mean()\n\n print(f'mpiw {mpiw}, pcip_soft: {picp_s}')\n s2 = l*preds.shape[0]/(alpha*(1-alpha))\n s3 = torch.max(torch.zeros(1, device=preds.device), picp_s).pow(2)\n loss_s = mpiw + l*preds.shape[0]/(alpha*(1-alpha)) * torch.max(torch.zeros(1, device=preds.device),\n picp_s).pow(2)\n return loss_s\n\n# Cell\nclass InceptionTimeBounds(nn.Module):\n '''\n use InceptionTimeVar implementation for bounds\n output[:, -1] is positive and y_upper corresponds to output[:,0]+output[:,1] --> loss\n '''\n\n def __init__(self, n_in, n_out, meanrange=None):\n super().__init__()\n models = [InceptionTime(n_in, n_out+1)]\n if meanrange:\n self.sigmoid = Sigmoid(*meanrange)\n self.mod = nn.Sequential(*models)\n\n def forward(self, x):\n x = x.float()\n output = self.mod(x)\n ## enforce positivity of sigma^2\n ##output_sig_pos = tf.log(1 + tf.exp(output_sig)) + 1e-06\n# output[:,-1] = (output[:,-1].exp()+1).log_() + 1e-06\n output[:,-1] = F.softplus(output[:,-1].clone()) ## autograd problems when not using clone, why???\n\n if getattr(self, 'sigmoid', None): output[:,:-1] = self.sigmoid(output[:,:-1])\n return output"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.sign",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.MaxPool1d",
"torch.nn.modules.transformer.TransformerEncoderLayer",
"torch.rand",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.Parameter",
"torch.sigmoid",
"torch.nn.modules.transformer.TransformerEncoder",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.functional.leaky_relu",
"torch.nn.Conv1d",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.GELU",
"torch.nn.LayerNorm",
"torch.matmul",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
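Illustrative aside (not part of either dataset record): the code field of the record above defines a Transformer-style time-series model whose MultiHeadAttention relies on a scaled dot-product attention step. The sketch below is a minimal, self-contained plain-PyTorch version of that step, written only to make the tensor shapes in the record's comments concrete; the function name, the demo sizes (bs, n_heads, q_len, d_k, d_v), and the __main__ demo are assumptions for illustration, not part of the record.

import torch
import torch.nn.functional as F

def scaled_dot_product_attention(q, k, v, mask=None):
    # q: [bs x n_heads x q_len x d_k]
    # k: [bs x n_heads x d_k x q_len] (keys passed pre-transposed, as in the record)
    # v: [bs x n_heads x q_len x d_v]
    scores = torch.matmul(q, k) / (q.size(-1) ** 0.5)   # similarity scores, scaled by sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask, -1e9)          # suppress masked positions before softmax
    attn = F.softmax(scores, dim=-1)                     # attention weights: [bs x n_heads x q_len x q_len]
    context = torch.matmul(attn, v)                      # weighted values:   [bs x n_heads x q_len x d_v]
    return context, attn

if __name__ == "__main__":
    bs, n_heads, q_len, d_k, d_v = 2, 4, 10, 8, 8        # demo sizes (assumed)
    q = torch.rand(bs, n_heads, q_len, d_k)
    k = torch.rand(bs, n_heads, d_k, q_len)
    v = torch.rand(bs, n_heads, q_len, d_v)
    context, attn = scaled_dot_product_attention(q, k, v)
    print(context.shape, attn.shape)                     # torch.Size([2, 4, 10, 8]) torch.Size([2, 4, 10, 10])

Passing the keys with their last two dimensions already swapped lets a single matmul produce the q_len x q_len score matrix, which mirrors the design choice in the record's MultiHeadAttention (its k_s is built with permute(0,2,3,1)).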
mehak-sachdeva/mgwr | [
"eae8ac3d61ecbbba60b180e9ed8bab074bfb3522"
] | [
"mgwr/gwr.py"
] | [
"# Main GWR classes\n\n__author__ = \"Taylor Oshan [email protected]\"\n\nimport copy\nimport numpy as np\nimport numpy.linalg as la\nfrom scipy.stats import t\nfrom scipy.special import factorial\nfrom itertools import combinations as combo\nfrom spglm.family import Gaussian, Binomial, Poisson\nfrom spglm.glm import GLM, GLMResults\nfrom spglm.iwls import iwls, _compute_betas_gwr\nfrom spglm.utils import cache_readonly\nfrom .diagnostics import get_AIC, get_AICc, get_BIC, corr\nfrom .kernels import *\nfrom .summary import *\nimport multiprocessing as mp\n\n\nclass GWR(GLM):\n \"\"\"\n Geographically weighted regression. Can currently estimate Gaussian,\n Poisson, and logistic models(built on a GLM framework). GWR object prepares\n model input. Fit method performs estimation and returns a GWRResults object.\n\n Parameters\n ----------\n coords : array-like\n n*2, collection of n sets of (x,y) coordinates of\n observatons; also used as calibration locations is\n 'points' is set to None\n\n y : array\n n*1, dependent variable\n\n X : array\n n*k, independent variable, exlcuding the constant\n\n bw : scalar\n bandwidth value consisting of either a distance or N\n nearest neighbors; user specified or obtained using\n Sel_BW\n\n family : family object\n underlying probability model; provides\n distribution-specific calculations\n\n offset : array\n n*1, the offset variable at the ith location. For Poisson model\n this term is often the size of the population at risk or\n the expected size of the outcome in spatial epidemiology\n Default is None where Ni becomes 1.0 for all locations;\n only for Poisson models\n\n sigma2_v1 : boolean\n specify form of corrected denominator of sigma squared to use for\n model diagnostics; Acceptable options are:\n\n 'True': n-tr(S) (defualt)\n 'False': n-2(tr(S)+tr(S'S))\n\n kernel : string\n type of kernel function used to weight observations;\n available options:\n 'gaussian'\n 'bisquare'\n 'exponential'\n\n fixed : boolean\n True for distance based kernel function and False for\n adaptive (nearest neighbor) kernel function (default)\n\n constant : boolean\n True to include intercept (default) in model and False to exclude\n intercept.\n\n spherical : boolean\n True for shperical coordinates (long-lat),\n False for projected coordinates (defalut).\n hat_matrix : boolean\n True to store full n by n hat matrix,\n False to not store full hat matrix to minimize memory footprint (defalut).\n\n Attributes\n ----------\n coords : array-like\n n*2, collection of n sets of (x,y) coordinates used for\n calibration locations\n\n y : array\n n*1, dependent variable\n\n X : array\n n*k, independent variable, exlcuding the constant\n\n bw : scalar\n bandwidth value consisting of either a distance or N\n nearest neighbors; user specified or obtained using\n Sel_BW\n\n family : family object\n underlying probability model; provides\n distribution-specific calculations\n\n offset : array\n n*1, the offset variable at the ith location. 
For Poisson model\n this term is often the size of the population at risk or\n the expected size of the outcome in spatial epidemiology\n Default is None where Ni becomes 1.0 for all locations\n\n sigma2_v1 : boolean\n specify form of corrected denominator of sigma squared to use for\n model diagnostics; Acceptable options are:\n\n 'True': n-tr(S) (defualt)\n 'False': n-2(tr(S)+tr(S'S))\n\n kernel : string\n type of kernel function used to weight observations;\n available options:\n 'gaussian'\n 'bisquare'\n 'exponential'\n\n fixed : boolean\n True for distance based kernel function and False for\n adaptive (nearest neighbor) kernel function (default)\n\n constant : boolean\n True to include intercept (default) in model and False to exclude\n intercept\n\n spherical : boolean\n True for shperical coordinates (long-lat),\n False for projected coordinates (defalut).\n \n hat_matrix : boolean\n True to store full n by n hat matrix,\n False to not store full hat matrix to minimize memory footprint (defalut).\n\n n : integer\n number of observations\n\n k : integer\n number of independent variables\n\n mean_y : float\n mean of y\n\n std_y : float\n standard deviation of y\n\n fit_params : dict\n parameters passed into fit method to define estimation\n routine\n\n points : array-like\n n*2, collection of n sets of (x,y) coordinates used for\n calibration locations instead of all observations;\n defaults to None unles specified in predict method\n\n P : array\n n*k, independent variables used to make prediction;\n exlcuding the constant; default to None unless specified\n in predict method\n\n exog_scale : scalar\n estimated scale using sampled locations; defualt is None\n unless specified in predict method\n\n exog_resid : array-like\n estimated residuals using sampled locations; defualt is None\n unless specified in predict method\n\n Examples\n --------\n #basic model calibration\n\n >>> import libpysal as ps\n >>> from mgwr.gwr import GWR\n >>> data = ps.io.open(ps.examples.get_path('GData_utm.csv'))\n >>> coords = list(zip(data.by_col('X'), data.by_col('Y')))\n >>> y = np.array(data.by_col('PctBach')).reshape((-1,1))\n >>> rural = np.array(data.by_col('PctRural')).reshape((-1,1))\n >>> pov = np.array(data.by_col('PctPov')).reshape((-1,1))\n >>> african_amer = np.array(data.by_col('PctBlack')).reshape((-1,1))\n >>> X = np.hstack([rural, pov, african_amer])\n >>> model = GWR(coords, y, X, bw=90.000, fixed=False, kernel='bisquare')\n >>> results = model.fit()\n >>> print(results.params.shape)\n (159, 4)\n\n #predict at unsampled locations\n\n >>> index = np.arange(len(y))\n >>> test = index[-10:]\n >>> X_test = X[test]\n >>> coords_test = np.array(coords)[test]\n >>> model = GWR(coords, y, X, bw=94, fixed=False, kernel='bisquare')\n >>> results = model.predict(coords_test, X_test)\n >>> print(results.params.shape)\n (10, 4)\n\n \"\"\"\n\n def __init__(self, coords, y, X, bw, family=Gaussian(), offset=None,\n sigma2_v1=True, kernel='bisquare', fixed=False, constant=True,\n spherical=False, hat_matrix=False):\n \"\"\"\n Initialize class\n \"\"\"\n GLM.__init__(self, y, X, family, constant=constant)\n self.constant = constant\n self.sigma2_v1 = sigma2_v1\n self.coords = np.array(coords)\n self.bw = bw\n self.kernel = kernel\n self.fixed = fixed\n if offset is None:\n self.offset = np.ones((self.n, 1))\n else:\n self.offset = offset * 1.0\n self.fit_params = {}\n\n self.points = None\n self.exog_scale = None\n self.exog_resid = None\n self.P = None\n self.spherical = spherical\n self.hat_matrix = 
hat_matrix\n\n def _build_wi(self, i, bw):\n\n try:\n wi = Kernel(i, self.coords, bw, fixed=self.fixed,\n function=self.kernel, points=self.points,\n spherical=self.spherical).kernel\n except BaseException:\n raise # TypeError('Unsupported kernel function ', kernel)\n\n return wi\n\n def _local_fit(self, i):\n \"\"\"\n Local fitting at location i.\n \"\"\"\n wi = self._build_wi(i, self.bw).reshape(-1, 1) #local spatial weights\n\n if isinstance(self.family, Gaussian):\n betas, inv_xtx_xt = _compute_betas_gwr(self.y, self.X, wi)\n predy = np.dot(self.X[i], betas)[0]\n resid = self.y[i] - predy\n influ = np.dot(self.X[i], inv_xtx_xt[:, i])\n w = 1\n\n elif isinstance(self.family, (Poisson, Binomial)):\n rslt = iwls(self.y, self.X, self.family, self.offset, None,\n self.fit_params['ini_params'], self.fit_params['tol'],\n self.fit_params['max_iter'], wi=wi)\n inv_xtx_xt = rslt[5]\n w = rslt[3][i][0]\n influ = np.dot(self.X[i], inv_xtx_xt[:, i]) * w\n predy = rslt[1][i]\n resid = self.y[i] - predy\n betas = rslt[0]\n\n if self.fit_params['lite']:\n return influ, resid, predy, betas.reshape(-1)\n else:\n Si = np.dot(self.X[i], inv_xtx_xt).reshape(-1)\n tr_STS_i = np.sum(Si * Si * w * w)\n CCT = np.diag(np.dot(inv_xtx_xt, inv_xtx_xt.T)).reshape(-1)\n if not self.hat_matrix:\n Si = None\n return influ, resid, predy, betas.reshape(-1), w, Si, tr_STS_i, CCT\n\n def fit(self, ini_params=None, tol=1.0e-5, max_iter=20, solve='iwls',\n lite=False, pool=None):\n \"\"\"\n Method that fits a model with a particular estimation routine.\n\n Parameters\n ----------\n\n ini_betas : array, optional\n k*1, initial coefficient values, including constant.\n Default is None, which calculates initial values during\n estimation.\n tol: float, optional\n Tolerence for estimation convergence.\n Default is 1.0e-5.\n max_iter : integer, optional\n Maximum number of iterations if convergence not\n achieved. Default is 20.\n solve : string, optional\n Technique to solve MLE equations.\n Default is 'iwls', meaning iteratively (\n re)weighted least squares.\n lite : bool, optional\n Whether to estimate a lightweight GWR that\n computes the minimum diagnostics needed for\n bandwidth selection (could speed up\n bandwidth selection for GWR) or to estimate\n a full GWR. 
Default is False.\n pool : A multiprocessing Pool object to enable parallel fitting; default is None.\n\n Returns\n -------\n :\n If lite=False, return a GWRResult\n instance; otherwise, return a GWRResultLite\n instance.\n\n \"\"\"\n self.fit_params['ini_params'] = ini_params\n self.fit_params['tol'] = tol\n self.fit_params['max_iter'] = max_iter\n self.fit_params['solve'] = solve\n self.fit_params['lite'] = lite\n\n if solve.lower() == 'iwls':\n\n if self.points is None:\n m = self.y.shape[0]\n else:\n m = self.points.shape[0]\n\n if pool:\n rslt = pool.map(self._local_fit,\n range(m)) #parallel using mp.Pool\n else:\n rslt = map(self._local_fit, range(m)) #sequential\n\n rslt_list = list(zip(*rslt))\n influ = np.array(rslt_list[0]).reshape(-1, 1)\n resid = np.array(rslt_list[1]).reshape(-1, 1)\n params = np.array(rslt_list[3])\n\n if lite:\n return GWRResultsLite(self, resid, influ, params)\n else:\n predy = np.array(rslt_list[2]).reshape(-1, 1)\n w = np.array(rslt_list[-4]).reshape(-1, 1)\n if self.hat_matrix:\n S = np.array(rslt_list[-3])\n else:\n S = None\n tr_STS = np.sum(np.array(rslt_list[-2]))\n CCT = np.array(rslt_list[-1])\n return GWRResults(self, params, predy, S, CCT, influ, tr_STS,\n w)\n\n def predict(self, points, P, exog_scale=None, exog_resid=None,\n fit_params={}):\n \"\"\"\n Method that predicts values of the dependent variable at un-sampled\n locations\n\n Parameters\n ----------\n points : array-like\n n*2, collection of n sets of (x,y) coordinates used for\n calibration prediction locations\n P : array\n n*k, independent variables used to make prediction;\n exlcuding the constant\n exog_scale : scalar\n estimated scale using sampled locations; defualt is None\n which estimates a model using points from \"coords\"\n exog_resid : array-like\n estimated residuals using sampled locations; defualt is None\n which estimates a model using points from \"coords\"; if\n given it must be n*1 where n is the length of coords\n fit_params : dict\n key-value pairs of parameters that will be passed into fit\n method to define estimation routine; see fit method for more details\n\n \"\"\"\n if (exog_scale is None) & (exog_resid is None):\n train_gwr = self.fit(**fit_params)\n self.exog_scale = train_gwr.scale\n self.exog_resid = train_gwr.resid_response\n elif (exog_scale is not None) & (exog_resid is not None):\n self.exog_scale = exog_scale\n self.exog_resid = exog_resid\n else:\n raise InputError('exog_scale and exog_resid must both either be'\n 'None or specified')\n self.points = points\n if self.constant:\n P = np.hstack([np.ones((len(P), 1)), P])\n self.P = P\n else:\n self.P = P\n gwr = self.fit(**fit_params)\n\n return gwr\n\n @cache_readonly\n def df_model(self):\n return None\n\n @cache_readonly\n def df_resid(self):\n return None\n\n\nclass GWRResults(GLMResults):\n \"\"\"\n Basic class including common properties for all GWR regression models\n\n Parameters\n ----------\n model : GWR object\n pointer to GWR object with estimation parameters\n\n params : array\n n*k, estimated coefficients\n\n predy : array\n n*1, predicted y values\n\n S : array\n n*n, hat matrix\n\n CCT : array\n n*k, scaled variance-covariance matrix\n\n w : array\n n*1, final weight used for iteratively re-weighted least\n sqaures; default is None\n\n Attributes\n ----------\n model : GWR Object\n points to GWR object for which parameters have been\n estimated\n\n params : array\n n*k, parameter estimates\n\n predy : array\n n*1, predicted value of y\n\n y : array\n n*1, dependent variable\n\n X : 
array\n n*k, independent variable, including constant\n\n family : family object\n underlying probability model; provides\n distribution-specific calculations\n\n n : integer\n number of observations\n\n k : integer\n number of independent variables\n\n df_model : integer\n model degrees of freedom\n\n df_resid : integer\n residual degrees of freedom\n\n offset : array\n n*1, the offset variable at the ith location.\n For Poisson model this term is often the size of\n the population at risk or the expected size of\n the outcome in spatial epidemiology; Default is\n None where Ni becomes 1.0 for all locations\n\n scale : float\n sigma squared used for subsequent computations\n\n w : array\n n*1, final weights from iteratively re-weighted least\n sqaures routine\n\n resid_response : array\n n*1, residuals of the repsonse\n\n resid_ss : scalar\n residual sum of sqaures\n\n W : array\n n*n; spatial weights for each observation from each\n calibration point\n\n S : array\n n*n, hat matrix\n\n CCT : array\n n*k, scaled variance-covariance matrix\n\n ENP : scalar\n effective number of paramters, which depends on\n sigma2\n\n tr_S : float\n trace of S (hat) matrix\n\n tr_STS : float\n trace of STS matrix\n\n y_bar : array\n n*1, weighted mean value of y\n\n TSS : array\n n*1, geographically weighted total sum of squares\n\n RSS : array\n n*1, geographically weighted residual sum of squares\n\n R2 : float\n R-squared for the entire model (1- RSS/TSS)\n \n adj_R2 : float\n adjusted R-squared for the entire model\n \n aic : float\n Akaike information criterion\n\n aicc : float\n corrected Akaike information criterion to account\n to account for model complexity (smaller\n bandwidths)\n\n bic : float\n Bayesian information criterio\n\n localR2 : array\n n*1, local R square\n\n sigma2 : float\n sigma squared (residual variance) that has been\n corrected to account for the ENP\n\n std_res : array\n n*1, standardised residuals\n\n bse : array\n n*k, standard errors of parameters (betas)\n\n influ : array\n n*1, leading diagonal of S matrix\n\n CooksD : array\n n*1, Cook's D\n\n tvalues : array\n n*k, local t-statistics\n\n adj_alpha : array\n 3*1, corrected alpha values to account for multiple\n hypothesis testing for the 90%, 95%, and 99% confidence\n levels; tvalues with an absolute value larger than the\n corrected alpha are considered statistically\n significant.\n\n deviance : array\n n*1, local model deviance for each calibration point\n\n resid_deviance : array\n n*1, local sum of residual deviance for each\n calibration point\n\n llf : scalar\n log-likelihood of the full model; see\n pysal.contrib.glm.family for damily-sepcific\n log-likelihoods\n\n pDev : float\n local percent of deviation accounted for; analogous to\n r-squared for GLM's\n \n D2 : float\n percent deviance explained for GLM, equivaleng to R2 for\n Gaussian.\n \n adj_D2 : float\n adjusted percent deviance explained, equivaleng to adjusted\n R2 for Gaussian.\n\n mu : array\n n*, flat one dimensional array of predicted mean\n response value from estimator\n\n fit_params : dict\n parameters passed into fit method to define estimation\n routine\n\n predictions : array\n p*1, predicted values generated by calling the GWR\n predict method to predict dependent variable at\n unsampled points ()\n \"\"\"\n\n def __init__(self, model, params, predy, S, CCT, influ, tr_STS=None,\n w=None):\n GLMResults.__init__(self, model, params, predy, w)\n self.offset = model.offset\n if w is not None:\n self.w = w\n self.predy = predy\n self.S = S\n 
self.tr_STS = tr_STS\n self.influ = influ\n self.CCT = self.cov_params(CCT, model.exog_scale)\n self._cache = {}\n\n @cache_readonly\n def W(self):\n W = np.array(\n [self.model._build_wi(i, self.model.bw) for i in range(self.n)])\n return W\n\n @cache_readonly\n def resid_ss(self):\n if self.model.points is not None:\n raise NotImplementedError('Not available for GWR prediction')\n else:\n u = self.resid_response.flatten()\n return np.dot(u, u.T)\n\n @cache_readonly\n def scale(self, scale=None):\n if isinstance(self.family, Gaussian):\n scale = self.sigma2\n else:\n scale = 1.0\n return scale\n\n def cov_params(self, cov, exog_scale=None):\n \"\"\"\n Returns scaled covariance parameters\n\n Parameters\n ----------\n cov : array\n estimated covariance parameters\n\n Returns\n -------\n Scaled covariance parameters\n\n \"\"\"\n if exog_scale is not None:\n return cov * exog_scale\n else:\n return cov * self.scale\n\n @cache_readonly\n def tr_S(self):\n \"\"\"\n trace of S (hat) matrix\n \"\"\"\n return np.sum(self.influ)\n\n @cache_readonly\n def ENP(self):\n \"\"\"\n effective number of parameters\n\n Defualts to tr(s) as defined in yu et. al (2018) Inference in\n Multiscale GWR\n\n but can alternatively be based on 2tr(s) - tr(STS)\n\n and the form depends on the specification of sigma2\n \"\"\"\n if self.model.sigma2_v1:\n return self.tr_S\n else:\n return 2 * self.tr_S - self.tr_STS\n\n @cache_readonly\n def y_bar(self):\n \"\"\"\n weighted mean of y\n \"\"\"\n if self.model.points is not None:\n n = len(self.model.points)\n else:\n n = self.n\n off = self.offset.reshape((-1, 1))\n arr_ybar = np.zeros(shape=(self.n, 1))\n for i in range(n):\n w_i = np.reshape(self.model._build_wi(i, self.model.bw), (-1, 1))\n sum_yw = np.sum(self.y.reshape((-1, 1)) * w_i)\n arr_ybar[i] = 1.0 * sum_yw / np.sum(w_i * off)\n return arr_ybar\n\n @cache_readonly\n def TSS(self):\n \"\"\"\n geographically weighted total sum of squares\n\n Methods: p215, (9.9)\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n\n \"\"\"\n if self.model.points is not None:\n n = len(self.model.points)\n else:\n n = self.n\n TSS = np.zeros(shape=(n, 1))\n for i in range(n):\n TSS[i] = np.sum(\n np.reshape(self.model._build_wi(i, self.model.bw),\n (-1, 1)) * (self.y.reshape(\n (-1, 1)) - self.y_bar[i])**2)\n return TSS\n\n @cache_readonly\n def RSS(self):\n \"\"\"\n geographically weighted residual sum of squares\n\n Methods: p215, (9.10)\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n \"\"\"\n if self.model.points is not None:\n n = len(self.model.points)\n resid = self.model.exog_resid.reshape((-1, 1))\n else:\n n = self.n\n resid = self.resid_response.reshape((-1, 1))\n RSS = np.zeros(shape=(n, 1))\n for i in range(n):\n RSS[i] = np.sum(\n np.reshape(self.model._build_wi(i, self.model.bw),\n (-1, 1)) * resid**2)\n return RSS\n\n @cache_readonly\n def localR2(self):\n \"\"\"\n local R square\n\n Methods: p215, (9.8)\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. 
(2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n \"\"\"\n if isinstance(self.family, Gaussian):\n return (self.TSS - self.RSS) / self.TSS\n else:\n raise NotImplementedError('Only applicable to Gaussian')\n\n @cache_readonly\n def sigma2(self):\n \"\"\"\n residual variance\n\n if sigma2_v1 is True: only use n-tr(S) in denominator\n\n Methods: p214, (9.6),\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n\n and as defined in Yu et. al. (2018) Inference in Multiscale GWR\n\n if sigma2_v1 is False (v1v2): use n-2(tr(S)+tr(S'S)) in denominator\n\n Methods: p55 (2.16)-(2.18)\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n\n \"\"\"\n if self.model.sigma2_v1:\n return (self.resid_ss / (self.n - self.tr_S))\n else:\n # could be changed to SWSTW - nothing to test against\n return self.resid_ss / (self.n - 2.0 * self.tr_S + self.tr_STS)\n\n @cache_readonly\n def std_res(self):\n \"\"\"\n standardized residuals\n\n Methods: p215, (9.7)\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n \"\"\"\n return self.resid_response.reshape(\n (-1, 1)) / (np.sqrt(self.scale * (1.0 - self.influ)))\n\n @cache_readonly\n def bse(self):\n \"\"\"\n standard errors of Betas\n\n Methods: p215, (2.15) and (2.21)\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n \"\"\"\n return np.sqrt(self.CCT)\n\n @cache_readonly\n def cooksD(self):\n \"\"\"\n Influence: leading diagonal of S Matrix\n\n Methods: p216, (9.11),\n Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).\n Geographically weighted regression: the analysis of spatially varying\n relationships.\n Note: in (9.11), p should be tr(S), that is, the effective number of parameters\n \"\"\"\n return self.std_res**2 * self.influ / (self.tr_S * (1.0 - self.influ))\n\n @cache_readonly\n def deviance(self):\n off = self.offset.reshape((-1, 1)).T\n y = self.y\n ybar = self.y_bar\n if isinstance(self.family, Gaussian):\n raise NotImplementedError(\n 'deviance not currently used for Gaussian')\n elif isinstance(self.family, Poisson):\n dev = np.sum(\n 2.0 * self.W * (y * np.log(y / (ybar * off)) -\n (y - ybar * off)), axis=1)\n elif isinstance(self.family, Binomial):\n dev = self.family.deviance(self.y, self.y_bar, self.W, axis=1)\n return dev.reshape((-1, 1))\n\n @cache_readonly\n def resid_deviance(self):\n if isinstance(self.family, Gaussian):\n raise NotImplementedError(\n 'deviance not currently used for Gaussian')\n else:\n off = self.offset.reshape((-1, 1)).T\n y = self.y\n ybar = self.y_bar\n global_dev_res = ((self.family.resid_dev(self.y, self.mu))**2)\n dev_res = np.repeat(global_dev_res.flatten(), self.n)\n dev_res = dev_res.reshape((self.n, self.n))\n dev_res = np.sum(dev_res * self.W.T, axis=0)\n return dev_res.reshape((-1, 1))\n\n @cache_readonly\n def pDev(self):\n \"\"\"\n Local percentage of deviance accounted for. Described in the GWR4\n manual. 
Equivalent to 1 - (deviance/null deviance)\n \"\"\"\n if isinstance(self.family, Gaussian):\n raise NotImplementedError('Not implemented for Gaussian')\n else:\n return 1.0 - (self.resid_deviance / self.deviance)\n\n @cache_readonly\n def adj_alpha(self):\n \"\"\"\n Corrected alpha (critical) values to account for multiple testing during hypothesis\n testing. Includes corrected value for 90% (.1), 95% (.05), and 99%\n (.01) confidence levels. Correction comes from:\n\n da Silva, A. R., & Fotheringham, A. S. (2015). The Multiple Testing Issue in\n Geographically Weighted Regression. Geographical Analysis.\n\n \"\"\"\n alpha = np.array([.1, .05, .001])\n pe = self.ENP\n p = self.k\n return (alpha * p) / pe\n\n def critical_tval(self, alpha=None):\n \"\"\"\n Utility function to derive the critial t-value based on given alpha\n that are needed for hypothesis testing\n\n Parameters\n ----------\n alpha : scalar\n critical value to determine which tvalues are\n associated with statistically significant parameter\n estimates. Default to None in which case the adjusted\n alpha value at the 95 percent CI is automatically\n used.\n\n Returns\n -------\n critical : scalar\n critical t-val based on alpha\n \"\"\"\n n = self.n\n if alpha is not None:\n alpha = np.abs(alpha) / 2.0\n critical = t.ppf(1 - alpha, n - 1)\n else:\n alpha = np.abs(self.adj_alpha[1]) / 2.0\n critical = t.ppf(1 - alpha, n - 1)\n return critical\n\n def filter_tvals(self, critical_t=None, alpha=None):\n \"\"\"\n Utility function to set tvalues with an absolute value smaller than the\n absolute value of the alpha (critical) value to 0. If critical_t\n is supplied than it is used directly to filter. If alpha is provided\n than the critical t value will be derived and used to filter. If neither\n are critical_t nor alpha are provided, an adjusted alpha at the 95\n percent CI will automatically be used to define the critical t-value and\n used to filter. 
If both critical_t and alpha are supplied then the alpha\n value will be ignored.\n\n Parameters\n ----------\n critical : scalar\n critical t-value to determine whether parameters are\n statistically significant\n\n alpha : scalar\n alpha value to determine which tvalues are\n associated with statistically significant parameter\n estimates\n\n Returns\n -------\n filtered : array\n n*k; new set of n tvalues for each of k variables\n where absolute tvalues less than the absolute value of\n alpha have been set to 0.\n \"\"\"\n n = self.n\n if critical_t is not None:\n critical = critical_t\n else:\n critical = self.critical_tval(alpha=alpha)\n\n subset = (self.tvalues < critical) & (self.tvalues > -1.0 * critical)\n tvalues = self.tvalues.copy()\n tvalues[subset] = 0\n return tvalues\n\n @cache_readonly\n def df_model(self):\n return self.n - self.tr_S\n\n @cache_readonly\n def df_resid(self):\n return self.n - 2.0 * self.tr_S + self.tr_STS\n\n @cache_readonly\n def normalized_cov_params(self):\n return None\n\n @cache_readonly\n def resid_pearson(self):\n return None\n\n @cache_readonly\n def resid_working(self):\n return None\n\n @cache_readonly\n def resid_anscombe(self):\n return None\n\n @cache_readonly\n def pearson_chi2(self):\n return None\n\n @cache_readonly\n def llnull(self):\n return None\n\n @cache_readonly\n def null_deviance(self):\n return self.family.deviance(self.y, self.null)\n\n @cache_readonly\n def global_deviance(self):\n deviance = np.sum(self.family.resid_dev(self.y, self.mu)**2)\n return deviance\n\n @cache_readonly\n def D2(self):\n \"\"\"\n Percentage of deviance explanied. Equivalent to 1 - (deviance/null deviance)\n \"\"\"\n D2 = 1.0 - (self.global_deviance / self.null_deviance)\n return D2\n\n @cache_readonly\n def R2(self):\n \"\"\"\n Global r-squared value for a Gaussian model.\n \"\"\"\n if isinstance(self.family, Gaussian):\n return self.D2\n else:\n raise NotImplementedError('R2 only for Gaussian')\n\n @cache_readonly\n def adj_D2(self):\n \"\"\"\n Adjusted percentage of deviance explanied.\n \"\"\"\n adj_D2 = 1 - (1 - self.D2) * (self.n - 1) / (self.n - self.ENP - 1)\n return adj_D2\n\n @cache_readonly\n def adj_R2(self):\n \"\"\"\n Adjusted global r-squared for a Gaussian model.\n \"\"\"\n if isinstance(self.family, Gaussian):\n return self.adj_D2\n else:\n raise NotImplementedError('adjusted R2 only for Gaussian')\n\n @cache_readonly\n def aic(self):\n return get_AIC(self)\n\n @cache_readonly\n def aicc(self):\n return get_AICc(self)\n\n @cache_readonly\n def bic(self):\n return get_BIC(self)\n\n @cache_readonly\n def pseudoR2(self):\n return None\n\n @cache_readonly\n def adj_pseudoR2(self):\n return None\n\n @cache_readonly\n def pvalues(self):\n return None\n\n @cache_readonly\n def conf_int(self):\n return None\n\n @cache_readonly\n def use_t(self):\n return None\n\n def local_collinearity(self):\n \"\"\"\n Computes several indicators of multicollinearity within a geographically\n weighted design matrix, including:\n\n local correlation coefficients (n, ((p**2) + p) / 2)\n local variance inflation factors (VIF) (n, p-1)\n local condition number (n, 1)\n local variance-decomposition proportions (n, p)\n\n Returns four arrays with the order and dimensions listed above where n\n is the number of locations used as calibrations points and p is the\n nubmer of explanatory variables. 
Local correlation coefficient and local\n VIF are not calculated for constant term.\n\n \"\"\"\n x = self.X\n w = self.W\n nvar = x.shape[1]\n nrow = len(w)\n if self.model.constant:\n ncor = (((nvar - 1)**2 + (nvar - 1)) / 2) - (nvar - 1)\n jk = list(combo(range(1, nvar), 2))\n else:\n ncor = (((nvar)**2 + (nvar)) / 2) - nvar\n jk = list(combo(range(nvar), 2))\n corr_mat = np.ndarray((nrow, int(ncor)))\n if self.model.constant:\n vifs_mat = np.ndarray((nrow, nvar - 1))\n else:\n vifs_mat = np.ndarray((nrow, nvar))\n vdp_idx = np.ndarray((nrow, nvar))\n vdp_pi = np.ndarray((nrow, nvar, nvar))\n\n for i in range(nrow):\n wi = self.model._build_wi(i, self.model.bw)\n sw = np.sum(wi)\n wi = wi / sw\n tag = 0\n\n for j, k in jk:\n corr_mat[i, tag] = corr(np.cov(x[:, j], x[:, k],\n aweights=wi))[0][1]\n tag = tag + 1\n\n if self.model.constant:\n corr_mati = corr(np.cov(x[:, 1:].T, aweights=wi))\n vifs_mat[i, ] = np.diag(\n np.linalg.solve(corr_mati, np.identity((nvar - 1))))\n\n else:\n corr_mati = corr(np.cov(x.T, aweights=wi))\n vifs_mat[i, ] = np.diag(\n np.linalg.solve(corr_mati, np.identity((nvar))))\n\n xw = x * wi.reshape((nrow, 1))\n sxw = np.sqrt(np.sum(xw**2, axis=0))\n sxw = np.transpose(xw.T / sxw.reshape((nvar, 1)))\n svdx = np.linalg.svd(sxw)\n vdp_idx[i, ] = svdx[1][0] / svdx[1]\n phi = np.dot(svdx[2].T, np.diag(1 / svdx[1]))\n phi = np.transpose(phi**2)\n pi_ij = phi / np.sum(phi, axis=0)\n vdp_pi[i, :, :] = pi_ij\n\n local_CN = vdp_idx[:, nvar - 1].reshape((-1, 1))\n VDP = vdp_pi[:, nvar - 1, :]\n\n return corr_mat, vifs_mat, local_CN, VDP\n\n def spatial_variability(self, selector, n_iters=1000, seed=None):\n \"\"\"\n Method to compute a Monte Carlo test of spatial variability for each\n estimated coefficient surface.\n\n WARNING: This test is very computationally demanding!\n\n Parameters\n ----------\n selector : sel_bw object\n should be the sel_bw object used to select a bandwidth\n for the gwr model that produced the surfaces that are\n being tested for spatial variation\n\n n_iters : int\n the number of Monte Carlo iterations to include for\n the tests of spatial variability.\n\n seed : int\n optional parameter to select a custom seed to ensure\n stochastic results are replicable. Default is none\n which automatically sets the seed to 5536\n\n Returns\n -------\n\n p values : list\n a list of psuedo p-values that correspond to the model\n parameter surfaces. 
Allows us to assess the\n probability of obtaining the observed spatial\n variation of a given surface by random chance.\n\n\n \"\"\"\n temp_sel = copy.deepcopy(selector)\n temp_gwr = copy.deepcopy(self.model)\n\n if seed is None:\n np.random.seed(5536)\n else:\n np.random.seed(seed)\n\n fit_params = temp_gwr.fit_params\n search_params = temp_sel.search_params\n kernel = temp_gwr.kernel\n fixed = temp_gwr.fixed\n\n if self.model.constant:\n X = self.X[:, 1:]\n else:\n X = self.X\n\n init_sd = np.std(self.params, axis=0)\n SDs = []\n\n for x in range(n_iters):\n temp_coords = np.random.permutation(self.model.coords)\n temp_sel.coords = temp_coords\n temp_bw = temp_sel.search(**search_params)\n temp_gwr.bw = temp_bw\n temp_gwr.coords = temp_coords\n temp_params = temp_gwr.fit(**fit_params).params\n temp_sd = np.std(temp_params, axis=0)\n SDs.append(temp_sd)\n\n p_vals = (np.sum(np.array(SDs) > init_sd, axis=0) / float(n_iters))\n return p_vals\n\n @cache_readonly\n def predictions(self):\n P = self.model.P\n if P is None:\n raise TypeError('predictions only avaialble if predict'\n 'method is previously called on GWR model')\n else:\n predictions = np.sum(P * self.params, axis=1).reshape((-1, 1))\n return predictions\n\n def summary(self):\n \"\"\"\n Print out GWR summary\n \"\"\"\n summary = summaryModel(self) + summaryGLM(self) + summaryGWR(self)\n print(summary)\n return\n\n\nclass GWRResultsLite(object):\n \"\"\"\n Lightweight GWR that computes the minimum diagnostics needed for bandwidth\n selection\n\n Parameters\n ----------\n model : GWR object\n pointer to GWR object with estimation parameters\n\n resid : array\n n*1, residuals of the repsonse\n\n influ : array\n n*1, leading diagonal of S matrix\n\n Attributes\n ----------\n tr_S : float\n trace of S (hat) matrix\n\n llf : scalar\n log-likelihood of the full model; see\n pysal.contrib.glm.family for damily-sepcific\n log-likelihoods\n\n mu : array\n n*, flat one dimensional array of predicted mean\n response value from estimator\n\n resid_ss : scalar\n residual sum of sqaures\n\n \"\"\"\n\n def __init__(self, model, resid, influ, params):\n self.y = model.y\n self.family = model.family\n self.n = model.n\n self.influ = influ\n self.resid_response = resid\n self.model = model\n self.params = params\n\n @cache_readonly\n def tr_S(self):\n return np.sum(self.influ)\n\n @cache_readonly\n def llf(self):\n return self.family.loglike(self.y, self.mu)\n\n @cache_readonly\n def mu(self):\n return self.y - self.resid_response\n\n @cache_readonly\n def predy(self):\n return self.y - self.resid_response\n\n @cache_readonly\n def resid_ss(self):\n u = self.resid_response.flatten()\n return np.dot(u, u.T)\n\n\nclass MGWR(GWR):\n \"\"\"\n Multiscale GWR estimation and inference.\n\n Parameters\n ----------\n coords : array-like\n n*2, collection of n sets of (x,y) coordinates of\n observatons; also used as calibration locations is\n 'points' is set to None\n\n y : array\n n*1, dependent variable\n\n X : array\n n*k, independent variable, exlcuding the constant\n\n selector : sel_bw object\n valid sel_bw object that has successfully called\n the \"search\" method. 
This parameter passes on\n information from GAM model estimation including optimal\n bandwidths.\n\n family : family object\n underlying probability model; provides\n distribution-specific calculations\n\n sigma2_v1 : boolean\n specify form of corrected denominator of sigma squared to use for\n model diagnostics; Acceptable options are:\n\n 'True': n-tr(S) (defualt)\n 'False': n-2(tr(S)+tr(S'S))\n\n kernel : string\n type of kernel function used to weight observations;\n available options:\n 'gaussian'\n 'bisquare'\n 'exponential'\n\n fixed : boolean\n True for distance based kernel function and False for\n adaptive (nearest neighbor) kernel function (default)\n\n constant : boolean\n True to include intercept (default) in model and False to exclude\n intercept.\n\n spherical : boolean\n True for shperical coordinates (long-lat),\n False for projected coordinates (defalut).\n hat_matrix : boolean\n True for computing and storing covariate-specific\n hat matrices R (n,n,k) and model hat matrix S (n,n).\n False (default) for computing MGWR inference on the fly.\n\n Attributes\n ----------\n coords : array-like\n n*2, collection of n sets of (x,y) coordinates of\n observatons; also used as calibration locations is\n 'points' is set to None\n\n y : array\n n*1, dependent variable\n\n X : array\n n*k, independent variable, exlcuding the constant\n\n selector : sel_bw object\n valid sel_bw object that has successfully called\n the \"search\" method. This parameter passes on\n information from GAM model estimation including optimal\n bandwidths.\n\n bw : array-like\n collection of bandwidth values consisting of either a distance or N\n nearest neighbors; user specified or obtained using\n Sel_BW with fb=True. Order of values should the same as\n the order of columns associated with X\n\n family : family object\n underlying probability model; provides\n distribution-specific calculations\n\n sigma2_v1 : boolean\n specify form of corrected denominator of sigma squared to use for\n model diagnostics; Acceptable options are:\n\n 'True': n-tr(S) (defualt)\n 'False': n-2(tr(S)+tr(S'S))\n\n kernel : string\n type of kernel function used to weight observations;\n available options:\n 'gaussian'\n 'bisquare'\n 'exponential'\n\n fixed : boolean\n True for distance based kernel function and False for\n adaptive (nearest neighbor) kernel function (default)\n\n constant : boolean\n True to include intercept (default) in model and False to exclude\n intercept.\n\n spherical : boolean\n True for shperical coordinates (long-lat),\n False for projected coordinates (defalut).\n\n n : integer\n number of observations\n\n k : integer\n number of independent variables\n\n mean_y : float\n mean of y\n\n std_y : float\n standard deviation of y\n\n fit_params : dict\n parameters passed into fit method to define estimation\n routine\n\n W : array-like\n list of n*n arrays, spatial weights matrices for weighting all\n observations from each calibration point: one for each\n covariate (k)\n\n Examples\n --------\n\n #basic model calibration\n\n >>> import libpysal as ps\n >>> from mgwr.gwr import MGWR\n >>> from mgwr.sel_bw import Sel_BW\n >>> data = ps.io.open(ps.examples.get_path('GData_utm.csv'))\n >>> coords = list(zip(data.by_col('X'), data.by_col('Y')))\n >>> y = np.array(data.by_col('PctBach')).reshape((-1,1))\n >>> rural = np.array(data.by_col('PctRural')).reshape((-1,1))\n >>> fb = np.array(data.by_col('PctFB')).reshape((-1,1))\n >>> african_amer = np.array(data.by_col('PctBlack')).reshape((-1,1))\n >>> X = 
np.hstack([fb, african_amer, rural])\n >>> X = (X - X.mean(axis=0)) / X.std(axis=0)\n >>> y = (y - y.mean(axis=0)) / y.std(axis=0)\n >>> selector = Sel_BW(coords, y, X, multi=True)\n >>> selector.search(multi_bw_min=[2])\n [92.0, 101.0, 136.0, 158.0]\n >>> model = MGWR(coords, y, X, selector, fixed=False, kernel='bisquare', sigma2_v1=True)\n >>> results = model.fit()\n >>> print(results.params.shape)\n (159, 4)\n\n \"\"\"\n\n def __init__(self, coords, y, X, selector, sigma2_v1=True,\n kernel='bisquare', fixed=False, constant=True,\n spherical=False, hat_matrix=False):\n \"\"\"\n Initialize class\n \"\"\"\n self.selector = selector\n self.bws = self.selector.bw[0] #final set of bandwidth\n self.bws_history = selector.bw[1] #bws history in backfitting\n self.bw_init = self.selector.bw_init #initialization bandiwdth\n self.family = Gaussian(\n ) # manually set since we only support Gassian MGWR for now\n GWR.__init__(self, coords, y, X, self.bw_init, family=self.family,\n sigma2_v1=sigma2_v1, kernel=kernel, fixed=fixed,\n constant=constant, spherical=spherical,\n hat_matrix=hat_matrix)\n self.selector = selector\n self.sigma2_v1 = sigma2_v1\n self.points = None\n self.P = None\n self.offset = None\n self.exog_resid = None\n self.exog_scale = None\n self_fit_params = None\n\n def _chunk_compute_R(self, chunk_id=0):\n \"\"\"\n Compute MGWR inference by chunks to reduce memory footprint.\n \"\"\"\n n = self.n\n k = self.k\n n_chunks = self.n_chunks\n chunk_size = int(np.ceil(float(n / n_chunks)))\n ENP_j = np.zeros(self.k)\n CCT = np.zeros((self.n, self.k))\n\n chunk_index = np.arange(n)[chunk_id * chunk_size:(chunk_id + 1) *\n chunk_size]\n init_pR = np.zeros((n, len(chunk_index)))\n init_pR[chunk_index, :] = np.eye(len(chunk_index))\n pR = np.zeros((n, len(chunk_index),\n k)) #partial R: n by chunk_size by k\n\n for i in range(n):\n wi = self._build_wi(i, self.bw_init).reshape(-1, 1)\n xT = (self.X * wi).T\n P = np.linalg.solve(xT.dot(self.X), xT).dot(init_pR).T\n pR[i, :, :] = P * self.X[i]\n\n err = init_pR - np.sum(pR, axis=2) #n by chunk_size\n\n for iter_i in range(self.bws_history.shape[0]):\n for j in range(k):\n pRj_old = pR[:, :, j] + err\n Xj = self.X[:, j]\n n_chunks_Aj = n_chunks\n chunk_size_Aj = int(np.ceil(float(n / n_chunks_Aj)))\n for chunk_Aj in range(n_chunks_Aj):\n chunk_index_Aj = np.arange(n)[chunk_Aj * chunk_size_Aj:(\n chunk_Aj + 1) * chunk_size_Aj]\n pAj = np.empty((len(chunk_index_Aj), n))\n for i in range(len(chunk_index_Aj)):\n index = chunk_index_Aj[i]\n wi = self._build_wi(index, self.bws_history[iter_i, j])\n xw = Xj * wi\n pAj[i, :] = Xj[index] / np.sum(xw * Xj) * xw\n pR[chunk_index_Aj, :, j] = pAj.dot(pRj_old)\n err = pRj_old - pR[:, :, j]\n\n for j in range(k):\n CCT[:, j] += ((pR[:, :, j] / self.X[:, j].reshape(-1, 1))**2).sum(\n axis=1)\n for i in range(len(chunk_index)):\n ENP_j += pR[chunk_index[i], i, :]\n\n if self.hat_matrix:\n return ENP_j, CCT, pR\n return ENP_j, CCT\n\n def fit(self, n_chunks=1, pool=None):\n \"\"\"\n Compute MGWR inference by chunk to reduce memory footprint.\n \n Parameters\n ----------\n\n n_chunks : integer, optional\n A number of chunks parameter to reduce memory usage. \n e.g. 
n_chunks=2 should reduce overall memory usage by 2.\n pool : A multiprocessing Pool object to enable parallel fitting; default is None.\n \n Returns\n -------\n : MGWRResults\n \"\"\"\n params = self.selector.params\n predy = np.sum(self.X * params, axis=1).reshape(-1, 1)\n\n try:\n from tqdm.autonotebook import tqdm #progress bar\n except ImportError:\n\n def tqdm(x, total=0,\n desc=''): #otherwise, just passthrough the range\n return x\n\n if pool:\n self.n_chunks = pool._processes * n_chunks\n rslt = tqdm(\n pool.imap(self._chunk_compute_R, range(self.n_chunks)),\n total=self.n_chunks, desc='Inference')\n else:\n self.n_chunks = n_chunks\n rslt = map(self._chunk_compute_R,\n tqdm(range(self.n_chunks), desc='Inference'))\n\n rslt_list = list(zip(*rslt))\n ENP_j = np.sum(np.array(rslt_list[0]), axis=0)\n CCT = np.sum(np.array(rslt_list[1]), axis=0)\n\n w = np.ones(self.n)\n if self.hat_matrix:\n R = np.hstack(rslt_list[2])\n else:\n R = None\n return MGWRResults(self, params, predy, CCT, ENP_j, w, R)\n\n def predict(self):\n '''\n Not implemented.\n '''\n raise NotImplementedError('N/A')\n\n\nclass MGWRResults(GWRResults):\n \"\"\"\n Class including common properties for a MGWR model.\n\n Parameters\n ----------\n model : MGWR object\n pointer to MGWR object with estimation parameters\n\n params : array\n n*k, estimated coefficients\n\n predy : array\n n*1, predicted y values\n\n S : array\n n*n, model hat matrix (if MGWR(hat_matrix=True))\n\n R : array\n n*n*k, covariate-specific hat matrices (if MGWR(hat_matrix=True))\n\n CCT : array\n n*k, scaled variance-covariance matrix\n\n w : array\n n*1, final weight used for iteratively re-weighted least\n sqaures; default is None\n\n Attributes\n ----------\n model : GWR Object\n points to GWR object for which parameters have been\n estimated\n\n params : array\n n*k, parameter estimates\n\n predy : array\n n*1, predicted value of y\n\n y : array\n n*1, dependent variable\n\n X : array\n n*k, independent variable, including constant\n\n family : family object\n underlying probability model; provides\n distribution-specific calculations\n\n n : integer\n number of observations\n\n k : integer\n number of independent variables\n\n df_model : integer\n model degrees of freedom\n\n df_resid : integer\n residual degrees of freedom\n\n scale : float\n sigma squared used for subsequent computations\n\n w : array\n n*1, final weights from iteratively re-weighted least\n sqaures routine\n\n resid_response : array\n n*1, residuals of the repsonse\n\n resid_ss : scalar\n residual sum of sqaures\n\n W : array-like\n list of n*n arrays, spatial weights matrices for weighting all\n observations from each calibration point: one for each\n covariate (k)\n\n S : array\n n*n, model hat matrix (if MGWR(hat_matrix=True))\n\n R : array\n n*n*k, covariate-specific hat matrices (if MGWR(hat_matrix=True))\n\n CCT : array\n n*k, scaled variance-covariance matrix\n\n ENP : scalar\n effective number of paramters, which depends on\n sigma2, for the entire model\n\n ENP_j : array-like\n effective number of paramters, which depends on\n sigma2, for each covariate in the model\n\n adj_alpha : array\n 3*1, corrected alpha values to account for multiple\n hypothesis testing for the 90%, 95%, and 99% confidence\n levels; tvalues with an absolute value larger than the\n corrected alpha are considered statistically\n significant.\n\n adj_alpha_j : array\n k*3, corrected alpha values to account for multiple\n hypothesis testing for the 90%, 95%, and 99% confidence\n levels; 
tvalues with an absolute value larger than the\n corrected alpha are considered statistically\n significant. A set of alpha calues is computed for\n each covariate in the model.\n\n tr_S : float\n trace of S (hat) matrix\n\n tr_STS : float\n trace of STS matrix\n\n R2 : float\n R-squared for the entire model (1- RSS/TSS)\n \n adj_R2 : float\n adjusted R-squared for the entire model\n\n aic : float\n Akaike information criterion\n\n aicc : float\n corrected Akaike information criterion to account\n to account for model complexity (smaller\n bandwidths)\n\n bic : float\n Bayesian information criterio\n\n sigma2 : float\n sigma squared (residual variance) that has been\n corrected to account for the ENP\n\n std_res : array\n n*1, standardised residuals\n\n bse : array\n n*k, standard errors of parameters (betas)\n\n influ : array\n n*1, leading diagonal of S matrix\n\n CooksD : array\n n*1, Cook's D\n\n tvalues : array\n n*k, local t-statistics\n\n llf : scalar\n log-likelihood of the full model; see\n pysal.contrib.glm.family for damily-sepcific\n log-likelihoods\n\n mu : array\n n*, flat one dimensional array of predicted mean\n response value from estimator\n\n \"\"\"\n\n def __init__(self, model, params, predy, CCT, ENP_j, w, R):\n \"\"\"\n Initialize class\n \"\"\"\n self.ENP_j = ENP_j\n self.R = R\n GWRResults.__init__(self, model, params, predy, None, CCT, None, w)\n if model.hat_matrix:\n self.S = np.sum(self.R, axis=2)\n self.predy = predy\n\n @cache_readonly\n def tr_S(self):\n return np.sum(self.ENP_j)\n\n @cache_readonly\n def W(self):\n Ws = []\n for bw_j in self.model.bws:\n W = np.array(\n [self.model._build_wi(i, bw_j) for i in range(self.n)])\n Ws.append(W)\n return Ws\n\n @cache_readonly\n def adj_alpha_j(self):\n \"\"\"\n Corrected alpha (critical) values to account for multiple testing during hypothesis\n testing. Includes corrected value for 90% (.1), 95% (.05), and 99%\n (.01) confidence levels. Correction comes from:\n\n da Silva, A. R., & Fotheringham, A. S. (2015). The Multiple Testing Issue in\n Geographically Weighted Regression. Geographical Analysis.\n\n \"\"\"\n alpha = np.array([.1, .05, .001])\n pe = np.array(self.ENP_j).reshape((-1, 1))\n p = 1.\n return (alpha * p) / pe\n\n def critical_tval(self, alpha=None):\n \"\"\"\n Utility function to derive the critial t-value based on given alpha\n that are needed for hypothesis testing\n\n Parameters\n ----------\n alpha : scalar\n critical value to determine which tvalues are\n associated with statistically significant parameter\n estimates. Default to None in which case the adjusted\n alpha value at the 95 percent CI is automatically\n used.\n\n Returns\n -------\n critical : scalar\n critical t-val based on alpha\n \"\"\"\n n = self.n\n if alpha is not None:\n alpha = np.abs(alpha) / 2.0\n critical = t.ppf(1 - alpha, n - 1)\n else:\n alpha = np.abs(self.adj_alpha_j[:, 1]) / 2.0\n critical = t.ppf(1 - alpha, n - 1)\n return critical\n\n def filter_tvals(self, critical_t=None, alpha=None):\n \"\"\"\n Utility function to set tvalues with an absolute value smaller than the\n absolute value of the alpha (critical) value to 0. If critical_t\n is supplied than it is used directly to filter. If alpha is provided\n than the critical t value will be derived and used to filter. If neither\n are critical_t nor alpha are provided, an adjusted alpha at the 95\n percent CI will automatically be used to define the critical t-value and\n used to filter. 
If both critical_t and alpha are supplied then the alpha\n value will be ignored.\n\n Parameters\n ----------\n critical : scalar\n critical t-value to determine whether parameters are\n statistically significant\n\n alpha : scalar\n alpha value to determine which tvalues are\n associated with statistically significant parameter\n estimates\n\n Returns\n -------\n filtered : array\n n*k; new set of n tvalues for each of k variables\n where absolute tvalues less than the absolute value of\n alpha have been set to 0.\n \"\"\"\n n = self.n\n if critical_t is not None:\n critical = np.array(critical_t)\n elif alpha is not None and critical_t is None:\n critical = self.critical_tval(alpha=alpha)\n elif alpha is None and critical_t is None:\n critical = self.critical_tval()\n\n subset = (self.tvalues < critical) & (self.tvalues > -1.0 * critical)\n tvalues = self.tvalues.copy()\n tvalues[subset] = 0\n return tvalues\n\n @cache_readonly\n def RSS(self):\n raise NotImplementedError(\n 'Not yet implemented for multiple bandwidths')\n\n @cache_readonly\n def TSS(self):\n raise NotImplementedError(\n 'Not yet implemented for multiple bandwidths')\n\n @cache_readonly\n def localR2(self):\n raise NotImplementedError(\n 'Not yet implemented for multiple bandwidths')\n\n @cache_readonly\n def y_bar(self):\n raise NotImplementedError(\n 'Not yet implemented for multiple bandwidths')\n\n @cache_readonly\n def predictions(self):\n raise NotImplementedError('Not yet implemented for MGWR')\n\n def local_collinearity(self):\n \"\"\"\n Computes several indicators of multicollinearity within a geographically\n weighted design matrix, including:\n\n local condition number (n, 1)\n local variance-decomposition proportions (n, p)\n\n Returns four arrays with the order and dimensions listed above where n\n is the number of locations used as calibrations points and p is the\n nubmer of explanatory variables\n\n \"\"\"\n x = self.X\n w = self.W\n nvar = x.shape[1]\n nrow = self.n\n vdp_idx = np.ndarray((nrow, nvar))\n vdp_pi = np.ndarray((nrow, nvar, nvar))\n\n for i in range(nrow):\n xw = np.zeros((x.shape))\n for j in range(nvar):\n wi = w[j][i]\n sw = np.sum(wi)\n wi = wi / sw\n xw[:, j] = x[:, j] * wi\n\n sxw = np.sqrt(np.sum(xw**2, axis=0))\n sxw = np.transpose(xw.T / sxw.reshape((nvar, 1)))\n svdx = np.linalg.svd(sxw)\n vdp_idx[i, ] = svdx[1][0] / svdx[1]\n\n phi = np.dot(svdx[2].T, np.diag(1 / svdx[1]))\n phi = np.transpose(phi**2)\n pi_ij = phi / np.sum(phi, axis=0)\n vdp_pi[i, :, :] = pi_ij\n\n local_CN = vdp_idx[:, nvar - 1].reshape((-1, 1))\n VDP = vdp_pi[:, nvar - 1, :]\n\n return local_CN, VDP\n\n def spatial_variability(self, selector, n_iters=1000, seed=None):\n \"\"\"\n Method to compute a Monte Carlo test of spatial variability for each\n estimated coefficient surface.\n\n WARNING: This test is very computationally demanding!\n\n Parameters\n ----------\n selector : sel_bw object\n should be the sel_bw object used to select a bandwidth\n for the gwr model that produced the surfaces that are\n being tested for spatial variation\n\n n_iters : int\n the number of Monte Carlo iterations to include for\n the tests of spatial variability.\n\n seed : int\n optional parameter to select a custom seed to ensure\n stochastic results are replicable. Default is none\n which automatically sets the seed to 5536\n\n Returns\n -------\n\n p values : list\n a list of psuedo p-values that correspond to the model\n parameter surfaces. 
Allows us to assess the\n probability of obtaining the observed spatial\n variation of a given surface by random chance.\n\n\n \"\"\"\n temp_sel = copy.deepcopy(selector)\n\n if seed is None:\n np.random.seed(5536)\n else:\n np.random.seed(seed)\n\n search_params = temp_sel.search_params\n\n if self.model.constant:\n X = self.X[:, 1:]\n else:\n X = self.X\n\n init_sd = np.std(self.params, axis=0)\n SDs = []\n\n for x in range(n_iters):\n temp_coords = np.random.permutation(self.model.coords)\n temp_sel.coords = temp_coords\n temp_sel.search(**search_params)\n temp_params = temp_sel.params\n temp_sd = np.std(temp_params, axis=0)\n SDs.append(temp_sd)\n\n p_vals = (np.sum(np.array(SDs) > init_sd, axis=0) / float(n_iters))\n return p_vals\n\n def summary(self):\n \"\"\"\n Print out MGWR summary\n \"\"\"\n summary = summaryModel(self) + summaryGLM(self) + summaryMGWR(self)\n print(summary)\n return\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.ndarray",
"numpy.hstack",
"numpy.linalg.svd",
"numpy.arange",
"numpy.std",
"numpy.zeros",
"numpy.log",
"numpy.cov",
"numpy.identity",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.random.seed",
"numpy.ones",
"scipy.stats.t.ppf",
"numpy.random.permutation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XiaoSanchez/autophase | [
"3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb"
] | [
"algos/rl/policies.py"
] | [
"# Code in this file is copied and adapted from\n# https://github.com/openai/evolution-strategies-starter.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nimport ray\nfrom ray.rllib.evaluation.sampler import _unbatch_tuple_actions\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.filter import get_filter\n\n\ndef rollout(policy, env, timestep_limit=None, add_noise=False):\n \"\"\"Do a rollout.\n\n If add_noise is True, the rollout will take noisy actions with\n noise drawn from that stream. Otherwise, no action noise will be added.\n \"\"\"\n env_timestep_limit = env.max_episode_steps\n timestep_limit = (env_timestep_limit if timestep_limit is None else min(\n timestep_limit, env_timestep_limit))\n rews = []\n t = 0\n observation = env.reset()\n for _ in range(timestep_limit or 999999):\n ac = policy.compute(observation, add_noise=add_noise)[0]\n observation, rew, done, _ = env.step(ac)\n rews.append(rew)\n t += 1\n if done:\n break\n rews = np.array(rews, dtype=np.float32)\n return rews, t\n\n\nclass GenericPolicy(object):\n def __init__(self, sess, action_space, obs_space, preprocessor,\n observation_filter, model_options, action_noise_std):\n self.sess = sess\n self.action_space = action_space\n self.action_noise_std = action_noise_std\n self.preprocessor = preprocessor\n self.observation_filter = get_filter(observation_filter,\n self.preprocessor.shape)\n self.inputs = tf.placeholder(tf.float32,\n [None] + list(self.preprocessor.shape))\n\n # Policy network.\n dist_class, dist_dim = ModelCatalog.get_action_dist(\n self.action_space, model_options, dist_type=\"deterministic\")\n model = ModelCatalog.get_model({\n \"obs\": self.inputs\n }, obs_space, dist_dim, model_options)\n dist = dist_class(model.outputs)\n self.sampler = dist.sample()\n\n self.variables = ray.experimental.TensorFlowVariables(\n model.outputs, self.sess)\n\n self.num_params = sum(\n np.prod(variable.shape.as_list())\n for _, variable in self.variables.variables.items())\n self.sess.run(tf.global_variables_initializer())\n\n def compute(self, observation, add_noise=False, update=True):\n observation = self.preprocessor.transform(observation)\n observation = self.observation_filter(observation[None], update=update)\n #observation = self.observation_filter(observation, update=update)\n action = self.sess.run(\n self.sampler, feed_dict={self.inputs: observation})\n action = _unbatch_tuple_actions(action)\n if add_noise and isinstance(self.action_space, gym.spaces.Box):\n action += np.random.randn(*action.shape) * self.action_noise_std\n return action\n\n def set_weights(self, x):\n self.variables.set_flat(x)\n\n def get_weights(self):\n return self.variables.get_flat()\n\n def get_filter(self):\n return self.observation_filter\n\n def set_filter(self, observation_filter):\n self.observation_filter = observation_filter\n"
] | [
[
"tensorflow.global_variables_initializer",
"numpy.array",
"numpy.random.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
koetjen/steinbock | [
"b0abcc16120f7a028167cc0a6f9b4f78d010844b"
] | [
"steinbock/preprocessing/imc.py"
] | [
"import logging\nimport numpy as np\nimport pandas as pd\nimport re\n\nfrom os import PathLike\nfrom pathlib import Path\nfrom scipy.ndimage import maximum_filter\nfrom typing import (\n Generator,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom steinbock import io\n\ntry:\n from readimc import MCDFile, TXTFile\n from readimc.data import Acquisition, AcquisitionBase\n\n imc_available = True\nexcept:\n imc_available = False\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef list_mcd_files(mcd_dir: Union[str, PathLike]) -> List[Path]:\n return sorted(Path(mcd_dir).rglob(\"*.mcd\"))\n\n\ndef list_txt_files(txt_dir: Union[str, PathLike]) -> List[Path]:\n return sorted(Path(txt_dir).rglob(\"*.txt\"))\n\n\ndef create_panel_from_imc_panel(\n imc_panel_file: Union[str, PathLike],\n imc_panel_channel_col: str = \"Metal Tag\",\n imc_panel_name_col: str = \"Target\",\n imc_panel_keep_col: str = \"full\",\n imc_panel_ilastik_col: str = \"ilastik\",\n) -> pd.DataFrame:\n imc_panel = pd.read_csv(\n imc_panel_file,\n sep=\",|;\",\n dtype={\n imc_panel_channel_col: pd.StringDtype(),\n imc_panel_name_col: pd.StringDtype(),\n imc_panel_keep_col: pd.BooleanDtype(),\n imc_panel_ilastik_col: pd.BooleanDtype(),\n },\n engine=\"python\",\n true_values=[\"1\"],\n false_values=[\"0\"],\n )\n for required_col in (imc_panel_channel_col, imc_panel_name_col):\n if required_col not in imc_panel:\n raise ValueError(f\"Missing '{required_col}' column in IMC panel\")\n for notnan_col in (\n imc_panel_channel_col,\n imc_panel_keep_col,\n imc_panel_ilastik_col,\n ):\n if notnan_col in imc_panel and imc_panel[notnan_col].isna().any():\n raise ValueError(f\"Missing values for '{notnan_col}' in IMC panel\")\n rename_columns = {\n imc_panel_channel_col: \"channel\",\n imc_panel_name_col: \"name\",\n imc_panel_keep_col: \"keep\",\n imc_panel_ilastik_col: \"ilastik\",\n }\n drop_columns = [\n panel_col\n for imc_panel_col, panel_col in rename_columns.items()\n if panel_col in imc_panel.columns and panel_col != imc_panel_col\n ]\n panel = imc_panel.drop(columns=drop_columns).rename(columns=rename_columns)\n for _, g in panel.groupby(\"channel\"):\n panel.loc[g.index, \"name\"] = \" / \".join(g[\"name\"].dropna().unique())\n if \"keep\" in panel:\n panel.loc[g.index, \"keep\"] = g[\"keep\"].any()\n if \"ilastik\" in panel:\n panel.loc[g.index, \"ilastik\"] = g[\"ilastik\"].any()\n panel = panel.groupby(panel[\"channel\"].values).aggregate(\"first\")\n panel = _clean_panel(panel) # ilastik column may be nullable uint8 now\n ilastik_mask = panel[\"ilastik\"].fillna(False).astype(bool)\n panel[\"ilastik\"] = pd.Series(dtype=pd.UInt8Dtype())\n panel.loc[ilastik_mask, \"ilastik\"] = range(1, ilastik_mask.sum() + 1)\n return panel\n\n\ndef create_panel_from_mcd_files(\n mcd_files: Sequence[Union[str, PathLike]]\n) -> pd.DataFrame:\n panels = []\n for mcd_file in mcd_files:\n with MCDFile(mcd_file) as f:\n for slide in f.slides:\n for acquisition in slide.acquisitions:\n panel = _create_panel_from_acquisition(acquisition)\n panels.append(panel)\n panel = pd.concat(panels, ignore_index=True, copy=False)\n return _clean_panel(panel)\n\n\ndef create_panel_from_txt_files(\n txt_files: Sequence[Union[str, PathLike]]\n) -> pd.DataFrame:\n panels = []\n for txt_file in txt_files:\n with TXTFile(txt_file) as f:\n panel = _create_panel_from_acquisition(f)\n panels.append(panel)\n panel = pd.concat(panels, ignore_index=True, copy=False)\n return _clean_panel(panel)\n\n\ndef filter_hot_pixels(img: np.ndarray, thres: float) -> 
np.ndarray:\n kernel = np.ones((1, 3, 3), dtype=bool)\n kernel[0, 1, 1] = False\n max_neighbor_img = maximum_filter(img, footprint=kernel, mode=\"mirror\")\n return np.where(img - max_neighbor_img > thres, max_neighbor_img, img)\n\n\ndef preprocess_image(\n img: np.ndarray, hpf: Optional[float] = None\n) -> np.ndarray:\n img = img.astype(np.float32)\n if hpf is not None:\n img = filter_hot_pixels(img, hpf)\n return io._to_dtype(img, io.img_dtype)\n\n\ndef try_preprocess_images_from_disk(\n mcd_files: Sequence[Union[str, PathLike]],\n txt_files: Sequence[Union[str, PathLike]],\n channel_names: Optional[Sequence[str]] = None,\n hpf: Optional[float] = None,\n) -> Generator[\n Tuple[Path, Optional[\"Acquisition\"], np.ndarray, Optional[Path], bool],\n None,\n None,\n]:\n unmatched_txt_files = list(txt_files)\n for mcd_file in mcd_files:\n try:\n with MCDFile(mcd_file) as f_mcd:\n for slide in f_mcd.slides:\n for acquisition in slide.acquisitions:\n matched_txt_file = _match_txt_file(\n mcd_file, acquisition, unmatched_txt_files\n )\n if matched_txt_file is not None:\n unmatched_txt_files.remove(matched_txt_file)\n channel_ind = None\n if channel_names is not None:\n channel_ind = _get_channel_indices(\n acquisition, channel_names\n )\n if isinstance(channel_ind, str):\n _logger.warning(\n f\"Channel {channel_ind} not found for \"\n f\"acquisition {acquisition.id} in file \"\n \"{mcd_file}; skipping acquisition\"\n )\n continue\n img = None\n recovered = False\n try:\n img = f_mcd.read_acquisition(acquisition)\n except IOError:\n _logger.warning(\n f\"Error reading acquisition {acquisition.id} \"\n f\"from file {mcd_file}\"\n )\n if matched_txt_file is not None:\n _logger.warning(\n f\"Restoring from file {matched_txt_file}\"\n )\n try:\n with TXTFile(matched_txt_file) as f_txt:\n img = f_txt.read_acquisition()\n if channel_names is not None:\n channel_ind = _get_channel_indices(\n f_txt, channel_names\n )\n if isinstance(channel_ind, str):\n _logger.warning(\n f\"Channel {channel_ind} \"\n \"not found in file \"\n f\"{matched_txt_file}; \"\n \"skipping acquisition\"\n )\n continue\n recovered = True\n except IOError:\n _logger.exception(\n \"Error reading file \"\n f\"{matched_txt_file}\"\n )\n if img is not None: # exceptions ...\n if channel_ind is not None:\n img = img[channel_ind, :, :]\n img = preprocess_image(img, hpf=hpf)\n yield (\n Path(mcd_file),\n acquisition,\n img,\n Path(matched_txt_file)\n if matched_txt_file is not None\n else None,\n recovered,\n )\n del img\n except:\n _logger.exception(f\"Error reading file {mcd_file}\")\n while len(unmatched_txt_files) > 0:\n txt_file = unmatched_txt_files.pop(0)\n try:\n channel_ind = None\n with TXTFile(txt_file) as f:\n if channel_names is not None:\n channel_ind = _get_channel_indices(f, channel_names)\n if isinstance(channel_ind, str):\n _logger.warning(\n f\"Channel {channel_ind} not found in file \"\n f\"{txt_file}; skipping acquisition\"\n )\n continue\n img = f.read_acquisition()\n if channel_ind is not None:\n img = img[channel_ind, :, :]\n img = preprocess_image(img, hpf=hpf)\n yield Path(txt_file), None, img, None, False\n del img\n except:\n _logger.exception(f\"Error reading file {txt_file}\")\n\n\ndef _create_panel_from_acquisition(\n acquisition: \"AcquisitionBase\",\n) -> pd.DataFrame:\n panel = pd.DataFrame(\n data={\n \"channel\": acquisition.channel_names,\n \"name\": acquisition.channel_labels,\n \"keep\": True,\n \"ilastik\": range(1, acquisition.num_channels + 1),\n \"deepcell\": np.nan,\n },\n )\n panel[\"channel\"] 
= panel[\"channel\"].astype(pd.StringDtype())\n panel[\"name\"] = panel[\"name\"].astype(pd.StringDtype())\n panel[\"keep\"] = panel[\"keep\"].astype(pd.BooleanDtype())\n panel[\"ilastik\"] = panel[\"ilastik\"].astype(pd.UInt8Dtype())\n panel[\"deepcell\"] = panel[\"deepcell\"].astype(pd.UInt8Dtype())\n panel.sort_values(\n \"channel\",\n key=lambda s: pd.to_numeric(s.str.replace(\"[^0-9]\", \"\", regex=True)),\n inplace=True,\n )\n return panel\n\n\ndef _clean_panel(panel: pd.DataFrame) -> pd.DataFrame:\n panel.sort_values(\n \"channel\",\n key=lambda s: pd.to_numeric(s.str.replace(\"[^0-9]\", \"\", regex=True)),\n inplace=True,\n )\n name_dupl_mask = panel[\"name\"].duplicated(keep=False)\n name_suffixes = panel.groupby(\"name\").cumcount().map(lambda i: f\" {i + 1}\")\n panel.loc[name_dupl_mask, \"name\"] += name_suffixes[name_dupl_mask]\n if \"keep\" not in panel:\n panel[\"keep\"] = pd.Series(True, dtype=pd.BooleanDtype())\n if \"ilastik\" not in panel:\n panel[\"ilastik\"] = pd.Series(dtype=pd.UInt8Dtype())\n panel.loc[panel[\"keep\"], \"ilastik\"] = range(1, panel[\"keep\"].sum() + 1)\n if \"deepcell\" not in panel:\n panel[\"deepcell\"] = pd.Series(dtype=pd.UInt8Dtype())\n next_column_index = 0\n for column in (\"channel\", \"name\", \"keep\", \"ilastik\", \"deepcell\"):\n if column in panel:\n column_data = panel[column]\n panel.drop(columns=[column], inplace=True)\n panel.insert(next_column_index, column, column_data)\n next_column_index += 1\n return panel\n\n\ndef _match_txt_file(\n mcd_file: Union[str, PathLike],\n acquisition: Acquisition,\n txt_files: Sequence[Union[str, PathLike]],\n) -> Union[str, PathLike, None]:\n txt_file_name_pattern = re.compile(\n rf\"{Path(mcd_file).stem}.*_0*{acquisition.id}.txt\"\n )\n filtered_txt_files = [\n txt_file\n for txt_file in txt_files\n if txt_file_name_pattern.match(Path(txt_file).name)\n ]\n if len(filtered_txt_files) == 1:\n return filtered_txt_files[0]\n return None\n\n\ndef _get_channel_indices(\n acquisition: AcquisitionBase, channel_names: Sequence[str]\n) -> Union[Sequence[int], str]:\n channel_indices = []\n for channel_name in channel_names:\n if channel_name not in acquisition.channel_names:\n return channel_name\n channel_indices.append(acquisition.channel_names.index(channel_name))\n return channel_indices\n"
] | [
[
"pandas.concat",
"pandas.StringDtype",
"numpy.ones",
"scipy.ndimage.maximum_filter",
"pandas.BooleanDtype",
"pandas.UInt8Dtype",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
abarcis/robot-framework | [
"a2cef7850784ae4c12b47fc7fb297f3772c2e2fe"
] | [
"robot_framework/position_feedback/px4.py"
] | [
"#! /usr/bin/env python\n\nfrom pyquaternion import Quaternion\nimport numpy as np\n\nfrom rclpy.node import Node\nfrom px4_msgs.msg import VehicleGlobalPosition\n\n\nclass PX4PositionFeedback:\n def __init__(self, system_state, time_delta):\n self.node = Node('position_feedback')\n self.system_state = system_state\n self.time_delta = time_delta\n self.poses = {ident: {'position': None, 'orientation': None}\n for ident in self.system_state.states.keys()}\n self.subscribers = [\n self.node.create_subscription(\n VehicleGlobalPosition,\n f'/d{ident}/VehicleGlobalPosition_PubSubTopic',\n self.set_new_pose_callback(ident),\n 1,\n )\n for ident in self.system_state.states.keys()\n ]\n\n def set_new_pose_callback(self, ident):\n def _set_new_pose_callback(msg):\n self.poses[ident]['position'] = np.array([\n msg.lat, msg.lon, 0\n ])\n # self.poses[ident]['orientation'] = Quaternion([\n # msg.pose.orientation.w, msg.pose.orientation.x,\n # msg.pose.orientation.y, msg.pose.orientation.z\n # ])\n # vec = self.poses[ident]['orientation'].rotate([1, 0, 0])\n # vec_xy = vec\n # vec_xy[2] = 0\n return _set_new_pose_callback\n\n def get_new_position(self, ident):\n return self.poses[ident]['position']\n\n def get_new_orientation(self, ident):\n return self.poses[ident]['orientation']\n\n def get_node(self):\n return self.node\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bb912/MATS-DRS | [
"6e1ae9ba3b865e321d6a2d100d29693b776e1d36"
] | [
"autograph/play/maze_nn_aut_adv.py"
] | [
"import math\nimport os\nimport signal\nimport sys\nfrom typing import Callable, Any, Tuple, List, Union, Optional\n\nimport ptan\nimport torch\nimport torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\nfrom torch import multiprocessing\nfrom torch.optim import Adam, SGD\n\nimport autograph.lib.envs.mazeenv\nfrom autograph.lib.automata import AutomatonSet\nfrom autograph.lib.envs.mazeenv import FuelMazeEnv, FuelMazeObservation\nfrom autograph.lib.envs.mazeenv import transform_coordinate\nfrom autograph.lib.envs.mineworldenv_adv import MineWorldEnv\nfrom autograph.lib.loss_functions import TakeSimilarActionsLossFunction, PPOLossFunction, \\\n AdvantageActorCriticLossFunction\nfrom autograph.lib.mcts_aut_adv import MCTSAut_adv, AutStats, ExponentialAnnealedAutStats, UCBAnnealedAutStats\nfrom autograph.lib.running import get_parallel_queue, RandomReplayTrainingLoop, run_episode_generic\nfrom autograph.lib.shaping import AutShapingWrapperAdv\nfrom autograph.lib.util import element_add\nfrom autograph.lib.util.checkpoint_manager import CheckpointManager, StateDictLoadHandler, CombinedLoadHandler, \\\n InitZeroLoadHandler, PickleLoadHandler, TransplantCheckpointManager\nfrom autograph.lib.util.trace_return_step import TraceStep, TraceReturnStep\nfrom autograph.net.curiosity.curiosity_optimizer import ModuleCuriosityOptimizer, NoopCuriosityOptimizer\nfrom autograph.net.maze_constructors import mazenet_v1, mazernd_v1, maze_obs_rewrite_creator\nfrom autograph.net.mine_constructors import minenet_v1, mine_obs_rewriter_creator, minernd_v1, mine_mazenet_v1\nfrom autograph.net.misc_constructors import gym_make, no_op_cur_make, basic_net, no_op_make\nimport random\n\nmath.sqrt(1) # So that the import isn't optimized away (very useful when setting conditional debug breakpoints)\n\nsys.modules[\"autograph.lib.mazeenv\"] = autograph.lib.envs.mazeenv # Fix broken pickle loading\n\n\ndef throwKeyInterr():\n raise KeyboardInterrupt()\n\ndef full_fuel(action, obs: FuelMazeObservation, rew, done, info):\n return obs.fuel_level == info[\"max_fuel\"]\n\n\ndef key(action, obs: FuelMazeObservation, rew, done, info):\n return len(obs.keys) == 0\n\n\ndef goal(action, obs: FuelMazeObservation, rew, done, info):\n corner = element_add(info[\"maze_shape\"], (-1, -1))\n trans_corner = transform_coordinate(corner)\n return obs.position == trans_corner\n\n\nclass MineInfoAutAP:\n def __init__(self, apname: str = None, ap_name: str = None):\n if not (apname or ap_name):\n raise ValueError(\"Did not provide ap_name to info aut\")\n self.name = apname or ap_name\n\n def __call__(self, action, obs, rew, done, info):\n return self.name in info[\"atomic_propositions\"]\n\n\nclass MineInventoryAP:\n def __init__(self, inventory_item, quantity):\n self.item = inventory_item\n self.quantity = quantity\n\n def __call__(self, action, obs, rew, done, info):\n return info[\"inventory\"][self.item] == self.quantity\n\n\nclass MineLocationAP:\n def __init__(self, location):\n self.location = tuple(location)\n\n def __call__(self, action, obs, rew, done, info):\n position, *_ = obs\n return position == self.location\n\n\noptimizers = {\n \"Adam\": Adam,\n \"SGD\": SGD\n}\n\naut_funcs = {\n \"full_fuel\": full_fuel,\n \"key\": key,\n \"goal\": goal,\n \"info_aut\": MineInfoAutAP,\n \"mine_inventory\": MineInventoryAP,\n \"mine_location\": MineLocationAP\n}\n\nenv_constructors = {\n \"minecraft\": MineWorldEnv.from_dict,\n \"maze\": FuelMazeEnv.from_dict,\n \"gym\": gym_make\n}\n\n\ndef no_op_rewriter(x):\n return 
torch.Tensor([0.0])\n\n\ntraining_nets = {\n \"mazenet_v1\": (mazenet_v1, maze_obs_rewrite_creator),\n \"minenet_v1\": (minenet_v1, mine_obs_rewriter_creator),\n \"mine_mazenet_v1\": (mine_mazenet_v1, mine_obs_rewriter_creator),\n \"basicnet\": (basic_net, lambda e: torch.Tensor),\n \"no-op\": (no_op_make, lambda e: no_op_rewriter)\n}\n\ncuriosity_nets = {\n \"mazernd_v1\": (mazernd_v1, maze_obs_rewrite_creator),\n \"minernd_v1\": (minernd_v1, mine_obs_rewriter_creator),\n \"no-op\": (no_op_cur_make, no_op_rewriter)\n}\n\nloss_funcs = {\n \"MCTS\": TakeSimilarActionsLossFunction,\n \"PPO\": PPOLossFunction,\n \"A2C\": AdvantageActorCriticLossFunction\n}\n\naut_transplant_anneals = {\n \"Exponential\": ExponentialAnnealedAutStats,\n \"UCB\": UCBAnnealedAutStats\n}\n\nif __name__ == '__main__':\n import argparse\n import json5 as json\n\n p = argparse.ArgumentParser()\n p.add_argument(\"config\")\n p.add_argument(\"--device\", default=(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"))\n p.add_argument(\"--log\")\n p.add_argument(\"--checkpoint\")\n p.add_argument(\"--run-name\")\n p.add_argument(\"--do-not-load-from-checkpoint\", dest=\"load_checkpoint\", action=\"store_false\")\n p.add_argument(\"--do-not-save-checkpoint\", dest=\"save_checkpoint\", action=\"store_false\")\n p.add_argument(\"--checkpoint-every\", default=1)\n p.add_argument(\"--workers\", default=8)\n p.add_argument(\"--post\", help=\"Add a postfix to the checkpoint and tensorboard names\")\n p.add_argument(\"--stop-after\", dest=\"stop_after\",\n help=\"Stop after roughly a certain number of steps have been reached\")\n\n args = vars(p.parse_args())\n\n run_name = args.get(\"run_name\")\n STOP_AFTER = args.get(\"stop_after\")\n if STOP_AFTER:\n STOP_AFTER = int(STOP_AFTER)\n\n\n def interpolate(text):\n if not text:\n return text\n\n if run_name and \"%s\" in text:\n return text % (run_name,)\n else:\n return text\n\n\n config_file = interpolate(args[\"config\"])\n\n postfix = \"\"\n\n if args.get(\"post\"):\n postfix = \"_\" + args[\"post\"]\n\n with open(config_file) as f:\n config = json.load(f)\n\n aut: dict = config[\"automaton\"]\n\n LTLF_SPEC = aut[\"spec\"]\n AUT_PARAM_NAMES = [param[\"name\"] for param in aut[\"params\"]]\n\n\n def get_func(param: dict):\n func_or_generator = aut_funcs[param[\"func\"]]\n func_params = param.get(\"params\")\n if func_params is None:\n return func_or_generator\n else:\n return func_or_generator(**func_params)\n\n\n AUT_PARAM_FUNCS = [get_func(p) for p in aut[\"params\"]]\n\n AUT_OTHER_PARAMS = {\n \"terminate_on_fail\": aut.get(\"terminate_on_fail\", True),\n \"termination_fail_reward\": aut.get(\"termination_fail_reward\", 0),\n \"terminate_on_accept\": aut.get(\"terminate_on_accept\", False),\n \"termination_accept_reward\": aut.get(\"termination_accept_reward\", 1)\n }\n\n AUT_STATS_PARAMS = aut.get(\"aut_stats_params\", dict())\n\n DISCOUNT = config[\"discount\"]\n\n if \"maze\" in config:\n maze = config[\"maze\"]\n\n config[\"env\"] = dict()\n config[\"env\"][\"type\"] = \"maze\"\n config[\"env\"][\"max_episode_len\"] = maze[\"max_episode_len\"]\n del maze[\"max_episode_len\"]\n config[\"env\"][\"params\"] = maze\n del config[\"maze\"]\n\n env = config[\"env\"]\n MAX_EPISODE_LEN = env[\"max_episode_len\"]\n MAX_LEN_REWARD = env.get(\"max_len_reward\")\n ENV_CONFIG = env[\"params\"]\n ENV_TYPE = env[\"type\"]\n\n # Policy training hyperparameters\n training: dict = config[\"training\"]\n\n LEARNING_RATE = training[\"learning_rate\"]\n REPLAY_BUFFER = 
training[\"replay_buffer\"]\n MIN_TRACE_TO_TRAIN = training[\"min_trace_to_train\"]\n PPO_TRAIN_ROUNDS = training[\"train_rounds\"]\n NETWORK = training.get(\"network\", \"mazenet_v1\")\n NETWORK_PARAMS = training.get(\"params\", dict())\n\n OPTIMIZER = optimizers[training.get(\"optimizer\")]\n OPTIMIZER_PARAMS = training.get(\"opt_params\", {})\n\n # Loss function\n loss: dict = config.get(\"loss\")\n if loss:\n LOSS_FUNC = loss[\"type\"]\n LOSS_PARAMS = loss.get(\"params\", dict())\n else:\n LOSS_FUNC = \"MCTS\"\n LOSS_PARAMS = dict()\n\n if config.get(\"mcts\"):\n config[\"episode_runner\"] = {\n \"type\": \"mcts_aut_episode\",\n \"params\": config.pop(\"mcts\")\n }\n\n # Policy runner parameters\n episode_runner = config[\"episode_runner\"]\n EPISODE_RUNNER_TYPE = episode_runner[\"type\"]\n EPISODE_RUNNER_PARAMS = episode_runner.get(\"params\", dict())\n\n # Curiosity Parameters\n curiosity: dict = config.get(\"curiosity\")\n\n if curiosity:\n if \"feature_space\" in curiosity:\n curiosity[\"type\"] = \"mazernd_v1\"\n curiosity[\"params\"] = {\"feature_space\": curiosity[\"feature_space\"]}\n del curiosity[\"feature_space\"]\n\n CURIOSITY_LEARNING_RATE = curiosity[\"learning_rate\"]\n CURIOSITY_NET = curiosity[\"type\"]\n CURIOSITY_PARAMS = curiosity.get(\"params\", dict())\n else:\n CURIOSITY_NET = None\n\n # Logging and checkpointing\n\n LOG_FOLDER = interpolate(args.get(\"log\")) + postfix\n CHECKPOINT_EVERY = int(args[\"checkpoint_every\"])\n CHECKPOINT_PATH = interpolate(args.get(\"checkpoint\")) + postfix\n\n \"\"\"\n There are two types of \"transplants\":\n 1. \"Old\" transplant, this just literally loads the state from the \"from\" checkpoint instead of creating the state\n from scratch\n 2. \"Regular\" transplant, this is only for the automaton statistics, and it anneals between the imported values and\n the values created during this run.\"\"\"\n transplant_config = config.get(\"transplant\")\n TRANSPLANT = False\n OLD_TRANSPLANT: Union[bool, List[str]] = False\n\n if transplant_config:\n TRANSPLANT_FROM = transplant_config[\"from\"]\n if transplant_config.get(\"fields\"):\n OLD_TRANSPLANT = transplant_config[\"fields\"]\n else:\n TRANSPLANT = True\n aut_transplant = transplant_config[\"automaton\"]\n ANNEAL_AUT_TRANSPLANT = aut_transplant[\"type\"]\n ANNEAL_AUT_TRANSPLANT_PARAMS = aut_transplant.get(\"params\", {})\n\n if CHECKPOINT_PATH:\n LOAD_FROM_CHECKPOINT = args[\"load_checkpoint\"]\n if not os.path.isfile(CHECKPOINT_PATH):\n LOAD_FROM_CHECKPOINT = False\n print(\"NOTE: no existing checkpoint found, will create new one if checkpoint saving is enabled.\")\n else:\n if OLD_TRANSPLANT:\n OLD_TRANSPLANT = False\n print(\"NOTE: Loading from checkpoint, so transplant disabled\")\n\n SAVE_CHECKPOINTS = args[\"save_checkpoint\"]\n else:\n CHECKPOINT_PATH = None\n LOAD_FROM_CHECKPOINT = False\n SAVE_CHECKPOINTS = False\n if not args[\"save_checkpoint\"]:\n print(\"WARNING: This run is not being checkpointed! 
Use --do-not-save-checkpoint to suppress.\")\n\n NUM_PROCESSES = int(args[\"workers\"])\n DEVICE = torch.device(args[\"device\"])\n\n\ndef run_mcts_aut_episode(net: torch.nn.Module, env: AutShapingWrapperAdv, max_length: int,\n max_len_reward: Union[int, None],\n curiosity: ModuleCuriosityOptimizer,\n device, c_puct, c_aut,\n num_batches, batch_size, stats: AutStats, train_state_rewriter: Callable[[Any], torch.Tensor],\n state_observer: Callable[[Any], None] = None, c_sigma=1, c_intrins=1, **kwargs) \\\n -> Tuple[List[TraceStep], float]:\n \"\"\"\n Run an episode using MCTS with curiosity as the action selection\n :param net: The policy/value network\n :param env: The environment to run the simulation in\n :param max_length: When to cut off the simulation\n :param curiosity: Something to calculate the relative \"newness\" of a state\n :param device: The device to run the simulation on\n :param c_puct: Puct constant of MCTS\n :param num_batches: How many groups of MCTS sims to run\n :param batch_size: How many MCTS sims per group\n :param state_observer: Function to call for every state seen\n :return: A trace and final value estimate\n \"\"\"\n\n def curiosity_evaluator(sars):\n states, actions, rewards, next_states, _ = zip(*sars)\n rewards = curiosity.get_curiosity(states, actions, next_states)\n return rewards.tolist()\n\n def curiosity_trainer(sars):\n states, actions, rewards, next_states, _ = zip(*sars)\n curiosity.train(states, actions, next_states, train_rounds=1)\n\n def state_evaluator(states):\n states_transformed = torch.stack(tuple(train_state_rewriter(s) for s in states))\n pols, vals = net(states_transformed.to(device))\n pollist = F.softmax(pols, dim=-1).tolist()\n vallist = vals.squeeze(-1).tolist()\n\n return list(zip(pollist, vallist))\n\n stats.synchronize()\n\n mcts = MCTSAut_adv(env.action_space.n, curiosity_evaluator, state_evaluator, curiosity_trainer, c_puct=c_puct,\n aut_stats=stats, c_aut=c_aut, c_sigma=c_sigma, c_intrins=c_intrins, **kwargs)\n\n def action_value_generator(state, step):\n mcts.mcts_batch(env, state, num_batches, batch_size)\n probs, values = mcts.get_policy_value(state, 1)\n return probs, max(values)\n\n return run_episode_generic(env, action_value_generator, max_length, max_len_reward,\n ptan.actions.ProbabilityActionSelector(),\n state_observer)\n\n\ndef run_aut_episode(net: torch.nn.Module, env: AutShapingWrapperAdv, max_length: int, max_len_reward: Optional[int],\n curiosity: ModuleCuriosityOptimizer, device,\n train_state_rewriter: Callable[[Any], torch.Tensor], stats: AutStats,\n state_observer: Callable[[Any], None] = None, render_every_frame=False) -> Tuple[\n List[TraceStep], float]:\n stats.synchronize()\n\n def action_value_generator(state, step):\n obs_tensor = train_state_rewriter(state).to(device)\n obs_batch = obs_tensor.unsqueeze(dim=0)\n probs, values = net(obs_batch)\n pols_soft = F.softmax(probs.double(), dim=-1).squeeze(0)\n pols_soft /= pols_soft.sum()\n pols_soft = pols_soft.tolist()\n val = values.squeeze(0).tolist()[0]\n if render_every_frame:\n env.render()\n\n return pols_soft, val\n\n # TODO curiosity and automaton bonuses\n return run_episode_generic(env, action_value_generator, max_length, max_len_reward,\n ptan.actions.EpsilonGreedyActionSelector(\n selector=ptan.actions.ProbabilityActionSelector(),\n epsilon=.1),\n state_observer)\n\n\nepisode_runners = {\n \"mcts_aut_episode\": run_mcts_aut_episode,\n \"aut_episode\": run_aut_episode\n}\n\n\ndef run():\n torch.multiprocessing.set_start_method(\"spawn\", 
force=True)\n signal.signal(signal.SIGHUP, throwKeyInterr)\n\n try:\n cman = CheckpointManager(CHECKPOINT_PATH, LOAD_FROM_CHECKPOINT, SAVE_CHECKPOINTS, device=DEVICE)\n except EOFError:\n cman = CheckpointManager(CHECKPOINT_PATH + \"_copy\", LOAD_FROM_CHECKPOINT, SAVE_CHECKPOINTS, device=DEVICE)\n\n if TRANSPLANT:\n cman = TransplantCheckpointManager(cman, TRANSPLANT_FROM)\n cman.transplant(\"aut\") # Generating the automaton may not be completely deterministic, we want the same states\n elif OLD_TRANSPLANT:\n cman = TransplantCheckpointManager(cman, TRANSPLANT_FROM)\n for field in OLD_TRANSPLANT:\n cman.transplant(field)\n\n aut = cman.load(\"aut\", AutomatonSet.from_ltlf(LTLF_SPEC, AUT_PARAM_NAMES), PickleLoadHandler())\n\n orig_env = env_constructors[ENV_TYPE](ENV_CONFIG)\n\n env = AutShapingWrapperAdv(orig_env, AUT_PARAM_FUNCS, aut, use_potential=False, **AUT_OTHER_PARAMS)\n\n action_space = env.action_space.n\n writer = SummaryWriter(LOG_FOLDER)\n\n train_net_creator, train_rewriter_creator = training_nets[NETWORK]\n\n net = cman.load(\"net\", train_net_creator(orig_env, **NETWORK_PARAMS),\n CombinedLoadHandler(StateDictLoadHandler(), InitZeroLoadHandler())).to(DEVICE)\n net.share_memory()\n\n if CURIOSITY_NET:\n curiosity_net_creator, curiosity_rewriter_creator = curiosity_nets[CURIOSITY_NET]\n\n icm = cman.load(\"icm\", curiosity_net_creator(orig_env, **CURIOSITY_PARAMS), StateDictLoadHandler()).to(\n DEVICE)\n icm.share_memory()\n\n icm_opt = cman.load(\"icm_opt\", ModuleCuriosityOptimizer(icm, curiosity_rewriter_creator(orig_env), action_space,\n CURIOSITY_LEARNING_RATE,\n DEVICE), StateDictLoadHandler())\n else:\n icm_opt = NoopCuriosityOptimizer()\n\n loss_func = loss_funcs[LOSS_FUNC](net=net, device=DEVICE, discount=DISCOUNT, **LOSS_PARAMS)\n\n optimizer = cman.load(\"opt\", OPTIMIZER(net.parameters(), lr=LEARNING_RATE, **OPTIMIZER_PARAMS),\n StateDictLoadHandler())\n\n train_rewriter = train_rewriter_creator(orig_env)\n train_loop = cman.load(\"train_loop\",\n RandomReplayTrainingLoop(DISCOUNT, REPLAY_BUFFER, MIN_TRACE_TO_TRAIN, PPO_TRAIN_ROUNDS,\n train_rewriter, writer, DEVICE),\n StateDictLoadHandler())\n\n aut_stats = cman.load(\"aut_stats\", AutStats(len(aut.graph.network), **AUT_STATS_PARAMS), StateDictLoadHandler())\n\n if TRANSPLANT:\n orig_alt_stats = cman.load_from_alt(\"aut_stats\", AutStats(len(aut.graph.network)), StateDictLoadHandler())\n wrapped_aut_stats = aut_transplant_anneals[ANNEAL_AUT_TRANSPLANT](orig_alt_stats, aut_stats,\n **ANNEAL_AUT_TRANSPLANT_PARAMS)\n wrapped_aut_stats.set_step(train_loop.num_rounds)\n train_loop.add_round_hook(wrapped_aut_stats.set_step)\n else:\n wrapped_aut_stats = aut_stats\n\n def aut_hook(trace: List[TraceReturnStep], final_value):\n # Only count each state once per run\n prev_edges = set()\n last_state = None\n for trst in trace: # TODO does this need to be reversed?\n this_state = frozenset(trst.info[\"automaton_states\"])\n\n if len(this_state) > 0:\n this_state = set(this_state).pop()\n else:\n this_state = None\n\n edge = (last_state, this_state)\n last_state = this_state\n if edge[0] is not None and edge[1] is not None:\n if edge not in prev_edges:\n aut_stats.visit(edge, trst.discounted_return)\n prev_edges.add(edge)\n\n train_loop.add_trace_hook(aut_hook)\n\n with get_parallel_queue(num_processes=NUM_PROCESSES, episode_runner=episode_runners[EPISODE_RUNNER_TYPE],\n net=net, env=env, max_length=MAX_EPISODE_LEN, max_len_reward=MAX_LEN_REWARD,\n curiosity=icm_opt, state_observer=None, device=DEVICE,\n 
stats=wrapped_aut_stats, train_state_rewriter=train_rewriter,\n **EPISODE_RUNNER_PARAMS) as sim_round_queue:\n\n # random.seed(798)\n\n while True:\n\n\n train_loop(sim_round_queue, loss_func, optimizer)\n\n if train_loop.num_rounds % CHECKPOINT_EVERY == 0:\n print(\"num_rounds=\", train_loop.num_rounds)\n\n save_dict = {\n \"net\": net,\n \"opt\": optimizer,\n \"train_loop\": train_loop,\n \"aut_stats\": aut_stats,\n \"aut\": aut,\n }\n\n if CURIOSITY_NET:\n save_dict.update({\n \"icm\": icm,\n \"icm_opt\": icm_opt\n })\n cman.save(save_dict)\n\n if STOP_AFTER and train_loop.global_step > STOP_AFTER:\n print(\"STOPPING: step limit \" + str(train_loop.global_step) + \"/\" + str(STOP_AFTER))\n break\n\n\nif __name__ == '__main__':\n multiprocessing.freeze_support()\n run()\n"
] | [
[
"torch.multiprocessing.set_start_method",
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.multiprocessing.freeze_support",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EricUrbineer/OpenAeroStruct | [
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec",
"26c37a0e86074517680405687824e27b3b2caaec"
] | [
"openaerostruct/aerodynamics/panel_forces.py",
"openaerostruct/structures/tests/test_vonmises_wingbox.py",
"openaerostruct/structures/tests/test_structural_cg.py",
"openaerostruct/aerodynamics/mtx_rhs.py",
"openaerostruct/tests/test_aero_analysis_Sref.py",
"openaerostruct/structures/tests/test_add_point_masses.py",
"openaerostruct/tests/test_v1_aerostruct_opt.py",
"openaerostruct/structures/fem.py",
"openaerostruct/aerodynamics/viscous_drag.py"
] | [
"from __future__ import print_function\nimport numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\nfrom openaerostruct.utils.vector_algebra import compute_cross, compute_cross_deriv1, compute_cross_deriv2\n\n\nclass PanelForces(ExplicitComponent):\n \"\"\"\n Compute the panel forces acting on all surfaces in the system.\n\n Parameters\n ----------\n rho : float\n Air density at the flight condition.\n horseshoe_circulations[system_size] : numpy array\n The equivalent horseshoe circulations obtained by intelligently summing\n the vortex ring circulations, accounting for overlaps between rings.\n bound_vecs[system_size, 3] : numpy array\n The vectors representing the bound vortices for each panel in the\n problem.\n This array contains points for all lifting surfaces in the problem.\n force_pts_velocities[system_size, 3] : numpy array\n The actual velocities experienced at the evaluation points for each\n lifting surface in the system. This is the summation of the freestream\n velocities and the induced velocities caused by the circulations.\n\n Returns\n -------\n panel_forces[system_size, 3] : numpy array\n All of the forces acting on all panels in the total system.\n \"\"\"\n\n def initialize(self):\n self.options.declare('surfaces', types=list)\n\n def setup(self):\n surfaces = self.options['surfaces']\n\n system_size = 0\n\n for surface in surfaces:\n mesh = surface['mesh']\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n\n system_size += (nx - 1) * (ny - 1)\n\n self.system_size = system_size\n\n self.add_input('rho', units='kg/m**3')\n self.add_input('horseshoe_circulations', shape=system_size, units='m**2/s')\n self.add_input('force_pts_velocities', shape=(system_size, 3), units='m/s')\n self.add_input('bound_vecs', shape=(system_size, 3), units='m')\n\n self.add_output('panel_forces', shape=(system_size, 3), units='N')\n\n # Set up all the sparse Jacobians\n self.declare_partials('panel_forces', 'rho',\n rows=np.arange(3 * system_size),\n cols=np.zeros(3 * system_size, int),\n )\n self.declare_partials('panel_forces', 'horseshoe_circulations',\n rows=np.arange(3 * system_size),\n cols=np.outer(np.arange(system_size), np.ones(3, int)).flatten(),\n )\n self.declare_partials('panel_forces', 'force_pts_velocities',\n rows=np.einsum('ij,k->ijk',\n np.arange(3 * system_size).reshape((system_size, 3)),\n np.ones(3, int),\n ).flatten(),\n cols=np.einsum('ik,j->ijk',\n np.arange(3 * system_size).reshape((system_size, 3)),\n np.ones(3, int),\n ).flatten(),\n )\n self.declare_partials('panel_forces', 'bound_vecs',\n rows=np.einsum('ij,k->ijk',\n np.arange(3 * system_size).reshape((system_size, 3)),\n np.ones(3, int),\n ).flatten(),\n cols=np.einsum('ik,j->ijk',\n np.arange(3 * system_size).reshape((system_size, 3)),\n np.ones(3, int),\n ).flatten(),\n )\n\n def compute(self, inputs, outputs):\n rho = inputs['rho'][0]\n horseshoe_circulations = np.outer(inputs['horseshoe_circulations'], np.ones(3))\n velocities = inputs['force_pts_velocities']\n bound_vecs = inputs['bound_vecs']\n\n # Actually compute the forces by taking the cross of velocities acting\n # at the force points with the bound vortex filament vector.\n outputs['panel_forces'] = \\\n rho * horseshoe_circulations * compute_cross(velocities, bound_vecs)\n\n def compute_partials(self, inputs, partials):\n rho = inputs['rho'][0]\n horseshoe_circulations = np.outer(inputs['horseshoe_circulations'], np.ones(3))\n velocities = inputs['force_pts_velocities']\n bound_vecs = inputs['bound_vecs']\n\n horseshoe_circulations_ones = 
np.einsum('i,jk->ijk', inputs['horseshoe_circulations'], np.ones((3, 3)))\n\n deriv_array = np.einsum('i,jk->ijk',\n np.ones(self.system_size),\n np.eye(3))\n\n partials['panel_forces', 'rho'] = \\\n (horseshoe_circulations * compute_cross(velocities, bound_vecs)).flatten()\n partials['panel_forces', 'horseshoe_circulations'] = \\\n (rho * compute_cross(velocities, bound_vecs)).flatten()\n partials['panel_forces', 'force_pts_velocities'] = \\\n (rho * horseshoe_circulations_ones * compute_cross_deriv1(deriv_array, bound_vecs)).flatten()\n partials['panel_forces', 'bound_vecs'] = \\\n (rho * horseshoe_circulations_ones * compute_cross_deriv2(velocities, deriv_array)).flatten()\n",
"import unittest\nimport numpy as np\n\nfrom openmdao.api import Group, IndepVarComp\nfrom openaerostruct.structures.vonmises_wingbox import VonMisesWingbox\nfrom openaerostruct.utils.testing import run_test, get_default_surfaces\n\nclass Test(unittest.TestCase):\n\n def test(self):\n surface = get_default_surfaces()[0]\n\n # turn down some of these properties, so the absolute deriv error isn't magnified\n surface['E'] = 7\n surface['G'] = 3\n surface['yield'] = .02\n\n surface['strength_factor_for_upper_skin'] = 1.0\n\n comp = VonMisesWingbox(surface=surface)\n\n group = Group()\n\n indep_var_comp = IndepVarComp()\n\n ny = surface['mesh'].shape[1]\n\n nodesval = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 2., 0.],\n [0., 3., 0.]])\n\n indep_var_comp.add_output('nodes', val=nodesval)\n indep_var_comp.add_output('disp', val=np.ones((ny, 6)))\n indep_var_comp.add_output('Qz', val=np.ones((ny - 1)))\n indep_var_comp.add_output('Iz', val=np.ones((ny - 1)))\n indep_var_comp.add_output('J', val=np.ones((ny - 1)))\n indep_var_comp.add_output('A_enc', val=np.ones((ny - 1)))\n indep_var_comp.add_output('spar_thickness', val=np.ones((ny - 1)))\n indep_var_comp.add_output('skin_thickness', val=np.ones((ny - 1)))\n indep_var_comp.add_output('htop', val=np.ones((ny - 1)))\n indep_var_comp.add_output('hbottom', val=np.ones((ny - 1)))\n indep_var_comp.add_output('hfront', val=np.ones((ny - 1)))\n indep_var_comp.add_output('hrear', val=np.ones((ny - 1)))\n\n group.add_subsystem('indep_var_comp', indep_var_comp, promotes=['*'])\n group.add_subsystem('vonmises_wingbox', comp, promotes=['*'])\n\n run_test(self, group, complex_flag=True, step=1e-8, atol=2e-5, compact_print=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import unittest\nimport numpy as np\n\nfrom openmdao.api import Group, IndepVarComp\nfrom openaerostruct.structures.structural_cg import StructuralCG\nfrom openaerostruct.utils.testing import run_test, get_default_surfaces\n\nnp.random.seed(1)\n\nclass Test(unittest.TestCase):\n\n def test(self):\n surface = get_default_surfaces()[0]\n\n group = Group()\n\n comp = StructuralCG(surface=surface)\n\n indep_var_comp = IndepVarComp()\n\n ny = surface['mesh'].shape[1]\n\n indep_var_comp.add_output('nodes', val=np.random.random((ny, 3)), units='m')\n indep_var_comp.add_output('structural_mass', val=1., units='kg')\n indep_var_comp.add_output('element_mass', val=np.ones((ny-1)), units='kg')\n\n group.add_subsystem('indep_var_comp', indep_var_comp, promotes=['*'])\n group.add_subsystem('structural_cg', comp, promotes=['*'])\n\n run_test(self, group, complex_flag=True, compact_print=False)\n\nif __name__ == '__main__':\n unittest.main()\n",
"from __future__ import print_function\nimport numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\n\nclass VLMMtxRHSComp(ExplicitComponent):\n \"\"\"\n Compute the total velocities at each of the evaluation points for every\n panel in the entire system. This is the sum of the freestream and induced\n velocities caused by the circulations.\n\n Parameters\n ----------\n freestream_velocities[system_size, 3] : numpy array\n The rotated freestream velocities at each evaluation point for all\n lifting surfaces. system_size is the sum of the count of all panels\n for all lifting surfaces.\n vel_mtx[num_eval_points, nx - 1, ny - 1, 3] : numpy array\n The AIC matrix for the all lifting surfaces representing the aircraft.\n This has some sparsity pattern, but it is more dense than the FEM matrix\n and the entries have a wide range of magnitudes. One exists for each\n combination of surface name and evaluation points name.\n normals[nx-1, ny-1, 3] : numpy array\n The normal vector for each panel, computed as the cross of the two\n diagonals from the mesh points.\n\n Returns\n -------\n mtx[system_size, system_size] : numpy array\n Final fully assembled AIC matrix that is used to solve for the\n circulations.\n rhs[system_size] : numpy array\n Right-hand side of the AIC linear system, constructed from the\n freestream velocities and panel normals.\n \"\"\"\n\n def initialize(self):\n self.options.declare('surfaces', types=list)\n\n def setup(self):\n surfaces = self.options['surfaces']\n\n system_size = 0\n\n # Loop through the surfaces to compute the total number of panels;\n # the system_size\n for surface in surfaces:\n mesh = surface['mesh']\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n system_size += (nx - 1) * (ny - 1)\n\n self.system_size = system_size\n\n self.add_input('freestream_velocities', shape=(system_size, 3), units='m/s')\n self.add_output('mtx', shape=(system_size, system_size), units='1/m')\n self.add_output('rhs', shape=system_size, units='m/s')\n\n # Set up indicies arrays for sparse Jacobians\n vel_indices = np.arange(system_size * 3).reshape((system_size, 3))\n mtx_indices = np.arange(system_size * system_size).reshape((system_size, system_size))\n rhs_indices = np.arange(system_size)\n\n self.declare_partials('rhs', 'freestream_velocities',\n rows=np.einsum('i,j->ij', rhs_indices, np.ones(3, int)).flatten(),\n cols=vel_indices.flatten()\n )\n\n ind_1 = 0\n ind_2 = 0\n\n # Loop through each surface to add inputs and set up derivatives.\n # We keep track of the surface's indices within the total system's\n # indices to access the matrix in the correct locations for the derivs.\n # This is because the AIC linear system has information for all surfaces\n # together.\n for surface in surfaces:\n mesh=surface['mesh']\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n name = surface['name']\n num = (nx - 1) * (ny - 1)\n\n ind_2 += num\n\n # Get the correct names for each vel_mtx and normals, then\n # add them to the component\n vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')\n normals_name = '{}_normals'.format(name)\n\n self.add_input(vel_mtx_name,\n shape=(system_size, nx - 1, ny - 1, 3), units='1/m')\n self.add_input(normals_name, shape=(nx - 1, ny - 1, 3))\n\n velocities_indices = np.arange(system_size * num * 3).reshape(\n (system_size, nx - 1, ny - 1, 3)\n )\n normals_indices = np.arange(num * 3).reshape((num, 3))\n\n # Declare each set of partials based on the indices, ind_1 and ind_2\n self.declare_partials('mtx', vel_mtx_name,\n rows=np.einsum('ij,k->ijk', 
mtx_indices[:, ind_1:ind_2], np.ones(3, int)).flatten(),\n cols=velocities_indices.flatten(),\n )\n self.declare_partials('mtx', normals_name,\n rows=np.einsum('ij,k->ijk', mtx_indices[ind_1:ind_2, :], np.ones(3, int)).flatten(),\n cols=np.einsum('ik,j->ijk', normals_indices, np.ones(system_size, int)).flatten(),\n )\n self.declare_partials('rhs', normals_name,\n rows=np.outer(rhs_indices[ind_1:ind_2], np.ones(3, int)).flatten(),\n cols=normals_indices.flatten(),\n )\n\n ind_1 += num\n\n self.mtx_n_n_3 = np.zeros((system_size, system_size, 3))\n self.normals_n_3 = np.zeros((system_size, 3))\n self.set_check_partial_options(wrt='*', method='fd', step=1e-5)\n\n def compute(self, inputs, outputs):\n surfaces = self.options['surfaces']\n\n system_size = self.system_size\n\n ind_1 = 0\n ind_2 = 0\n for surface in surfaces:\n nx = surface['mesh'].shape[0]\n ny = surface['mesh'].shape[1]\n name = surface['name']\n num = (nx - 1) * (ny - 1)\n\n ind_2 += num\n\n vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')\n normals_name = '{}_normals'.format(name)\n\n # Construct the full matrix and all of the lifting surfaces\n # together\n self.mtx_n_n_3[:, ind_1:ind_2, :] = inputs[vel_mtx_name].reshape((system_size, num, 3))\n self.normals_n_3[ind_1:ind_2, :] = inputs[normals_name].reshape((num, 3))\n\n ind_1 += num\n\n # Actually obtain the final matrix by multiplying through with the\n # normals. Also create the rhs based on v dot n.\n outputs['mtx'] = np.einsum('ijk,ik->ij', self.mtx_n_n_3, self.normals_n_3)\n outputs['rhs'] = -np.einsum('ij,ij->i', inputs['freestream_velocities'], self.normals_n_3)\n\n def compute_partials(self, inputs, partials):\n surfaces = self.options['surfaces']\n\n system_size = self.system_size\n\n ind_1 = 0\n ind_2 = 0\n for surface in surfaces:\n nx = surface['mesh'].shape[0]\n ny = surface['mesh'].shape[1]\n name = surface['name']\n num = (nx - 1) * (ny - 1)\n\n ind_2 += num\n\n vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')\n normals_name = '{}_normals'.format(name)\n\n partials['mtx', vel_mtx_name] = np.einsum('ijk,ik->ijk',\n np.ones((system_size, num, 3)),\n self.normals_n_3,\n ).flatten()\n\n partials['mtx', normals_name] = self.mtx_n_n_3[ind_1:ind_2, :, :].flatten()\n\n partials['rhs', normals_name] = -inputs['freestream_velocities'][ind_1:ind_2, :].flatten()\n\n ind_1 += num\n\n partials['rhs', 'freestream_velocities'] = -self.normals_n_3.flatten()\n",
"from __future__ import division, print_function\nfrom openmdao.utils.assert_utils import assert_rel_error\nimport unittest\nimport numpy as np\n\nfrom openaerostruct.geometry.utils import generate_mesh\nfrom openaerostruct.geometry.geometry_group import Geometry\nfrom openaerostruct.aerodynamics.aero_groups import AeroPoint\n\nfrom openmdao.api import IndepVarComp, Problem, Group, NewtonSolver, ScipyIterativeSolver, LinearBlockGS, NonlinearBlockGS, DirectSolver, LinearBlockGS, PetscKSP, ScipyOptimizeDriver, SqliteRecorder\n\n\nclass Test(unittest.TestCase):\n\n def test(self):\n\n # Create a dictionary to store options about the surface\n mesh_dict = {'num_y' : 7,\n 'num_x' : 3,\n 'wing_type' : 'CRM',\n 'symmetry' : True,\n 'num_twist_cp' : 5}\n\n mesh, twist_cp = generate_mesh(mesh_dict)\n\n surf_dict = {\n # Wing definition\n 'name' : 'wing', # name of the surface\n 'symmetry' : True, # if true, model one half of wing\n # reflected across the plane y = 0\n 'S_ref_type' : 'wetted', # how we compute the wing area,\n # can be 'wetted' or 'projected'\n 'fem_model_type' : 'tube',\n\n 'twist_cp' : twist_cp,\n 'mesh' : mesh,\n\n # Aerodynamic performance of the lifting surface at\n # an angle of attack of 0 (alpha=0).\n # These CL0 and CD0 values are added to the CL and CD\n # obtained from aerodynamic analysis of the surface to get\n # the total CL and CD.\n # These CL0 and CD0 values do not vary wrt alpha.\n 'CL0' : 0.0, # CL of the surface at alpha=0\n 'CD0' : 0.015, # CD of the surface at alpha=0\n\n # Airfoil properties for viscous drag calculation\n 'k_lam' : 0.05, # percentage of chord with laminar\n # flow, used for viscous drag\n 't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)\n 'c_max_t' : .303, # chordwise location of maximum (NACA0015)\n # thickness\n 'with_viscous' : True, # if true, compute viscous drag\n 'with_wave' : False, # if true, compute wave drag\n }\n\n surfaces = [surf_dict]\n\n # Create the problem and the model group\n prob = Problem()\n\n indep_var_comp = IndepVarComp()\n indep_var_comp.add_output('v', val=248.136, units='m/s')\n indep_var_comp.add_output('alpha', val=5., units='deg')\n indep_var_comp.add_output('Mach_number', val=0.84)\n indep_var_comp.add_output('re', val=1.e6, units='1/m')\n indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')\n indep_var_comp.add_output('cg', val=np.zeros((3)), units='m')\n indep_var_comp.add_output('S_ref_total', val=150.0, units='m**2')\n\n prob.model.add_subsystem('prob_vars',\n indep_var_comp,\n promotes=['*'])\n\n # Loop over each surface in the surfaces list\n for surface in surfaces:\n\n geom_group = Geometry(surface=surface)\n\n # Add tmp_group to the problem as the name of the surface.\n # Note that is a group and performance group for each\n # individual surface.\n prob.model.add_subsystem(surface['name'], geom_group)\n\n # Loop through and add a certain number of aero points\n for i in range(1):\n\n # Create the aero point group and add it to the model\n aero_group = AeroPoint(surfaces=surfaces, user_specified_Sref=True)\n point_name = 'aero_point_{}'.format(i)\n prob.model.add_subsystem(point_name, aero_group)\n\n # Connect flow properties to the analysis point\n prob.model.connect('v', point_name + '.v')\n prob.model.connect('alpha', point_name + '.alpha')\n prob.model.connect('Mach_number', point_name + '.Mach_number')\n prob.model.connect('re', point_name + '.re')\n prob.model.connect('rho', point_name + '.rho')\n prob.model.connect('cg', point_name + '.cg')\n 
prob.model.connect('S_ref_total', point_name + '.S_ref_total')\n\n # Connect the parameters within the model for each aero point\n for surface in surfaces:\n\n name = surface['name']\n\n # Connect the mesh from the geometry component to the analysis point\n prob.model.connect(name + '.mesh', point_name + '.' + name + '.def_mesh')\n\n # Perform the connections with the modified names within the\n # 'aero_states' group.\n prob.model.connect(name + '.mesh', point_name + '.aero_states.' + name + '_def_mesh')\n\n prob.model.connect(name + '.t_over_c', point_name + '.' + name + '_perf.' + 't_over_c')\n\n # recorder = SqliteRecorder(\"aero_analysis.db\")\n # prob.driver.add_recorder(recorder)\n # prob.driver.recording_options['record_derivatives'] = True\n # prob.driver.recording_options['includes'] = ['*']\n\n # Set up the problem\n prob.setup()\n\n # from openmdao.api import view_model\n # view_model(prob)\n\n prob.run_driver()\n\n assert_rel_error(self, prob['aero_point_0.CD'][0], 0.10534816690971655, 1e-6)\n assert_rel_error(self, prob['aero_point_0.CL'][0], 1.4158238516533308, 1e-6)\n assert_rel_error(self, prob['aero_point_0.CM'][1], -4.806188698195504, 1e-6)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import unittest\nimport numpy as np\n\nfrom openmdao.utils.assert_utils import assert_rel_error\nfrom openmdao.api import Group, IndepVarComp\nfrom openaerostruct.structures.compute_point_mass_loads import ComputePointMassLoads\nfrom openaerostruct.utils.testing import run_test, get_default_surfaces\n\n\nderivs_added = False\n\nclass Test(unittest.TestCase):\n\n @unittest.skipUnless(derivs_added, \"Analytic derivs not added yet\")\n def test_derivs(self):\n surface = get_default_surfaces()[0]\n\n surface['n_point_masses'] = 2\n\n comp = ComputePointMassLoads(surface=surface)\n\n group = Group()\n\n indep_var_comp = IndepVarComp()\n\n ny = surface['mesh'].shape[1]\n\n nodesval = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 2., 0.],\n [0., 3., 0.]])\n\n point_masses = np.array([[2., 1.]])\n\n point_mass_locations = np.array([[2.1, 0.1, 0.2],\n [3.2, 1.2, 0.3]])\n\n indep_var_comp.add_output('nodes', val=nodesval, units='m')\n indep_var_comp.add_output('point_masses', val=point_masses, units='kg')\n indep_var_comp.add_output('point_mass_locations', val=point_mass_locations, units='m')\n\n group.add_subsystem('indep_var_comp', indep_var_comp, promotes=['*'])\n group.add_subsystem('compute_point_mass_loads', comp, promotes=['*'])\n\n prob = run_test(self, group, complex_flag=True, step=1e-8, atol=1e-5, compact_print=True)\n\n @unittest.skipUnless(derivs_added, \"Analytic derivs not added yet\")\n def test_simple_values(self):\n surface = get_default_surfaces()[0]\n\n surface['n_point_masses'] = 1\n\n comp = ComputePointMassLoads(surface=surface)\n\n group = Group()\n\n indep_var_comp = IndepVarComp()\n\n ny = surface['mesh'].shape[1]\n\n nodesval = np.array([[0., 0., 0.],\n [0., 1., 0.],\n [0., 2., 0.],\n [0., 3., 0.]])\n\n point_masses = np.array([[1/9.8]])\n\n point_mass_locations = np.array([[.55012, 0.1, 0.]])\n\n indep_var_comp.add_output('nodes', val=nodesval, units='m')\n indep_var_comp.add_output('point_masses', val=point_masses, units='kg')\n indep_var_comp.add_output('point_mass_locations', val=point_mass_locations, units='m')\n\n group.add_subsystem('indep_var_comp', indep_var_comp, promotes=['*'])\n group.add_subsystem('compute_point_mass_loads', comp, promotes=['*'])\n\n prob = run_test(self, group, complex_flag=True, step=1e-8, atol=1e-5, compact_print=True)\n\n truth_array = np.array([0, 0, -1., 0., 0.55012, 0.])\n\n assert_rel_error(self, prob['comp.loads_from_point_masses'][0, :], truth_array, 1e-6)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"from __future__ import division, print_function\nfrom openmdao.utils.assert_utils import assert_rel_error\nimport unittest\nimport numpy as np\n\nfrom openaerostruct.geometry.utils import generate_mesh\n\nfrom openaerostruct.integration.aerostruct_groups import AerostructGeometry, AerostructPoint\n\nfrom openmdao.api import IndepVarComp, Problem, Group, NewtonSolver, ScipyIterativeSolver, LinearBlockGS, NonlinearBlockGS, DirectSolver, LinearBlockGS, PetscKSP, ScipyOptimizeDriver\nfrom openaerostruct.utils.constants import grav_constant\n\n\nclass Test(unittest.TestCase):\n\n def test(self):\n # Create a dictionary to store options about the surface\n mesh_dict = {'num_y' : 7,\n 'num_x' : 2,\n 'wing_type' : 'CRM',\n 'symmetry' : False,\n 'num_twist_cp' : 2,\n 'span_cos_spacing' : 1.}\n\n mesh, twist_cp = generate_mesh(mesh_dict)\n\n surf_dict = {\n # Wing definition\n 'name' : 'wing', # name of the surface\n 'symmetry' : False, # if true, model one half of wing\n # reflected across the plane y = 0\n 'S_ref_type' : 'wetted', # how we compute the wing area,\n # can be 'wetted' or 'projected'\n 'fem_model_type' : 'tube',\n\n 'thickness_cp' : np.ones(2) * 0.06836728,\n\n 'twist_cp' : twist_cp,\n 'mesh' : mesh,\n\n # Aerodynamic performance of the lifting surface at\n # an angle of attack of 0 (alpha=0).\n # These CL0 and CD0 values are added to the CL and CD\n # obtained from aerodynamic analysis of the surface to get\n # the total CL and CD.\n # These CL0 and CD0 values do not vary wrt alpha.\n 'CL0' : 0.0, # CL of the surface at alpha=0\n 'CD0' : 0.015, # CD of the surface at alpha=0\n\n # Airfoil properties for viscous drag calculation\n 'k_lam' : 0.05, # percentage of chord with laminar\n # flow, used for viscous drag\n 't_over_c_cp' : np.array([0.12]), # thickness over chord ratio (NACA0015)\n 'c_max_t' : .303, # chordwise location of maximum (NACA0015)\n # thickness\n 'with_viscous' : True,\n 'with_wave' : False, # if true, compute wave drag\n\n # Structural values are based on aluminum 7075\n 'E' : 70.e9, # [Pa] Young's modulus of the spar\n 'G' : 30.e9, # [Pa] shear modulus of the spar\n 'yield' : 500.e6 / 2.5, # [Pa] yield stress divided by 2.5 for limiting case\n 'mrho' : 3.e3, # [kg/m^3] material density\n 'fem_origin' : 0.35, # normalized chordwise location of the spar\n 'wing_weight_ratio' : 1.,\n 'struct_weight_relief' : False, # True to add the weight of the structure to the loads on the structure\n 'distributed_fuel_weight' : False,\n # Constraints\n 'exact_failure_constraint' : False, # if false, use KS function\n }\n\n surfaces = [surf_dict]\n\n # Create the problem and assign the model group\n prob = Problem()\n\n # Add problem information as an independent variables component\n indep_var_comp = IndepVarComp()\n indep_var_comp.add_output('v', val=248.136, units='m/s')\n indep_var_comp.add_output('alpha', val=5., units='deg')\n indep_var_comp.add_output('Mach_number', val=0.84)\n indep_var_comp.add_output('re', val=1.e6, units='1/m')\n indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')\n indep_var_comp.add_output('CT', val=grav_constant * 17.e-6, units='1/s')\n indep_var_comp.add_output('R', val=11.165e6, units='m')\n indep_var_comp.add_output('W0', val=0.4 * 3e5, units='kg')\n indep_var_comp.add_output('speed_of_sound', val=295.4, units='m/s')\n indep_var_comp.add_output('load_factor', val=1.)\n indep_var_comp.add_output('empty_cg', val=np.zeros((3)), units='m')\n\n prob.model.add_subsystem('prob_vars',\n indep_var_comp,\n promotes=['*'])\n\n # Loop over each 
surface in the surfaces list\n for surface in surfaces:\n\n # Get the surface name and create a group to contain components\n # only for this surface\n name = surface['name']\n\n aerostruct_group = AerostructGeometry(surface=surface)\n\n # Add tmp_group to the problem with the name of the surface.\n prob.model.add_subsystem(name, aerostruct_group)\n\n # Loop through and add a certain number of aero points\n for i in range(1):\n\n point_name = 'AS_point_{}'.format(i)\n # Connect the parameters within the model for each aero point\n\n # Create the aero point group and add it to the model\n AS_point = AerostructPoint(surfaces=surfaces)\n\n prob.model.add_subsystem(point_name, AS_point)\n\n # Connect flow properties to the analysis point\n prob.model.connect('v', point_name + '.v')\n prob.model.connect('alpha', point_name + '.alpha')\n prob.model.connect('Mach_number', point_name + '.Mach_number')\n prob.model.connect('re', point_name + '.re')\n prob.model.connect('rho', point_name + '.rho')\n prob.model.connect('CT', point_name + '.CT')\n prob.model.connect('R', point_name + '.R')\n prob.model.connect('W0', point_name + '.W0')\n prob.model.connect('speed_of_sound', point_name + '.speed_of_sound')\n prob.model.connect('empty_cg', point_name + '.empty_cg')\n prob.model.connect('load_factor', point_name + '.load_factor')\n\n for surface in surfaces:\n\n com_name = point_name + '.' + name + '_perf'\n prob.model.connect(name + '.local_stiff_transformed', point_name + '.coupled.' + name + '.local_stiff_transformed')\n prob.model.connect(name + '.nodes', point_name + '.coupled.' + name + '.nodes')\n\n # Connect aerodyamic mesh to coupled group mesh\n prob.model.connect(name + '.mesh', point_name + '.coupled.' + name + '.mesh')\n\n # Connect performance calculation variables\n prob.model.connect(name + '.radius', com_name + '.radius')\n prob.model.connect(name + '.thickness', com_name + '.thickness')\n prob.model.connect(name + '.nodes', com_name + '.nodes')\n prob.model.connect(name + '.cg_location', point_name + '.' + 'total_perf.' + name + '_cg_location')\n prob.model.connect(name + '.structural_mass', point_name + '.' + 'total_perf.' + name + '_structural_mass')\n prob.model.connect(name + '.t_over_c', com_name + '.t_over_c')\n\n from openmdao.api import ScipyOptimizeDriver\n prob.driver = ScipyOptimizeDriver()\n prob.driver.options['tol'] = 1e-9\n\n # Setup problem and add design variables, constraint, and objective\n prob.model.add_design_var('wing.twist_cp', lower=-15., upper=15.)\n prob.model.add_design_var('wing.thickness_cp', lower=0.01, upper=0.5, scaler=1e2)\n prob.model.add_constraint('AS_point_0.wing_perf.failure', upper=0.)\n prob.model.add_constraint('AS_point_0.wing_perf.thickness_intersects', upper=0.)\n\n # Add design variables, constraisnt, and objective on the problem\n prob.model.add_design_var('alpha', lower=-10., upper=10.)\n prob.model.add_constraint('AS_point_0.L_equals_W', equals=0.)\n prob.model.add_objective('AS_point_0.fuelburn', scaler=1e-5)\n\n # Set up the problem\n prob.setup()\n\n # from openmdao.api import view_model\n # view_model(prob)\n\n prob.run_driver()\n\n assert_rel_error(self, prob['AS_point_0.wing_perf.CL'][0], 0.469128339791, 1e-6)\n assert_rel_error(self, prob['AS_point_0.fuelburn'][0], 95393.7772462, 1.5e-6)\n assert_rel_error(self, prob['AS_point_0.wing_perf.failure'][0], 0., 1e-6)\n assert_rel_error(self, prob['AS_point_0.CM'][1], -1.3154462936779994, 1e-4)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Define the LinearSystemComp class.\"\"\"\nfrom __future__ import division, print_function\n\nfrom six.moves import range\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nfrom scipy.sparse.linalg import splu\n\nfrom openmdao.core.implicitcomponent import ImplicitComponent\n\n\nclass FEM(ImplicitComponent):\n \"\"\"\n Component that solves a linear system, Ax=b.\n\n Designed to handle small, dense linear systems (Ax=B) that can be efficiently solved with\n sparse lu-decomposition. It can be vectorized to either solve for multiple right hand sides,\n or to solve multiple linear systems.\n\n A is represented sparsely as a local_stiff_transformed, which is an ny x 12 x 12 array.\n\n Attributes\n ----------\n _lup : None or list(object)\n matrix factorizations returned from scipy.linag.lu_factor for each A matrix\n k_cols : ndarray\n Cached column indices for sparse representation of stiffness matrix.\n k_rows : ndarray\n Cached row indices for sparse representation of stiffness matrix.\n k_data : ndarray\n Cached values for sparse representation of stiffness matrix.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Intialize the LinearSystemComp component.\n\n Parameters\n ----------\n **kwargs : dict of keyword arguments\n Keyword arguments that will be mapped into the Component options.\n \"\"\"\n super(FEM, self).__init__(**kwargs)\n self._lup = None\n self.k_cols = None\n self.k_rows = None\n self.k_data = None\n\n def initialize(self):\n \"\"\"\n Declare options.\n \"\"\"\n self.options.declare('surface', types=dict)\n self.options.declare('vec_size', types=int, default=1,\n desc='Number of linear systems to solve.')\n\n def setup(self):\n \"\"\"\n Matrix and RHS are inputs, solution vector is the output.\n \"\"\"\n surface = self.options['surface']\n self.ny = ny = surface['mesh'].shape[1]\n self.size = size = int(6 * ny + 6)\n\n vec_size = self.options['vec_size']\n full_size = size * vec_size\n\n self._lup = []\n shape = (vec_size, size) if vec_size > 1 else (size, )\n\n init_locK = np.tile(np.eye(12).flatten(), ny-1).reshape(ny-1, 12, 12)\n\n self.add_input('local_stiff_transformed', val=init_locK)\n self.add_input('forces', val=np.ones(shape), units='N')\n self.add_output('disp_aug', shape=shape, val=.1, units='m')\n\n # Set up the derivatives.\n row_col = np.arange(full_size, dtype=\"int\")\n\n self.declare_partials('disp_aug', 'forces', val=np.full(full_size, -1.0),\n rows=row_col, cols=row_col)\n\n # The derivative of residual wrt displacements is the stiffness matrix K. 
We can use the\n # sparsity pattern here and when constucting the sparse matrix, so save rows and cols.\n\n base_row = np.repeat(0, 6)\n base_col = np.arange(6)\n\n # Upper diagonal blocks\n rows1 = np.tile(base_row, 6*(ny-1)) + np.repeat(np.arange(6*(ny-1)), 6)\n col = np.tile(base_col + 6, 6)\n cols1 = np.tile(col, ny-1) + np.repeat(6*np.arange(ny-1), 36)\n\n # Lower diagonal blocks\n rows2 = np.tile(base_row + 6, 6*(ny-1)) + np.repeat(np.arange(6*(ny-1)), 6)\n col = np.tile(base_col, 6)\n cols2 = np.tile(col, ny-1) + np.repeat(6*np.arange(ny-1), 36)\n\n # Main diagonal blocks, root\n rows3 = np.tile(base_row, 6) + np.repeat(np.arange(6), 6)\n cols3 = np.tile(base_col, 6)\n\n # Main diagonal blocks, tip\n rows4 = np.tile(base_row + (ny-1)*6, 6) + np.repeat(np.arange(6), 6)\n cols4 = np.tile(base_col + (ny-1)*6, 6)\n\n # Main diagonal blocks, interior\n rows5 = np.tile(base_row + 6, 6*(ny-2)) + np.repeat(np.arange(6*(ny-2)), 6)\n col = np.tile(base_col + 6, 6)\n cols5 = np.tile(col, ny-2) + np.repeat(6*np.arange(ny-2), 36)\n\n # Find constrained nodes based on closeness to specified cg point\n symmetry = self.options['surface']['symmetry']\n if symmetry:\n idx = self.ny - 1\n else:\n idx = (self.ny - 1) // 2\n\n index = 6 * idx\n num_dofs = 6 * ny\n arange = np.arange(6)\n\n # Fixed boundary condition.\n rows6 = index + arange\n cols6 = num_dofs + arange\n\n self.k_rows = rows = np.concatenate([rows1, rows2, rows3, rows4, rows5, rows6, cols6])\n self.k_cols = cols = np.concatenate([cols1, cols2, cols3, cols4, cols5, cols6, rows6])\n\n sp_size = len(rows)\n vec_rows = np.tile(rows, vec_size) + np.repeat(sp_size*np.arange(vec_size), sp_size)\n vec_cols = np.tile(cols, vec_size) + np.repeat(sp_size*np.arange(vec_size), sp_size)\n\n self.declare_partials(of='disp_aug', wrt='disp_aug', rows=vec_rows, cols=vec_cols)\n\n base_row = np.tile(0, 12)\n base_col = np.arange(12)\n row = np.tile(base_row, 12) + np.repeat(np.arange(12), 12)\n col = np.tile(base_col, 12) + np.repeat(12*np.arange(12), 12)\n rows = np.tile(row, ny-1) + np.repeat(6*np.arange(ny-1), 144)\n cols = np.tile(col, ny-1) + np.repeat(144*np.arange(ny-1), 144)\n\n self.declare_partials('disp_aug', 'local_stiff_transformed', rows=rows, cols=cols)\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n \"\"\"\n R = Ax - b.\n\n Parameters\n ----------\n inputs : Vector\n unscaled, dimensional input variables read via inputs[key]\n outputs : Vector\n unscaled, dimensional output variables read via outputs[key]\n residuals : Vector\n unscaled, dimensional residuals written to via residuals[key]\n \"\"\"\n K = self.assemble_CSC_K(inputs)\n residuals['disp_aug'] = K.dot(outputs['disp_aug']) - inputs['forces']\n\n def solve_nonlinear(self, inputs, outputs):\n \"\"\"\n Use numpy to solve Ax=b for x.\n\n Parameters\n ----------\n inputs : Vector\n unscaled, dimensional input variables read via inputs[key]\n outputs : Vector\n unscaled, dimensional output variables read via outputs[key]\n \"\"\"\n # lu factorization for use with solve_linear\n K = self.assemble_CSC_K(inputs)\n self._lup = splu(K)\n outputs['disp_aug'] = self._lup.solve(inputs['forces'])\n\n def linearize(self, inputs, outputs, J):\n \"\"\"\n Compute the non-constant partial derivatives.\n\n Parameters\n ----------\n inputs : Vector\n unscaled, dimensional input variables read via inputs[key]\n outputs : Vector\n unscaled, dimensional output variables read via outputs[key]\n J : Jacobian\n sub-jac components written to jacobian[output_name, input_name]\n \"\"\"\n x = 
outputs['disp_aug']\n vec_size = self.options['vec_size']\n ny = self.ny\n\n idx = np.tile(np.tile(np.arange(12), 12), ny-1) + np.repeat(6*np.arange(ny-1), 144)\n J['disp_aug', 'local_stiff_transformed'] = np.tile(x[idx], vec_size)\n\n J['disp_aug', 'disp_aug'] = np.tile(self.k_data, vec_size)\n\n def solve_linear(self, d_outputs, d_residuals, mode):\n r\"\"\"\n Back-substitution to solve the derivatives of the linear system.\n\n If mode is:\n 'fwd': d_residuals \\|-> d_outputs\n\n 'rev': d_outputs \\|-> d_residuals\n\n Parameters\n ----------\n d_outputs : Vector\n unscaled, dimensional quantities read via d_outputs[key]\n d_residuals : Vector\n unscaled, dimensional quantities read via d_residuals[key]\n mode : str\n either 'fwd' or 'rev'\n \"\"\"\n vec_size = self.options['vec_size']\n\n if mode == 'fwd':\n if vec_size > 1:\n for j in range(vec_size):\n d_outputs['disp_aug'] = self._lup.solve(d_residuals['disp_aug'][j])\n else:\n d_outputs['disp_aug'] = self._lup.solve(d_residuals['disp_aug'])\n else:\n if vec_size > 1:\n for j in range(vec_size):\n d_residuals['disp_aug'] = self._lup.solve(d_outputs['disp_aug'][j])\n else:\n d_residuals['disp_aug'] = self._lup.solve(d_outputs['disp_aug'])\n\n def assemble_CSC_K(self, inputs):\n \"\"\"\n Assemble the stiffness matrix in sparse CSC format.\n\n Returns\n -------\n ndarray\n Stiffness matrix as dense ndarray.\n \"\"\"\n k_loc = inputs['local_stiff_transformed']\n size = self.size\n\n data1 = k_loc[:, :6, 6:].flatten()\n data2 = k_loc[:, 6:, :6].flatten()\n data3 = k_loc[0, :6, :6].flatten()\n data4 = k_loc[-1, 6:, 6:].flatten()\n data5 = (k_loc[0:-1, 6:, 6:] + k_loc[1:, :6, :6]).flatten()\n data6 = np.full((6, ), 1e9)\n\n self.k_data = data = np.concatenate([data1, data2, data3, data4, data5, data6, data6])\n\n return coo_matrix((data, (self.k_rows, self.k_cols)), shape=(size, size)).tocsc()\n",
"from __future__ import print_function, division\r\nimport numpy as np\r\n\r\nfrom openmdao.api import ExplicitComponent\r\n\r\nclass ViscousDrag(ExplicitComponent):\r\n \"\"\"\r\n Compute the skin friction drag if the with_viscous option is True.\r\n If not, the CDv is 0.\r\n This component exists for each lifting surface.\r\n\r\n Parameters\r\n ----------\r\n re : float\r\n Dimensionalized (1/length) Reynolds number. This is used to compute the\r\n local Reynolds number based on the local chord length.\r\n Mach_number : float\r\n Mach number.\r\n S_ref : float\r\n The reference area of the lifting surface.\r\n sweep : float\r\n The angle (in degrees) of the wing sweep. This is used in the form\r\n factor calculation.\r\n widths[ny-1] : numpy array\r\n The spanwise width of each panel.\r\n lengths[ny] : numpy array\r\n The sum of the lengths of each line segment along a chord section.\r\n t_over_c[ny-1] : numpy array\r\n The streamwise thickness-to-chord ratio of each VLM panel.\r\n\r\n Returns\r\n -------\r\n CDv : float\r\n Viscous drag coefficient for the lifting surface computed using flat\r\n plate skin friction coefficient and a form factor to account for wing\r\n shape.\r\n \"\"\"\r\n\r\n def initialize(self):\r\n self.options.declare('surface', types=dict)\r\n self.options.declare('with_viscous', types=bool)\r\n\r\n def setup(self):\r\n self.surface = surface = self.options['surface']\r\n self.with_viscous = surface['with_viscous']\r\n\r\n # Percentage of chord with laminar flow\r\n self.k_lam = surface['k_lam']\r\n\r\n # Thickness over chord for the airfoil\r\n self.c_max_t = surface['c_max_t']\r\n\r\n ny = surface['mesh'].shape[1]\r\n\r\n self.add_input('re', val=5.e6, units='1/m')\r\n self.add_input('Mach_number', val=1.6)\r\n self.add_input('S_ref', val=1., units='m**2')\r\n self.add_input('cos_sweep', val=np.ones((ny-1)), units='m')\r\n self.add_input('widths', val=np.ones((ny-1)), units='m')\r\n self.add_input('lengths', val=np.ones((ny)), units='m')\r\n self.add_input('t_over_c', val=np.ones((ny-1)))\r\n self.add_output('CDv', val=0.)\r\n\r\n self.declare_partials('CDv', '*')\r\n\r\n self.set_check_partial_options(wrt='*', method='cs', step=1e-50)\r\n\r\n def compute(self, inputs, outputs):\r\n if self.with_viscous:\r\n re = inputs['re']\r\n M = inputs['Mach_number']\r\n S_ref = inputs['S_ref']\r\n widths = inputs['widths']\r\n lengths = inputs['lengths']\r\n cos_sweep = inputs['cos_sweep'] / widths\r\n t_over_c = inputs['t_over_c']\r\n\r\n # Take panel chord length to be average of its edge lengths\r\n chords = (lengths[1:] + lengths[:-1]) / 2.\r\n Re_c = re * chords\r\n\r\n cdturb_total = 0.455 / (np.log10(Re_c))**2.58 / \\\r\n (1.0 + 0.144*M**2)**0.65\r\n cdlam_tr = 1.328 / np.sqrt(Re_c * self.k_lam)\r\n\r\n # Use eq. 12.27 of Raymer for turbulent Cf\r\n if self.k_lam == 0:\r\n cdlam_tr = 0.\r\n cdturb_tr = 0.\r\n\r\n elif self.k_lam < 1.0:\r\n cdturb_tr = 0.455 / (np.log10(Re_c*self.k_lam))**2.58 / \\\r\n (1.0 + 0.144*M**2)**0.65\r\n\r\n else:\r\n cdturb_total = 0.\r\n cdturb_tr = 0.\r\n\r\n cd = (cdlam_tr - cdturb_tr)*self.k_lam + cdturb_total\r\n\r\n # Multiply by section width to get total normalized drag for section\r\n d_over_q = 2 * cd * chords\r\n\r\n # Calculate form factor (Raymer Eq. 
12.30)\r\n k_FF = 1.34 * M**0.18 * \\\r\n (1.0 + 0.6*t_over_c/self.c_max_t + 100*t_over_c**4)\r\n FF = k_FF * cos_sweep**0.28\r\n\r\n # Sum individual panel drags to get total drag\r\n D_over_q = np.sum(d_over_q * widths * FF)\r\n\r\n outputs['CDv'] = D_over_q / S_ref\r\n\r\n if self.surface['symmetry']:\r\n outputs['CDv'] *= 2\r\n else:\r\n outputs['CDv'] = 0.0\r\n\r\n def compute_partials(self, inputs, partials):\r\n \"\"\" Jacobian for viscous drag.\"\"\"\r\n\r\n partials['CDv', 'lengths'] = np.zeros_like(partials['CDv', 'lengths'])\r\n re = inputs['re']\r\n t_over_c = inputs['t_over_c']\r\n\r\n if self.with_viscous:\r\n\r\n M = inputs['Mach_number']\r\n S_ref = inputs['S_ref']\r\n\r\n widths = inputs['widths']\r\n lengths = inputs['lengths']\r\n cos_sweep = inputs['cos_sweep'] / widths\r\n\r\n # Take panel chord length to be average of its edge lengths\r\n chords = (lengths[1:] + lengths[:-1]) / 2.\r\n Re_c = re * chords\r\n\r\n cdturb_total = 0.455 / (np.log10(Re_c))**2.58 / \\\r\n (1.0 + 0.144*M**2)**0.65\r\n cdlam_tr = 1.328 / np.sqrt(Re_c * self.k_lam)\r\n\r\n # Use eq. 12.27 of Raymer for turbulent Cf\r\n if self.k_lam == 0:\r\n cdlam_tr = 0.\r\n cdturb_tr = 0.\r\n\r\n elif self.k_lam < 1.0:\r\n cdturb_tr = 0.455 / (np.log10(Re_c*self.k_lam))**2.58 / \\\r\n (1.0 + 0.144*M**2)**0.65\r\n\r\n else:\r\n cdturb_total = 0.\r\n cdturb_tr = 0.\r\n\r\n cd = (cdlam_tr - cdturb_tr)*self.k_lam + cdturb_total\r\n\r\n # Multiply by section width to get total normalized drag for section\r\n d_over_q = 2 * cd * chords\r\n\r\n # Calculate form factor (Raymer Eq. 12.30)\r\n k_FF = 1.34 * M**0.18 * \\\r\n (1.0 + 0.6*t_over_c/self.c_max_t + 100*t_over_c**4)\r\n FF = k_FF * cos_sweep**0.28\r\n\r\n # Sum individual panel drags to get total drag\r\n D_over_q = np.sum(d_over_q * widths * FF)\r\n\r\n chords = (lengths[1:] + lengths[:-1]) / 2.\r\n Re_c = re * chords\r\n\r\n cdl_Re = 0.0\r\n cdt_Re = 0.0\r\n cdT_Re = 0.0\r\n\r\n B = (1. 
+ 0.144*M**2)**0.65\r\n\r\n if self.k_lam == 0:\r\n cdT_Re = 0.455/(np.log10(Re_c))**3.58/B * \\\r\n -2.58 / np.log(10) / Re_c\r\n elif self.k_lam < 1.0:\r\n\r\n cdl_Re = 1.328 / (Re_c*self.k_lam)**1.5 * -0.5 * self.k_lam\r\n cdt_Re = 0.455/(np.log10(Re_c*self.k_lam))**3.58/B * \\\r\n -2.58 / np.log(10) / Re_c\r\n cdT_Re = 0.455/(np.log10(Re_c))**3.58/B * \\\r\n -2.58 / np.log(10) / Re_c\r\n\r\n else:\r\n cdl_Re = 1.328 / (Re_c*self.k_lam)**1.5 * -0.5 * self.k_lam\r\n\r\n cd_Re = (cdl_Re - cdt_Re)*self.k_lam + cdT_Re\r\n\r\n CDv_lengths = 2 * widths * FF / S_ref * \\\r\n (d_over_q / 4 / chords + chords * cd_Re * re / 2.)\r\n\r\n partials['CDv', 'lengths'][0, 1:] += CDv_lengths\r\n partials['CDv', 'lengths'][0, :-1] += CDv_lengths\r\n partials['CDv', 'widths'][0, :] = d_over_q * FF / S_ref * 0.72\r\n partials['CDv', 'S_ref'] = - D_over_q / S_ref**2\r\n partials['CDv', 'cos_sweep'][0, :] = 0.28 * k_FF * d_over_q / S_ref / cos_sweep**0.72\r\n partials['CDv', 't_over_c'] = (d_over_q * widths * 1.34 * M**0.18 * \\\r\n (0.6/self.c_max_t + 400*t_over_c**3) * cos_sweep**0.28) / S_ref\r\n\r\n term = (-0.65/(1+0.144*M**2)**1.65) * 2*0.144*M\r\n dcdturb_total__dM = 0.455 / (np.log10(Re_c))**2.58 * term\r\n dcdturb_tr__dM = 0.455 / (np.log10(Re_c*self.k_lam))**2.58 * term\r\n\r\n if self.k_lam == 0:\r\n dCd__dM = dcdturb_total__dM\r\n elif self.k_lam < 1:\r\n dCd__dM = -self.k_lam*dcdturb_tr__dM + dcdturb_total__dM\r\n else:\r\n dCd__dM = 0.\r\n dd_over_q__dM = 2*chords*dCd__dM\r\n\r\n dk_ff__dM = 1.34*0.18*M**-0.82 * (1.0 + 0.6*t_over_c/self.c_max_t + 100*t_over_c**4)\r\n dFF__dM = dk_ff__dM*cos_sweep**0.28\r\n\r\n dD_over_q__dM = np.sum(widths* (dd_over_q__dM*FF + dFF__dM*d_over_q))\r\n\r\n partials['CDv','Mach_number'] = dD_over_q__dM / S_ref\r\n\r\n term = 0.455/(1+0.144*M**2)**0.65\r\n dRe_c__dRe = chords\r\n dcdturb_tr__dRe = term * -2.58/np.log10(Re_c*self.k_lam)**3.58 / (Re_c*self.k_lam*np.log(10)) * self.k_lam * dRe_c__dRe\r\n dcdlam_tr__dRe = -1.328/2./(Re_c*self.k_lam)**1.5 * self.k_lam * dRe_c__dRe\r\n dcdturb_total__dRe = term * -2.58/np.log10(Re_c)**3.58 / (Re_c * np.log(10)) * dRe_c__dRe\r\n\r\n if self.k_lam == 0:\r\n dcd__dRe = dcdturb_total__dRe\r\n elif self.k_lam < 1:\r\n dcd__dRe = self.k_lam*(dcdlam_tr__dRe - dcdturb_tr__dRe) + dcdturb_total__dRe\r\n else:\r\n dcd__dRe = 0.\r\n ddoq__dRe = 2*chords*dcd__dRe\r\n\r\n dDoq__dRe = np.sum(widths*ddoq__dRe*FF)\r\n\r\n partials['CDv', 're'] = dDoq__dRe/S_ref\r\n\r\n if self.surface['symmetry']:\r\n partials['CDv', 'lengths'][0, :] *= 2\r\n partials['CDv', 'widths'][0, :] *= 2\r\n partials['CDv', 'S_ref'] *= 2\r\n partials['CDv', 'cos_sweep'][0, :] *= 2\r\n partials['CDv', 'Mach_number'][0, :] *= 2\r\n partials['CDv', 're'][0, :] *= 2\r\n partials['CDv', 't_over_c'][0, :] *= 2\r\n"
] | [
[
"numpy.arange",
"numpy.eye",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.array",
"numpy.ones"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.ones"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.einsum",
"numpy.ones"
],
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
],
[
"scipy.sparse.coo_matrix",
"scipy.sparse.linalg.splu",
"numpy.arange",
"numpy.eye",
"numpy.tile",
"numpy.full",
"numpy.concatenate",
"numpy.ones",
"numpy.repeat"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.ones",
"numpy.log10",
"numpy.zeros_like",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MiseryForMe/MDC | [
"470b2db76a71d7e4260de7e39e6cf9a61c80ca2b"
] | [
"evaluate.py"
] | [
"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nimport numpy as np\nimport cv2\nfrom PIL import Image\nfrom tqdm import tqdm\nimport logging\nimport importlib\nimport argparse\nimport os.path as osp\nimport sys\nimport math\n\nfrom lib.model import DeepLabLargeFOV\nfrom lib.pascal_voc import PascalVoc\nfrom lib.pascal_voc_aug import PascalVoc_Aug\n# from utils.crf import crf\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Train a network')\n parser.add_argument(\n '--cfg',\n dest = 'cfg',\n type = str,\n default = 'config/pascal_voc_aug_multi_scale.py',\n help = 'config file used in training'\n )\n return parser.parse_args()\n\n\n\ndef compute_iou(mask, lb, ignore_lb = (255, )):\n assert mask.shape == lb.shape, 'prediction and gt do not agree in shape'\n classes = set(np.unique(lb).tolist())\n for cls in ignore_lb:\n if cls in classes:\n classes.remove(cls)\n\n iou_cls = []\n for cls in classes:\n gt = lb == cls\n pred = mask == cls\n intersection = np.logical_and(gt, pred)\n union = np.logical_or(gt, pred)\n iou = float(np.sum(intersection)) / float(np.sum(union))\n iou_cls.append(iou)\n return sum(iou_cls) / len(iou_cls)\n\n\ndef eval_model(net, cfg):\n logger = logging.getLogger(__name__)\n ## dataset\n dsval = PascalVoc(cfg, mode='val')\n ## evaluator\n evaluator = MscEval(\n dsval = dsval,\n scales = cfg.test_scales,\n n_classes = cfg.n_classes,\n lb_ignore = cfg.ignore_label,\n flip = cfg.flip,\n crop_size = cfg.crop_size,\n n_workers = 4,\n )\n ## inference\n logger.info('evaluating on standard voc2012 val set')\n mIOU = evaluator(net)\n\n return mIOU\n\n\nclass MscEval(object):\n def __init__(self,\n dsval,\n scales = [0.5, 0.75, 1, 1.25, 1.5, 1.75],\n n_classes = 19,\n lb_ignore = 255,\n flip = True,\n crop_size = 321,\n n_workers = 2,\n *args, **kwargs):\n self.scales = scales\n self.n_classes = n_classes\n self.lb_ignore = lb_ignore\n self.flip = flip\n self.crop_size = crop_size\n ## dataloader\n self.dsval = dsval\n self.net = None\n\n\n def pad_tensor(self, inten, size):\n N, C, H, W = inten.size()\n ## TODO: use zeros\n outten = torch.zeros(N, C, size[0], size[1]).cuda()\n outten.requires_grad = False\n margin_h, margin_w = size[0]-H, size[1]-W\n hst, hed = margin_h//2, margin_h//2+H\n wst, wed = margin_w//2, margin_w//2+W\n outten[:, :, hst:hed, wst:wed] = inten\n return outten, [hst, hed, wst, wed]\n\n\n def eval_chip(self, crop):\n with torch.no_grad():\n out = self.net(crop)\n prob = F.softmax(out, 1)\n if self.flip:\n crop = torch.flip(crop, dims=(3,))\n out = self.net(crop)\n out = torch.flip(out, dims=(3,))\n prob += F.softmax(out, 1)\n prob = torch.exp(prob)\n return prob\n\n\n def crop_eval(self, im):\n cropsize = self.crop_size\n stride_rate = 5/6.\n N, C, H, W = im.size()\n long_size, short_size = (H,W) if H>W else (W,H)\n if long_size < cropsize:\n im, indices = self.pad_tensor(im, (cropsize, cropsize))\n prob = self.eval_chip(im)\n prob = prob[:, :, indices[0]:indices[1], indices[2]:indices[3]]\n else:\n stride = math.ceil(cropsize*stride_rate)\n if short_size < cropsize:\n if H < W:\n im, indices = self.pad_tensor(im, (cropsize, W))\n else:\n im, indices = self.pad_tensor(im, (H, cropsize))\n N, C, H, W = im.size()\n n_x = math.ceil((W-cropsize)/stride)+1\n n_y = math.ceil((H-cropsize)/stride)+1\n prob = torch.zeros(N, self.n_classes, H, W).cuda()\n prob.requires_grad = False\n for iy in range(n_y):\n for ix in range(n_x):\n hed, wed = min(H, 
stride*iy+cropsize), min(W, stride*ix+cropsize)\n hst, wst = hed-cropsize, wed-cropsize\n chip = im[:, :, hst:hed, wst:wed]\n prob_chip = self.eval_chip(chip)\n prob[:, :, hst:hed, wst:wed] += prob_chip\n if short_size < cropsize:\n prob = prob[:, :, indices[0]:indices[1], indices[2]:indices[3]]\n return prob\n\n\n def scale_crop_eval(self, im, scale):\n N, C, H, W = im.size()\n new_hw = [int(H*scale), int(W*scale)]\n im = F.interpolate(im, new_hw, mode='bilinear', align_corners=True)\n prob = self.crop_eval(im)\n prob = F.interpolate(prob, (H, W), mode='bilinear', align_corners=True)\n return prob\n\n\n def compute_hist(self, pred, lb, lb_ignore=255):\n n_classes = self.n_classes\n keep = np.logical_not(lb==lb_ignore)\n merge = pred[keep] * n_classes + lb[keep]\n hist = np.bincount(merge, minlength=n_classes**2)\n hist = hist.reshape((n_classes, n_classes))\n return hist\n\n def __call__(self, net):\n self.net = net\n ## evaluate\n hist = np.zeros((self.n_classes, self.n_classes), dtype=np.float32)\n for i, (imgs, label) in enumerate(tqdm(self.dsval)):\n N, _, H, W = imgs.size()\n probs = torch.zeros((N, self.n_classes, H, W))\n probs.requires_grad = False\n imgs = imgs.cuda()\n for sc in self.scales:\n prob = self.scale_crop_eval(imgs, sc)\n probs += prob.detach().cpu()\n probs = probs.data.numpy()\n preds = np.argmax(probs, axis=1)\n\n hist_once = self.compute_hist(preds, label)\n hist = hist + hist_once\n IOUs = np.diag(hist) / (np.sum(hist, axis=0)+np.sum(hist, axis=1)-np.diag(hist))\n mIOU = np.mean(IOUs)\n return mIOU\n\n\ndef evaluate(args):\n ## set up logger and parse cfg\n FORMAT = '%(levelname)s %(filename)s(%(lineno)d): %(message)s'\n logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\n logger = logging.getLogger(__name__)\n spec = importlib.util.spec_from_file_location('mod_cfg', args.cfg)\n mod_cfg = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod_cfg)\n cfg = mod_cfg.cfg\n\n ## initialize model\n net = DeepLabLargeFOV(3, cfg.n_classes)\n net.eval()\n net.cuda()\n model_pth = osp.join(cfg.res_pth, 'model_final.pkl')\n net.load_state_dict(torch.load(model_pth))\n\n ## evaluate\n mIOU = eval_model(net, cfg)\n logger.info('iou in whole is: {}'.format(mIOU))\n\n\nif __name__ == \"__main__\":\n args = get_args()\n evaluate(args)\n"
] | [
[
"numpy.diag",
"numpy.logical_not",
"torch.nn.functional.softmax",
"torch.load",
"torch.zeros",
"numpy.unique",
"numpy.logical_or",
"torch.exp",
"numpy.argmax",
"numpy.mean",
"numpy.bincount",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.flip",
"numpy.logical_and",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CaoQiNeng/python-classifier-2021 | [
"a6350988e7ecf0453aac2655f5b0d8af1e538bfc"
] | [
"resnet.py"
] | [
"# -*- coding: utf-8 -*-\n'''\nupdate: 2021/5/18 lym\n'''\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom torchsummary import summary\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed1d.pth',\n}\n\n#Mish - \"Mish: A Self Regularized Non-Monotonic Neural Activation Function\"\n#https://arxiv.org/abs/1908.08681v1\n#implemented for PyTorch / FastAI by lessw2020 \n#github: https://github.com/lessw2020/mish\n\nclass Mish(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n #inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!)\n return x *( torch.tanh(F.softplus(x)))\n\n# or: ELU+init (a=0.54; gain=1.55)\nact_fn = Mish()#nn.ReLU(inplace=True)\n\nclass Attention(nn.Module):\n def __init__(self, feature_dim, step_dim, bias=True, **kwargs):\n super(Attention, self).__init__(**kwargs)\n \n self.supports_masking = True\n\n self.bias = bias\n self.feature_dim = feature_dim\n self.step_dim = step_dim\n self.features_dim = 0\n \n weight = torch.zeros(feature_dim, 1)\n nn.init.kaiming_uniform_(weight)\n self.weight = nn.Parameter(weight)\n \n if bias:\n self.b = nn.Parameter(torch.zeros(step_dim))\n \n def forward(self, x, mask=None):\n feature_dim = self.feature_dim \n step_dim = self.step_dim\n\n eij = torch.mm(\n x.contiguous().view(-1, feature_dim), \n self.weight\n ).view(-1, step_dim)\n \n if self.bias:\n eij = eij + self.b\n \n eij = torch.tanh(eij)\n a = torch.exp(eij)\n \n if mask is not None:\n a = a * mask\n\n a = a / (torch.sum(a, 1, keepdim=True) + 1e-10)\n\n weighted_input = x * torch.unsqueeze(a, -1)\n return torch.sum(weighted_input, 1)\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv1d(in_planes, out_planes, kernel_size=7, stride=stride,\n padding=3, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm1d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm1d(planes)\n self.downsample = downsample\n self.stride = stride\n self.dropout = nn.Dropout(.2)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.dropout(out)\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=7, bias=False, padding=3)\n self.bn1 = nn.BatchNorm1d(planes)\n self.conv2 = nn.Conv1d(planes, planes, kernel_size=11, stride=stride,\n padding=5, bias=False)\n self.bn2 = nn.BatchNorm1d(planes)\n self.conv3 = nn.Conv1d(planes, planes * 4, kernel_size=7, 
bias=False, padding=3)\n self.bn3 = nn.BatchNorm1d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dropout = nn.Dropout(.2)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.dropout(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_channels, block, layers, num_classes=27):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv1d(in_channels, 64, kernel_size=15, stride=2, padding=7, bias=False) #12 6dao lian\n self.bn1 = nn.BatchNorm1d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n\n self.lstm = nn.LSTM(80, 512, bidirectional=True, batch_first=True)\n self.attention_layer = Attention(1024, 512)\n\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n n = m.kernel_size[0] * m.kernel_size[0] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv1d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm1d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n # print(x.shape)\n # max_pooled = F.adaptive_max_pool1d(out, 1)\n # avg_pooled = F.adaptive_avg_pool1d(out, 1)\n # out = torch.cat([max_pooled, avg_pooled], dim=1)\n x = F.adaptive_max_pool1d(x, 1)#self.avgpool(x)\n # x,_ = self.lstm(x)\n # x = self.attention_layer(x)\n\n # print(x.shape)\n x = x.view(x.size(0), -1)\n # print(x.shape)\n x = self.fc(x)\n # print('x.shape:', x.shape)\n return x\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\n# def resnet50(pretrained=False, **kwargs):\n# \"\"\"Constructs a 
ResNet-50 model.\n#\n# Args:\n# pretrained (bool): If True, returns a model pre-trained on ImageNet\n# \"\"\"\n# model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n# if pretrained:\n# print('----------------downloading----------------')\n# model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n# return model\n\ndef resnet50(in_channels, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(in_channels,Bottleneck, [3, 4, 6, 3], **kwargs)\n # if pretrained:\n # print('----------------downloading----------------')\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n\n\nif __name__ == '__main__':\n\n in_channels = 6\n x = torch.randn(2, in_channels, 72000)\n # x = torch.randn(1, 2560*6, 12)\n # net = resnet18()\n # net = resnet34()\n net = resnet50(in_channels)\n # net = resnet101()\n # net = resnet152()\n y = net(x)\n\n summary(net, (in_channels, 72000))\n\n # print(net)\n print(x.shape)\n print(y.shape)\n"
] | [
[
"torch.zeros",
"torch.nn.functional.adaptive_max_pool1d",
"torch.sum",
"torch.tanh",
"torch.nn.Dropout",
"torch.randn",
"torch.nn.MaxPool1d",
"torch.nn.functional.softplus",
"torch.nn.BatchNorm1d",
"torch.nn.Sequential",
"torch.nn.Parameter",
"torch.unsqueeze",
"torch.exp",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.LSTM",
"torch.nn.init.kaiming_uniform_",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kew96/GraphcoreExamples | [
"22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb",
"22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb",
"22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb",
"22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb"
] | [
"applications/popart/bert/phased_execution/bert_layers.py",
"applications/pytorch/cnns/models/model_manipulator.py",
"applications/pytorch/cnns/datasets/preprocess.py",
"applications/tensorflow/bert/modeling.py"
] | [
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT layers.\"\"\"\nimport numpy as np\n\nimport popart\nfrom phased_execution.layers import Dense, Dropout, Norm, Split\nfrom phased_execution.nn import Block, Parameter\n\n__all__ = [\n \"Attention\", \"FeedForward\", \"MaskLM\", \"NextSentencePred\", \"SquadProjection\"\n]\n\n\nclass Attention(Block):\n def __init__(self,\n name: str,\n input_size,\n hidden_size,\n num_heads,\n serialize_matmul,\n available_memory_proportion,\n epsilon,\n dropout,\n dropout_prob,\n attn_dropout,\n attn_dropout_prob,\n micro_batch_size,\n sequence_length,\n dtype,\n task,\n num_mask_tokens,\n split_qkv = False,\n attention_bias = False,\n residual=True,\n prefetch_masks=True,\n use_default_mem_proportion=True,\n mask=None,\n increment_scope=True,\n **kwargs):\n if split_qkv:\n params = [\n Parameter(name='Q',\n shape=[input_size, hidden_size],\n value=None),\n Parameter(name='K',\n shape=[input_size, hidden_size],\n value=None),\n Parameter(name='V',\n shape=[input_size, hidden_size],\n value=None),\n Parameter(name='Out', shape=[hidden_size, input_size], value=None)\n ]\n if attention_bias:\n bias_params = [\n Parameter(name='Q_Bias',\n shape=[hidden_size],\n value=None),\n Parameter(name='K_Bias',\n shape=[hidden_size],\n value=None),\n Parameter(name='V_Bias',\n shape=[hidden_size],\n value=None),\n Parameter(name='Out_Bias', shape=[hidden_size], value=None)\n ]\n params = params + bias_params\n else:\n params = [\n Parameter(name='QKV',\n shape=[input_size, 3 * hidden_size],\n value=None),\n Parameter(name='Out', shape=[hidden_size, input_size], value=None)\n ]\n if attention_bias:\n bias_params = [\n Parameter(name='QKV_Bias',\n shape=[3 * hidden_size],\n value=None),\n Parameter(name='Out_Bias', shape=[hidden_size], value=None)\n ]\n params = params + bias_params\n scope_provider = kwargs['scope_provider']\n super(Attention, self).__init__(params=params,\n scope=scope_provider.get_scope(\n name, 'next' if increment_scope else 'prev'),\n dtype=dtype,\n **kwargs)\n self.num_heads = num_heads\n self.hidden_size = hidden_size\n self.serialize_matmul = serialize_matmul\n self.available_memory_proportion = available_memory_proportion\n self.use_default_mem_proportion = use_default_mem_proportion\n self.split_qkv = split_qkv\n self.attention_bias = attention_bias\n self.micro_batch_size = micro_batch_size\n self.seq_len = sequence_length\n if hidden_size % num_heads != 0:\n raise ValueError('Hidden size must be a multiple of num_heads')\n self.qkv_length = hidden_size // num_heads\n self.dtype = dtype\n self.residual = residual\n self.task = task\n self.num_mask_tokens = num_mask_tokens\n self.mask = mask\n self.prefetch_masks = prefetch_masks\n if prefetch_masks:\n # Mask on chip would cause OOM for batch size 1024, choose off chip instead\n additional_scopes = [self.builder.recomputeOutput(popart.RecomputeType.Checkpoint)]\n self.mask_execution_phase = scope_provider.get_scope('Mask', 
'prev').execution_phase % 2\n self.mask_scope = scope_provider.get_scope('Mask',\n self.mask_execution_phase,\n additional_scopes=additional_scopes)\n else:\n self.mask_scope = scope_provider.get_scope('Mask', 'prev')\n\n if self.residual:\n self.norm = Norm(scope_provider.get_scope('Norm', 'prev'), hidden_size,\n epsilon, dtype, **kwargs)\n if dropout:\n self.dropout = Dropout(scope_provider.get_scope('Dropout', 'prev'),\n dropout_prob, **kwargs)\n else:\n self.dropout = lambda x: x\n\n if attn_dropout:\n self.attn_dropout = Dropout(scope_provider.get_scope('AttnDropout', 'prev'),\n attn_dropout_prob, **kwargs)\n else:\n self.attn_dropout = lambda x: x\n\n self.total_execution_phases = self.total_phases()\n\n def attention_mask(self, masks):\n if self.prefetch_masks and (self.mask is not None):\n return self.mask\n\n with self.scope_provider(self.builder, self.mask_scope):\n all_indices_np = np.arange(self.seq_len, dtype=np.uint32)\n all_indices = self.builder.aiOnnx.constant(all_indices_np,\n \"mask_sequence\")\n if self.task == \"PRETRAINING\":\n # Mask tokens mask\n indices_less_than_maskidx = self.builder.aiOnnx.less(\n [all_indices, masks[0]])\n\n indices_greater_than_num_mask_token_np = np.greater_equal(\n all_indices_np, self.num_mask_tokens).astype(np.bool)\n indices_greater_than_num_mask_token = self.builder.aiOnnx.constant(\n indices_greater_than_num_mask_token_np)\n\n mask_tokens_mask = self.builder.aiOnnx.logical_or(\n [indices_less_than_maskidx, indices_greater_than_num_mask_token])\n\n # Sequence mask\n sequence_mask = self.builder.aiOnnx.less(\n [all_indices, masks[1]])\n\n final_mask = self.builder.aiOnnx.logical_and(\n [mask_tokens_mask, sequence_mask])\n else:\n final_mask = self.builder.aiOnnx.less([all_indices, masks[0]])\n\n final_mask = self.builder.aiOnnx.cast(\n [final_mask],\n 'FLOAT' if self.dtype == np.float32 else 'FLOAT16')\n final_mask = self.builder.aiOnnx.sub([\n final_mask,\n self.builder.aiOnnx.constant(np.array(1.0, self.dtype))\n ])\n final_mask = self.builder.aiOnnx.mul([\n final_mask,\n self.builder.aiOnnx.constant(np.array(1000.0, self.dtype))\n ])\n final_mask = self.builder.reshape_const(\n self.builder.aiOnnx, [final_mask],\n [self.micro_batch_size, 1, 1, self.seq_len])\n\n # TODO: This shouldn't be needed. 
No Variables on this path.\n final_mask = self.builder.customOp(\n opName=\"Detach\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[final_mask],\n attributes={\"pass_through_creation\": 1})[0]\n self.mask = final_mask\n return final_mask\n\n def dotproduct_attention(self, qkv, masks):\n if self.split_qkv:\n split_qkv = qkv\n else:\n split_qkv = self.builder.aiOnnx.split([qkv],\n num_outputs=3,\n axis=1,\n split=[self.hidden_size] * 3,\n debugContext=\"QKV_Split\")\n\n def extract_heads(tensor, transpose=False):\n comb_shape = [\n self.micro_batch_size, self.seq_len, self.num_heads, self.qkv_length\n ]\n tensor = self.builder.reshape_const(self.builder.aiOnnx, [tensor],\n comb_shape)\n perm = [0, 2, 1, 3] if not transpose else [0, 2, 3, 1]\n return self.builder.aiOnnx.transpose([tensor], perm=perm)\n\n # q = [micro_batch_size * seq_len, hidden_size]\n # kt = [hidden_size, micro_batch_size * seq_len]\n # v = [micro_batch_size * seq_len, hidden_size]\n q, kt, v = [extract_heads(t, i == 1) for i, t in enumerate(split_qkv)]\n\n # Attention calculation\n with self.builder.nameScope('Z'):\n scores = self.builder.aiOnnx.matmul([q, kt], \"AttentionDotProduct\")\n if not self.use_default_mem_proportion:\n self.builder.setAvailableMemoryProportion(\n scores, self.available_memory_proportion)\n\n scale = self.builder.aiOnnx.constant(\n np.array(1 / np.sqrt(self.qkv_length), self.dtype), \"Scale\")\n scores = self.builder.aiOnnx.mul([scores, scale])\n\n if masks:\n mask = self.attention_mask(masks)\n scores = self.builder.aiOnnx.add([scores, mask], \"ApplyMask\")\n\n scores = self.builder.aiOnnx.softmax([scores], axis=-1)\n scores = self.attn_dropout(scores)\n\n # x[micro_batch_size, attention_heads, sequence_length, sequence_length] * v[micro_batch_size, attention_heads, sequence_length, qkv_length]\n z = self.builder.aiOnnx.matmul([scores, v])\n if not self.use_default_mem_proportion:\n self.builder.setAvailableMemoryProportion(\n z, self.available_memory_proportion)\n\n # [micro_batch_size, attention_heads, sequence_length, qkv_length] -> [micro_batch_size, sequence_length, attention_heads, qkv_length]\n z = self.builder.aiOnnx.transpose([z], perm=[0, 2, 1, 3])\n # [micro_batch_size, sequence_length, attention_heads, qkv_length] -> [micro_batch_size*sequence_length, attention_heads * qkv_length]\n z = self.builder.reshape_const(\n self.builder.aiOnnx, [z],\n [self.seq_len * self.micro_batch_size, self.hidden_size])\n return z\n\n def __qkv_mul_subgraph(self, input_x, wt, b=None):\n\n x = self.builder.aiOnnx.matmul([input_x, wt])\n if self.serialize_matmul:\n self.builder.setSerializeMatMul({x}, 'output_channels', 3, True)\n if not self.use_default_mem_proportion:\n self.builder.setAvailableMemoryProportion(\n x, self.available_memory_proportion)\n if self.attention_bias:\n x = self.builder.aiOnnx.add([x, b])\n mul = x\n perm = [1, 0]\n t = self.builder.aiOnnx.transpose([mul], perm=perm)\n return mul\n\n def forward(self, input_x: str, masks: str):\n # Transform input -> query, keys and value\n if self.split_qkv:\n if self.attention_bias:\n q, k, v, projection_weight, q_bias, k_bias, v_bias, projection_bias = [\n param.popart_tensor for param in self.params\n ]\n qt = self.__qkv_mul_subgraph(input_x, q, q_bias)\n kt = self.__qkv_mul_subgraph(input_x, k, k_bias)\n vt = self.__qkv_mul_subgraph(input_x, v, v_bias)\n else:\n q, k, v, projection_weight = [param.popart_tensor for param in self.params]\n qt = self.__qkv_mul_subgraph(input_x, q)\n kt = self.__qkv_mul_subgraph(input_x, k)\n vt = 
self.__qkv_mul_subgraph(input_x, v)\n qkv = [qt, kt, vt]\n else:\n if self.attention_bias:\n qkv_weight, projection_weight, qkv_bias, projection_bias = [\n param.popart_tensor for param in self.params\n ]\n else:\n qkv_weight, projection_weight = [\n param.popart_tensor for param in self.params\n ]\n qkv = self.builder.aiOnnx.matmul([input_x, qkv_weight],\n 'DenseTransform')\n if self.serialize_matmul:\n self.builder.setSerializeMatMul({qkv}, 'output_channels', 3, True)\n if not self.use_default_mem_proportion:\n self.builder.setAvailableMemoryProportion(\n qkv, self.available_memory_proportion)\n if self.attention_bias:\n qkv = self.builder.aiOnnx.add([qkv, qkv_bias],\n 'DenseTransformBias')\n\n # Self-attention\n x = self.dotproduct_attention(qkv, masks)\n\n # Projection\n x = self.builder.aiOnnx.matmul([x, projection_weight], 'Projection')\n if not self.use_default_mem_proportion:\n self.builder.setAvailableMemoryProportion(\n x, self.available_memory_proportion)\n if self.attention_bias:\n x = self.builder.aiOnnx.add([x, projection_bias], 'ProjectionBias')\n\n if not self.residual:\n return x\n\n # Residual\n x = self.dropout(x)\n x = self.builder.aiOnnx.add([input_x, x], 'Residual')\n x = self.norm(x)\n return x\n\n\nclass FeedForward(Block):\n def __init__(self,\n name,\n input_size,\n ff_size,\n dropout,\n dropout_prob,\n epsilon,\n residual=True,\n intermediate_act_func='gelu',\n alpha=None,\n increment_scope=True,\n serialize_matmul=False,\n use_default_memory_proportion=True,\n available_memory_proportion=None,\n **kwargs):\n scope_provider = kwargs['scope_provider']\n self.apply_dropout = dropout\n scope = scope_provider.get_scope(name, 'next' if increment_scope else 'prev')\n super(FeedForward, self).__init__(params=[], scope=scope, **kwargs)\n self.residual = residual\n\n if serialize_matmul:\n split = Split(dim='output_channels',\n num_splits=ff_size // input_size)\n else:\n split = None\n self.dense1 = Dense(scope_provider.get_scope(\"1\", 'prev'),\n input_size,\n ff_size,\n split=split,\n activation=intermediate_act_func,\n alpha=alpha,\n use_default_memory_proportion=use_default_memory_proportion,\n available_memory_proportion=available_memory_proportion,\n **kwargs)\n if serialize_matmul:\n split = Split(dim='reducing_dim', num_splits=ff_size // input_size)\n else:\n split = None\n self.dense2 = Dense(scope_provider.get_scope(\"2\", \"prev\"),\n ff_size,\n input_size,\n split=split,\n activation=None,\n use_default_memory_proportion=use_default_memory_proportion,\n available_memory_proportion=available_memory_proportion,\n **kwargs)\n if residual:\n if dropout:\n self.dropout = Dropout(scope_provider.get_scope(\"Dropout\", \"prev\"),\n dropout_prob, **kwargs)\n self.norm = Norm(scope_provider.get_scope(\"Norm\", \"prev\"), input_size,\n epsilon, **kwargs)\n self.total_execution_phases = self.total_phases()\n\n def forward(self, input_x):\n x = self.dense1(input_x)\n x = self.dense2(x)\n if not self.residual:\n return x\n if self.apply_dropout:\n x = self.dropout(x)\n x = self.builder.aiOnnx.add([input_x, x])\n x = self.norm(x)\n return x\n\n\nclass MaskLM(Block):\n def __init__(self,\n name,\n vocab_size,\n hidden_size,\n sequence_length,\n micro_batch_size,\n num_mask_tokens,\n projection_weight,\n activation,\n slice_input=True,\n no_cls_layer=False,\n epsilon=None,\n projection_bias=False,\n **kwargs):\n scope_provider = kwargs['scope_provider']\n super(MaskLM, self).__init__(params=[],\n scope=scope_provider.get_scope(name=f'{name}', execution_phase='next'),\n 
**kwargs)\n self.sequence_len = sequence_length\n self.hidden_size = hidden_size\n self.micro_batch_size = micro_batch_size\n self.vocab_length = vocab_size\n self.num_mask_tokens = num_mask_tokens\n self.slice_input = slice_input\n self.no_cls_layer = no_cls_layer\n if not no_cls_layer:\n scope = scope_provider.get_scope(\"LMPrediction\", self.scope.execution_phase)\n self.pred_head_transform = Dense(scope,\n hidden_size,\n hidden_size,\n activation=activation,\n **kwargs)\n scope = scope_provider.get_scope('LMPrediction/Norm', self.scope.execution_phase)\n self.norm = Norm(scope, hidden_size,\n epsilon, **kwargs)\n\n decoder_scope = scope_provider.get_scope(\"Projection\", self.scope.execution_phase)\n self.decoder = Dense(decoder_scope,\n hidden_size,\n vocab_size,\n split=None,\n activation=None,\n params=[projection_weight, None],\n bias=projection_bias,\n **kwargs)\n self.total_execution_phases = self.total_phases()\n\n def forward(self, x_in):\n if self.slice_input:\n x = self.builder.reshape_const(\n self.builder.aiOnnx, [x_in],\n [self.micro_batch_size, self.sequence_len, self.hidden_size])\n\n x = self.builder.aiOnnxOpset9.slice([x],\n axes=[1],\n starts=[0],\n ends=[self.num_mask_tokens])\n\n x = self.builder.reshape_const(self.builder.aiOnnx, [x],\n [self.micro_batch_size * self.num_mask_tokens, self.hidden_size])\n if not self.no_cls_layer:\n x = self.pred_head_transform(x)\n x = self.norm(x)\n\n else:\n x = x_in\n\n x = self.decoder(x)\n x = self.builder.reshape_const(\n self.builder.aiOnnx, [x],\n [self.micro_batch_size, self.num_mask_tokens, self.vocab_length])\n return x\n\n\nclass NextSentencePred(Block):\n def __init__(self, name, micro_batch_size, sequence_length, hidden_size, cls_token_pos,\n **kwargs):\n scope_provider = kwargs['scope_provider']\n additional_scopes = [kwargs['builder'].outlineAttributes({'outline_scope': 'NSP'})]\n scope = scope_provider.get_scope(name, execution_phase='next', additional_scopes=additional_scopes)\n params = []\n super().__init__(scope, params, **kwargs)\n self.micro_batch_size = micro_batch_size\n self.sequence_length = sequence_length\n self.hidden_size = hidden_size\n self.cls_token_pos = cls_token_pos\n pooler_scope = scope_provider.get_scope(\"Pool\", execution_phase=self.scope.execution_phase)\n self.pooler = Dense(scope=pooler_scope,\n input_dim=hidden_size,\n output_dim=hidden_size,\n split=None,\n activation='tanh',\n **kwargs)\n classifier_scope = scope_provider.get_scope(\"Classifier\",\n execution_phase=self.scope.execution_phase)\n self.classifier = Dense(scope=classifier_scope,\n input_dim=hidden_size,\n output_dim=2,\n split=None,\n **kwargs)\n self.total_execution_phases = self.total_phases()\n\n def forward(self, x_in):\n x = self.builder.reshape_const(self.builder.aiOnnx, [x_in],\n [self.micro_batch_size, self.sequence_length, self.hidden_size])\n\n x = self.builder.aiOnnxOpset9.slice([x],\n axes=[1],\n starts=[self.cls_token_pos],\n ends=[self.cls_token_pos + 1])\n # This reshape is doing the job of a squeeze, but allows for in-place\n # operation.\n x = self.builder.reshape_const(self.builder.aiOnnx, [x],\n [self.micro_batch_size, self.hidden_size])\n x = self.pooler(x)\n return self.classifier(x)\n\n\nclass SquadProjection(Block):\n def __init__(self, name, micro_batch_size, sequence_length, hidden_size,\n **kwargs):\n scope_provider = kwargs['scope_provider']\n scope = scope_provider.get_scope(name, execution_phase='next')\n params = []\n super().__init__(scope, params, **kwargs)\n self.micro_batch_size = 
micro_batch_size\n self.sequence_length = sequence_length\n self.hidden_size = hidden_size\n classifier_scope = scope_provider.get_scope(name='', execution_phase=self.scope.execution_phase)\n self.classifier = Dense(scope=classifier_scope,\n input_dim=hidden_size,\n output_dim=2,\n split=None,\n **kwargs)\n self.total_execution_phases = self.total_phases()\n\n def forward(self, x_in):\n x = self.classifier(x_in)\n\n start_logits = self.builder.aiOnnxOpset9.slice(\n [x], axes=[1], starts=[0], ends=[1], debugContext='slice_ans_start')\n end_logits = self.builder.aiOnnxOpset9.slice(\n [x], axes=[1], starts=[1], ends=[2], debugContext='slice_ans_end')\n\n start_logits = self.builder.reshape_const(\n self.builder.aiOnnx, [start_logits],\n [self.micro_batch_size, self.sequence_length],\n debugContext=\"answer_start\")\n end_logits = self.builder.reshape_const(\n self.builder.aiOnnx, [end_logits],\n [self.micro_batch_size, self.sequence_length],\n debugContext=\"answer_end\")\n\n return start_logits, end_logits\n",
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nfrom collections import OrderedDict\nimport re\nimport logging\nimport torch\nimport torchvision\nimport poptorch\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom efficientnet_pytorch import EfficientNet, get_model_params\nfrom torchvision.models.resnet import model_urls as resnet_urls\nfrom torchvision.models.mobilenet import model_urls as mobilenet_urls\nimport sys\nsys.path.append('..')\nimport datasets\n\n\nmodel_urls = dict(resnet_urls.items() | mobilenet_urls.items())\nconvert_model_names = {\"resnext50\": \"resnext50_32x4d\",\n \"resnext101\": \"resnext101_32x8d\",\n \"mobilenet\": \"mobilenet_v2\"}\n\n\ndef create_efficientnet(model_name, pretrained=True, num_classes=1000, norm_layer=torch.nn.BatchNorm2d, expand_ratio=6, group_dim=1):\n \"\"\" Creates EfficientNet instance with the predefined parameters.\n Parameters:\n model_name: Name of the model.\n pretrained: if true the network is initialized with pretrained weights.\n norm_layer: The used normalization layer in the network. eg. torch.nn.Identity means no initialization.\n expand_ratio: The used expand ratio in the blocks. Official EfficientNet uses 6\n group_dim: Dimensionality of the depthwise convolution. Official EfficientNet uses 1.\n\n Returns:\n The initialized EfficientNet model.\n \"\"\"\n EfficientNet._check_model_name_is_valid(model_name)\n blocks_args, global_params = get_model_params(model_name, {\"num_classes\": num_classes})\n # Change expand ratio\n for idx in range(1, len(blocks_args)):\n blocks_args[idx] = blocks_args[idx]._replace(expand_ratio = expand_ratio)\n model = EfficientNet(blocks_args, global_params)\n model.set_swish(memory_efficient=False)\n if group_dim > 1:\n replace_en_depthwise_conv(model, group_dim)\n if not isinstance(norm_layer, torch.nn.BatchNorm2d):\n replace_bn(model, norm_layer)\n init_efficientnet(model)\n if pretrained:\n pretrained_model = EfficientNet.from_pretrained(model_name)\n load_modified_model_from_state(model, pretrained_model.state_dict())\n return model\n\n\ndef init_efficientnet(model):\n \"\"\"\n The method optimize the EfficientNet initialization.\n \"\"\"\n stack = [model]\n while len(stack) != 0:\n node = stack.pop()\n if isinstance(node, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(node.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(node, torch.nn.Linear):\n torch.nn.init.xavier_uniform_(node.weight)\n for name, child in node.named_children():\n stack.append(child)\n\n\ndef residual_normlayer_init(model):\n \"\"\"\n The method initialize the norm layer's weight part to be zero before the residual connection.\n It mimics to the networks to be shallower. 
It helps to converge in the early part of the training.\n Only works on ResNet and ResNext networks.\n \"\"\"\n for layer_id in range(1, 5):\n layer = getattr(model, \"layer\" + str(layer_id))\n for block in layer:\n if hasattr(block, 'downsample') and block.downsample is not None:\n norm_layer = block.downsample[-1]\n else:\n if isinstance(block, torchvision.models.resnet.BasicBlock):\n norm_layer = block.bn2\n elif isinstance(block, torchvision.models.resnet.Bottleneck):\n norm_layer = block.bn3\n if hasattr(norm_layer, \"weight\"):\n torch.nn.init.zeros_(norm_layer.weight)\n\n\ndef replace_en_depthwise_conv(model, group_dim=1):\n \"\"\"\n Modify the depthwise convolutions in EfficientNet to have the given group dimensionality.\n \"\"\"\n for block in model._blocks:\n groups = max(block._depthwise_conv.in_channels // group_dim, 1)\n custom_conv = type(block._depthwise_conv)\n new_conv_layer = custom_conv(in_channels=block._depthwise_conv.in_channels,\n out_channels=block._depthwise_conv.out_channels,\n groups=groups,\n kernel_size=block._depthwise_conv.kernel_size,\n stride=block._depthwise_conv.stride,\n bias=False,\n image_size=224) # Use fake image size as it'll have no effect.\n new_conv_layer.static_padding = block._depthwise_conv.static_padding\n replace_layer(block, '_depthwise_conv', new_conv_layer)\n\n\ndef replace_bn(model, norm_layer):\n \"\"\"Replaces the normalization layers to the given normalization layer.\n Parameters:\n model: The model.\n norm_layer: The inserted torch.nn.Module instance.\n \"\"\"\n stack = [model]\n while len(stack) != 0:\n node = stack.pop()\n for name, child in node.named_children():\n stack.append(child)\n if isinstance(child, torch.nn.BatchNorm2d):\n new_layer = norm_layer(child.num_features)\n replace_layer(node, name, new_layer)\n\n\ndef replace_layer(parent, field_name, new_layer):\n if isinstance(parent, torch.nn.Sequential):\n parent[int(field_name)] = new_layer\n else:\n setattr(parent, field_name, new_layer)\n\n\ndef get_module_and_parent_by_name(node, split_tokens):\n child_to_find = split_tokens[0]\n for name, child in node.named_children():\n if name == child_to_find:\n if len(split_tokens) == 1:\n return node, child, name\n else:\n return get_module_and_parent_by_name(child, split_tokens[1:])\n\n return None, None, None\n\n\ndef load_modified_model(model, model_name):\n if model_name in convert_model_names.keys():\n model_name = convert_model_names[model_name]\n\n model_url = model_urls[model_name]\n trained_state_dict = load_state_dict_from_url(model_url, progress=True)\n return load_modified_model_from_state(model, trained_state_dict)\n\n\ndef load_modified_model_from_state(model, pretrained_state_dict):\n default_state_dict = model.state_dict()\n\n def get_weight(layer):\n if layer in pretrained_state_dict.keys() and pretrained_state_dict[layer].size() == default_state_dict[layer].size():\n return pretrained_state_dict[layer]\n else:\n return default_state_dict[layer]\n corrected_state_dict = OrderedDict({layer: get_weight(layer) for layer in default_state_dict.keys()})\n model.load_state_dict(corrected_state_dict)\n return model\n\n\ndef full_precision_norm(model, norm_layer):\n stack = [model]\n while len(stack) != 0:\n node = stack.pop()\n for name, child in node.named_children():\n stack.append(child)\n if isinstance(child, norm_layer):\n child.float()\n replace_layer(node, name, torch.nn.Sequential(datasets.ToFloat(), child, datasets.ToHalf()))\n\n\ndef recompute_model(model, recompute_checkpoints):\n # Put recomutation 
checkpoint if regular expression matches\n for name, modules in model.named_modules():\n name = name.replace('.', '/')\n for checkpoint_re in recompute_checkpoints:\n if re.match(checkpoint_re, name):\n parent, node, field_or_idx_str = get_module_and_parent_by_name(model, name.split('/'))\n replace_layer(parent, field_or_idx_str, RecomputationCheckpoint(node))\n break\n\n\nclass RecomputationCheckpoint(torch.nn.Module):\n def __init__(self, layer):\n super().__init__()\n self.layer = layer\n\n def forward(self, x):\n y = self.layer(x)\n return poptorch.recomputationCheckpoint(y)\n",
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport torch\nfrom torchvision import transforms\n\n\nnormalization_parameters = {\"mean\": [0.485, 0.456, 0.406],\n \"std\": [0.229, 0.224, 0.225]}\n\nuse_bbox_info_config = {False: {\"max_trial\": 1, \"minimum_bbox_interlap\": 0.0},\n True: {\"max_trial\": 10, \"minimum_bbox_interlap\": 0.1}}\n\n\ndef get_preprocessing_pipeline(train, input_size=224, half_precision=False, normalize=True, eightbit=False, use_bbox_info=False):\n \"\"\"\n Return optimized pipeline, which contains fused transformations.\n \"\"\"\n pipeline_steps = []\n if train:\n pipeline_steps.append(RandomResizedFlipCrop(input_size, **use_bbox_info_config[use_bbox_info]))\n else:\n pipeline_steps = [transforms.Resize(256), transforms.CenterCrop(input_size)]\n\n if normalize:\n pipeline_steps.append(NormalizeToTensor(mean=normalization_parameters[\"mean\"], std=normalization_parameters[\"std\"]))\n else:\n # Return tensor\n pipeline_steps.append(NormalizeToTensor.pil_to_tensor)\n # if not normalized full precision, need to be converted to float.\n if not half_precision:\n pipeline_steps.append(ToFloat())\n\n if eightbit:\n pipeline_steps.append(ToByte())\n elif half_precision:\n pipeline_steps.append(ToHalf())\n\n return transforms.Compose(pipeline_steps)\n\n\nclass ToHalf(torch.nn.Module):\n def forward(self, tensor):\n return tensor.half()\n\n\nclass ToFloat(torch.nn.Module):\n def forward(self, tensor):\n return tensor.float()\n\n\nclass ToByte(torch.nn.Module):\n def forward(self, tensor):\n return tensor.byte()\n\n\nclass RandomResizedFlipCrop(transforms.RandomResizedCrop):\n \"\"\"\n Fuse RandomResizedCrop and RandomHorizontalFlip augmentation.\n The horizontal flip happens before the resize, depends on the croped imge size\n \"\"\"\n def __init__(self, *args, max_trial=1, minimum_bbox_interlap=0.0, **kwargs):\n self.max_trial = max_trial\n self.minimum_bbox_interlap = minimum_bbox_interlap\n super(RandomResizedFlipCrop, self).__init__(*args, **kwargs)\n\n def get_bbox(self, img, bbox=None):\n if bbox is None:\n return self.get_params(img, self.scale, self.ratio)\n trial_nr = 1\n # adjust bbox with image sizes\n w, h = transforms.functional._get_image_size(img)\n bbox = bbox[0] * w, bbox[1] * h, bbox[2] * w, bbox[3] * h\n while trial_nr < self.max_trial:\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n dx = min(i + h, bbox[2]) - max(i, bbox[0])\n dy = min(j + w, bbox[3]) - max(j, bbox[1])\n if h * w * self.minimum_bbox_interlap <= dx * dy:\n return i, j, h, w\n trial_nr += 1\n return i, j, h, w\n\n\n def __call__(self, img):\n if isinstance(img, tuple): # unpack bbox values if available\n bbox = img[1]\n img = img[0]\n else:\n bbox = None\n i, j, h, w = self.get_bbox(img, bbox)\n if isinstance(img, torch.Tensor):\n tensor = torch.unsqueeze(img, 0)\n tensor = transforms.functional_tensor.crop(tensor, i, j, h, w)\n if torch.rand(1) < 0.5:\n tensor = self.fast_hflip(tensor)\n tensor = tensor.float()\n tensor = torch.nn.functional.interpolate(tensor, size=self.size, mode='bilinear', align_corners=False)\n return tensor.squeeze(0)\n else:\n img = transforms.functional.crop(img, i, j, h, w)\n if torch.rand(1) < 0.5:\n img = transforms.functional_pil.hflip(img)\n img = transforms.functional.resize(img, self.size, self.interpolation)\n return img\n\n @staticmethod\n def fast_hflip(x):\n reverse_index = torch.arange(x.size()[-1] - 1, -1, -1)\n return x[:, :, :, reverse_index]\n\n\nclass NormalizeToTensor(torch.nn.Module):\n def __init__(self, mean, 
std):\n \"\"\"\n Fuse ToTensor and Normalize operation.\n Expected input is a PIL image and the output is the normalized float tensor.\n \"\"\"\n # fuse: division by 255 and the normalization\n # Convert division to multiply\n mean = torch.as_tensor(mean)\n std = torch.as_tensor(std)\n self.mul = (1.0/(255.0 * std)).view(-1, 1, 1)\n self.sub = (mean / std).view(-1, 1, 1)\n super().__init__()\n\n def forward(self, img):\n if not isinstance(img, torch.Tensor):\n img = self.pil_to_tensor(img).float()\n if not img.dtype == torch.float:\n img = img.float()\n img.mul_(self.mul)\n img.sub_(self.sub)\n return img\n\n @staticmethod\n def pil_to_tensor(pic):\n # If it is already tensor, return it.\n if isinstance(pic, torch.Tensor):\n return pic\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))\n # put it from HWC to CHW format\n img = img.permute((2, 0, 1)).contiguous()\n return img\n",
"# Copyright (c) 2020 Graphcore Ltd. All Rights Reserved.\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file has been modified by Graphcore Ltd.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport json\nimport math\nimport re\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow.python import ipu\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n max_predictions_per_seq=20,\n use_attention_projection_bias=True,\n use_cls_layer=False,\n use_qkv_bias=False,\n use_qkv_split=False,\n task='pretraining',\n matmul_serialize_factor=6,\n static_mask=False,\n compute_acc = False,\n dtype=tf.float32):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n max_predictions_per_seq: Number of masked tokens which need to be predicted in MLM task.\n use_attention_projection_bias: Whether to use bias in linear projection behind attention layer.\n This is for model optimization.\n use_cls_layer: Include the CLS layer in pretraining.\n This layer comes after the encoders but before the projection for the MLM loss.\n use_qkv_bias: Whether to use bias in QKV calculation of attention layer.\n This is for model optimization.\n dtype: Data type.\n \"\"\"\n assert hidden_size % num_attention_heads == 0,\\\n \"The hidden size (%d) is not a multiple of the number of attention \" \\\n \"heads (%d)\" % (hidden_size, num_attention_heads)\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.dtype = dtype\n self.attention_head_size = int(\n self.hidden_size / self.num_attention_heads)\n self.max_predictions_per_seq = max_predictions_per_seq\n self.use_attention_projection_bias = use_attention_projection_bias\n self.use_cls_layer = use_cls_layer\n self.use_qkv_bias = use_qkv_bias\n self.use_qkv_split = use_qkv_split\n self.task = task\n self.matmul_serialize_factor = matmul_serialize_factor\n self.static_mask = static_mask\n self.compute_acc = compute_acc\n\n @classmethod\n def from_json(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config_json = dict()\n for (key, value) in six.iteritems(json_object):\n config_json[key] = value\n return config_json\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n if key in config.__dict__:\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_json(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass BertModel(object):\n \"\"\"\n BERT model (\"Bidirectional Encoder Representations from Transformers\").\n \"\"\"\n\n def __init__(self, config, is_training):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. 
Controls\n whether dropout will be applied.\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n self.layer_count = 0\n self.bert_config = config\n self.is_training = is_training\n if not is_training:\n self.bert_config.hidden_dropout_prob = 0.0\n self.bert_config.attention_probs_dropout_prob = 0.0\n\n def embedding(self, input_ids, embedding_size, name, num_splits=1):\n shape = [embedding_size, self.bert_config.hidden_size]\n\n embedding_table = tf.get_variable(\n dtype=self.bert_config.dtype,\n trainable=True,\n name=name,\n shape=shape,\n initializer=create_initializer(self.bert_config.initializer_range))\n\n if name == \"word_embeddings\":\n self.embedding_table = embedding_table\n\n output = ipu.ops.embedding_ops.embedding_lookup(\n embedding_table, tf.reshape(input_ids, [-1]), serialization_factor=num_splits)\n return tf.reshape(output, [input_ids.shape[0], input_ids.shape[1], -1])\n\n def embeddings_layer(self, input_ids, input_mask, segment_ids):\n \"\"\"Combine word embeddings, position embeddings and segmentation embeddings.\"\"\"\n word_embeddings = self.embedding(\n input_ids,\n self.bert_config.vocab_size,\n name=\"word_embeddings\",\n num_splits=self.bert_config.matmul_serialize_factor)\n _batch_size, _seq_len = word_embeddings.shape[:2]\n dummy_pos_index = tf.reshape(\n tf.tile(tf.range(_seq_len), [_batch_size]), [-1, _seq_len])\n position_embeddings = self.embedding(\n dummy_pos_index, self.bert_config.max_position_embeddings, name=\"position_embeddings\")\n seg_onehot = tf.one_hot(segment_ids,\n depth=self.bert_config.type_vocab_size,\n dtype=self.bert_config.dtype)\n seg_weights = tf.get_variable(dtype=self.bert_config.dtype,\n name=\"token_type_embeddings\",\n shape=[self.bert_config.type_vocab_size,\n self.bert_config.hidden_size],\n initializer=create_initializer(\n self.bert_config.initializer_range),\n trainable=True)\n segment_embeddings = tf.matmul(seg_onehot, seg_weights)\n\n full_embeddings = tf.add(word_embeddings, position_embeddings)\n full_embeddings = tf.add(full_embeddings, segment_embeddings)\n full_embeddings = layer_norm_and_dropout(\n full_embeddings, self.bert_config.hidden_dropout_prob)\n\n return full_embeddings\n\n def self_attention(self, input_tensor, mask=None):\n \"\"\"Performs multi-headed self-attention on `input_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". Each timestep in `input_tensor` attends to the\n corresponding sequence in `input_tensor` itself, and returns a fixed-with vector.\n\n This function first projects `input_tensor` into a \"query\" tensor and\n `input_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n hidden_size].\n mask: (optional) float32 Tensor of shape [batch_size,\n seq_length, seq_length]. The values should be -1000 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n\n Returns:\n float Tensor of shape [batch_size * seq_length,\n num_attention_heads * size_per_head]\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=[2, 3])\n assert len(input_shape) in [2, 3], \\\n f\"Input shape of attention moduler should be `[batch_size, seq_length]` or `[batch_size, seq_length, seq_length]`.\"\n batch_size, seq_length = input_shape[:2]\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # S = `input_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n num_attention_heads = int(self.bert_config.num_attention_heads)\n size_per_head = int(self.bert_config.attention_head_size)\n\n input_tensor_2d = reshape_to_matrix(input_tensor)\n\n # We combine the query, key and value layers to reduce memory consume.\n # `qkv_layer` = [B*S, 3N*H] if use_qkv_split:\n if self.bert_config.use_qkv_split:\n head_shape = [num_attention_heads*size_per_head, num_attention_heads*size_per_head]\n with tf.variable_scope('query'):\n q_weight = tf.get_variable(\n dtype=self.bert_config.dtype,\n name=\"kernel\",\n shape=head_shape,\n initializer=create_initializer(self.bert_config.initializer_range),\n trainable=True)\n with tf.variable_scope('key'):\n k_weight = tf.get_variable(\n dtype=self.bert_config.dtype,\n name=\"kernel\",\n shape=head_shape,\n initializer=create_initializer(self.bert_config.initializer_range),\n trainable=True)\n with tf.variable_scope('value'):\n v_weight = tf.get_variable(\n dtype=self.bert_config.dtype,\n name=\"kernel\",\n shape=head_shape,\n initializer=create_initializer(self.bert_config.initializer_range),\n trainable=True)\n qkv_weight = tf.concat([q_weight, k_weight, v_weight], axis=-1)\n\n else:\n with tf.variable_scope('kernel'):\n qkv_weight = tf.get_variable(\n dtype=self.bert_config.dtype,\n name=\"qkv_weight\",\n shape=[num_attention_heads*size_per_head, 3*num_attention_heads*size_per_head],\n initializer=create_initializer(self.bert_config.initializer_range),\n trainable=True)\n\n @ipu.outlined_function\n def inner_attention_func():\n qkv = tf.matmul(input_tensor_2d, qkv_weight)\n\n if self.bert_config.use_qkv_bias:\n qkv_bias = tf.get_variable(\n dtype=self.bert_config.dtype,\n name=\"qkv_bias\",\n shape=[3*num_attention_heads*size_per_head],\n initializer=tf.zeros_initializer(),\n trainable=True\n )\n qkv = tf.nn.bias_add(qkv, qkv_bias)\n # Split and transpose to [B, N, S, H]\n query_layer, key_layer, value_layer = [\n transpose_for_scores(layer, int(batch_size), int(\n num_attention_heads), int(seq_length), int(size_per_head))\n for layer in tf.split(qkv, [int(num_attention_heads*size_per_head)]*3, axis=-1, name='qkv_split')\n ]\n\n # `attention_scores` = [B, N, S, S]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(\n attention_scores, 1.0 / math.sqrt(float(size_per_head)))\n if mask is not None:\n # `mask` = [B, 1, 1, S]\n attention_scores = tf.add(\n attention_scores, tf.expand_dims(mask, axis=1))\n\n # `attention_probs` = [B, N, S, S]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(\n attention_probs, 
self.bert_config.attention_probs_dropout_prob)\n\n # `context_layer` = [B, N, S, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, S, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B*S, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * seq_length, self.bert_config.hidden_size])\n\n return context_layer\n context_layer = inner_attention_func()\n return context_layer\n\n def attention_projection(self, input_tensor, attention_output):\n with tf.variable_scope(\"projection\"):\n attention_output = dense_layer(\n attention_output,\n self.bert_config.hidden_size,\n kernel_initializer=create_initializer(\n self.bert_config.initializer_range),\n use_bias=self.bert_config.use_attention_projection_bias)\n attention_output = tf.reshape(attention_output, input_tensor.shape)\n attention_output = dropout_residual_add_layer_norm(\n attention_output, input_tensor,\n self.bert_config.hidden_dropout_prob)\n return attention_output\n\n def feed_forward(self, attention_output):\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = dense_layer(\n attention_output,\n self.bert_config.intermediate_size,\n activation=gelu,\n kernel_initializer=create_initializer(self.bert_config.initializer_range))\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n feed_forward_output = dense_layer(\n intermediate_output,\n self.bert_config.hidden_size,\n kernel_initializer=create_initializer(self.bert_config.initializer_range))\n feed_forward_output = dropout_residual_add_layer_norm(\n feed_forward_output,\n attention_output,\n self.bert_config.hidden_dropout_prob\n )\n return feed_forward_output\n\n def encoder(self, input_tensor, attention_mask, masked_lm_positions=None):\n \"\"\"Encoder layer.\"\"\"\n original_input_shape = input_tensor.shape\n\n with tf.variable_scope(\"bert\"):\n with tf.variable_scope(\"encoder\"):\n with tf.variable_scope(\"layer_%d\" % self.layer_count):\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = self.self_attention(\n input_tensor, mask=attention_mask)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(\n attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = self.attention_projection(\n input_tensor, attention_output)\n input_tensor = self.feed_forward(attention_output)\n\n input_tensor = ipu.pipelining_ops.recomputation_checkpoint(input_tensor)\n self.layer_count += 1\n # Reshape the last hidden layer outputs.\n if self.layer_count == self.bert_config.num_hidden_layers:\n input_tensor = reshape_from_matrix(\n input_tensor, original_input_shape)\n # Return the masked tokens tensor to avoid major changes in `multi_stage_wrapper`.\n # However this might be optimized later.\n if self.bert_config.task.lower() == 'pretraining':\n masked_tokens_tensor = self.lm_projection(\n input_tensor, masked_lm_positions)\n\n return {\n 'layer_output': input_tensor,\n 'masked_tokens_tensor': masked_tokens_tensor,\n }\n return {\n \"input_tensor\": input_tensor\n }\n\n def 
pooler(self, input_tensor, cls_position=0):\n with tf.variable_scope(\"pooler\"):\n # Pool out the [CLS] token.\n if self.bert_config.static_mask:\n cls_position = self.bert_config.max_predictions_per_seq\n cls_token_tensor = tf.squeeze(\n input_tensor[:, cls_position:cls_position+1, :], axis=1) # [batch_size, hidden_size]\n pooled_output = tf.layers.dense(\n cls_token_tensor,\n self.bert_config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(self.bert_config.initializer_range))\n return pooled_output\n\n def lm_projection(self, input_tensor, masked_lm_positions):\n if self.bert_config.static_mask:\n masked_tokens_tensor = tf.slice(input_tensor, [0, 0, 0], [-1, self.bert_config.max_predictions_per_seq, -1])\n masked_tokens_tensor = tf.reshape(masked_tokens_tensor, [-1, masked_tokens_tensor.shape[2]])\n else:\n masked_tokens_tensor = gather_indexes(input_tensor, masked_lm_positions)\n\n if self.bert_config.use_cls_layer:\n with tf.variable_scope(\"cls/predictions/transform\"):\n masked_tokens_tensor = tf.layers.dense(\n masked_tokens_tensor,\n units=self.bert_config.hidden_size,\n activation=get_activation(self.bert_config.hidden_act),\n kernel_initializer=create_initializer(\n self.bert_config.initializer_range))\n masked_tokens_tensor = layer_norm(masked_tokens_tensor)\n return masked_tokens_tensor\n\n def nsp_head(self, input_tensor):\n \"\"\"Extract [CLS] tokens and do a linear projection\"\"\"\n with tf.variable_scope(\"cls/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n dtype=self.bert_config.dtype,\n shape=[2, self.bert_config.hidden_size],\n initializer=create_initializer(self.bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\",\n dtype=self.bert_config.dtype,\n shape=[2],\n initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n return logits\n\n def mlm_head(self, masked_tokens_tensor):\n \"\"\"Slice out the masked tokens and do a linear projection.\"\"\"\n with tf.variable_scope(\"cls/predictions\"):\n logits = ipu.math_ops.serialized_matmul(masked_tokens_tensor,\n self.embedding_table,\n serialization_factor=self.bert_config.matmul_serialize_factor,\n serialization_dimension=\"b_rows\",\n transpose_b=True)\n return {\"mlm_logits\": logits}\n\n def squad_head(self, input_tensor, dtype):\n \"\"\"Take linear projection on last hidden layer output.\"\"\"\n with tf.variable_scope(\"cls/squad\"):\n input_tensor = tf.cast(input_tensor, dtype=dtype)\n batch_size, seq_length, hidden_size = input_tensor.shape\n\n output_weights = tf.get_variable(\n name=\"output_weights\",\n shape=[2, self.bert_config.hidden_size],\n dtype=dtype,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n name=\"output_bias\",\n shape=[2],\n dtype=dtype,\n initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(input_tensor,\n [batch_size * seq_length, hidden_size])\n\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n start_logits, end_logits = tf.unstack(logits, axis=0)\n return start_logits, end_logits\n\n def embedding_lookup_layer(self,\n input_ids,\n ):\n \"\"\"\n pipeline stages of embedding_lookup\n \"\"\"\n with tf.variable_scope(\"bert\"):\n with 
tf.variable_scope(\"embeddings\"):\n word_embeddings = self.embedding(\n input_ids, self.bert_config.vocab_size,\n name=\"word_embeddings\",\n num_splits=self.bert_config.matmul_serialize_factor)\n return {\"word_embeddings\": word_embeddings\n }\n\n def embedding_postprocessor_layer(self,\n word_embeddings,\n input_ids,\n input_mask=None,\n segment_ids=None,\n input_position=None,\n mask_padding_index=None,\n seq_padding_index=None\n ):\n \"\"\"\n pipeline stages of embedding_postprocessor\n \"\"\"\n with tf.variable_scope(\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n _batch_size, _seq_len = word_embeddings.shape[:2]\n if self.bert_config.static_mask:\n position_embeddings = self.embedding(input_position, self.bert_config.max_position_embeddings, name=\"position_embeddings\")\n else:\n dummy_pos_index = tf.reshape(\n tf.tile(tf.range(_seq_len), [_batch_size]), [-1, _seq_len])\n position_embeddings = self.embedding(\n dummy_pos_index, self.bert_config.max_position_embeddings, name=\"position_embeddings\")\n\n seg_onehot = tf.one_hot(segment_ids,\n depth=self.bert_config.type_vocab_size,\n dtype=self.bert_config.dtype)\n seg_weights = tf.get_variable(dtype=self.bert_config.dtype,\n name=\"token_type_embeddings\",\n shape=[self.bert_config.type_vocab_size,\n self.bert_config.hidden_size],\n initializer=create_initializer(\n self.bert_config.initializer_range),\n trainable=True)\n segment_embeddings = tf.matmul(seg_onehot, seg_weights)\n full_embeddings = tf.add(word_embeddings, position_embeddings)\n full_embeddings = tf.add(full_embeddings, segment_embeddings)\n full_embeddings = layer_norm_and_dropout(\n full_embeddings, self.bert_config.hidden_dropout_prob)\n\n if self.bert_config.static_mask:\n attention_mask = attention_static_remasking(\n mask_padding_index, _seq_len.value, seq_padding_index,\n self.bert_config.max_predictions_per_seq,\n self.bert_config.dtype)\n else:\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask, self.bert_config.dtype)\n\n return {\n \"input_tensor\": full_embeddings,\n \"attention_mask\": attention_mask\n }\n\n def get_next_sentence_output_layer(self,\n layer_output,\n mlm_logits,\n masked_lm_ids,\n masked_lm_weights,\n next_sentence_labels):\n with tf.variable_scope('bert'):\n pooled_output = self.pooler(layer_output)\n nsp_logits = self.nsp_head(pooled_output)\n # Calculate MLM loss\n with tf.variable_scope(\"cls/predictions\"):\n log_probs = tf.nn.log_softmax(mlm_logits, axis=-1)\n label_ids = tf.reshape(masked_lm_ids, [-1])\n label_weights = tf.reshape(masked_lm_weights, [-1])\n one_hot_labels = tf.one_hot(\n tf.cast(label_ids, dtype=tf.int32), depth=self.bert_config.vocab_size, dtype=self.bert_config.dtype)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). 
The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n mlm_loss = numerator / denominator\n if self.bert_config.compute_acc:\n # Calculate `mlm_acc`\n results = tf.cast(tf.argmax(log_probs, -1), dtype=tf.int32)\n predictions = tf.cast(tf.equal(results, label_ids), dtype=tf.float16)\n predictions = tf.cast(predictions * label_weights, dtype=tf.float32)\n\n mlm_acc = tf.reduce_sum(predictions)\n total_attempted = tf.cast(tf.reduce_sum(label_weights), dtype=tf.float32)\n mlm_acc = mlm_acc / total_attempted\n else:\n mlm_acc = tf.get_variable('mlm_acc', initializer = -1.0, trainable = False, dtype = tf.float32)\n\n # Calculate NSP loss\n with tf.variable_scope(\"cls/seq_relationship\"):\n log_probs = tf.nn.log_softmax(nsp_logits, axis=-1)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n one_hot_labels = tf.one_hot(\n next_sentence_labels, depth=2, dtype=self.bert_config.dtype)\n per_example_loss = - \\\n tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n nsp_loss = tf.reduce_mean(per_example_loss)\n\n if self.bert_config.compute_acc:\n # Calculate the `nsp_acc`\n nsp_acc = tf.reduce_mean(tf.cast(tf.equal(\n tf.cast(tf.argmax(log_probs, -1), dtype=tf.int32),\n next_sentence_labels), dtype=tf.float32))\n else:\n nsp_acc = tf.get_variable('nsp_acc', initializer = -1.0, trainable = False, dtype = tf.float32)\n\n outfeed_mlm_loss = tf.cast(mlm_loss, dtype = tf.float32)\n outfeed_nsp_loss = tf.cast(nsp_loss, dtype = tf.float32)\n\n return {\"mlm_loss\": outfeed_mlm_loss, \"nsp_loss\": outfeed_nsp_loss,\n \"mlm_acc\": mlm_acc, \"nsp_acc\": nsp_acc}\n\n def get_loc_logic_output_layer(self,\n start_positions,\n end_positions,\n input_tensor,\n ):\n # This is the loss and accuracy for SQuAD\n dtype_loss = tf.float32\n start_logits, end_logits = self.squad_head(\n input_tensor, dtype_loss)\n\n if not self.is_training:\n return {'start_logits': start_logits, 'end_logits': end_logits}\n\n def compute_loss(logits, positions):\n seq_len = logits.shape[1]\n logits_fp32 = tf.cast(logits, dtype=dtype_loss)\n one_hot_positions = tf.one_hot(positions, depth=seq_len, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits_fp32, axis=-1)\n loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n loss = tf.cast(loss, dtype=logits.dtype)\n return loss\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n return {'total_loss': total_loss}\n\n\ndef gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n batch_size, seq_length, width = get_shape_list(\n sequence_tensor, expected_rank=3)\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = ipu.ops.embedding_ops.embedding_lookup(\n flat_sequence_tensor, flat_positions, serialization_factor=1)\n output_tensor = tf.reshape(output_tensor, [-1, width])\n return output_tensor\n\n\ndef transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, 
[batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n return ipu.nn_ops.gelu(x)\n\n\ndef get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)\n\n\ndef get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = []\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map.append(name_to_variable[name])\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)\n\n\ndef dropout(input_tensor, dropout_prob=None):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n # We use the IPU-specific dropout.\n output = ipu.ops.rand_ops.dropout(input_tensor, rate=dropout_prob)\n return output\n\n\ndef layer_norm(input_tensor, name='LayerNorm'):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n\n x_reshaped = tf.reshape(input_tensor, (-1, input_tensor.shape[-1]))\n # We use the IPU-specific group_norm() operation.\n y = ipu.normalization_ops.group_norm(\n x_reshaped, groups=1, epsilon=0.001, scope=name)\n return tf.reshape(y, input_tensor.shape)\n\n\ndef layer_norm_and_dropout(input_tensor, dropout_prob, name=\"LayerNorm\"):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor\n\n\ndef create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)\n\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask, dtype):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size, from_seq_length = from_shape[:2]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), dtype=dtype)\n\n # We don't assume that `from_tensor` is a mask (although it could be). We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=dtype)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n mask = (1.0 - mask) * -1000.0\n\n return mask\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor\n\n\ndef reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))\n\n\ndef attention_static_remasking(mask_padding_index, seq_length,\n seq_padding_index, num_masked_inputs,\n data_dtype):\n \"\"\"Creates the attention mask when the \"static_mask\" mode is used.\n In this mode the first `num_masked_inputs` tokens are always\n masked. 
This function handles variable number of predicted tokens\n and sequence lengths.\n \"\"\"\n batch_size = int(mask_padding_index.shape[0])\n base_value = np.arange(seq_length)\n base = tf.constant(base_value, dtype=tf.int32)\n\n # Tokens mask\n mmask = tf.less(base, mask_padding_index)\n _mask = tf.constant(np.greater_equal(base_value, num_masked_inputs),\n np.bool)\n mmask = tf.logical_or(mmask, _mask)\n\n # Sequence mask\n smask = tf.less(base, seq_padding_index)\n final_mask = tf.logical_and(mmask, smask)\n final_mask = tf.reshape(final_mask, [batch_size, 1, seq_length])\n\n final_mask = (1.0 - tf.cast(final_mask, data_dtype)) * -1000.0\n\n return final_mask\n\n\ndef dropout_residual_add_layer_norm(input_tensor,\n residual_tensor,\n dropout_prob):\n @ipu.outlined_function\n def inner_func():\n output = residual_tensor + dropout(input_tensor, dropout_prob)\n output = layer_norm(output)\n return output\n return inner_func()\n\n\ndef dense_layer(input_tensor,\n num_units,\n kernel_initializer,\n activation=None,\n use_bias=True):\n @ipu.outlined_function\n def inner_func():\n return tf.layers.dense(input_tensor,\n num_units,\n use_bias=use_bias,\n activation=activation,\n kernel_initializer=kernel_initializer)\n return inner_func()\n"
] | [
[
"numpy.greater_equal",
"numpy.arange",
"numpy.array",
"numpy.sqrt"
],
[
"torch.nn.init.zeros_",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.functional.interpolate",
"torch.rand",
"torch.unsqueeze",
"torch.as_tensor"
],
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.nn.log_softmax",
"tensorflow.gfile.GFile",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.python.ipu.normalization_ops.group_norm",
"tensorflow.logical_or",
"numpy.arange",
"tensorflow.truncated_normal_initializer",
"tensorflow.layers.dense",
"tensorflow.squeeze",
"numpy.greater_equal",
"tensorflow.add",
"tensorflow.train.list_variables",
"tensorflow.argmax",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.zeros_initializer",
"tensorflow.one_hot",
"tensorflow.python.ipu.nn_ops.gelu",
"tensorflow.python.ipu.ops.rand_ops.dropout",
"tensorflow.nn.bias_add",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.python.ipu.ops.embedding_ops.embedding_lookup",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.python.ipu.math_ops.serialized_matmul",
"tensorflow.python.ipu.pipelining_ops.recomputation_checkpoint",
"tensorflow.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
zkneupper/audio | [
"1f136671b84071a2fe1d5b762df64f3a76310c31",
"1f136671b84071a2fe1d5b762df64f3a76310c31",
"1f136671b84071a2fe1d5b762df64f3a76310c31"
] | [
"examples/pipeline_wav2letter/main.py",
"examples/pipeline_wav2letter/datasets.py",
"torchaudio/prototype/rnnt_loss.py"
] | [
"import argparse\nimport logging\nimport os\nimport string\nfrom datetime import datetime\nfrom time import time\n\nimport torch\nimport torchaudio\nfrom torch.optim import SGD, Adadelta, Adam, AdamW\nfrom torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchaudio.datasets.utils import bg_iterator\nfrom torchaudio.models.wav2letter import Wav2Letter\n\nfrom ctc_decoders import GreedyDecoder\nfrom datasets import collate_factory, split_process_librispeech\nfrom languagemodels import LanguageModel\nfrom metrics import levenshtein_distance\nfrom transforms import Normalize, UnsqueezeFirst\nfrom utils import MetricLogger, count_parameters, save_checkpoint\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--type\",\n metavar=\"T\",\n default=\"mfcc\",\n choices=[\"waveform\", \"mfcc\"],\n help=\"input type for model\",\n )\n parser.add_argument(\n \"--freq-mask\",\n default=0,\n type=int,\n metavar=\"N\",\n help=\"maximal width of frequency mask\",\n )\n parser.add_argument(\n \"--win-length\",\n default=400,\n type=int,\n metavar=\"N\",\n help=\"width of spectrogram window\",\n )\n parser.add_argument(\n \"--hop-length\",\n default=160,\n type=int,\n metavar=\"N\",\n help=\"width of spectrogram window\",\n )\n parser.add_argument(\n \"--time-mask\",\n default=0,\n type=int,\n metavar=\"N\",\n help=\"maximal width of time mask\",\n )\n parser.add_argument(\n \"--workers\",\n default=0,\n type=int,\n metavar=\"N\",\n help=\"number of data loading workers\",\n )\n parser.add_argument(\n \"--checkpoint\",\n default=\"\",\n type=str,\n metavar=\"PATH\",\n help=\"path to latest checkpoint\",\n )\n parser.add_argument(\n \"--epochs\",\n default=200,\n type=int,\n metavar=\"N\",\n help=\"number of total epochs to run\",\n )\n parser.add_argument(\n \"--start-epoch\", default=0, type=int, metavar=\"N\", help=\"manual epoch number\"\n )\n parser.add_argument(\n \"--reduce-lr-valid\",\n action=\"store_true\",\n help=\"reduce learning rate based on validation loss\",\n )\n parser.add_argument(\n \"--normalize\", action=\"store_true\", help=\"normalize model input\"\n )\n parser.add_argument(\n \"--progress-bar\", action=\"store_true\", help=\"use progress bar while training\"\n )\n parser.add_argument(\n \"--decoder\",\n metavar=\"D\",\n default=\"greedy\",\n choices=[\"greedy\"],\n help=\"decoder to use\",\n )\n parser.add_argument(\n \"--batch-size\", default=128, type=int, metavar=\"N\", help=\"mini-batch size\"\n )\n parser.add_argument(\n \"--n-bins\",\n default=13,\n type=int,\n metavar=\"N\",\n help=\"number of bins in transforms\",\n )\n parser.add_argument(\n \"--optimizer\",\n metavar=\"OPT\",\n default=\"adadelta\",\n choices=[\"sgd\", \"adadelta\", \"adam\", \"adamw\"],\n help=\"optimizer to use\",\n )\n parser.add_argument(\n \"--scheduler\",\n metavar=\"S\",\n default=\"reduceonplateau\",\n choices=[\"exponential\", \"reduceonplateau\"],\n help=\"optimizer to use\",\n )\n parser.add_argument(\n \"--learning-rate\",\n default=0.6,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate\",\n )\n parser.add_argument(\n \"--gamma\",\n default=0.99,\n type=float,\n metavar=\"GAMMA\",\n help=\"learning rate exponential decay constant\",\n )\n parser.add_argument(\n \"--momentum\", default=0.8, type=float, metavar=\"M\", help=\"momentum\"\n )\n parser.add_argument(\n \"--weight-decay\", default=1e-5, type=float, metavar=\"W\", help=\"weight decay\"\n )\n parser.add_argument(\"--eps\", 
metavar=\"EPS\", type=float, default=1e-8)\n parser.add_argument(\"--rho\", metavar=\"RHO\", type=float, default=0.95)\n parser.add_argument(\"--clip-grad\", metavar=\"NORM\", type=float, default=0.0)\n parser.add_argument(\n \"--dataset-root\",\n type=str,\n help=\"specify dataset root folder\",\n )\n parser.add_argument(\n \"--dataset-folder-in-archive\",\n type=str,\n help=\"specify dataset folder in archive\",\n )\n parser.add_argument(\n \"--dataset-train\",\n default=[\"train-clean-100\"],\n nargs=\"+\",\n type=str,\n help=\"select which part of librispeech to train with\",\n )\n parser.add_argument(\n \"--dataset-valid\",\n default=[\"dev-clean\"],\n nargs=\"+\",\n type=str,\n help=\"select which part of librispeech to validate with\",\n )\n parser.add_argument(\n \"--distributed\", action=\"store_true\", help=\"enable DistributedDataParallel\"\n )\n parser.add_argument(\"--seed\", type=int, default=0, help=\"random seed\")\n parser.add_argument(\n \"--world-size\", type=int, default=8, help=\"the world size to initiate DPP\"\n )\n parser.add_argument(\"--jit\", action=\"store_true\", help=\"if used, model is jitted\")\n\n args = parser.parse_args()\n logging.info(args)\n return args\n\n\ndef setup_distributed(rank, world_size):\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"12355\"\n\n # initialize the process group\n torch.distributed.init_process_group(\"nccl\", rank=rank, world_size=world_size)\n\n\ndef model_length_function(tensor):\n if tensor.shape[1] == 1:\n # waveform mode\n return int(tensor.shape[0]) // 160 // 2 + 1\n return int(tensor.shape[0]) // 2 + 1\n\n\ndef compute_error_rates(outputs, targets, decoder, language_model, metric):\n output = outputs.transpose(0, 1).to(\"cpu\")\n output = decoder(output)\n\n # Compute CER\n\n output = language_model.decode(output.tolist())\n target = language_model.decode(targets.tolist())\n\n print_length = 20\n for i in range(2):\n # Print a few examples\n output_print = output[i].ljust(print_length)[:print_length]\n target_print = target[i].ljust(print_length)[:print_length]\n logging.info(\"Target: %s Output: %s\", target_print, output_print)\n\n cers = [levenshtein_distance(t, o) for t, o in zip(target, output)]\n cers = sum(cers)\n n = sum(len(t) for t in target)\n metric[\"batch char error\"] = cers\n metric[\"batch char total\"] = n\n metric[\"batch char error rate\"] = cers / n\n metric[\"epoch char error\"] += cers\n metric[\"epoch char total\"] += n\n metric[\"epoch char error rate\"] = metric[\"epoch char error\"] / metric[\"epoch char total\"]\n\n # Compute WER\n\n output = [o.split(language_model.char_space) for o in output]\n target = [t.split(language_model.char_space) for t in target]\n\n wers = [levenshtein_distance(t, o) for t, o in zip(target, output)]\n wers = sum(wers)\n n = sum(len(t) for t in target)\n metric[\"batch word error\"] = wers\n metric[\"batch word total\"] = n\n metric[\"batch word error rate\"] = wers / n\n metric[\"epoch word error\"] += wers\n metric[\"epoch word total\"] += n\n metric[\"epoch word error rate\"] = metric[\"epoch word error\"] / metric[\"epoch word total\"]\n\n\ndef train_one_epoch(\n model,\n criterion,\n optimizer,\n scheduler,\n data_loader,\n decoder,\n language_model,\n device,\n epoch,\n clip_grad,\n disable_logger=False,\n reduce_lr_on_plateau=False,\n):\n\n model.train()\n\n metric = MetricLogger(\"train\", disable=disable_logger)\n metric[\"epoch\"] = epoch\n\n for inputs, targets, tensors_lengths, target_lengths in bg_iterator(\n 
data_loader, maxsize=2\n ):\n\n start = time()\n inputs = inputs.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n # keep batch first for data parallel\n outputs = model(inputs).transpose(-1, -2).transpose(0, 1)\n\n # CTC\n # outputs: input length, batch size, number of classes (including blank)\n # targets: batch size, max target length\n # input_lengths: batch size\n # target_lengths: batch size\n\n loss = criterion(outputs, targets, tensors_lengths, target_lengths)\n\n optimizer.zero_grad()\n loss.backward()\n\n if clip_grad > 0:\n metric[\"gradient\"] = torch.nn.utils.clip_grad_norm_(\n model.parameters(), clip_grad\n )\n\n optimizer.step()\n\n compute_error_rates(outputs, targets, decoder, language_model, metric)\n\n try:\n metric[\"lr\"] = scheduler.get_last_lr()[0]\n except AttributeError:\n metric[\"lr\"] = optimizer.param_groups[0][\"lr\"]\n\n metric[\"batch size\"] = len(inputs)\n metric[\"n_channel\"] = inputs.shape[1]\n metric[\"n_time\"] = inputs.shape[-1]\n metric[\"dataset length\"] += metric[\"batch size\"]\n metric[\"iteration\"] += 1\n metric[\"loss\"] = loss.item()\n metric[\"cumulative loss\"] += metric[\"loss\"]\n metric[\"average loss\"] = metric[\"cumulative loss\"] / metric[\"iteration\"]\n metric[\"iteration time\"] = time() - start\n metric[\"epoch time\"] += metric[\"iteration time\"]\n metric()\n\n if reduce_lr_on_plateau and isinstance(scheduler, ReduceLROnPlateau):\n scheduler.step(metric[\"average loss\"])\n elif not isinstance(scheduler, ReduceLROnPlateau):\n scheduler.step()\n\n\ndef evaluate(\n model,\n criterion,\n data_loader,\n decoder,\n language_model,\n device,\n epoch,\n disable_logger=False,\n):\n\n with torch.no_grad():\n\n model.eval()\n start = time()\n metric = MetricLogger(\"validation\", disable=disable_logger)\n metric[\"epoch\"] = epoch\n\n for inputs, targets, tensors_lengths, target_lengths in bg_iterator(\n data_loader, maxsize=2\n ):\n\n inputs = inputs.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n # keep batch first for data parallel\n outputs = model(inputs).transpose(-1, -2).transpose(0, 1)\n\n # CTC\n # outputs: input length, batch size, number of classes (including blank)\n # targets: batch size, max target length\n # input_lengths: batch size\n # target_lengths: batch size\n\n metric[\"cumulative loss\"] += criterion(\n outputs, targets, tensors_lengths, target_lengths\n ).item()\n\n metric[\"dataset length\"] += len(inputs)\n metric[\"iteration\"] += 1\n\n compute_error_rates(outputs, targets, decoder, language_model, metric)\n\n metric[\"average loss\"] = metric[\"cumulative loss\"] / metric[\"iteration\"]\n metric[\"validation time\"] = time() - start\n metric()\n\n return metric[\"average loss\"]\n\n\ndef main(rank, args):\n\n # Distributed setup\n\n if args.distributed:\n setup_distributed(rank, args.world_size)\n\n not_main_rank = args.distributed and rank != 0\n\n logging.info(\"Start time: %s\", datetime.now())\n\n # Explicitly set seed to make sure models created in separate processes\n # start from same random weights and biases\n torch.manual_seed(args.seed)\n\n # Empty CUDA cache\n torch.cuda.empty_cache()\n\n # Change backend for flac files\n torchaudio.set_audio_backend(\"soundfile\")\n\n # Transforms\n\n melkwargs = {\n \"n_fft\": args.win_length,\n \"n_mels\": args.n_bins,\n \"hop_length\": args.hop_length,\n }\n\n sample_rate_original = 16000\n\n if args.type == \"mfcc\":\n transforms = torch.nn.Sequential(\n torchaudio.transforms.MFCC(\n 
sample_rate=sample_rate_original,\n n_mfcc=args.n_bins,\n melkwargs=melkwargs,\n ),\n )\n num_features = args.n_bins\n elif args.type == \"waveform\":\n transforms = torch.nn.Sequential(UnsqueezeFirst())\n num_features = 1\n else:\n raise ValueError(\"Model type not supported\")\n\n if args.normalize:\n transforms = torch.nn.Sequential(transforms, Normalize())\n\n augmentations = torch.nn.Sequential()\n if args.freq_mask:\n augmentations = torch.nn.Sequential(\n augmentations,\n torchaudio.transforms.FrequencyMasking(freq_mask_param=args.freq_mask),\n )\n if args.time_mask:\n augmentations = torch.nn.Sequential(\n augmentations,\n torchaudio.transforms.TimeMasking(time_mask_param=args.time_mask),\n )\n\n # Text preprocessing\n\n char_blank = \"*\"\n char_space = \" \"\n char_apostrophe = \"'\"\n labels = char_blank + char_space + char_apostrophe + string.ascii_lowercase\n language_model = LanguageModel(labels, char_blank, char_space)\n\n # Dataset\n\n training, validation = split_process_librispeech(\n [args.dataset_train, args.dataset_valid],\n [transforms, transforms],\n language_model,\n root=args.dataset_root,\n folder_in_archive=args.dataset_folder_in_archive,\n )\n\n # Decoder\n\n if args.decoder == \"greedy\":\n decoder = GreedyDecoder()\n else:\n raise ValueError(\"Selected decoder not supported\")\n\n # Model\n\n model = Wav2Letter(\n num_classes=language_model.length,\n input_type=args.type,\n num_features=num_features,\n )\n\n if args.jit:\n model = torch.jit.script(model)\n\n if args.distributed:\n n = torch.cuda.device_count() // args.world_size\n devices = list(range(rank * n, (rank + 1) * n))\n model = model.to(devices[0])\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=devices)\n else:\n devices = [\"cuda\" if torch.cuda.is_available() else \"cpu\"]\n model = model.to(devices[0], non_blocking=True)\n model = torch.nn.DataParallel(model)\n\n n = count_parameters(model)\n logging.info(\"Number of parameters: %s\", n)\n\n # Optimizer\n\n if args.optimizer == \"adadelta\":\n optimizer = Adadelta(\n model.parameters(),\n lr=args.learning_rate,\n weight_decay=args.weight_decay,\n eps=args.eps,\n rho=args.rho,\n )\n elif args.optimizer == \"sgd\":\n optimizer = SGD(\n model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n elif args.optimizer == \"adam\":\n optimizer = Adam(\n model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n elif args.optimizer == \"adamw\":\n optimizer = AdamW(\n model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n else:\n raise ValueError(\"Selected optimizer not supported\")\n\n if args.scheduler == \"exponential\":\n scheduler = ExponentialLR(optimizer, gamma=args.gamma)\n elif args.scheduler == \"reduceonplateau\":\n scheduler = ReduceLROnPlateau(optimizer, patience=10, threshold=1e-3)\n else:\n raise ValueError(\"Selected scheduler not supported\")\n\n criterion = torch.nn.CTCLoss(\n blank=language_model.mapping[char_blank], zero_infinity=False\n )\n\n # Data Loader\n\n collate_fn_train = collate_factory(model_length_function, augmentations)\n collate_fn_valid = collate_factory(model_length_function)\n\n loader_training_params = {\n \"num_workers\": args.workers,\n \"pin_memory\": True,\n \"shuffle\": True,\n \"drop_last\": True,\n }\n loader_validation_params = loader_training_params.copy()\n loader_validation_params[\"shuffle\"] = False\n\n loader_training = 
DataLoader(\n training,\n batch_size=args.batch_size,\n collate_fn=collate_fn_train,\n **loader_training_params,\n )\n loader_validation = DataLoader(\n validation,\n batch_size=args.batch_size,\n collate_fn=collate_fn_valid,\n **loader_validation_params,\n )\n\n # Setup checkpoint\n\n best_loss = 1.0\n\n load_checkpoint = args.checkpoint and os.path.isfile(args.checkpoint)\n\n if args.distributed:\n torch.distributed.barrier()\n\n if load_checkpoint:\n logging.info(\"Checkpoint: loading %s\", args.checkpoint)\n checkpoint = torch.load(args.checkpoint)\n\n args.start_epoch = checkpoint[\"epoch\"]\n best_loss = checkpoint[\"best_loss\"]\n\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n scheduler.load_state_dict(checkpoint[\"scheduler\"])\n\n logging.info(\n \"Checkpoint: loaded '%s' at epoch %s\", args.checkpoint, checkpoint[\"epoch\"]\n )\n else:\n logging.info(\"Checkpoint: not found\")\n\n save_checkpoint(\n {\n \"epoch\": args.start_epoch,\n \"state_dict\": model.state_dict(),\n \"best_loss\": best_loss,\n \"optimizer\": optimizer.state_dict(),\n \"scheduler\": scheduler.state_dict(),\n },\n False,\n args.checkpoint,\n not_main_rank,\n )\n\n if args.distributed:\n torch.distributed.barrier()\n\n torch.autograd.set_detect_anomaly(False)\n\n for epoch in range(args.start_epoch, args.epochs):\n\n logging.info(\"Epoch: %s\", epoch)\n\n train_one_epoch(\n model,\n criterion,\n optimizer,\n scheduler,\n loader_training,\n decoder,\n language_model,\n devices[0],\n epoch,\n args.clip_grad,\n not_main_rank,\n not args.reduce_lr_valid,\n )\n\n loss = evaluate(\n model,\n criterion,\n loader_validation,\n decoder,\n language_model,\n devices[0],\n epoch,\n not_main_rank,\n )\n\n if args.reduce_lr_valid and isinstance(scheduler, ReduceLROnPlateau):\n scheduler.step(loss)\n\n is_best = loss < best_loss\n best_loss = min(loss, best_loss)\n save_checkpoint(\n {\n \"epoch\": epoch + 1,\n \"state_dict\": model.state_dict(),\n \"best_loss\": best_loss,\n \"optimizer\": optimizer.state_dict(),\n \"scheduler\": scheduler.state_dict(),\n },\n is_best,\n args.checkpoint,\n not_main_rank,\n )\n\n logging.info(\"End time: %s\", datetime.now())\n\n if args.distributed:\n torch.distributed.destroy_process_group()\n\n\ndef spawn_main(main, args):\n if args.distributed:\n torch.multiprocessing.spawn(\n main, args=(args,), nprocs=args.world_size, join=True\n )\n else:\n main(0, args)\n\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(level=logging.INFO)\n args = parse_args()\n spawn_main(main, args)\n",
"import torch\nfrom torchaudio.datasets import LIBRISPEECH\n\n\nclass MapMemoryCache(torch.utils.data.Dataset):\n \"\"\"\n Wrap a dataset so that, whenever a new item is returned, it is saved to memory.\n \"\"\"\n\n def __init__(self, dataset):\n self.dataset = dataset\n self._cache = [None] * len(dataset)\n\n def __getitem__(self, n):\n if self._cache[n] is not None:\n return self._cache[n]\n\n item = self.dataset[n]\n self._cache[n] = item\n\n return item\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass Processed(torch.utils.data.Dataset):\n def __init__(self, dataset, transforms, encode):\n self.dataset = dataset\n self.transforms = transforms\n self.encode = encode\n\n def __getitem__(self, key):\n item = self.dataset[key]\n return self.process_datapoint(item)\n\n def __len__(self):\n return len(self.dataset)\n\n def process_datapoint(self, item):\n transformed = item[0]\n target = item[2].lower()\n\n transformed = self.transforms(transformed)\n transformed = transformed[0, ...].transpose(0, -1)\n\n target = self.encode(target)\n target = torch.tensor(target, dtype=torch.long, device=transformed.device)\n\n return transformed, target\n\n\ndef split_process_librispeech(\n datasets, transforms, language_model, root, folder_in_archive,\n):\n def create(tags, cache=True):\n\n if isinstance(tags, str):\n tags = [tags]\n if isinstance(transforms, list):\n transform_list = transforms\n else:\n transform_list = [transforms]\n\n data = torch.utils.data.ConcatDataset(\n [\n Processed(\n LIBRISPEECH(\n root, tag, folder_in_archive=folder_in_archive, download=False,\n ),\n transform,\n language_model.encode,\n )\n for tag, transform in zip(tags, transform_list)\n ]\n )\n\n data = MapMemoryCache(data)\n return data\n\n # For performance, we cache all datasets\n return tuple(create(dataset) for dataset in datasets)\n\n\ndef collate_factory(model_length_function, transforms=None):\n\n if transforms is None:\n transforms = torch.nn.Sequential()\n\n def collate_fn(batch):\n\n tensors = [transforms(b[0]) for b in batch if b]\n\n tensors_lengths = torch.tensor(\n [model_length_function(t) for t in tensors],\n dtype=torch.long,\n device=tensors[0].device,\n )\n\n tensors = torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True)\n tensors = tensors.transpose(1, -1)\n\n targets = [b[1] for b in batch if b]\n target_lengths = torch.tensor(\n [target.shape[0] for target in targets],\n dtype=torch.long,\n device=tensors.device,\n )\n targets = torch.nn.utils.rnn.pad_sequence(targets, batch_first=True)\n\n return tensors, targets, tensors_lengths, target_lengths\n\n return collate_fn\n",
"import torch\n\n__all__ = [\n \"RNNTLoss\",\n \"rnnt_loss\",\n]\n\n\ndef _rnnt_loss_alphas(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank=-1,\n clamp=-1,\n):\n \"\"\"\n Compute alphas for RNN transducer loss.\n\n See documentation for RNNTLoss\n \"\"\"\n targets = targets.to(device=logits.device)\n logit_lengths = logit_lengths.to(device=logits.device)\n target_lengths = target_lengths.to(device=logits.device)\n\n # make sure all int tensors are of type int32.\n targets = targets.int()\n logit_lengths = logit_lengths.int()\n target_lengths = target_lengths.int()\n\n return torch.ops.torchaudio.rnnt_loss_alphas(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank,\n clamp,\n )\n\n\ndef _rnnt_loss_betas(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank=-1,\n clamp=-1,\n):\n \"\"\"\n Compute betas for RNN transducer loss\n\n See documentation for RNNTLoss\n \"\"\"\n targets = targets.to(device=logits.device)\n logit_lengths = logit_lengths.to(device=logits.device)\n target_lengths = target_lengths.to(device=logits.device)\n\n # make sure all int tensors are of type int32.\n targets = targets.int()\n logit_lengths = logit_lengths.int()\n target_lengths = target_lengths.int()\n\n return torch.ops.torchaudio.rnnt_loss_betas(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank,\n clamp,\n )\n\n\nclass _RNNT(torch.autograd.Function):\n @staticmethod\n def forward(\n ctx,\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank=-1,\n clamp=-1,\n fused_log_softmax=True,\n reuse_logits_for_grads=True,\n ):\n \"\"\"\n See documentation for RNNTLoss\n \"\"\"\n\n # move everything to the same device.\n targets = targets.to(device=logits.device)\n logit_lengths = logit_lengths.to(device=logits.device)\n target_lengths = target_lengths.to(device=logits.device)\n\n # make sure all int tensors are of type int32.\n targets = targets.int()\n logit_lengths = logit_lengths.int()\n target_lengths = target_lengths.int()\n\n if blank < 0: # reinterpret blank index if blank < 0.\n blank = logits.shape[-1] + blank\n\n costs, gradients = torch.ops.torchaudio.rnnt_loss(\n logits=logits,\n targets=targets,\n src_lengths=logit_lengths,\n tgt_lengths=target_lengths,\n blank=blank,\n clamp=clamp,\n fused_log_smax=fused_log_softmax,\n reuse_logits_for_grads=reuse_logits_for_grads,\n )\n\n ctx.grads = gradients\n\n return costs\n\n @staticmethod\n def backward(ctx, output_gradients):\n output_gradients = output_gradients.view(-1, 1, 1, 1).to(ctx.grads)\n ctx.grads.mul_(output_gradients).to(ctx.grads)\n\n return (\n ctx.grads, # logits\n None, # targets\n None, # logit_lengths\n None, # target_lengths\n None, # blank\n None, # clamp\n None, # fused_log_softmax\n None, # reuse_logits_for_grads\n )\n\n\ndef rnnt_loss(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank=-1,\n clamp=-1,\n fused_log_softmax=True,\n reuse_logits_for_grads=True,\n):\n \"\"\"\n Compute the RNN Transducer Loss.\n\n The RNN Transducer loss (`Graves 2012 <https://arxiv.org/pdf/1211.3711.pdf>`__) extends the CTC loss by defining\n a distribution over output sequences of all lengths, and by jointly modelling both input-output and output-output\n dependencies.\n\n Args:\n logits (Tensor): Tensor of dimension (batch, time, target, class) containing output from joiner\n targets (Tensor): Tensor of dimension (batch, max target length) containing targets with zero padded\n logit_lengths (Tensor): Tensor of dimension (batch) containing lengths of each sequence from encoder\n 
target_lengths (Tensor): Tensor of dimension (batch) containing lengths of targets for each sequence\n blank (int, opt): blank label (Default: ``-1``)\n clamp (float): clamp for gradients (Default: ``-1``)\n runtime_check (bool): whether to do sanity check during runtime. (Default: ``False``)\n fused_log_softmax (bool): set to False if calling log_softmax outside loss (Default: ``True``)\n reuse_logits_for_grads (bool): whether to save memory by reusing logits memory for grads (Default: ``True``)\n \"\"\"\n if not fused_log_softmax:\n logits = torch.nn.functional.log_softmax(logits, dim=-1)\n reuse_logits_for_grads = (\n False # softmax needs the original logits value\n )\n\n cost = _RNNT.apply(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n blank,\n clamp,\n fused_log_softmax,\n reuse_logits_for_grads,\n )\n return cost\n\n\nclass RNNTLoss(torch.nn.Module):\n \"\"\"\n Compute the RNN Transducer Loss.\n\n The RNN Transducer loss (`Graves 2012 <https://arxiv.org/pdf/1211.3711.pdf>`__) extends the CTC loss by defining\n a distribution over output sequences of all lengths, and by jointly modelling both input-output and output-output\n dependencies.\n\n Args:\n blank (int, opt): blank label (Default: ``-1``)\n clamp (float): clamp for gradients (Default: ``-1``)\n fused_log_softmax (bool): set to False if calling log_softmax outside loss (Default: ``True``)\n reuse_logits_for_grads (bool): whether to save memory by reusing logits memory for grads (Default: ``True``)\n \"\"\"\n\n def __init__(\n self,\n blank=-1,\n clamp=-1,\n fused_log_softmax=True,\n reuse_logits_for_grads=True,\n ):\n super().__init__()\n self.blank = blank\n self.clamp = clamp\n self.fused_log_softmax = fused_log_softmax\n self.reuse_logits_for_grads = reuse_logits_for_grads\n\n def forward(\n self,\n logits,\n targets,\n logit_lengths,\n target_lengths,\n ):\n \"\"\"\n Args:\n logits (Tensor): Tensor of dimension (batch, time, target, class) containing output from joiner\n targets (Tensor): Tensor of dimension (batch, max target length) containing targets with zero padded\n logit_lengths (Tensor): Tensor of dimension (batch) containing lengths of each sequence from encoder\n target_lengths (Tensor): Tensor of dimension (batch) containing lengths of targets for each sequence\n \"\"\"\n return rnnt_loss(\n logits,\n targets,\n logit_lengths,\n target_lengths,\n self.blank,\n self.clamp,\n self.fused_log_softmax,\n self.reuse_logits_for_grads,\n )\n"
] | [
[
"torch.autograd.set_detect_anomaly",
"torch.load",
"torch.multiprocessing.spawn",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.nn.CTCLoss",
"torch.cuda.is_available",
"torch.jit.script",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.distributed.init_process_group",
"torch.distributed.barrier",
"torch.nn.Sequential",
"torch.cuda.empty_cache",
"torch.distributed.destroy_process_group",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.manual_seed",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.DataParallel"
],
[
"torch.nn.Sequential",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor"
],
[
"torch.ops.torchaudio.rnnt_loss_alphas",
"torch.ops.torchaudio.rnnt_loss",
"torch.nn.functional.log_softmax",
"torch.ops.torchaudio.rnnt_loss_betas"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EnergyModels/estorage | [
"0f84c87632dba1ff0564ffb68f59ece314f67022"
] | [
"estorage/archive/state.py"
] | [
"from CoolProp.CoolProp import PropsSI\nimport pandas as pd\n\n\nclass State:\n self.fluid = T\n self.T = T\n self.p = p\n self.h = PropsSI('H', 'T', T, 'P', p, fluid)\n self.s = PropsSI('S', 'T', T, 'P', p, fluid)\n self.D = PropsSI('D', 'T', T, 'P', p, fluid)\n\n\nclass Flow(State):\n self.m_dot = T\n\ndef def_state_init(fluid):\n\n # Standard temperature and preussre\n T = 273.15\n p = 101325.\n\n state = pd.Series(index=['fluid','T','p','h','s','D'])\n\n state.fluid = fluid\n state.T = T\n state.p = p\n state.h = PropsSI('H', 'T', T, 'P', p, fluid)\n state.s = PropsSI('S', 'T', T, 'P', p, fluid)\n state.D = PropsSI('D', 'T', T, 'P', p, fluid)\n\n return state\n\ndef def_state_tp(fluid, T, p):\n\n state = pd.Series(index=['fluid','T','p','h','s','D'])\n\n state.fluid = fluid\n state.T = T\n state.p = p\n state.h = PropsSI('H', 'T', T, 'P', p, fluid)\n state.s = PropsSI('S', 'T', T, 'P', p, fluid)\n state.D = PropsSI('D', 'T', T, 'P', p, fluid)\n\n return state\n\n\ndef def_state_ph(fluid, p, h):\n state = pd.Series(index=['fluid', 'T', 'p', 'h', 's','D'])\n\n state.fluid = fluid\n state.T = PropsSI('S', 'P', p, 'H', h, fluid)\n state.p = p\n state.h = h\n state.s = PropsSI('S', 'P', p, 'H', h, fluid)\n state.D = PropsSI('D', 'P', p, 'H', h, fluid)\n\n return state\n\n\ndef def_state_ps(fluid, p, s):\n state = pd.Series(index=['fluid', 'T', 'p', 'h', 's','D'])\n\n state.fluid = fluid\n state.T = PropsSI('H', 'P', p, 'S', s, fluid)\n state.p = p\n state.h = PropsSI('H', 'P', p, 'S', s, fluid)\n state.s = s\n state.D = PropsSI('D', 'P', p, 'S', s, fluid)\n\n return state\n"
] | [
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kerwinxu/rqalpha_local | [
"f90da95085df91706ebba8fc905b4cdc85492b11"
] | [
"rqalpha/mod/rqalpha_mod_sys_analyser/plot.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Last Change: 2018-01-09 09:24:03\n# Copyright 2017 Ricequant, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport rqalpha\nfrom rqalpha.utils.logger import system_log\nfrom rqalpha.utils.i18n import gettext\n\n\ndef plot_result(result_dict, show_windows=True, savefile=None):\n import os\n from matplotlib import rcParams, gridspec, ticker, image as mpimg, pyplot as plt\n from matplotlib.font_manager import findfont, FontProperties\n import numpy as np\n\n rcParams['font.family'] = 'sans-serif'\n rcParams['font.sans-serif'] = [\n u'SimHei',\n u'Microsoft Yahei',\n u'Heiti SC',\n u'Heiti TC',\n u'STHeiti',\n u'WenQuanYi Zen Hei',\n u'WenQuanYi Micro Hei',\n u\"文泉驿微米黑\",\n ] + rcParams['font.sans-serif']\n rcParams['axes.unicode_minus'] = False\n\n use_chinese_fonts = True\n font = findfont(FontProperties(family=['sans-serif']))\n if \"/matplotlib/\" in font:\n use_chinese_fonts = False\n system_log.warn(\"Missing Chinese fonts. Fallback to English.\")\n\n summary = result_dict[\"summary\"]\n\n title = summary['strategy_file']\n\n portfolio = result_dict[\"portfolio\"]\n benchmark_portfolio = result_dict.get(\"benchmark_portfolio\")\n\n index = portfolio.index\n\n # maxdrawdown\n portfolio_value = portfolio.unit_net_value * portfolio.units\n xs = portfolio_value.values\n rt = portfolio.unit_net_value.values\n max_dd_end = np.argmax(np.maximum.accumulate(xs) / xs)\n if max_dd_end == 0:\n max_dd_end = len(xs) - 1\n max_dd_start = np.argmax(xs[:max_dd_end]) if max_dd_end > 0 else 0\n\n # maxdrawdown duration\n al_cum = np.maximum.accumulate(xs)\n a = np.unique(al_cum, return_counts=True)\n start_idx = np.argmax(a[1])\n m = a[0][start_idx]\n al_cum_array = np.where(al_cum == m)\n max_ddd_start_day = al_cum_array[0][0]\n max_ddd_end_day = al_cum_array[0][-1]\n\n max_dd_info = \"MaxDD {}~{}, {} days\".format(index[max_dd_start], index[max_dd_end],\n (index[max_dd_end] - index[max_dd_start]).days)\n max_dd_info += \"\\nMaxDDD {}~{}, {} days\".format(index[max_ddd_start_day], index[max_ddd_end_day],\n (index[max_ddd_end_day] - index[max_ddd_start_day]).days)\n\n plt.style.use('ggplot')\n\n red = \"#aa4643\"\n blue = \"#4572a7\"\n black = \"#000000\"\n\n plots_area_size = 0\n if \"plots\" in result_dict:\n plots_area_size = 5\n\n figsize = (18, 6 + int(plots_area_size * 0.9))\n plt.figure(title, figsize=figsize)\n max_height = 10 + plots_area_size\n gs = gridspec.GridSpec(max_height, 8)\n\n # draw logo\n ax = plt.subplot(gs[:3, -1:])\n ax.axis(\"off\")\n filename = os.path.join(os.path.dirname(os.path.realpath(rqalpha.__file__)), \"resource\")\n filename = os.path.join(filename, \"ricequant-logo.png\")\n img = mpimg.imread(filename)\n ax.imshow(img, interpolation=\"nearest\")\n ax.autoscale_view()\n\n # draw risk and portfolio\n\n font_size = 12\n value_font_size = 11\n label_height, value_height = 0.8, 0.6\n label_height2, value_height2 = 0.35, 0.15\n\n def _(txt):\n return gettext(txt) if use_chinese_fonts else txt\n\n fig_data = [\n (0.00, label_height, 
value_height, _(u\"Total Returns\"), \"{0:.3%}\".format(summary[\"total_returns\"]), red, black),\n (0.15, label_height, value_height, _(u\"Annual Returns\"), \"{0:.3%}\".format(summary[\"annualized_returns\"]), red, black),\n (0.00, label_height2, value_height2, _(u\"Benchmark Returns\"), \"{0:.3%}\".format(summary.get(\"benchmark_total_returns\", 0)), blue,\n black),\n (0.15, label_height2, value_height2, _(u\"Benchmark Annual\"), \"{0:.3%}\".format(summary.get(\"benchmark_annualized_returns\", 0)),\n blue, black),\n\n (0.30, label_height, value_height, _(u\"Alpha\"), \"{0:.4}\".format(summary[\"alpha\"]), black, black),\n (0.40, label_height, value_height, _(u\"Beta\"), \"{0:.4}\".format(summary[\"beta\"]), black, black),\n (0.55, label_height, value_height, _(u\"Sharpe\"), \"{0:.4}\".format(summary[\"sharpe\"]), black, black),\n (0.70, label_height, value_height, _(u\"Sortino\"), \"{0:.4}\".format(summary[\"sortino\"]), black, black),\n (0.85, label_height, value_height, _(u\"Information Ratio\"), \"{0:.4}\".format(summary[\"information_ratio\"]), black, black),\n\n (0.30, label_height2, value_height2, _(u\"Volatility\"), \"{0:.4}\".format(summary[\"volatility\"]), black, black),\n (0.40, label_height2, value_height2, _(u\"MaxDrawdown\"), \"{0:.3%}\".format(summary[\"max_drawdown\"]), black, black),\n (0.55, label_height2, value_height2, _(u\"Tracking Error\"), \"{0:.4}\".format(summary[\"tracking_error\"]), black, black),\n (0.70, label_height2, value_height2, _(u\"Downside Risk\"), \"{0:.4}\".format(summary[\"downside_risk\"]), black, black),\n ]\n\n ax = plt.subplot(gs[:3, :-1])\n ax.axis(\"off\")\n for x, y1, y2, label, value, label_color, value_color in fig_data:\n ax.text(x, y1, label, color=label_color, fontsize=font_size)\n ax.text(x, y2, value, color=value_color, fontsize=value_font_size)\n for x, y1, y2, label, value, label_color, value_color in [\n (0.85, label_height2, value_height2, _(u\"MaxDD/MaxDDD\"), max_dd_info, black, black)]:\n ax.text(x, y1, label, color=label_color, fontsize=font_size)\n ax.text(x, y2, value, color=value_color, fontsize=8)\n\n # strategy vs benchmark\n ax = plt.subplot(gs[4:10, :])\n\n ax.get_xaxis().set_minor_locator(ticker.AutoMinorLocator())\n ax.get_yaxis().set_minor_locator(ticker.AutoMinorLocator())\n ax.grid(b=True, which='minor', linewidth=.2)\n ax.grid(b=True, which='major', linewidth=1)\n\n # plot two lines\n ax.plot(portfolio[\"unit_net_value\"] - 1.0, label=_(u\"strategy\"), alpha=1, linewidth=2, color=red)\n if benchmark_portfolio is not None:\n ax.plot(benchmark_portfolio[\"unit_net_value\"] - 1.0, label=_(u\"benchmark\"), alpha=1, linewidth=2, color=blue)\n\n # plot MaxDD/MaxDDD\n ax.plot([index[max_dd_end], index[max_dd_start]], [rt[max_dd_end] - 1.0, rt[max_dd_start] - 1.0],\n 'v', color='Green', markersize=8, alpha=.7, label=_(u\"MaxDrawdown\"))\n ax.plot([index[max_ddd_start_day], index[max_ddd_end_day]],\n [rt[max_ddd_start_day] - 1.0, rt[max_ddd_end_day] - 1.0], 'D', color='Blue', markersize=8, alpha=.7,\n label=_(u\"MaxDDD\"))\n\n # place legend\n leg = plt.legend(loc=\"best\")\n leg.get_frame().set_alpha(0.5)\n\n # manipulate axis\n vals = ax.get_yticks()\n ax.set_yticklabels(['{:3.2f}%'.format(x * 100) for x in vals])\n\n # plot user plots\n if \"plots\" in result_dict:\n plots_df = result_dict[\"plots\"]\n\n ax2 = plt.subplot(gs[11:, :])\n for column in plots_df.columns:\n ax2.plot(plots_df[column], label=column)\n\n leg = plt.legend(loc=\"best\")\n leg.get_frame().set_alpha(0.5)\n\n if show_windows:\n plt.show()\n\n if 
savefile:\n fname = savefile\n if os.path.isdir(savefile):\n fname = os.path.join(savefile, \"{}.png\".format(summary[\"strategy_name\"]))\n plt.savefig(fname, bbox_inches='tight')\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.unique",
"matplotlib.ticker.AutoMinorLocator",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.savefig",
"matplotlib.image.imread",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"numpy.maximum.accumulate",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
novid1134/solar-3d | [
"1e5b77173abb0d805b3cc613e0a7ab3f063fb7e3"
] | [
"uvf.py"
] | [
"# universal variable formulation, 3rd order differential equation solver for orbital prediction,\n# implemented due to huge efficiency issues when using conventional methods (loops, recursion),\n# algorithms based on Vectorized Analytic Two Body Propagator in MATLAB Copyright (c) 2012, Darin Koblick\n\nfrom scipy.spatial.distance import cdist\nfrom vis import *\nfrom parameters import u, sun_radius\nimport numpy as np\n\nu2 = np.sqrt(u)\n\n\ndef c2c3(psi): # Stumpff functions definitions\n\n c2, c3 = 0, 0\n\n if np.any(psi > 1e-6):\n c2 = (1 - np.cos(np.sqrt(psi))) / psi\n c3 = (np.sqrt(psi) - np.sin(np.sqrt(psi))) / np.sqrt(psi ** 3)\n\n if np.any(psi < -1e-6):\n c2 = (1 - np.cosh(np.sqrt(-psi))) / psi\n c3 = (np.sinh(np.sqrt(-psi)) - np.sqrt(-psi)) / np.sqrt(-psi ** 3)\n\n if np.any(abs(psi) <= 1e-6):\n c2 = 0.5\n c3 = 1. / 6.\n\n return c2, c3\n\n\ndef position(r0, v0, t, trailer, tol=100):\n r0mag = mag(r0) # magnitude of the distance from the Sun\n v0mag = mag(v0) # magnitude of spacecraft velocity\n\n alpha = -(v0mag * v0mag) / u + 2. / r0mag # constant term in differential equation\n\n # compute initial guess (x0) for Newton-Raphson solver:\n\n s0 = 0\n\n if alpha > 0.000001: # elliptical orbits\n s0 = u2 * t * alpha\n\n if abs(alpha) < 0.000001: # parabolic orbits\n h = cross(r0, v0) # cross product of vectors r0 and v0\n hmag = mag(h) # magnitude of the h vector\n p = hmag / u\n s = np.arctan(1 / (3. * np.sqrt(u / (p ** 3)) * t)) / 2.\n w = np.arctan(np.tan(s) ** (1 / 3.))\n s0 = np.sqrt(p) * 2. * np.tan(1 / (2. * w))\n\n if alpha < -0.000001: # hyperbolic orbits\n a = 1. / alpha\n s0 = np.sign(t) * np.sqrt(-a) * np.log(-2. * u * alpha * t / (r0.dot(v0) + np.sign(t) *\n np.sqrt(-u * a) * (1 - r0mag * alpha)))\n\n # Newton-Raphson solver:\n\n err = np.inf\n dr0v0 = r0.dot(v0) / u2\n u2t = u2 * t\n i, s, c2, c3 = 0, 0, 0, 0\n\n while np.any(abs(err) > tol) and i < 25:\n s2 = s0 * s0 # s^2\n s3 = s2 * s0 # s^3\n psi = s2 * alpha # alpha * s^2\n\n c2, c3 = c2c3(psi) # Stumpff functions\n\n s0psic3 = s0 * (1.0 - psi * c3)\n s2c2 = s2 * c2\n\n r = s2c2 + dr0v0 * s0psic3 + r0mag * (1 - psi * c2) # f'(s)\n\n s = s0 + (u2t - s3 * c3 - dr0v0 * s2c2 - r0mag * s0psic3) / r # old value + f(s)/f'(s)\n\n err = s - s0 # convergence check\n s0 = s\n\n i += 1\n\n # identify non-converging array entries and remove them:\n\n del2 = np.where(abs(err) > tol)\n s, c2, c3, t = np.delete(s, del2), np.delete(c2, del2), np.delete(c3, del2), np.delete(t, del2)\n\n # calculate final coefficients:\n\n f = 1 - (s * s) * c2 / r0mag\n g = t - (s * s * s) * c3 / u2\n\n # calculate final path prediction:\n\n r2 = np.array(r0.astuple()) # convert vPython vectors to numpy arrays\n v2 = np.array(v0.astuple())\n\n path = r2 * f[:, None] + v2 * g[:, None] # (changing shape to enable numpy broadcasting)\n\n dst = cdist(path, [[0, 0, 0]]) # compute distance of all points in the path from the origin\n\n # draw path:\n\n trailer.trail.color = color.green # default color (green)\n\n if np.any(dst <= sun_radius):\n\n trailer.trail.color = color.red # turn path RED, if collision detected\n trailer.trail.pos = path[0:np.argmax(dst <= sun_radius)] # draw path only up to the Sun collision point\n\n else:\n trailer.trail.pos = path # update full path\n\n return trailer\n"
] | [
[
"numpy.sqrt",
"scipy.spatial.distance.cdist",
"numpy.tan",
"numpy.sign",
"numpy.delete",
"numpy.argmax",
"numpy.any"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
hecoding/pytorch-lightning-bolts | [
"4d254fde6112b21436003028d553a726bf7ea6ef"
] | [
"tests/models/self_supervised/test_scripts.py"
] | [
"from unittest import mock\n\nimport pytest\nimport torch\n\nfrom tests import DATASETS_PATH\n\n\[email protected]('cli_args', [\n f\"--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2\"\n])\ndef test_cli_run_self_supervised_amdim(cli_args):\n \"\"\"Test running CLI for an example with default params.\"\"\"\n from pl_bolts.models.self_supervised.amdim.amdim_module import cli_main\n\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n cli_main()\n\n\n# TODO: this test is hanging (runs for more then 10min) so we need to use GPU or optimize it...\[email protected](not torch.cuda.is_available(), reason=\"test requires GPU machine\")\[email protected]('cli_args', [\n f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --encoder resnet18'\n])\ndef test_cli_run_self_supervised_cpc(cli_args):\n \"\"\"Test running CLI for an example with default params.\"\"\"\n from pl_bolts.models.self_supervised.cpc.cpc_module import cli_main\n\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n cli_main()\n\n\[email protected]('cli_args', [\n f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'\n])\ndef test_cli_run_self_supervised_moco(cli_args):\n \"\"\"Test running CLI for an example with default params.\"\"\"\n from pl_bolts.models.self_supervised.moco.moco2_module import cli_main\n\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n cli_main()\n\n\[email protected]('cli_args', [\n f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'\n])\ndef test_cli_run_self_supervised_simclr(cli_args):\n \"\"\"Test running CLI for an example with default params.\"\"\"\n from pl_bolts.models.self_supervised.simclr.simclr_module import cli_main\n\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n cli_main()\n\n\[email protected]('cli_args', [\n f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'\n])\ndef test_cli_run_self_supervised_byol(cli_args):\n \"\"\"Test running CLI for an example with default params.\"\"\"\n from pl_bolts.models.self_supervised.byol.byol_module import cli_main\n\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n cli_main()\n\n\[email protected](\n 'cli_args', [\n f'--dataset cifar10 --data_path {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'\n ' --gpus 0 --arch resnet18 --hidden_mlp 512 --fp32 --sinkhorn_iterations 1 --nmb_prototypes 2'\n ]\n)\ndef test_cli_run_self_supervised_swav(cli_args):\n \"\"\"Test running CLI for an example with default params.\"\"\"\n from pl_bolts.models.self_supervised.swav.swav_module import cli_main\n\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n cli_main()\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arokem/picard | [
"1ca98a51b60bc51bfcf8767dea989cc4f5b8b522",
"1ca98a51b60bc51bfcf8767dea989cc4f5b8b522"
] | [
"picard/_tools.py",
"picard/densities.py"
] | [
"# Authors: Pierre Ablin <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Jean-Francois Cardoso <[email protected]>\n#\n# License: BSD (3-clause)\nimport numbers\n\nimport numpy as np\n\n\ndef permute(A, scale=True):\n '''Get a permutation to diagonalize and scale a matrix\n\n Parameters\n ----------\n A : ndarray, shape (n_features, n_features)\n A matrix close from a permutation and scale matrix.\n\n scale : boolean, optional\n If True, scales the matrix A wrt its diagonal\n Returns\n -------\n A : ndarray, shape (n_features, n_features)\n A permuted matrix.\n '''\n A = A.copy()\n n = A.shape[0]\n idx = np.arange(n)\n done = False\n while not done:\n done = True\n for i in range(n):\n for j in range(i):\n if A[i, i] ** 2 + A[j, j] ** 2 < A[i, j] ** 2 + A[j, i] ** 2:\n A[(i, j), :] = A[(j, i), :]\n idx[i], idx[j] = idx[j], idx[i]\n done = False\n if scale:\n A /= np.diag(A)\n order_sort = np.argsort(np.sum(np.abs(A), axis=0))\n A = A[order_sort, :]\n A = A[:, order_sort]\n return A\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance\n Parameters\n ----------\n seed : None | int | instance of RandomState\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n\n\ndef _sym_decorrelation(W):\n \"\"\" Symmetric decorrelation\n i.e. W <- (W * W.T) ^{-1/2} * W\n \"\"\"\n s, u = np.linalg.eigh(np.dot(W, W.T))\n return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)\n\n\ndef _ica_par(X, fun, max_iter, w_init, verbose):\n \"\"\"Parallel FastICA.\n Used internally by FastICA --main loop\n \"\"\"\n if verbose:\n print('Running %d iterations of FastICA...' % max_iter)\n W = _sym_decorrelation(w_init)\n del w_init\n p_ = float(X.shape[1])\n for ii in range(max_iter):\n gwtx, g_wtx = fun.score_and_der(np.dot(W, X))\n g_wtx = g_wtx.mean(axis=1)\n C = np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W\n W = _sym_decorrelation(C)\n del gwtx, g_wtx\n if verbose:\n print('Running Picard...')\n return W\n\n\ndef amari_distance(W, A):\n \"\"\"\n Computes the Amari distance between two matrices W and A.\n It cancels when WA is a permutation and scale matrix.\n\n Parameters\n ----------\n W : ndarray, shape (n_features, n_features)\n Input matrix\n\n A : ndarray, shape (n_features, n_features)\n Input matrix\n\n Returns\n -------\n d : float\n The Amari distance\n \"\"\"\n P = np.dot(W, A)\n\n def s(r):\n return np.sum(np.sum(r ** 2, axis=1) / np.max(r ** 2, axis=1) - 1)\n return (s(np.abs(P)) + s(np.abs(P.T))) / (2 * P.shape[0])\n",
"# Authors: Pierre Ablin <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Jean-Francois Cardoso <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport numexpr as ne\n\nfrom scipy.optimize import check_grad\nfrom numpy.testing import assert_allclose\n\n\ndef check_density(density, tol=1e-6, n_test=10, rng=None):\n if rng is None:\n rng = np.random.RandomState(0)\n Y = rng.randn(n_test)\n\n def score(Y):\n return density.score_and_der(Y)[0]\n\n def score_der(Y):\n return density.score_and_der(Y)[1]\n\n err_msgs = ['score', 'score derivative']\n for f, fprime, err_msg in zip([density.log_lik, score], [score, score_der],\n err_msgs):\n for y in Y:\n err = check_grad(f, fprime, np.array([y]))\n assert_allclose(err, 0, atol=tol, rtol=0,\n err_msg='Wrong %s' % err_msg)\n\n\nclass Tanh(object):\n def __init__(self, params=None):\n self.alpha = 1.\n if params is not None:\n if 'alpha' in params:\n self.alpha = params['alpha']\n\n def log_lik(self, Y):\n alpha = self.alpha # noqa\n return ne.evaluate('abs(Y) + log1p(exp(-2. * alpha * abs(Y))) / alpha')\n\n def score_and_der(self, Y):\n alpha = self.alpha\n score = ne.evaluate('tanh(alpha * Y)')\n return score, alpha - alpha * score ** 2\n\n\nclass Exp(object):\n def __init__(self, params=None):\n self.alpha = 1.\n if params is not None:\n if 'alpha' in params:\n self.alpha = params['alpha']\n\n def log_lik(self, Y):\n a = self.alpha # noqa\n return ne.evaluate('-exp(- a * Y ** 2 / 2.) / a')\n\n def score_and_der(self, Y):\n a = self.alpha # noqa\n Y_sq = ne.evaluate('Y ** 2') # noqa\n K = ne.evaluate('exp(- a / 2. * Y_sq)') # noqa\n return ne.evaluate('Y * K'), ne.evaluate('(1- a * Y_sq) * K')\n\n\nclass Cube(object):\n def log_lik(self, Y):\n return ne.evaluate('Y ** 4 / 4')\n\n def score_and_der(self, Y):\n return ne.evaluate('Y ** 3'), ne.evaluate('3 * Y ** 2')\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.abs",
"numpy.sqrt",
"numpy.arange",
"numpy.max",
"numpy.random.RandomState",
"numpy.sum"
],
[
"numpy.array",
"numpy.random.RandomState",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VitaNova1998/REP | [
"abc640dfca1b23237770b0508f79c45b84baf81b"
] | [
"utils/audio_utils.py"
] | [
"import numpy as np\nimport librosa\nfrom scipy import signal\nimport fnmatch\nimport os\n\n\ndef preemphasis(x, coeff=0.97):\n return signal.lfilter([1, -coeff], [1], x)\n\n\ndef inv_preemphasis(x, coeff=0.97):\n return signal.lfilter([1], [1, -coeff], x)\n\n\ndef griffin_lim(stft_matrix_,\n n_fft,\n hop_size,\n win_size=None,\n max_iter=50,\n delta=20):\n\n n_frames = stft_matrix_.shape[1]\n expected_len = n_fft + hop_size*(n_frames - 1)\n shape = (expected_len - n_fft,)\n y = np.random.random(shape)\n\n for i in range(max_iter):\n stft_matrix = librosa.core.stft(\n y,\n n_fft=n_fft,\n hop_length=hop_size,\n win_length=win_size)\n stft_matrix = stft_matrix_ * stft_matrix / np.abs(stft_matrix)\n y = librosa.core.istft(\n stft_matrix,\n hop_length=hop_size,\n win_length=win_size)\n return y\n\n\ndef log_magnitude_postproc(stftm, magnitude_enphasis):\n # emphasizing magnitude\n stftm = stftm * magnitude_enphasis\n # Undo log and square\n stftm = np.sqrt(np.exp(stftm))\n return stftm\n"
] | [
[
"scipy.signal.lfilter",
"numpy.exp",
"numpy.random.random",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
judyheflin/amazon-s3-plugin-for-pytorch | [
"38284c8a5e92be3bbf47b08e8c90d94be0cb79e7"
] | [
"examples/s3_cv_map_example.py"
] | [
"\nfrom awsio.python.lib.io.s3.s3dataset import S3Dataset\nfrom torch.utils.data import DataLoader\n\nurl_list = ['s3://image-data-bucket/train/n01440764/n01440764_10026.JPEG',\n 's3://image-data-bucket/train/n01440764/n01440764_10027.JPEG',\n 's3://image-data-bucket/train/n01440764/n01440764_10029.JPEG']\n\ndataset = S3Dataset(url_list)\ndataloader = DataLoader(dataset,\n batch_size=2,\n num_workers=64)\n\nfor i, (image, label) in enumerate(dataloader):\n print(type(image), len(image))\n\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dvav/clonosGP | [
"e7f9a08869df0e1857fe24e4e311999f7ba6560f"
] | [
"run_models_on_simulated_data.py"
] | [
"import os\nimport sys\n\nIDX = int(sys.argv[1])\nos.environ['THEANO_FLAGS'] = f'base_compiledir=\"theano/p{IDX}\"'\n\nimport tqdm as tqd\nimport itertools as itr\nimport numpy as nmp\nimport pandas as pnd\nimport sklearn.metrics as mtr\nimport scipy.special as scp\n\nimport pymc3 as pmc\nimport clonosGP as cln\n\n\n\n##\ndef run_model(prior, cov, lik, R, K, M, N, tau, h2, data): \n nmp.random.seed(42)\n pmc.tt_rng(42)\n \n res = cln.infer(data, \n model_args={'K': 20, 'prior': prior, 'cov': cov, 'lik': lik, 'threshold': 0.0}, \n pymc3_args={'niters': 40000, 'method': 'advi', 'flow': 'scale-loc', 'learning_rate': 1e-2, 'random_seed': 42})\n\n z_true = data[['MUTID', 'CLUSTERID']].drop_duplicates().CLUSTERID.values\n z_pred = res['data'][['MUTID', 'CLUSTERID']].drop_duplicates().CLUSTERID.values\n \n return pnd.DataFrame({\n 'REP': R, \n 'NCLUSTERS': K, \n 'NSAMPLES': M, \n 'NMUTS': N,\n 'TAU': tau,\n 'H2': h2,\n 'PRIOR': prior,\n 'COV': cov,\n 'LIK': lik,\n 'ARI': mtr.adjusted_rand_score(z_true, z_pred),\n 'AMI': mtr.adjusted_mutual_info_score(z_true, z_pred),\n 'FMI': mtr.fowlkes_mallows_score(z_true, z_pred),\n }, index=[0]).reset_index(drop=True)\n\n\n## load template\ndepths = pnd.read_csv('data/cll_Rincon_2019_patient1.csv').R.values\n\n##\nprint(f'Generating data: {IDX}')\nnmp.random.seed(42)\nDATA = [[R, K, M, N, TAU, H2, cln.sim.get_Nsamples(nclusters=K, nmuts=N, nsamples=M, tau=TAU, h2=H2, mean_depth=(40, 40), depths=depths)] \n for R, K, M, N, TAU, H2 in itr.product([1, 2, 3], [2, 4, 8], [3, 6, 12], [25, 50, 100], [1, 10, 100], [1, 10, 20])]\n\n##\nprint(f\"Running model: {IDX}\")\nARGS = [('Flat', 'Exp', 'Bin'), ('Flat', 'Exp', 'BBin'), ('GP0', 'Exp', 'Bin'), ('GP0', 'Exp', 'BBin'),('GP0', 'ExpQ', 'Bin'),('GP0', 'ExpQ', 'BBin')]\nRES = [run_model(*args, *DATA[IDX-1]) for args in ARGS]\nRES = pnd.concat(RES).reset_index(drop=True)\nRES.to_csv(f'results/simdata{IDX}.csv', index=False)\n\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"sklearn.metrics.fowlkes_mallows_score",
"numpy.random.seed",
"sklearn.metrics.adjusted_mutual_info_score",
"sklearn.metrics.adjusted_rand_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
CancerDataScience/NuCLS | [
"c172b55b18d4ea78c3f51a8fd28ee6c2595c8360",
"c172b55b18d4ea78c3f51a8fd28ee6c2595c8360"
] | [
"TorchUtils.py",
"interrater/scripts/i12_statistical_tests.py"
] | [
"import torch\nimport nucls_model.torchvision_detection_utils.transforms as tvdt\n\n\nISCUDA = torch.cuda.is_available()\n\n\ndef tensor_isin(arr1, arr2):\n r\"\"\" Compares a tensor element-wise with a list of possible values.\n See :func:`torch.isin`\n\n Source: https://github.com/pytorch/pytorch/pull/26144\n \"\"\"\n result = (arr1[..., None] == arr2).any(-1)\n return result.type(torch.ByteTensor)\n\n\ndef transform_dlinput(\n tlist=None, make_tensor=True, flip_prob=0.5,\n augment_stain_sigma1=0.5, augment_stain_sigma2=0.5):\n \"\"\"Transform input image data for a DL model.\n\n Parameters\n ----------\n tlist: None or list. If testing mode, pass as None.\n flip_prob\n augment_stain_sigma1\n augment_stain_sigma2\n\n \"\"\"\n tmap = {\n 'hflip': tvdt.RandomHorizontalFlip(prob=flip_prob),\n 'augment_stain': tvdt.RandomHEStain(\n sigma1=augment_stain_sigma1, sigma2=augment_stain_sigma2),\n }\n tlist = [] if tlist is None else tlist\n transforms = []\n # go through various transforms\n for tname in tlist:\n transforms.append(tmap[tname])\n # maybe convert to tensor\n if make_tensor:\n # transforms.append(tvdt.PILToTensor(float16=ISCUDA))\n transforms.append(tvdt.PILToTensor(float16=False))\n return tvdt.Compose(transforms)\n",
"from scipy.stats import mannwhitneyu, wilcoxon\nfrom pandas import read_csv, concat, read_sql_query\nimport numpy as np\nfrom os.path import join as opj\nfrom itertools import combinations\n\nfrom interrater.interrater_utils import _connect_to_anchor_db, \\\n get_roc_and_auroc_for_who, remap_classes_in_anchorsdf\nfrom configs.nucleus_style_defaults import Interrater as ir\n\nrpath = '/home/mtageld/Desktop/cTME/results/tcga-nucleus/interrater/CURATED_v1_2020-03-29_EVAL/' # noqa\nrfile = opj(rpath, 'i12_statistical_tests.txt')\n\n\ndef interrater_pvals():\n \"\"\"Ps-Ps interrater VS Ps-NPs VS NPs-NPs (E-set, classific.)\n Mann-whitney-U\"\"\"\n\n res = \"\"\n res += \"\\n**********************************************************************\" # noqa\n res += \"\\ni9_InterRaterStats -> interrater_boxplots.csv\\n\"\n res += \"\\nClassification pairwise inter-rater agreement for the E-set.\\n\"\n\n pvals_interrater = {}\n dfs = {}\n for clsgroup in ['main', 'super']:\n\n # read df\n fpath = opj(rpath, f'i9_InterRaterStats/{clsgroup}/csv/interrater_boxplots.csv') # noqa\n df = read_csv(fpath, index_col=0)\n df = df.loc[df.loc[:, 'evalset'] == 'E', :]\n dfs[clsgroup] = df\n\n # comparisons within the same class grouping\n comps = {\n cp: df.loc[df.loc[:, 'comparison'] == cp, 'classification'].values\n for cp in ['Ps-Ps', 'Ps-NPs', 'NPs-NPs']\n }\n for cp1, cp2 in combinations(comps.keys(), 2):\n _, pvals_interrater[f'{cp1} VS {cp2} ({clsgroup})'] = mannwhitneyu(\n comps[cp1], comps[cp2], alternative='two-sided')\n\n res += f'\\n> pvals_interrater (MANNWHITNEYU): '\n res += '----------------------------\\n'\n for k, v in pvals_interrater.items():\n res += f\"{k.replace('_', ' ')}: %.3f\\n\" % v\n\n # comparison across class grouping for same participant pairs\n pvals_interrater = {}\n for cp in ['Ps-Ps', 'Ps-NPs', 'NPs-NPs']:\n compdf = []\n for clsgroup in ['main', 'super']:\n df = dfs[clsgroup]\n df = df.loc[df.loc[:, 'comparison'] == cp, ['classification']]\n df.rename(\n columns={'classification': f'classification_{clsgroup}'},\n inplace=True)\n compdf.append(df)\n compdf = concat(compdf, axis=1, join='inner')\n _, pvals_interrater[f'{cp} (main VS super)'] = wilcoxon(\n compdf.iloc[:, 0], compdf.iloc[:, 1], alternative='two-sided')\n\n res += f'\\n> pvals_interrater (MAIN VS SUPER, WILCOXON): '\n res += '----------------------------\\n'\n for k, v in pvals_interrater.items():\n res += f\"{k.replace('_', ' ')}: %.3f\\n\" % v\n\n print(res)\n with open(rfile, 'a') as f:\n f.write(res)\n\n\ndef intrarater_pvals_accross_evalsets(\n clsgroup: str, method: str, justnps_segm=False, justnps_cls=True):\n \"\"\"Intra-rater comparison vs the U-control\n\n wilcoxon: paired, but loss of info because half participants didnt do\n B-control!\n\n mannwhitneyu: unpaired, but all data can be compared\n \"\"\"\n assert method in ['wilcoxon', 'mannwhitneyu']\n pvals_intrarater = {}\n\n # proportion segmented\n fpath = opj(rpath, f'i8_IntraRaterStats/{clsgroup}/csv/intra-rater_comparison.csv') # noqa\n tmpdf = read_csv(fpath, index_col=0)\n tmpdf = tmpdf.loc[:, ['evalset', 'swho', 'psegmented']]\n if justnps_segm:\n tmpdf = tmpdf.loc[tmpdf.loc[:, 'swho'] == 'NPs', :] # restrict to NPs?\n bcontrol = tmpdf.loc[tmpdf.loc[:, 'evalset'] == 'B-control', :].dropna()\n bcontrol.columns = [f'bc_{c}' for c in bcontrol.columns]\n eset = tmpdf.loc[tmpdf.loc[:, 'evalset'] == 'E', :].dropna()\n eset.columns = [f'e_{c}' for c in eset.columns]\n\n if method == 'wilcoxon':\n df = concat([bcontrol, eset], axis=1, join='inner')\n _, 
pvals_intrarater['psegmented'] = wilcoxon(\n df.loc[:, 'bc_psegmented'],\n df.loc[:, 'e_psegmented'],\n alternative='two-sided')\n elif method == 'mannwhitneyu':\n df = concat([bcontrol, eset], axis=1, join='outer')\n _, pvals_intrarater['psegmented'] = mannwhitneyu(\n df.loc[:, 'bc_psegmented'].dropna(),\n df.loc[:, 'e_psegmented'].dropna(),\n alternative='two-sided')\n\n # Classification\n fpath = opj(rpath, f'i8_IntraRaterStats/{clsgroup}/csv/intra-rater_comparison.csv') # noqa\n tmpdf = read_csv(fpath, index_col=0)\n tmpdf = tmpdf.loc[:, ['evalset', 'swho', 'classification']]\n if justnps_cls:\n tmpdf = tmpdf.loc[tmpdf.loc[:, 'swho'] == 'NPs', :] # restrict to NPs?\n bcontrol = tmpdf.loc[tmpdf.loc[:, 'evalset'] == 'B-control', :].dropna()\n bcontrol.columns = [f'bc_{c}' for c in bcontrol.columns]\n eset = tmpdf.loc[tmpdf.loc[:, 'evalset'] == 'E', :].dropna()\n eset.columns = [f'e_{c}' for c in eset.columns]\n\n if method == 'wilcoxon':\n df = concat([bcontrol, eset], axis=1, join='inner')\n _, pvals_intrarater['classification'] = wilcoxon(\n df.loc[:, 'bc_classification'],\n df.loc[:, 'e_classification'],\n alternative='two-sided')\n elif method == 'mannwhitneyu':\n df = concat([bcontrol, eset], axis=1, join='outer')\n _, pvals_intrarater['classification'] = mannwhitneyu(\n df.loc[:, 'bc_classification'].dropna(),\n df.loc[:, 'e_classification'].dropna(),\n alternative='two-sided')\n\n res = \"\"\n if (clsgroup == 'main') and (method == 'wilcoxon'):\n res += \"\\n**********************************************************************\" # noqa\n res += \"\\ni8_IntraRaterStats -> intra-rater_comparison.csv (ACCROSS EVALSETS)\\n\" # noqa\n res += \"\\nIntra-rater comparison vs the U-control.\\n\"\n res += \"- Option 1: Wilcoxon: paired, but loss of info because half participants didnt do B-control.\\n\" # noqa\n res += \"- Option 2: Mannwhitneyu: unpaired, but all data can be compared.\\n\" # noqa\n res += f'\\n> pvals_intrarater ({clsgroup.upper()}, {method.upper()}): '\n res += '----------------------------\\n'\n for k, v in pvals_intrarater.items():\n res += f'{k}: %.3f\\n' % v\n\n print(res)\n with open(rfile, 'a') as f:\n f.write(res)\n\n\ndef intrarater_pvals_accross_clsgroup(evalset: str):\n \"\"\"Intra-rater comparison (self agreement vs the U-control) for\n main vs super classes for various pariticpant groups within the same\n evaluation set.\n \"\"\"\n pvals = {}\n\n # read dfs\n dfs = {}\n for clsgroup in ['main', 'super']:\n fpath = opj(rpath, f'i8_IntraRaterStats/{clsgroup}/csv/intra-rater_comparison.csv') # noqa\n df = read_csv(fpath, index_col=0)\n df = df.loc[df.loc[:, 'evalset'] == evalset, :]\n dfs[clsgroup] = df\n\n for swho in ['Ps', 'NPs']:\n compdf = []\n for clsgroup in ['main', 'super']:\n df = dfs[clsgroup]\n df = df.loc[df.loc[:, 'swho'] == swho, ['classification']]\n df.rename(\n columns={'classification': f'classification_{clsgroup}'},\n inplace=True)\n compdf.append(df)\n compdf = concat(compdf, axis=1, join='inner')\n _, pvals[f'{swho} (main VS super, {evalset}, WILCOXON)'] = wilcoxon(\n compdf.iloc[:, 0], compdf.iloc[:, 1], alternative='two-sided')\n\n res = \"\"\n if evalset == 'B-control':\n res += \"\\n**********************************************************************\" # noqa\n res += \"\\ni8_IntraRaterStats -> intra-rater_comparison.csv (ACCROSS CLSGROUP)\\n\" # noqa\n res += \"\\nIntra-rater comparison vs the U-control (by class grouping).\\n\" # noqa\n res += f'\\n> pvals_intrarater ({evalset}): '\n res += '----------------------------\\n'\n for k, 
v in pvals.items():\n res += f'{k}: %.3f\\n' % v\n\n print(res)\n with open(rfile, 'a') as f:\n f.write(res)\n\n\ndef roc_pvals(clsgroup, ntrials=1000, unbiased=False):\n \"\"\"Accuracy of inferred truth from NPs with/out algorithmic suggestions.\n\n This gets the bootstrap 95% confidence interval and p-values.\n \"\"\"\n print(f\"\\n> [GO GET COFFEE ...] Getting roc_pvals for {clsgroup.upper()})\")\n\n # connect to sqlite database -- anchors\n dbcon = _connect_to_anchor_db(rpath)\n\n # first we read all anchors\n ubstr = ir._ubstr(unbiased)\n truthcol = f'{ubstr}EM_inferred_label_Ps'\n anchors = {}\n for evalset in ir.MAIN_EVALSET_NAMES:\n # read real anchors and remap labels\n ubstr = ir._ubstr(unbiased)\n tablename = f'v3.1_final_anchors_{evalset}_{ubstr}Ps_AreTruth'\n anchs = read_sql_query(f\"\"\"\n SELECT * FROM \"{tablename}\"\n ;\"\"\", dbcon)\n anchs = remap_classes_in_anchorsdf(\n anchors=anchs, clsgroup=clsgroup,\n also_ilabel=True, remove_ambiguous=True,\n who_determines_ambig='Ps', how_ambig_is_determined='EM',\n )\n\n anchs.loc[:, 'ilabel'] = anchs.loc[:, 'EM_inferred_label_NPs']\n anchors[evalset] = anchs\n\n # get bootstrap roc aucs\n cats = ['micro', 'macro']\n roc_aucs = {\n cat: {evs: [] for evs in ir.MAIN_EVALSET_NAMES}\n for cat in cats\n }\n for _ in range(ntrials):\n for evalset in ir.MAIN_EVALSET_NAMES:\n x = anchors[evalset]\n idxs = np.random.randint(x.shape[0], size=x.shape[0])\n _, _, rocauc = get_roc_and_auroc_for_who(\n anchors=x.iloc[idxs, :], truthcol=truthcol,\n probcol_prefix='EM_prob_', probcol_postfix='_NPs',\n )\n for cat in cats:\n roc_aucs[cat][evalset].append(rocauc[cat])\n\n # now get p-values\n pvals = {}\n for ev1, ev2 in combinations(ir.MAIN_EVALSET_NAMES, 2):\n for cat in cats:\n _, pvals[f'{ev1}_VS_{ev2}_{cat}'] = mannwhitneyu(\n roc_aucs[cat][ev1], roc_aucs[cat][ev2],\n alternative='two-sided')\n\n res = \"\"\n if clsgroup == 'main':\n res += \"\\n**********************************************************************\" # noqa\n res += \"\\ni5_ParticipantAccuracy -> Ps_AreTruth_superimposed_auroc_curves.svg\\n\" # noqa\n res += \"\\nAccuracy of inferred truth from NPs with/out algorithmic suggestions.\\n\" # noqa\n res += f\"This is the bootstrap AUROC comparison p-value with {ntrials} trials.\\n\" # noqa\n res += f'\\n> AUROCs ({clsgroup.upper()}): '\n res += '----------------------------\\n'\n for cat, aucvals_dict in roc_aucs.items():\n for ev, aucvals in aucvals_dict.items():\n res += (\n f\"{cat}: {ev}: {np.round(np.percentile(aucvals, 50), 3)} \"\n f\"({np.round(np.percentile(aucvals, 5), 3)}, \"\n f\"{np.round(np.percentile(aucvals, 95), 3)})\\n\"\n )\n\n res += f'\\n> pvals_intrarater ({clsgroup.upper()}, MANNWHITNEYU): '\n res += '----------------------------\\n'\n for k, v in pvals.items():\n res += f\"{k.replace('_', ' ')}: %.3f\\n\" % v\n\n print(res)\n with open(rfile, 'a') as f:\n f.write(res)\n\n\ndef segmentation_pvals(who='NPs', metric='DICE'):\n \"\"\"Segmentation accuracy p-values\"\"\"\n fpath = opj(rpath, f'i6_SegmentationAccuracy/{who}_AreTruth/csv/'\n f'{who}_AreTruth_evalset_violinplot_comparison.csv')\n df = read_csv(fpath)\n\n comps = {}\n eset = df.loc[df.loc[:, 'evalset'] == 'E', :]\n bset = df.loc[df.loc[:, 'evalset'] == 'B-control', :]\n\n _, comps['E-set VS B-control (overall)'] = mannwhitneyu(\n eset.loc[:, metric].values,\n bset.loc[:, metric].values,\n alternative='two-sided')\n\n _, comps['E-set VS B-control (correct)'] = mannwhitneyu(\n eset.loc[eset.loc[:, 'iscorrect'] == 1, metric].values,\n 
bset.loc[bset.loc[:, 'iscorrect'] == 1, metric].values,\n alternative='two-sided')\n\n _, comps['E-set VS B-control (incorrect)'] = mannwhitneyu(\n eset.loc[eset.loc[:, 'iscorrect'] == 0, metric].values,\n bset.loc[bset.loc[:, 'iscorrect'] == 0, metric].values,\n alternative='two-sided')\n\n _, comps['E-set correct vs incorrect'] = mannwhitneyu(\n eset.loc[eset.loc[:, 'iscorrect'] == 1, metric].values,\n eset.loc[eset.loc[:, 'iscorrect'] == 0, metric].values,\n alternative='two-sided')\n\n _, comps['B-control correct vs incorrect'] = mannwhitneyu(\n bset.loc[bset.loc[:, 'iscorrect'] == 1, metric].values,\n bset.loc[bset.loc[:, 'iscorrect'] == 0, metric].values,\n alternative='two-sided')\n\n res = \"\"\n res += \"\\n**********************************************************************\" # noqa\n res += f\"\\ni6_SegmentationAccuracy -> {who}_AreTruth_evalset_violinplot_comparison.csv\\n\" # noqa\n res += \"\\nSegmentation accuracy p-values.\\n\"\n res += f'\\n> segmentation_pvals ({metric}), MANNWHITNEYU): '\n res += '----------------------------\\n'\n for k, v in comps.items():\n res += f\"{k}: %.3f\\n\" % v\n\n # same nucleus in both evalsets\n fpath = opj(rpath, f'i6_SegmentationAccuracy/{who}_AreTruth/csv/'\n f'{who}_AreTruth_evalset_{metric}_comparison.csv')\n df = read_csv(fpath, index_col=0)\n _, pval = wilcoxon(\n df.loc[:, f'{metric}_B-control'], df.loc[:, f'{metric}_E'],\n alternative='two-sided')\n res += f'\\n> segmentation_pvals, joint ({metric}), WILCOXON): '\n res += '----------------------------\\n'\n res += f\"E-set VS B-control (joint): %.3f\\n\" % pval\n\n # save\n print(res)\n with open(rfile, 'a') as f:\n f.write(res)\n\n\n# =============================================================================\n\nif __name__ == '__main__':\n\n def run_seq1(fun, **kwargs):\n for clsgroup in ['main', 'super']:\n fun(clsgroup=clsgroup, **kwargs)\n\n def run_seq2(fun, **kwargs):\n for evalset in ['B-control', 'E']:\n fun(evalset=evalset, **kwargs)\n\n # interrater_pvals()\n # run_seq1(intrarater_pvals_accross_evalsets, method='wilcoxon')\n # run_seq1(intrarater_pvals_accross_evalsets, method='mannwhitneyu')\n # run_seq2(intrarater_pvals_accross_clsgroup)\n # segmentation_pvals()\n\n # This takes a while\n run_seq1(roc_pvals)\n"
] | [
[
"torch.cuda.is_available"
],
[
"pandas.read_sql_query",
"pandas.read_csv",
"pandas.concat",
"numpy.percentile",
"scipy.stats.mannwhitneyu",
"scipy.stats.wilcoxon",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
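Editor's aside (not part of the dataset record above): the statistics script in that record repeatedly contrasts a paired Wilcoxon signed-rank test on an inner join with an unpaired Mann-Whitney U test on all observations. A minimal sketch of that pattern, with hypothetical column names and synthetic data:

# Paired vs. unpaired comparison pattern, as in the record above (sketch only).
import numpy as np
from pandas import DataFrame, concat
from scipy.stats import wilcoxon, mannwhitneyu

rng = np.random.default_rng(0)
bcontrol = DataFrame({'bc_score': rng.normal(0.80, 0.1, 30)}, index=range(30))
eset = DataFrame({'e_score': rng.normal(0.85, 0.1, 40)}, index=range(40))

# Paired test: the inner join keeps only participants present in both sets.
paired = concat([bcontrol, eset], axis=1, join='inner').dropna()
_, p_wilcoxon = wilcoxon(paired['bc_score'], paired['e_score'],
                         alternative='two-sided')

# Unpaired test: uses every observation, at the cost of losing the pairing.
_, p_mwu = mannwhitneyu(bcontrol['bc_score'], eset['e_score'],
                        alternative='two-sided')
print(f'wilcoxon p={p_wilcoxon:.3f}, mannwhitneyu p={p_mwu:.3f}')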
jingjieli95/UnarySim | [
"c03386efdbb8151f3c33f34b44d1d6a6fc960434",
"c03386efdbb8151f3c33f34b44d1d6a6fc960434",
"c03386efdbb8151f3c33f34b44d1d6a6fc960434"
] | [
"test/kernel/test_kernel_exp_comb.py",
"app/uSystolic/alexnet_imagenet/alexnet_sa.py",
"test/kernel/test_kernel_conv2d_padding.py"
] | [
"# %%\nimport torch\nimport math\nfrom UnarySim.kernel.exp import expN1\nfrom UnarySim.stream.gen import RNG, SourceGen, BSGen\nfrom UnarySim.metric.metric import ProgError\nimport matplotlib.pyplot as plt\nimport time\nimport math\nimport numpy as np\n\n# %%\ndef exp_comb_test(bw=8, mode=\"unipolar\", rng=\"Sobol\"):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n total_cnt = 100\n bitwidth = bw\n btype = torch.float\n rtype=torch.float\n stype=torch.float\n\n print(\"========================================================\")\n print(mode)\n print(\"========================================================\")\n # all input values are non-negative\n low_bound = 0\n if mode == \"unipolar\":\n up_bound = 2**bitwidth\n elif mode == \"bipolar\":\n low_bound = 0\n up_bound = 2**(bitwidth-1)\n\n input_list = []\n for input_val in range(low_bound, up_bound+1, 1):\n input_list.append(input_val)\n\n input = torch.tensor(input_list).type(torch.float).div(up_bound).to(device)\n output = torch.exp(input.mul(-1))\n \n result_pe_total = []\n for rand_idx in range(1, total_cnt+1):\n \n outputPE = ProgError(output, mode=mode).to(device)\n inputPE = ProgError(input, mode=mode).to(device)\n \n dut_exp_comb = expN1(mode=mode, \n rng=rng, \n rng_dim=rand_idx, \n rng_width=bitwidth).to(device)\n inputSRC = SourceGen(input, bitwidth, mode=mode, rtype=rtype)().to(device)\n inputRNG = RNG(bitwidth, rand_idx, rng, rtype)().to(device)\n inputBS = BSGen(inputSRC, inputRNG, stype).to(device)\n with torch.no_grad():\n start_time = time.time()\n for i in range(2**bitwidth):\n input_bs = inputBS(torch.tensor([i]))\n inputPE.Monitor(input_bs)\n\n output_bs = dut_exp_comb(input_bs)\n outputPE.Monitor(output_bs)\n \n # get the result for different rng\n result_pe = outputPE()[1].cpu().numpy()\n result_pe_total.append(result_pe) \n \n # get the result for different rng\n result_pe_total = np.array(result_pe_total)\n #######################################################################\n # check the error of all simulation\n #######################################################################\n print(\"RMSE:{:1.4}\".format(math.sqrt(np.mean(result_pe_total**2))))\n print(\"MAE: {:1.4}\".format(np.mean(np.abs(result_pe_total))))\n print(\"bias:{:1.4}\".format(np.mean(result_pe_total)))\n print(\"max: {:1.4}\".format(np.max(result_pe_total)))\n print(\"min: {:1.4}\".format(np.min(result_pe_total)))\n\n #######################################################################\n # check the error according to input value\n #######################################################################\n max_total = np.max(result_pe_total, axis=0)\n min_total = np.min(result_pe_total, axis=0)\n avg_total = np.mean(result_pe_total, axis=0)\n \n axis_len = outputPE()[1].size()[0]\n input_x_axis = []\n for axis_index in range(axis_len):\n input_x_axis.append((axis_index/(axis_len-1)*(up_bound-low_bound)+low_bound)/up_bound)\n fig, ax = plt.subplots()\n ax.fill_between(input_x_axis, max_total, avg_total, facecolor=\"red\", alpha=0.75)\n ax.fill_between(input_x_axis, avg_total, min_total, facecolor=\"blue\", alpha=0.75)\n ax.plot(input_x_axis, avg_total, label='Avg error', color=\"black\", linewidth=0.3)\n plt.tight_layout()\n plt.xlabel('Input value')\n plt.ylabel('Output error')\n plt.xticks(np.arange(0, 1.1, step=0.5))\n # ax.xaxis.set_ticklabels([])\n plt.xlim(0, 1)\n plt.yticks(np.arange(-0.2, 0.4, step=0.2))\n # ax.yaxis.set_ticklabels([])\n plt.ylim(-0.3, 0.55)\n plt.grid(b=True, which=\"both\", 
axis=\"y\", linestyle=\"--\", color=\"grey\", linewidth=0.3)\n fig.set_size_inches(4, 4)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.show()\n plt.close()\n\n# %%\nexp_comb_test(8,\"unipolar\", \"Sobol\")\n\n# %%\nexp_comb_test(8, \"unipolar\", \"SYS\")\n\n# %%\nexp_comb_test(8, \"unipolar\", \"LFSR\")",
"import torch\nimport torch.nn as nn\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom UnarySim.kernel.conv import HUBConv2d\nfrom UnarySim.kernel.linear import HUBLinear\nfrom UnarySim.kernel.utils import conv2d_output_shape\n\n\n__all__ = ['AlexNet', 'alexnet']\n\n\nmodel_urls = {\n 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',\n}\n\nclass AlexNet(nn.Module):\n\n def __init__(self, num_classes=1000, pretrained_model_state_dict=None, cycle=None):\n super(AlexNet, self).__init__()\n if pretrained_model_state_dict is None:\n self.features = nn.Sequential(\n HUBConv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n HUBConv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n HUBConv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n HUBConv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n HUBConv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = nn.Sequential(\n nn.Dropout(),\n HUBLinear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n HUBLinear(4096, 4096),\n nn.ReLU(inplace=True),\n HUBLinear(4096, num_classes),\n )\n else:\n param_list = [param for param in pretrained_model_state_dict]\n print(\"load model parameters: \", param_list)\n state_list = [pretrained_model_state_dict[param] for param in param_list]\n# output_size_list = []\n# output_size_list[0] = \n \n self.features = nn.Sequential(\n HUBConv2d(3, 64, kernel_size=11, stride=4, padding=2, binary_weight=state_list[0], binary_bias=state_list[1], cycle=cycle[0]),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n HUBConv2d(64, 192, kernel_size=5, padding=2, binary_weight=state_list[2], binary_bias=state_list[3], cycle=cycle[1]),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n HUBConv2d(192, 384, kernel_size=3, padding=1, binary_weight=state_list[4], binary_bias=state_list[5], cycle=cycle[2]),\n nn.ReLU(inplace=True),\n HUBConv2d(384, 256, kernel_size=3, padding=1, binary_weight=state_list[6], binary_bias=state_list[7], cycle=cycle[3]),\n nn.ReLU(inplace=True),\n HUBConv2d(256, 256, kernel_size=3, padding=1, binary_weight=state_list[8], binary_bias=state_list[9], cycle=cycle[4]),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = nn.Sequential(\n nn.Dropout(),\n HUBLinear(256 * 6 * 6, 4096, binary_weight=state_list[10], binary_bias=state_list[11], cycle=cycle[5]),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n HUBLinear(4096, 4096, binary_weight=state_list[12], binary_bias=state_list[13], cycle=cycle[6]),\n nn.ReLU(inplace=True),\n HUBLinear(4096, num_classes, binary_weight=state_list[14], binary_bias=state_list[15], cycle=cycle[7]),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n\ndef alexnet(pretrained=False, progress=True, **kwargs):\n r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" <https://arxiv.org/abs/1404.5997>`_ paper.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['alexnet'],\n 
progress=progress)\n else:\n state_dict = None\n model = AlexNet(pretrained_model_state_dict=state_dict, **kwargs)\n return model\n",
"import torch\nfrom torch import nn as nn\n\nkernel_size = (3, 3)\ndilation = 1\npadding = 1\nstride = 1\n\ninput = torch.ones(2, 1, 5, 5)\npadding0 = torch.nn.ConstantPad2d(padding, 0)\ninput0 = padding0(input)\noutput0 = torch.nn.functional.unfold(input0, kernel_size, dilation, 0, stride)\n\noutput1 = torch.nn.functional.unfold(input, kernel_size, dilation, padding, stride)\n\nprint(output0.shape)\nprint(output0)\n\nprint(output1.shape)\nprint(output1)\n\nprint(torch.sum(output0 == output1) == torch.prod(torch.tensor(output1.size())))"
] | [
[
"matplotlib.pyplot.tight_layout",
"numpy.abs",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.arange",
"matplotlib.pyplot.subplots",
"torch.tensor",
"numpy.max",
"matplotlib.pyplot.xlim",
"numpy.mean",
"torch.no_grad",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"torch.cuda.is_available",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.nn.ReLU"
],
[
"torch.nn.ConstantPad2d",
"torch.nn.functional.unfold",
"torch.ones",
"torch.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
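Editor's aside (not part of the dataset record above): the unary exp() test in that record aggregates per-trial output errors over many RNG seeds and reports RMSE, MAE, bias, max, and min. A small numpy-only sketch of that aggregation, with synthetic stand-in errors:

# Per-trial error aggregation, as in the exp() test above (sketch only).
import numpy as np

rng = np.random.default_rng(1)
errs = np.array([rng.normal(0.0, 0.01, 257) for _ in range(100)])  # hypothetical errors

print("RMSE: %.4f" % np.sqrt(np.mean(errs ** 2)))
print("MAE:  %.4f" % np.mean(np.abs(errs)))
print("bias: %.4f" % np.mean(errs))
print("max:  %.4f" % np.max(errs))
print("min:  %.4f" % np.min(errs))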
ilvcd/Flux.jl | [
"d22479318b5f2a19a0003f3201d9b145b4562f9f"
] | [
"u2net.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch\nimport torch.nn as nn\nimport math\nimport time\n# class REBNCONVs(nn.Module):\n# def __init__(self,in_ch=3,out_ch=3,dirate=1):\n# super(REBNCONVs,self).__init__()\n\n# self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)\n# self.bn_s1 = nn.BatchNorm2d(out_ch)\n# self.relu_s1 = nn.ReLU(inplace=True)\n\n# def forward(self,x):\n\n# hx = x\n# xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))\n\n# return xout\n\n## upsample tensor 'src' to have the same spatial size with tensor 'tar'\ndef _upsample_like1(src,tar):\n src = F.upsample(src,size=tar.shape[2:],mode='bilinear')\n return src\n\ndef _upsample_like(src,tar):\n\n src_h,src_l = src\n tar_h,tar_l = tar\n src_h = F.upsample(src_h,size=tar_h.shape[2:],mode='bilinear')\n src_l = F.upsample(src_l,size=tar_l.shape[2:],mode='bilinear') if src_l is not None else None\n if src_l is None:\n return src_h\n return src_h,src_l\n\ndef comb(src,tar):\n src_h,src_l = src\n tar_h,tar_l = tar\n return src_h+tar_h,src_l+tar_l\n\ndef cat6(src1,src2,src3,src4,src5,src6,dim):\n src_h1,src_l1 = src1\n src_h2,src_l2 = src2\n src_h3,src_l3 = src3\n src_h4,src_l4 = src4\n src_h5,src_l5 = src5\n src_h6,src_l6 = src6\n return torch.cat((src_h1,src_h2,src_h3,src_h4,src_h5,src_h6),dim),torch.cat((src_l1,src_l2,src_l3,src_l4,src_l5,src_l6),dim)\n\ndef cat(src,tar,dim):\n src_h,src_l = src\n tar_h,tar_l = tar\n return torch.cat((src_h,tar_h),dim),torch.cat((src_l,tar_l),dim)\n\nclass OctConv(nn.Conv2d):\n \"\"\"\n Octave convolution layer.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 1\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n oct_alpha : float, default 0.0\n Octave alpha coefficient.\n oct_mode : str, default 'std'\n Octave convolution mode. 
It can be 'first', 'norm', 'last', or 'std'.\n oct_value : int, default 2\n Octave value.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding=1,\n dilation=1,\n groups=1,\n bias=False,\n oct_alpha=0.5,\n oct_mode=\"std\",\n oct_value=2):\n if isinstance(stride, int):\n stride = (stride, stride)\n self.downsample = (stride[0] > 1) or (stride[1] > 1)\n assert (stride[0] in [1, oct_value]) and (stride[1] in [1, oct_value])\n stride = (1, 1)\n if oct_mode == \"first\":\n in_alpha = 0.0\n out_alpha = oct_alpha\n elif oct_mode == \"norm\":\n in_alpha = oct_alpha\n out_alpha = oct_alpha\n elif oct_mode == \"last\":\n in_alpha = oct_alpha\n out_alpha = 0.0\n elif oct_mode == \"std\":\n in_alpha = 0.0\n out_alpha = 0.0\n else:\n raise ValueError(\"Unsupported octave convolution mode: {}\".format(oct_mode))\n self.h_in_channels = int(in_channels * (1.0 - in_alpha))\n self.h_out_channels = int(out_channels * (1.0 - out_alpha))\n self.l_out_channels = out_channels - self.h_out_channels\n self.oct_alpha = oct_alpha\n self.oct_mode = oct_mode\n self.oct_value = oct_value\n super(OctConv, self).__init__(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n self.conv_kwargs = {\n \"stride\": stride,\n \"padding\": padding,\n \"dilation\": dilation,\n \"groups\": groups}\n\n def forward(self,x):\n hx,lx=x\n if self.oct_mode == \"std\":\n return F.conv2d(\n input=hx,\n weight=self.weight,\n bias=self.bias,\n **self.conv_kwargs), None\n\n if self.downsample:\n hx = F.avg_pool2d(\n input=hx,\n kernel_size=(self.oct_value, self.oct_value),\n stride=(self.oct_value, self.oct_value))\n\n hhy = F.conv2d(\n input=hx,\n weight=self.weight[0:self.h_out_channels, 0:self.h_in_channels, :, :],\n bias=self.bias[0:self.h_out_channels] if self.bias is not None else None,\n **self.conv_kwargs)\n\n if self.oct_mode != \"first\":\n hlx = F.conv2d(\n input=lx,\n weight=self.weight[0:self.h_out_channels, self.h_in_channels:, :, :],\n bias=self.bias[0:self.h_out_channels] if self.bias is not None else None,\n **self.conv_kwargs)\n\n if self.oct_mode == \"last\":\n hlx = F.interpolate(\n input=hlx,\n scale_factor=self.oct_value,\n mode=\"nearest\")\n hy = hhy + hlx\n ly = None\n return hy\n\n lhx = F.avg_pool2d(\n input=hx,\n kernel_size=(self.oct_value, self.oct_value),\n stride=(self.oct_value, self.oct_value))\n lhy = F.conv2d(\n input=lhx,\n weight=self.weight[self.h_out_channels:, 0:self.h_in_channels, :, :],\n bias=self.bias[self.h_out_channels:] if self.bias is not None else None,\n **self.conv_kwargs)\n\n if self.oct_mode == \"first\":\n hy = hhy\n ly = lhy\n return hy, ly\n\n if self.downsample:\n hly = hlx\n llx = F.avg_pool2d(\n input=lx,\n kernel_size=(self.oct_value, self.oct_value),\n stride=(self.oct_value, self.oct_value))\n else:\n hly = F.interpolate(\n input=hlx,\n scale_factor=self.oct_value,\n mode=\"nearest\")\n llx = lx\n lly = F.conv2d(\n input=llx,\n weight=self.weight[self.h_out_channels:, self.h_in_channels:, :, :],\n bias=self.bias[self.h_out_channels:] if self.bias is not None else None,\n **self.conv_kwargs)\n\n hy = hhy + hly\n ly = lhy + lly\n return hy, ly\n\n\n\n\nclass OctaveConv(nn.Module):\n \"\"\"\n Octave convolution block with Batch normalization and ReLU/ReLU6 activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or 
tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n oct_alpha : float, default 0.0\n Octave alpha coefficient.\n oct_mode : str, default 'std'\n Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n oct_alpha=0.5,\n padding=0,\n oct_mode=\"last\"):\n super(OctaveConv, self).__init__()\n self.last = (oct_mode == \"last\") or (oct_mode == \"std\")\n self.conv = OctConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=padding,\n dilation=1,\n groups=1,\n bias=False,\n oct_alpha=oct_alpha,\n oct_mode=oct_mode)\n\n def forward(self, x):\n ret = self.conv(x)\n return ret\n\nclass REBNCONV(nn.Module):\n \"\"\"\n Octave convolution block with Batch normalization and ReLU/ReLU6 activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n oct_alpha : float, default 0.0\n Octave alpha coefficient.\n oct_mode : str, default 'std'\n Octave convolution mode. 
It can be 'first', 'norm', 'last', or 'std'.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=3,\n dirate=1,\n oct_alpha=0.5,\n oct_mode=\"norm\",\n bn_eps=1e-5):\n super(REBNCONV, self).__init__()\n self.activate = nn.ReLU(inplace=True)\n self.last = (oct_mode == \"last\") or (oct_mode == \"std\")\n out_alpha = 0.0 if self.last else oct_alpha\n h_out_channels = int(out_channels * (1.0 - out_alpha))\n l_out_channels = out_channels - h_out_channels\n self.conv = OctConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=1*dirate,\n dilation=1*dirate,\n groups=1,\n bias=False,\n oct_alpha=oct_alpha,\n oct_mode=oct_mode)\n self.h_bn = nn.BatchNorm2d(\n num_features=h_out_channels,\n eps=bn_eps)\n if not self.last:\n self.l_bn = nn.BatchNorm2d(\n num_features=l_out_channels,\n eps=bn_eps)\n\n def forward(self, x):\n hx, lx = self.conv(x)\n hx = self.h_bn(hx)\n if self.activate:\n hx = self.activate(hx)\n if not self.last:\n lx = self.l_bn(lx)\n lx = self.activate(lx)\n return hx, lx\n\n\n\nclass MaxPool2dx(nn.Module):\n def __init__(self):\n super(MaxPool2dx, self).__init__()\n self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n def forward(self, x):\n x_h, x_l = x\n x_h = self.pool1(x_h)\n x_l = self.pool1(x_l) if x_l is not None else None\n return x_h, x_l\n\n\n\n### RSU-7 ###\nclass RSU7(nn.Module):#UNet07DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3,first=False):\n super(RSU7,self).__init__()\n oct_mode = 'norm'\n if first:\n oct_mode = 'first'\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1,oct_mode = oct_mode)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = MaxPool2dx()\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = MaxPool2dx()\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool3 = MaxPool2dx()\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool4 = MaxPool2dx()\n\n self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool5 = MaxPool2dx()\n\n self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n hx = self.pool4(hx4)\n\n hx5 = self.rebnconv5(hx)\n hx = self.pool5(hx5)\n\n hx6 = self.rebnconv6(hx)\n\n hx7 = self.rebnconv7(hx6)\n\n hx6d = self.rebnconv6d(cat(hx7,hx6,1))\n hx6dup = _upsample_like(hx6d,hx5)\n\n hx5d = self.rebnconv5d(cat(hx6dup,hx5,1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.rebnconv4d(cat(hx5dup,hx4,1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.rebnconv3d(cat(hx4dup,hx3,1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(cat(hx3dup,hx2,1))\n hx2dup = 
_upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(cat(hx2dup,hx1,1))\n\n return comb(hx1d, hxin)\n\n### RSU-6 ###\nclass RSU6(nn.Module):#UNet06DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU6,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = MaxPool2dx()\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = MaxPool2dx()\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool3 = MaxPool2dx()\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool4 = MaxPool2dx()\n\n self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n hx = self.pool4(hx4)\n\n hx5 = self.rebnconv5(hx)\n\n hx6 = self.rebnconv6(hx5)\n\n\n hx5d = self.rebnconv5d(cat(hx6,hx5,1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.rebnconv4d(cat(hx5dup,hx4,1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.rebnconv3d(cat(hx4dup,hx3,1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(cat(hx3dup,hx2,1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(cat(hx2dup,hx1,1))\n\n return comb(hx1d, hxin)\n\n### RSU-5 ###\nclass RSU5(nn.Module):#UNet05DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU5,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = MaxPool2dx()\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = MaxPool2dx()\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool3 = MaxPool2dx()\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n\n hx5 = self.rebnconv5(hx4)\n\n hx4d = self.rebnconv4d(cat(hx5,hx4,1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.rebnconv3d(cat(hx4dup,hx3,1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(cat(hx3dup,hx2,1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(cat(hx2dup,hx1,1))\n\n return comb(hx1d, hxin)\n\n### RSU-4 ###\nclass RSU4(nn.Module):#UNet04DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU4,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = MaxPool2dx()\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = MaxPool2dx()\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n 
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n\n hx4 = self.rebnconv4(hx3)\n\n hx3d = self.rebnconv3d(cat(hx4,hx3,1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(cat(hx3dup,hx2,1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(cat(hx2dup,hx1,1))\n\n return comb(hx1d, hxin)\n\n### RSU-4F ###\nclass RSU4F(nn.Module):#UNet04FRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU4F,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)\n\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx2 = self.rebnconv2(hx1)\n hx3 = self.rebnconv3(hx2)\n\n hx4 = self.rebnconv4(hx3)\n\n hx3d = self.rebnconv3d(cat(hx4,hx3,1))\n hx2d = self.rebnconv2d(cat(hx3d,hx2,1))\n hx1d = self.rebnconv1d(cat(hx2d,hx1,1))\n\n return comb(hx1d, hxin)\n\n\n### U^2-Net small ###\nclass U2NETP(nn.Module):\n\n def __init__(self,in_ch=2,out_ch=2):\n super(U2NETP,self).__init__()\n\n self.stage1 = RSU7(in_ch,16,64,first=True)\n self.pool12 = MaxPool2dx()\n\n self.stage2 = RSU6(64,16,64)\n self.pool23 = MaxPool2dx()\n\n self.stage3 = RSU5(64,16,64)\n self.pool34 = MaxPool2dx()\n\n self.stage4 = RSU4(64,16,64)\n self.pool45 = MaxPool2dx()\n\n self.stage5 = RSU4F(64,16,64)\n self.pool56 = MaxPool2dx()\n\n self.stage6 = RSU4F(64,16,64)\n\n # decoder\n self.stage5d = RSU4F(128,16,64)\n self.stage4d = RSU4(128,16,64)\n self.stage3d = RSU5(128,16,64)\n self.stage2d = RSU6(128,16,64)\n self.stage1d = RSU7(128,16,64)\n\n self.side1 = OctaveConv(64,out_ch,3,padding=1)\n self.side2 = OctaveConv(64,out_ch,3,padding=1)\n self.side3 = OctaveConv(64,out_ch,3,padding=1)\n self.side4 = OctaveConv(64,out_ch,3,padding=1)\n self.side5 = OctaveConv(64,out_ch,3,padding=1)\n self.side6 = OctaveConv(64,out_ch,3,padding=1)\n\n self.outconv = nn.Conv2d(6*out_ch,out_ch,1)\n\n def forward(self,x):\n #x1=torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)\n hx = x\n\n #stage 1\n hx1 = self.stage1((hx,None))\n hx = self.pool12(hx1)\n\n #stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n #stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n #stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n #stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n #stage 6\n hx6 = self.stage6(hx)\n hx6up = _upsample_like(hx6,hx5)\n\n #decoder\n hx5d = self.stage5d(cat(hx6up,hx5,1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.stage4d(cat(hx5dup,hx4,1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.stage3d(cat(hx4dup,hx3,1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.stage2d(cat(hx3dup,hx2,1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.stage1d(cat(hx2dup,hx1,1))\n\n\n #side output\n d1 = self.side1(hx1d)\n d2 = 
self.side2(hx2d)\n d3 = self.side3(hx3d)\n d4 = self.side4(hx4d)\n d5 = self.side5(hx5d)\n d6 = self.side6(hx6)\n\n d2 = _upsample_like1(d2,d1)\n d3 = _upsample_like1(d3,d1)\n d4 = _upsample_like1(d4,d1)\n d5 = _upsample_like1(d5,d1)\n d6 = _upsample_like1(d6,d1)\n\n d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n d0 = _upsample_like1(d0,x)\n return x*F.relu(d0)#, F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)\n\nnet = U2NETP()\nd=torch.ones((1,2,512,512))\nnet.eval()\ns=net(d)\nt=time.time()\nfor i in range(10):\n s=net(d)\nt=time.time()-t\nprint(t/10.0)\n#dict=net.state_dict()\n#torch.save(dict,\"x.pt\")"
] | [
[
"torch.nn.functional.upsample",
"torch.ones",
"torch.cat",
"torch.nn.functional.conv2d",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
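Editor's aside (not part of the dataset record above): the OctConv layer in that record splits channels into high- and low-frequency paths using an alpha coefficient, exactly as in the constructor's `h_in_channels` / `h_out_channels` arithmetic. A tiny sketch of that split:

# Octave-convolution channel split, mirroring the record above (sketch only).
def oct_split(channels: int, alpha: float) -> tuple:
    # High-frequency path gets the (1 - alpha) share; the remainder is low-frequency.
    high = int(channels * (1.0 - alpha))
    low = channels - high
    return high, low

print(oct_split(64, 0.5))   # (32, 32)
print(oct_split(64, 0.25))  # (48, 16)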
daniel-dr-rojas/scipy | [
"8e418e38fe6c88105eeae8eeb53248d19a8286fb"
] | [
"scipy/special/_precompute/lambertw.py"
] | [
"\"\"\"Compute a Pade approximation for the principle branch of the\nLambert W function around 0 and compare it to various other\napproximations.\n\n\"\"\"\nimport numpy as np\n\ntry:\n import mpmath # type: ignore[import]\n import matplotlib.pyplot as plt\nexcept ImportError:\n pass\n\n\ndef lambertw_pade():\n derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]\n p, q = mpmath.pade(derivs, 3, 2)\n return p, q\n\n\ndef main():\n print(__doc__)\n with mpmath.workdps(50):\n p, q = lambertw_pade()\n p, q = p[::-1], q[::-1]\n print(\"p = {}\".format(p))\n print(\"q = {}\".format(q))\n\n x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)\n x, y = np.meshgrid(x, y)\n z = x + 1j*y\n lambertw_std = []\n for z0 in z.flatten():\n lambertw_std.append(complex(mpmath.lambertw(z0)))\n lambertw_std = np.array(lambertw_std).reshape(x.shape)\n\n fig, axes = plt.subplots(nrows=3, ncols=1)\n # Compare Pade approximation to true result\n p = np.array([float(p0) for p0 in p])\n q = np.array([float(q0) for q0 in q])\n pade_approx = np.polyval(p, z)/np.polyval(q, z)\n pade_err = abs(pade_approx - lambertw_std)\n axes[0].pcolormesh(x, y, pade_err)\n # Compare two terms of asymptotic series to true result\n asy_approx = np.log(z) - np.log(np.log(z))\n asy_err = abs(asy_approx - lambertw_std)\n axes[1].pcolormesh(x, y, asy_err)\n # Compare two terms of the series around the branch point to the\n # true result\n p = np.sqrt(2*(np.exp(1)*z + 1))\n series_approx = -1 + p - p**2/3\n series_err = abs(series_approx - lambertw_std)\n im = axes[2].pcolormesh(x, y, series_err)\n\n fig.colorbar(im, ax=axes.ravel().tolist())\n plt.show()\n\n fig, ax = plt.subplots(nrows=1, ncols=1)\n pade_better = pade_err < asy_err\n im = ax.pcolormesh(x, y, pade_better)\n t = np.linspace(-0.3, 0.3)\n ax.plot(-2.5*abs(t) - 0.2, t, 'r')\n fig.colorbar(im, ax=ax)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.log",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.exp",
"numpy.array",
"numpy.meshgrid",
"numpy.polyval",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
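Editor's aside (not part of the dataset record above): the scipy precompute script in that record compares a Pade fit of the Lambert W principal branch against its asymptotic and branch-point series. A minimal sketch of the asymptotic comparison using scipy.special.lambertw (the sample points are arbitrary):

# Two-term asymptotic series vs. the exact principal branch (sketch only).
import numpy as np
from scipy.special import lambertw

z = np.array([5.0, 50.0, 5e5])
asy = np.log(z) - np.log(np.log(z))   # W(z) ~ ln z - ln ln z for large z
exact = lambertw(z).real              # principal branch W_0
print(np.abs(asy - exact) / exact)    # relative error drops as z grows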
alexpyattaev/pyquaternion | [
"19c3581799c220607ce327af5f1cbe9f98dba4e6"
] | [
"pyquaternion/test/test_quaternion.py"
] | [
"#!/usr/bin python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file is part of the pyquaternion python module\n\nAuthor: Kieran Wynn\nWebsite: https://github.com/KieranWynn/pyquaternion\nDocumentation: http://kieranwynn.github.io/pyquaternion/\n\nVersion: 1.0.0\nLicense: The MIT License (MIT)\n\nCopyright (c) 2015 Kieran Wynn\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\ntest_quaternion.py - Unit test for quaternion module\n\n\"\"\"\n\nimport unittest\nfrom math import pi, sin, cos\nfrom random import random\n\nimport numpy as np\nimport pytest\n\nimport pyquaternion\nfrom pyquaternion.numba_opt import TypingError\nfrom pyquaternion.quaternion import z_axis, x_axis, to_str\n\nQuaternion = pyquaternion.Quaternion\n\nALMOST_EQUAL_TOLERANCE = 13\n\n\ndef randomElements():\n return tuple(np.random.uniform(-1, 1, 4))\n\n\ndef test_init_default():\n q = Quaternion()\n assert isinstance(q, Quaternion)\n assert q.eq(Quaternion(np.array((1., 0., 0., 0.))))\n\n\ndef test_init_junk():\n with pytest.raises(TypingError):\n q = Quaternion(\"blaaa\")\n with pytest.raises(TypingError):\n q = Quaternion(None)\n\n\ndef test_init_copy():\n q1 = Quaternion.random()\n q2 = q1.copy()\n\n assert q2.eq(q1)\n\n\ndef test_init_random(self):\n r1 = Quaternion.random()\n r2 = Quaternion.random()\n self.assertAlmostEqual(r1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)\n self.assertIsInstance(r1, Quaternion)\n self.assertNotEqual(r1, r2) # TODO, this *may* fail at random\n\n\n\n\ndef test_init_from_elements():\n a, b, c, d = randomElements()\n q1 = Quaternion(np.array([a, b, c, d], dtype=float))\n\n # assert np.array_equal(q1.q, [a, b, c, d], dtype=float)\n with pytest.raises(ValueError):\n q = Quaternion(np.zeros(3))\n\n with pytest.raises(TypingError):\n q = Quaternion.from_scalar_and_vector(None, np.array([b, c, d]))\n\n\ndef test_init_from_array(self):\n r = randomElements()\n a = np.array(r)\n q = Quaternion(a)\n self.assertIsInstance(q, Quaternion)\n self.assertTrue(np.allclose(q.q, a))\n with self.assertRaises(ValueError):\n q = Quaternion(a[1:4]) # 3-vector\n with self.assertRaises(ValueError):\n q = Quaternion(np.hstack((a, a))) # 8-vector\n with self.assertRaises(ValueError):\n q = Quaternion(np.array([a, a])) # 2x4-\n\n\ndef test_init_from_explicit_rotation_params():\n vx = random()\n vy = random()\n vz = random()\n theta = random() * 2.0 * pi\n\n v1 = np.array([vx, vy, vz], dtype=float)\n v3 = np.copy(v1)\n\n q1 = Quaternion.from_axis_angle(axis=v1, angle=theta)\n\n with pytest.raises(ValueError):\n q1 = 
Quaternion.from_axis_angle(axis=np.zeros(3), angle=theta)\n # normalise v to a unit vector\n v3 = v3 / np.linalg.norm(v3)\n\n q4 = Quaternion.from_axis_angle(angle=theta, axis=v3)\n\n # Construct the true quaternion\n t = theta / 2.0\n\n a = cos(t)\n b = v3[0] * sin(t)\n c = v3[1] * sin(t)\n d = v3[2] * sin(t)\n\n truth = Quaternion(np.array([a, b, c, d]))\n\n assert q1.eq(truth)\n\n assert q4.eq(truth)\n\n assert Quaternion.from_axis_angle(np.array([1, 0, 0], dtype=float)).eq(Quaternion())\n\n # Result should be a versor (Unit Quaternion)\n assert abs(q1.norm - 1.0) < ALMOST_EQUAL_TOLERANCE\n\n with pytest.raises(Exception):\n q = Quaternion.from_axis_angle(angle=theta)\n with pytest.raises(TypingError):\n q = Quaternion.from_axis_angle(axis=[b, c], angle=theta)\n with pytest.raises(TypingError):\n q = Quaternion.from_axis_angle(axis=np.array([1, 2, 3], dtype=int), angle=theta)\n with pytest.raises(TypingError):\n q = Quaternion.from_axis_angle(axis=[b, c], angle=None)\n\n\ndef test_init_from_explicit_matrix():\n def R_z(theta):\n \"\"\"\n Generate a rotation matrix describing a rotation of theta degrees about the z-axis\n \"\"\"\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n v = np.copy(x_axis)\n for angle in [0, pi / 6, pi / 4, pi / 2, pi, 4 * pi / 3, 3 * pi / 2, 2 * pi]:\n R = R_z(angle) # rotation matrix describing rotation of 90 about +z\n v_prime_r = np.dot(R, v)\n\n q1 = Quaternion.from_axis_angle(axis=z_axis, angle=angle)\n v_prime_q1 = q1.rotate(v)\n print(\"=====\" + str(angle))\n # assert np.allclose(v_prime_r, v_prime_q1)\n\n q2 = Quaternion.from_matrix(matrix=R)\n\n v_prime_q2 = q2.rotate(v)\n print(v_prime_q1)\n print(v_prime_q2)\n print(v_prime_r)\n # assert np.allclose(v_prime_q2, v_prime_r)\n\n R = np.matrix(np.eye(3))\n q3 = Quaternion.from_matrix(matrix=R)\n v_prime_q3 = q3.rotate(v)\n assert np.allclose(v, v_prime_q3)\n assert q3.eq(Quaternion())\n\n R[0, 1] += 3 # introduce error to make matrix non-orthogonal\n with pytest.raises(ValueError):\n q4 = Quaternion.from_matrix(matrix=R)\n\n\ndef test_init_from_explicit_matrix_with_optional_tolerance_arguments():\n \"\"\"\n The matrix defined in this test is orthogonal was carefully crafted\n such that it's orthogonal to a precision of 1e-06, but not to a precision\n of 1e-08.\n\n Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html\n \"\"\"\n m = [[0.73297226, -0.16524626, -0.65988294, -0.07654548],\n [0.13108627, 0.98617666, -0.10135052, -0.04878795],\n [0.66750896, -0.01221443, 0.74450167, -0.05474513],\n [0, 0, 0, 1, ]]\n npm = np.matrix(m)\n\n with pytest.raises(ValueError):\n Quaternion.from_matrix(matrix=npm)\n\n q1 = Quaternion.from_matrix(matrix=npm, atol=1e-6)\n\n\ndef test_str():\n a, b, c, d = randomElements()\n q = Quaternion(np.array([a, b, c, d], dtype=float))\n string = \"{:.3f} {:+.3f}i {:+.3f}j {:+.3f}k\".format(a, b, c, d)\n assert string == to_str(q)\n\n\ndef test_equality(self):\n r = randomElements()\n self.assertEqual(Quaternion(*r), Quaternion(*r))\n q = Quaternion(*r)\n self.assertEqual(q, q)\n # Equality should work with other types, if they can be interpreted as quaternions\n self.assertEqual(q, r)\n self.assertEqual(Quaternion(1., 0., 0., 0.), 1.0)\n with self.assertRaises(ValueError):\n Quaternion(\"1.32\")\n self.assertNotEqual(q, q + Quaternion(0.0, 0.002, 0.0, 0.0))\n\n # Equality should also cover small rounding and floating point errors\n self.assertEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-14, 
0., 0., 0.))\n self.assertNotEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-12, 0., 0., 0.))\n self.assertNotEqual(Quaternion(160., 0., 0., 0.), Quaternion(160.0 - 1e-10, 0., 0., 0.))\n self.assertNotEqual(Quaternion(1600., 0., 0., 0.), Quaternion(1600.0 - 1e-9, 0., 0., 0.))\n\n with self.assertRaises(TypeError):\n q == None\n with self.assertRaises(ValueError):\n q == 's'\n\n\ndef test_assignment(self):\n a, b, c, d = randomElements()\n q1 = Quaternion(a, b, c, d)\n q2 = Quaternion(a, b * 0.1, c + 0.3, d)\n self.assertNotEqual(q1, q2)\n q2 = q1\n self.assertEqual(q1, q2)\n\n\ndef test_unary_minus(self):\n a, b, c, d = randomElements()\n q = Quaternion(a, b, c, d)\n self.assertEqual(-q, Quaternion(-a, -b, -c, -d))\n\n\ndef test_add(self):\n r1 = randomElements()\n r2 = randomElements()\n r = random()\n n = None\n\n q1 = Quaternion(*r1)\n q2 = Quaternion(*r2)\n q3 = Quaternion(array=np.array(r1) + np.array(r2))\n q4 = Quaternion(array=np.array(r2) + np.array([r, 0.0, 0.0, 0.0]))\n self.assertEqual(q1 + q2, q3)\n q1 += q2\n self.assertEqual(q1, q3)\n self.assertEqual(q2 + r, q4)\n self.assertEqual(r + q2, q4)\n\n with self.assertRaises(TypeError):\n q1 += n\n with self.assertRaises(TypeError):\n n += q1\n\n\ndef test_subtract(self):\n r1 = randomElements()\n r2 = randomElements()\n r = random()\n n = None\n\n q1 = Quaternion(*r1)\n q2 = Quaternion(*r2)\n q3 = Quaternion(array=np.array(r1) - np.array(r2))\n q4 = Quaternion(array=np.array(r2) - np.array([r, 0.0, 0.0, 0.0]))\n self.assertEqual(q1 - q2, q3)\n q1 -= q2\n self.assertEqual(q1, q3)\n self.assertEqual(q2 - r, q4)\n self.assertEqual(r - q2, -q4)\n\n with self.assertRaises(TypeError):\n q1 -= n\n with self.assertRaises(TypeError):\n n -= q1\n\n\ndef test_multiplication_of_bases():\n one = Quaternion(np.array([1.0, 0.0, 0.0, 0.0]))\n i = Quaternion(np.array([0.0, 1.0, 0.0, 0.0]))\n j = Quaternion(np.array([0.0, 0.0, 1.0, 0.0]))\n k = Quaternion(np.array([0.0, 0.0, 0.0, 1.0]))\n\n assert i.mul(i).eq(j.mul(j))\n assert j.mul(j).eq(k.mul(k))\n\n assert k.mul(k).eq(i.mul(j).mul(k))\n\n assert i.mul(j.mul(k)).eq(one.neg())\n\n assert i.mul(j).eq(k)\n assert i.mul(i).eq(one.neg())\n assert i.mul(k).eq(j.neg())\n assert j.mul(i).eq(k.neg())\n assert j.mul(j).eq(one.neg())\n assert j.mul(k).eq(i)\n assert k.mul(i).eq(j)\n assert k.mul(j).eq(i.neg())\n assert k.mul(k).eq(one.neg())\n assert i.mul(j).mul(k).eq(one.neg())\n\n # self.assertEqual(i * i, j * j)\n # self.assertEqual(j * j, k * k)\n # self.assertEqual(k * k, i * j * k)\n # self.assertEqual(i * j * k, -one)\n #\n # self.assertEqual(i * j, k)\n # self.assertEqual(i * i, -one)\n # self.assertEqual(i * k, -j)\n # self.assertEqual(j * i, -k)\n # self.assertEqual(j * j, -one)\n # self.assertEqual(j * k, i)\n # self.assertEqual(k * i, j)\n # self.assertEqual(k * j, -i)\n # self.assertEqual(k * k, -one)\n # self.assertEqual(i * j * k, -one)\n\n\ndef test_multiply_by_scalar():\n a, b, c, d = randomElements()\n q1 = Quaternion(np.array((a, b, c, d)))\n for s in [30.0, 0.3, -2, -4.7, 0]:\n\n q2 = Quaternion(s * a, s * b, s * c, s * d)\n S = Quaternion.from_scalar_and_vector(s)\n q3 = q1\n assert q1.mul(S).eq(q2) # post-multiply by scalar\n assert S.mul(q1).eq(q2) # pre-multiply by scalar\n q3 = q3.mul(S)\n assert q3.eq(q2)\n\n\ndef test_divide():\n r = np.random.rand(4)\n q = Quaternion(r)\n if q:\n assert q.div(q).eq(Quaternion())\n else:\n with pytest.raises(ZeroDivisionError):\n q.div(q)\n\n with pytest.raises(ZeroDivisionError):\n q.div(Quaternion.from_scalar_and_vector(0.0))\n\n\ndef 
test_division_of_bases():\n one = Quaternion(np.array([1.0, 0.0, 0.0, 0.0]))\n i = Quaternion(np.array([0.0, 1.0, 0.0, 0.0]))\n j = Quaternion(np.array([0.0, 0.0, 1.0, 0.0]))\n k = Quaternion(np.array([0.0, 0.0, 0.0, 1.0]))\n\n assert i.div(i).eq(j.div(j))\n # self.assertEqual(j / j, k / k)\n # self.assertEqual(k / k, one)\n # self.assertEqual(k / -k, -one)\n #\n # self.assertEqual(i / j, -k)\n # self.assertEqual(i / i, one)\n # self.assertEqual(i / k, j)\n # self.assertEqual(j / i, k)\n # self.assertEqual(j / j, one)\n # self.assertEqual(j / k, -i)\n # self.assertEqual(k / i, -j)\n # self.assertEqual(k / j, i)\n # self.assertEqual(k / k, one)\n # self.assertEqual(i / -j, k)\n\n\ndef test_divide_by_scalar():\n a, b, c, d = randomElements()\n q1 = Quaternion(a, b, c, d)\n for s in [30.0, 0.3, -2, -4.7]:\n q2 = Quaternion(a / s, b / s, c / s, d / s)\n q3 = q1\n self.assertEqual(q1 / s, q2)\n if q1:\n self.assertEqual(s / q1, q2.inverse)\n else:\n with self.assertRaises(ZeroDivisionError):\n s / q1\n\n q3 /= s\n self.assertEqual(q3, q2)\n\n with self.assertRaises(ZeroDivisionError):\n q4 = q1 / 0.0\n with self.assertRaises(TypeError):\n q4 = q1 / None\n with self.assertRaises(ValueError):\n q4 = q1 / 's'\n\n\ndef test_squared():\n one = Quaternion(1.0, 0.0, 0.0, 0.0)\n i = Quaternion(0.0, 1.0, 0.0, 0.0)\n j = Quaternion(0.0, 0.0, 1.0, 0.0)\n k = Quaternion(0.0, 0.0, 0.0, 1.0)\n\n self.assertEqual(i ** 2, j ** 2)\n self.assertEqual(j ** 2, k ** 2)\n self.assertEqual(k ** 2, -one)\n\n\ndef test_power():\n q1 = Quaternion.random()\n q2 = Quaternion(q1)\n self.assertEqual(q1 ** 0, Quaternion())\n self.assertEqual(q1 ** 1, q1)\n q2 **= 4\n self.assertEqual(q2, q1 * q1 * q1 * q1)\n self.assertEqual((q1 ** 0.5) * (q1 ** 0.5), q1)\n self.assertEqual(q1 ** -1, q1.inverse)\n self.assertEqual(4 ** Quaternion(2), Quaternion(16))\n with self.assertRaises(TypeError):\n q1 ** None\n with self.assertRaises(ValueError):\n q1 ** 's'\n q3 = Quaternion()\n self.assertEqual(q3 ** 0.5, q3) # Identity behaves as an identity\n self.assertEqual(q3 ** 5, q3)\n self.assertEqual(q3 ** 3.4, q3)\n q4 = Quaternion(scalar=5) # real number behaves as any other real number would\n self.assertEqual(q4 ** 4, Quaternion(scalar=5 ** 4))\n\n\ndef test_distributive():\n q1 = Quaternion.random()\n q2 = Quaternion.random()\n q3 = Quaternion.random()\n self.assertEqual(q1 * (q2 + q3), q1 * q2 + q1 * q3)\n\n\ndef test_noncommutative():\n q1 = Quaternion.random()\n q2 = Quaternion.random()\n if not q1 == q2: # Small chance of this happening with random initialisation\n self.assertNotEqual(q1 * q2, q2 * q1)\n\n\nclass TestQuaternionFeatures(unittest.TestCase):\n\n def test_conjugate(self):\n a, b, c, d = randomElements()\n q1 = Quaternion(a, b, c, d)\n q2 = Quaternion.random()\n self.assertEqual(q1.conjugate, Quaternion(a, -b, -c, -d))\n\n self.assertEqual((q1 * q2).conjugate, q2.conjugate * q1.conjugate)\n self.assertEqual((q1 + q1.conjugate) / 2, Quaternion(scalar=q1.scalar))\n self.assertEqual((q1 - q1.conjugate) / 2, Quaternion(vector=q1.vector))\n\n def test_double_conjugate(self):\n q = Quaternion.random()\n self.assertEqual(q, q.conjugate.conjugate)\n\n def test_norm(self):\n r = randomElements()\n q1 = Quaternion(*r)\n q2 = Quaternion.random()\n self.assertEqual(q1.norm, np.linalg.norm(np.array(r)))\n self.assertEqual(q1.magnitude, np.linalg.norm(np.array(r)))\n # Multiplicative norm\n self.assertAlmostEqual((q1 * q2).norm, q1.norm * q2.norm, ALMOST_EQUAL_TOLERANCE)\n # Scaled norm\n for s in [30.0, 0.3, -2, -4.7]:\n 
self.assertAlmostEqual((q1 * s).norm, q1.norm * abs(s), ALMOST_EQUAL_TOLERANCE)\n\n def test_inverse(self):\n q1 = Quaternion(randomElements())\n q2 = Quaternion.random()\n if q1:\n self.assertEqual(q1 * q1.inverse, Quaternion(1.0, 0.0, 0.0, 0.0))\n else:\n with self.assertRaises(ZeroDivisionError):\n q1 * q1.inverse\n\n self.assertEqual(q2 * q2.inverse, Quaternion(1.0, 0.0, 0.0, 0.0))\n\n def test_normalisation(self): # normalise to unit quaternion\n r = randomElements()\n q1 = Quaternion(*r)\n v = q1.unit\n n = q1.normalised\n\n if q1 == Quaternion(0): # small chance with random generation\n return # a 0 quaternion does not normalise\n\n # Test normalised objects are unit quaternions\n np.testing.assert_almost_equal(v.q, q1.elements / q1.norm, decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(n.q, q1.elements / q1.norm, decimal=ALMOST_EQUAL_TOLERANCE)\n self.assertAlmostEqual(v.norm, 1.0, ALMOST_EQUAL_TOLERANCE)\n self.assertAlmostEqual(n.norm, 1.0, ALMOST_EQUAL_TOLERANCE)\n # Test axis and angle remain the same\n np.testing.assert_almost_equal(q1.axis, v.axis, decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(q1.axis, n.axis, decimal=ALMOST_EQUAL_TOLERANCE)\n self.assertAlmostEqual(q1.angle, v.angle, ALMOST_EQUAL_TOLERANCE)\n self.assertAlmostEqual(q1.angle, n.angle, ALMOST_EQUAL_TOLERANCE)\n # Test special case where q is zero\n q2 = Quaternion(0)\n self.assertEqual(q2, q2.normalised)\n\n def test_is_unit(self):\n q1 = Quaternion()\n q2 = Quaternion(1.0, 0, 0, 0.0001)\n self.assertTrue(q1.is_unit())\n self.assertFalse(q2.is_unit())\n self.assertTrue(q2.is_unit(0.001))\n\n def test_q_matrix(self):\n a, b, c, d = randomElements()\n q = Quaternion(a, b, c, d)\n M = np.array([\n [a, -b, -c, -d],\n [b, a, -d, c],\n [c, d, a, -b],\n [d, -c, b, a]])\n self.assertTrue(np.array_equal(q._q_matrix(), M))\n\n def test_q_bar_matrix(self):\n a, b, c, d = randomElements()\n q = Quaternion(a, b, c, d)\n M = np.array([\n [a, -b, -c, -d],\n [b, a, d, -c],\n [c, -d, a, b],\n [d, c, -b, a]])\n self.assertTrue(np.array_equal(q._q_bar_matrix(), M))\n\n def test_output_of_components(self):\n a, b, c, d = randomElements()\n q = Quaternion(a, b, c, d)\n # Test scalar\n self.assertEqual(q.scalar, a)\n self.assertEqual(q.real, a)\n # Test vector\n self.assertTrue(np.array_equal(q.vector, [b, c, d]))\n self.assertTrue(np.array_equal(q.imaginary, [b, c, d]))\n self.assertEqual(tuple(q.vector), (b, c, d))\n self.assertEqual(list(q.imaginary), [b, c, d])\n self.assertEqual(q.w, a)\n self.assertEqual(q.x, b)\n self.assertEqual(q.y, c)\n self.assertEqual(q.z, d)\n\n def test_output_of_elements(self):\n r = randomElements()\n q = Quaternion(*r)\n self.assertEqual(tuple(q.elements), r)\n\n def test_element_access(self):\n r = randomElements()\n q = Quaternion(*r)\n self.assertEqual(q[0], r[0])\n self.assertEqual(q[1], r[1])\n self.assertEqual(q[2], r[2])\n self.assertEqual(q[3], r[3])\n self.assertEqual(q[-1], r[3])\n self.assertEqual(q[-4], r[0])\n with self.assertRaises(TypeError):\n q[None]\n with self.assertRaises(IndexError):\n q[4]\n with self.assertRaises(IndexError):\n q[-5]\n\n def test_element_assignment(self):\n q = Quaternion()\n self.assertEqual(q[1], 0.0)\n q[1] = 10.0\n self.assertEqual(q[1], 10.0)\n self.assertEqual(q, Quaternion(1.0, 10.0, 0.0, 0.0))\n with self.assertRaises(TypeError):\n q[2] = None\n with self.assertRaises(ValueError):\n q[2] = 's'\n\n def test_rotate(self):\n q = Quaternion(axis=[1, 1, 1], angle=2 * pi / 3)\n q2 = Quaternion(axis=[1, 0, 0], angle=-pi)\n 
q3 = Quaternion(axis=[1, 0, 0], angle=pi)\n precision = ALMOST_EQUAL_TOLERANCE\n for r in [1, 3.8976, -69.7, -0.000001]:\n # use np.testing.assert_almost_equal() to compare float sequences\n np.testing.assert_almost_equal(q.rotate((r, 0, 0)), (0, r, 0), decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(q.rotate([0, r, 0]), [0, 0, r], decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(q.rotate(np.array([0, 0, r])), np.array([r, 0, 0]),\n decimal=ALMOST_EQUAL_TOLERANCE)\n self.assertEqual(q.rotate(Quaternion(vector=[-r, 0, 0])), Quaternion(vector=[0, -r, 0]))\n np.testing.assert_almost_equal(q.rotate([0, -r, 0]), [0, 0, -r], decimal=ALMOST_EQUAL_TOLERANCE)\n self.assertEqual(q.rotate(Quaternion(vector=[0, 0, -r])), Quaternion(vector=[-r, 0, 0]))\n\n np.testing.assert_almost_equal(q2.rotate((r, 0, 0)), q3.rotate((r, 0, 0)), decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(q2.rotate((0, r, 0)), q3.rotate((0, r, 0)), decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(q2.rotate((0, 0, r)), q3.rotate((0, 0, r)), decimal=ALMOST_EQUAL_TOLERANCE)\n\n def test_conversion_to_matrix(self):\n q = Quaternion.random()\n a, b, c, d = tuple(q.elements)\n R = np.array([\n [a ** 2 + b ** 2 - c ** 2 - d ** 2, 2 * (b * c - a * d), 2 * (a * c + b * d)],\n [2 * (b * c + a * d), a ** 2 - b ** 2 + c ** 2 - d ** 2, 2 * (c * d - a * b)],\n [2 * (b * d - a * c), 2 * (a * b + c * d), a ** 2 - b ** 2 - c ** 2 + d ** 2]])\n t = np.array([[0], [0], [0]])\n T = np.vstack([np.hstack([R, t]), np.array([0, 0, 0, 1])])\n np.testing.assert_almost_equal(R, q.rotation_matrix, decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(T, q.transformation_matrix, decimal=ALMOST_EQUAL_TOLERANCE)\n\n # Test no scaling of rotated vectors\n v1 = np.array([1, 0, 0])\n v2 = np.hstack((np.random.uniform(-10, 10, 3), 1.0))\n v1_ = np.dot(q.rotation_matrix, v1)\n v2_ = np.dot(q.transformation_matrix, v2)\n self.assertAlmostEqual(np.linalg.norm(v1_), 1.0, ALMOST_EQUAL_TOLERANCE)\n self.assertAlmostEqual(np.linalg.norm(v2_), np.linalg.norm(v2), ALMOST_EQUAL_TOLERANCE)\n\n # Test transformation of vectors is equivalent for quaternion & matrix\n np.testing.assert_almost_equal(v1_, q.rotate(v1), decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(v2_[0:3], q.rotate(v2[0:3]), decimal=ALMOST_EQUAL_TOLERANCE)\n\n def test_conversion_to_ypr(self):\n\n def R_x(theta):\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [1, 0, 0],\n [0, c, -s],\n [0, s, c]])\n\n def R_y(theta):\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\n def R_z(theta):\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n p = np.random.randn(3)\n q = Quaternion.random()\n yaw, pitch, roll = q.yaw_pitch_roll\n\n p_q = q.rotate(p)\n R_q = q.rotation_matrix\n\n # build rotation matrix, R = R_z(yaw)*R_y(pitch)*R_x(roll)\n R_ypr = np.dot(R_x(roll), np.dot(R_y(pitch), R_z(yaw)))\n p_ypr = np.dot(R_ypr, p)\n\n np.testing.assert_almost_equal(p_q, p_ypr, decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(R_q, R_ypr, decimal=ALMOST_EQUAL_TOLERANCE)\n\n def test_matrix_io(self):\n v = np.random.uniform(-100, 100, 3)\n\n for i in range(10):\n q0 = Quaternion.random()\n R = q0.rotation_matrix\n q1 = Quaternion(matrix=R)\n np.testing.assert_almost_equal(q0.rotate(v), np.dot(R, v), decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(q0.rotate(v), q1.rotate(v), decimal=ALMOST_EQUAL_TOLERANCE)\n 
np.testing.assert_almost_equal(q1.rotate(v), np.dot(R, v), decimal=ALMOST_EQUAL_TOLERANCE)\n\n self.assertTrue((q0 == q1) or (q0 == -q1)) # q1 and -q1 are equivalent rotations\n\n def validate_axis_angle(self, axis, angle):\n\n def wrap_angle(theta):\n \"\"\" Wrap any angle to lie between -pi and pi\n\n Odd multiples of pi are wrapped to +pi (as opposed to -pi)\n \"\"\"\n result = ((theta + pi) % (2 * pi)) - pi\n if result == -pi: result = pi\n return result\n\n theta = wrap_angle(angle)\n v = axis\n\n q = Quaternion(angle=theta, axis=v)\n\n v_ = q.axis\n theta_ = q.angle\n\n if theta == 0.0: # axis is irrelevant (check defaults to x=y=z)\n np.testing.assert_almost_equal(theta_, 0.0, decimal=ALMOST_EQUAL_TOLERANCE)\n np.testing.assert_almost_equal(v_, np.zeros(3), decimal=ALMOST_EQUAL_TOLERANCE)\n return\n elif abs(theta) == pi: # rotation in either direction is equivalent\n self.assertTrue(\n np.isclose(theta, pi) or np.isclose(theta, -pi)\n and\n np.isclose(v, v_).all() or np.isclose(v, -v_).all()\n )\n else:\n self.assertTrue(\n np.isclose(theta, theta_) and np.isclose(v, v_).all()\n or\n np.isclose(theta, -theta_) and np.isclose(v, -v_).all()\n )\n # Ensure the returned axis is a unit vector\n np.testing.assert_almost_equal(np.linalg.norm(v_), 1.0, decimal=ALMOST_EQUAL_TOLERANCE)\n\n def test_conversion_to_axis_angle(self):\n random_axis = np.random.uniform(-1, 1, 3)\n random_axis /= np.linalg.norm(random_axis)\n\n angles = np.array([-3, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 3]) * pi\n axes = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]), random_axis]\n\n for v in axes:\n for theta in angles:\n self.validate_axis_angle(v, theta)\n\n def test_axis_angle_io(self):\n for i in range(20):\n v = np.random.uniform(-1, 1, 3)\n v /= np.linalg.norm(v)\n theta = float(np.random.uniform(-2, 2, 1)) * pi\n self.validate_axis_angle(v, theta)\n\n def test_exp(self):\n from math import exp\n q = Quaternion(axis=[1, 0, 0], angle=pi)\n exp_q = Quaternion.exp(q)\n self.assertEqual(exp_q, exp(0) * Quaternion(scalar=cos(1.0), vector=[sin(1.0), 0, 0]))\n\n def test_log(self):\n from math import log\n q = Quaternion(axis=[1, 0, 0], angle=pi)\n log_q = Quaternion.log(q)\n self.assertEqual(log_q, Quaternion(scalar=0, vector=[pi / 2, 0, 0]))\n\n def test_distance(self):\n q = Quaternion(scalar=0, vector=[1, 0, 0])\n p = Quaternion(scalar=0, vector=[0, 1, 0])\n self.assertEqual(pi / 2, Quaternion.distance(q, p))\n q = Quaternion(angle=pi / 2, axis=[1, 0, 0])\n p = Quaternion(angle=pi / 2, axis=[0, 1, 0])\n self.assertEqual(pi / 3, Quaternion.distance(q, p))\n q = Quaternion(scalar=1, vector=[1, 1, 1])\n p = Quaternion(scalar=-1, vector=[-1, -1, -1])\n p._normalise()\n q._normalise()\n self.assertAlmostEqual(0, Quaternion.distance(q, p), places=8)\n\n def test_absolute_distance(self):\n q = Quaternion(scalar=0, vector=[1, 0, 0])\n p = Quaternion(scalar=0, vector=[0, 1, 0])\n self.assertEqual((q - p).norm, Quaternion.absolute_distance(q, p))\n q = Quaternion(angle=pi / 2, axis=[1, 0, 0])\n p = Quaternion(angle=pi / 2, axis=[0, 1, 0])\n self.assertEqual((q - p).norm, Quaternion.absolute_distance(q, p))\n q = Quaternion(scalar=0, vector=[1, 0, 0])\n p = Quaternion(scalar=-1, vector=[0, -1, 0])\n self.assertEqual((q + p).norm, Quaternion.absolute_distance(q, p))\n q = Quaternion(scalar=1, vector=[1, 1, 1])\n p = Quaternion(scalar=-1, vector=[-1, -1, -1])\n p._normalise()\n q._normalise()\n self.assertAlmostEqual(0, Quaternion.absolute_distance(q, p), places=8)\n\n def test_sym_distance(self):\n q = 
Quaternion(scalar=0, vector=[1, 0, 0])\n p = Quaternion(scalar=0, vector=[0, 1, 0])\n self.assertEqual(pi / 2, Quaternion.sym_distance(q, p))\n q = Quaternion(angle=pi / 2, axis=[1, 0, 0])\n p = Quaternion(angle=pi / 2, axis=[0, 1, 0])\n self.assertAlmostEqual(pi / 3, Quaternion.sym_distance(q, p), places=6)\n q = Quaternion(scalar=0, vector=[1, 0, 0])\n p = Quaternion(scalar=0, vector=[0, -1, 0])\n self.assertEqual(pi / 2, Quaternion.sym_distance(q, p))\n # TODO: this is numerically unstable, previous EPS of 1e-17 was too low for double precision floats\n # q = Quaternion(scalar=1, vector=[1, 1, 1])\n # p = Quaternion(scalar=-1, vector=[-1, -1, -1])\n # p._normalise()\n # q._normalise()\n # self.assertAlmostEqual(pi, Quaternion.sym_distance(q, p), places=8)\n\n def test_slerp(self):\n q1 = Quaternion(axis=[1, 0, 0], angle=0.0)\n q2 = Quaternion(axis=[1, 0, 0], angle=pi / 2)\n q3 = Quaternion.slerp(q1, q2, 0.5)\n self.assertEqual(q3, Quaternion(axis=[1, 0, 0], angle=pi / 4))\n\n def test_slerp_extensive(self):\n for axis in [[1, 0, 0], [0, 1, 0], [0, 0, 1]]:\n q1 = Quaternion(axis=axis, angle=0.0)\n q2 = Quaternion(axis=axis, angle=pi / 2.0)\n q3 = Quaternion(axis=axis, angle=pi * 3.0 / 2.0)\n for t in np.arange(0.1, 1, 0.1):\n q4 = Quaternion.slerp(q1, q2, t)\n q5 = Quaternion.slerp(q1, q3, t)\n q6 = Quaternion(axis=axis, angle=t * pi / 2)\n q7 = Quaternion(axis=axis, angle=-t * pi / 2)\n assert q4 == q6 or q4 == -q6\n assert q5 == q7 or q5 == -q7\n\n def test_interpolate(self):\n q1 = Quaternion(axis=[1, 0, 0], angle=0.0)\n q2 = Quaternion(axis=[1, 0, 0], angle=2 * pi / 3)\n num_intermediates = 3\n base = pi / 6\n list1 = list(Quaternion.intermediates(q1, q2, num_intermediates, include_endpoints=False))\n list2 = list(Quaternion.intermediates(q1, q2, num_intermediates, include_endpoints=True))\n self.assertEqual(len(list1), num_intermediates)\n self.assertEqual(len(list2), num_intermediates + 2)\n self.assertEqual(list1[0], list2[1])\n self.assertEqual(list1[1], list2[2])\n self.assertEqual(list1[2], list2[3])\n\n self.assertEqual(list2[0], q1)\n self.assertEqual(list2[1], Quaternion(axis=[1, 0, 0], angle=base))\n self.assertEqual(list2[2], Quaternion(axis=[1, 0, 0], angle=2 * base))\n self.assertEqual(list2[3], Quaternion(axis=[1, 0, 0], angle=3 * base))\n self.assertEqual(list2[4], q2)\n\n def test_differentiation(self):\n q = Quaternion.random()\n omega = np.random.uniform(-1, 1, 3) # Random angular velocity\n\n q_dash = 0.5 * q * Quaternion(vector=omega)\n\n self.assertEqual(q_dash, q.derivative(omega))\n\n def test_integration(self):\n rotation_rate = [0, 0, 2 * pi] # one rev per sec around z\n v = [1, 0, 0] # test vector\n for dt in [0, 0.25, 0.5, 0.75, 1, 2, 10, 1e-10, random() * 10]: # time step in seconds\n qt = Quaternion() # no rotation\n qt.integrate(rotation_rate, dt)\n q_truth = Quaternion(axis=[0, 0, 1], angle=dt * 2 * pi)\n a = qt.rotate(v)\n b = q_truth.rotate(v)\n np.testing.assert_almost_equal(a, b, decimal=ALMOST_EQUAL_TOLERANCE)\n self.assertTrue(qt.is_unit())\n # Check integrate() is norm-preserving over many calls\n q = Quaternion()\n for i in range(1000):\n q.integrate([pi, 0, 0], 0.001)\n self.assertTrue(q.is_unit())\n\n\nclass TestQuaternionUtilities(unittest.TestCase):\n def test_copy(self):\n from copy import copy\n q = Quaternion.random()\n q2 = copy(q)\n self.assertEqual(q, q2)\n self.assertFalse(q is q2)\n self.assertTrue(all(q.q == q2.q))\n\n def test_deep_copy(self):\n from copy import deepcopy\n q = Quaternion.random()\n q2 = deepcopy(q)\n 
self.assertEqual(q, q2)\n self.assertFalse(q is q2)\n self.assertFalse(q.q is q2.q)\n\n\nclass TestQuaternionHashing(unittest.TestCase):\n def test_equal_quaternions(self):\n q1 = Quaternion(np.array([1, 0, 0, 0]))\n q2 = Quaternion(np.array([1, 0, 0, 0]))\n\n self.assertEqual(hash(q1), hash(q2))\n\n def test_unequal_quaternions(self):\n q1 = Quaternion(np.array([1, 0, 0, 0]))\n q2 = Quaternion(np.array([0, 1, 0, 0]))\n\n self.assertNotEqual(hash(q1), hash(q2))\n\n\nclass TestSwingTwist(unittest.TestCase):\n \"\"\"\n tests the swing-twist decomposition\n source: https://github.com/CCP-NC/soprano/blob/master/tests/utils_tests.py\n \"\"\"\n\n def test_swing_twist(self):\n test_n = 10\n\n for t_i in range(test_n):\n # Create two quaternions with random rotations\n theta1, theta2 = np.random.random(2) * 2 * np.pi\n ax1 = np.random.random(3)\n ax2 = np.cross(np.random.random(3), ax1)\n ax1 /= np.linalg.norm(ax1)\n ax2 /= np.linalg.norm(ax2)\n\n q1 = Quaternion(np.array([np.cos(theta1 / 2)] + list(ax1 * np.sin(theta1 / 2))))\n q2 = Quaternion(np.array([np.cos(theta2 / 2)] + list(ax2 * np.sin(theta2 / 2))))\n\n qT = q1 * q2\n\n # Now decompose\n qsw, qtw = qT.swing_twist_decomp(ax2)\n # And check\n q1.q *= np.sign(q1.q[0])\n q2.q *= np.sign(q2.q[0])\n qsw.q *= np.sign(qsw.q[0])\n qtw.q *= np.sign(qtw.q[0])\n\n self.assertTrue(np.allclose(q1.q, qsw.q))\n self.assertTrue(np.allclose(q2.q, qtw.q))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.matrix",
"numpy.dot",
"numpy.random.randn",
"numpy.hstack",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"numpy.sin",
"numpy.testing.assert_almost_equal",
"numpy.copy",
"numpy.zeros",
"numpy.isclose",
"numpy.random.rand",
"numpy.array",
"numpy.random.random",
"numpy.array_equal",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sign",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
prajwal309/HAPILite_Modified | [
"e0527328569ef8bd576114b1619ac5c87ff8c5b2",
"e0527328569ef8bd576114b1619ac5c87ff8c5b2"
] | [
"GenerateDatabase.py",
"lib/CrossSectionFunctions.py"
] | [
"import glob\nimport numpy as np\nfrom lib.CrossSectionFunctions import GetWaveNumbers, BinModel\nimport matplotlib.pyplot as plt\nimport os\nimport itertools\nimport time\nimport multiprocessing as mp\n\n#parse the parameters.ini which contains the information\nData = [f.split(\":\") for f in open(\"CrossSectionParams/Parameters.ini\",'r+')][1:]\nValues = [Item[1].split(\"#\")[0] for Item in Data]\n\n\n#Load the parameters for creating\nTempStart = float(Values[0]) #Step size of the temperature\nTempStop = float(Values[1]) #Step size of the temperature\nTempStep = float(Values[2]) #Step size of the temperature\n\nexpP_Start = float(Values[3]) #The largest log10(pressure) in atm\nexpP_Stop = float(Values[4]) #The smallest log10(pressure) in atm\nexpP_Step = float(Values[5]) #Step size of the pressure\n\n\nBroadener = Values[6].replace(\" \",\"\") #Broadening either self or air at this point\nOmegaWidth = float(Values[7]) #Consider the omegawidth -- how far the lines have to be considered\nLowWavelength = float(Values[8]) #Shortest Wavelength coverage range\nHighWavelength = float(Values[9]) #Longest Wavelength coverage range\nWN_Resolution = float(Values[10]) #Resolution of the Wave Number\nLineShapeProfile = Values[11].replace(\" \",\"\") #Voigt profile by default\nMoleculeList = Values[12].split(\",\") #Get the list of Molecular species\nCores = int(Values[13])\nError = Values[14].replace(\"\\t\",\"\")\n\n\n\nTempRange = np.arange(TempStart,TempStop+TempStep, TempStep) #Temperature in K\nexpP_Range = np.arange(expP_Start, expP_Stop-expP_Step, -expP_Step) #Pressure in log(P) atm\n\n\n#Define the wavenumber range values...\nWaveNumberStart = 1./(HighWavelength*1.e-7) #in per cm\nWaveNumberStop= 1./(LowWavelength*1.e-7) #in per cm\nWaveNumberRange = np.arange(WaveNumberStart, WaveNumberStop, WN_Resolution)\n#Plotting in the ascending order\nWaveLengthRange = 1./WaveNumberRange\nWaveLengthRange = WaveLengthRange[::-1]\n\n\n\n#Now get the assign the resolution values\nResolution = 10000\n\n#Low resolution wavelength\nWavelength_LR, WaveNumber_LR = GetWaveNumbers(LowWavelength, HighWavelength, Resolution)\n\n\nFolder2Save = \"R1SIG\"+str(Resolution)\n\nif not(os.path.exists(Folder2Save)):\n os.system(\"mkdir %s\" %(Folder2Save))\n\nnp.savetxt(Folder2Save+\"/Temperature.txt\", TempRange, delimiter=\",\")\nnp.savetxt(Folder2Save+\"/exp_Pressure.txt\", expP_Range, delimiter=\",\")\nnp.savetxt(Folder2Save+\"/WaveLength.txt\", Wavelength_LR, delimiter=\",\")\nnp.savetxt(Folder2Save+\"/Molecules.txt\", MoleculeList, delimiter=\",\", fmt='%s')\n\nBaseLocation = \"DataMatrix1SIG/\"\nMoleculesFiles = glob.glob(BaseLocation+\"*.npy\")\nNumMolecules = len(MoleculesFiles)\nNumTempValues = len(TempRange)\nNumPValues = len(expP_Range)\nNumWL_Values = len(Wavelength_LR)\n\n#Initiate a database matrix\nDatabaseMatrix = np.ones((NumTempValues, NumPValues, NumMolecules, NumWL_Values), dtype=np.float32)\n\nStartTime = time.time()\nfor MoleculeCount, Molecule in enumerate(MoleculeList):\n #Read the molecule name\n MoleculeLocation = BaseLocation+Molecule+\".npy\"\n TP_Counter = list(itertools.product(range(len(TempRange)),range(len(expP_Range))))\n\n #Using multiprocessing\n SigmaMatrix = np.load(MoleculeLocation,mmap_mode='r')\n\n print(\"The molecule is given by::\", Molecule, \". 
Now loading the data....\")\n print(\"The location of the molecule is given by::\", MoleculeLocation)\n\n while(len(TP_Counter)>0):\n NUMCORES = min([mp.cpu_count(), len(TP_Counter)])\n CPU_Pool = mp.Pool(NUMCORES)\n Tasks = []\n TempCounterValues = []\n PCounterValues = []\n for i in range(NUMCORES):\n\n Item = TP_Counter[0]\n TempCounter, PCounter = Item\n TempCounterValues.append(TempCounter)\n PCounterValues.append(PCounter)\n TP_Counter.pop(0)\n\n #High resolution cross-section data\n Sigma_HR = SigmaMatrix[TempCounter, PCounter, :]\n Tasks.append(CPU_Pool.apply_async(BinModel, (WaveLengthRange, Sigma_HR, Wavelength_LR)))\n\n CPU_Pool.close()\n CPU_Pool.join()\n\n for Count, task in enumerate(Tasks):\n Result=task.get()\n #Assign the value to the datamatrix\n DatabaseMatrix[TempCounterValues[Count], PCounterValues[Count], MoleculeCount,:] = Result\n\n\n\n#Now save the datamatrix\nprint(\"Time taken::\", time.time() - StartTime)\nnp.save(Folder2Save+\"/DataBase_%d.npy\" %Resolution, DatabaseMatrix)\n",
"import numpy as np\nimport glob\nfrom scipy.interpolate import interp1d\nimport multiprocessing as mp\nfrom bisect import bisect\n\ndef GetWaveNumbers(StartWavelength=300, EndWavelength=30000, Resolution=100000):\n '''\n Returns the wavelengths corresponding to the resolution and in units\n of cm.\n '''\n WaveLengthValues = []\n\n #Converting values to\n StartWavelength = StartWavelength*1.0e-7 #nm to cm\n EndWavelength = EndWavelength*1.0e-7 #nm to cm\n WaveLengthValues = [StartWavelength]\n\n while WaveLengthValues[-1]<EndWavelength:\n WaveLengthValues.append(WaveLengthValues[-1]+WaveLengthValues[-1]/Resolution)\n WaveLengthValues = np.array(WaveLengthValues)\n WaveNumberRange = 1./WaveLengthValues\n\n WaveNumberRange = np.array(sorted(WaveNumberRange))\n return WaveLengthValues, WaveNumberRange\n\ndef BinModel(nu_HR,abs_HR,nu_Grid):\n '''\n This function takes a cross-section at high resolution:\n nu_HR is the wavenumber in increasing order\n abs_HR is the absorption cross-section in an increasing order\n The stepsize in the WaveNumberGrid is not expected to be the equal\n '''\n\n InterpValues = np.zeros(len(nu_Grid))\n Start = 0\n i = 0\n while i<len(nu_Grid):\n StartIndex = bisect(nu_HR, Start)\n StopIndex = bisect(nu_HR, nu_Grid[i])\n InterpValues[i] = np.mean(abs_HR[StartIndex:StopIndex])\n Start=nu_Grid[i]\n i+=1\n NanIndex = np.isnan(InterpValues)\n InterpValues[NanIndex] = 0.0\n return InterpValues\n\n\ndef SymplecticInterpolation(nu_HR,abs_HR,nu_Grid):\n '''\n This function takes a cross-section at high resolution:\n nu_HR is the wavenumber in increasing order\n abs_HR is the absorption cross-section in an increasing order\n The stepsize in the WaveNumberGrid is not expected to be the equal\n '''\n #Assert the Wavenumber is strictly increasing\n assert nu_HR[-1]>nu_HR[0], \"The high resolution nu should also be strictly increasing.\"\n assert nu_Grid[-1]>nu_Grid[0], \"The low resolution wavenumber should also be strictly increasing.\"\n\n InterpolatedValues = np.zeros(len(nu_Grid))\n for i in range(len(nu_Grid)):\n if i+1<len(nu_Grid):\n StepSize= nu_Grid[i+1] - nu_Grid[i]\n SelectIndex = np.abs(nu_HR-nu_Grid[i])<StepSize/2.0\n InterpolatedValues[i] = np.mean(abs_HR[SelectIndex])\n NanIndex = np.isnan(InterpolatedValues)\n InterpolatedValues[NanIndex] = 0.0\n return InterpolatedValues\n"
] | [
[
"numpy.arange",
"numpy.save",
"numpy.ones",
"numpy.savetxt",
"numpy.load"
],
[
"numpy.isnan",
"numpy.array",
"numpy.mean",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anzelpwj/arviz | [
"3aba6c9d1831199631a8e48ecc82ed482bdf00e8",
"3aba6c9d1831199631a8e48ecc82ed482bdf00e8"
] | [
"arviz/plots/backends/bokeh/bokeh_violinplot.py",
"arviz/plots/backends/matplotlib/mpl_khatplot.py"
] | [
"\"\"\"Bokeh Violinplot.\"\"\"\nimport bokeh.plotting as bkp\nfrom bokeh.layouts import gridplot\nfrom bokeh.models.annotations import Title\nimport numpy as np\n\nfrom ....stats import hpd\nfrom ....stats.stats_utils import histogram\nfrom ...kdeplot import _fast_kde\nfrom ...plot_utils import get_bins, make_label, _create_axes_grid\n\n\ndef _plot_violin(\n ax,\n plotters,\n figsize,\n rows,\n cols,\n sharey,\n kwargs_shade,\n shade,\n bw,\n credible_interval,\n linewidth,\n quartiles,\n show,\n):\n if ax is None:\n _, ax = _create_axes_grid(\n len(plotters),\n rows,\n cols,\n sharey=sharey,\n figsize=figsize,\n squeeze=False,\n backend=\"bokeh\",\n )\n\n ax = np.atleast_1d(ax)\n\n for (var_name, selection, x), ax_ in zip(plotters, ax.flatten()):\n val = x.flatten()\n if val[0].dtype.kind == \"i\":\n cat_hist(val, shade, ax_, **kwargs_shade)\n else:\n _violinplot(val, shade, bw, ax_, **kwargs_shade)\n\n per = np.percentile(val, [25, 75, 50])\n hpd_intervals = hpd(val, credible_interval, multimodal=False)\n\n if quartiles:\n ax_.line([0, 0], per[:2], line_width=linewidth * 3, line_color=\"black\")\n ax_.line([0, 0], hpd_intervals, line_width=linewidth, line_color=\"black\")\n ax_.circle(0, per[-1])\n\n _title = Title()\n _title.text = make_label(var_name, selection)\n ax_.title = _title\n ax_.xaxis.major_tick_line_color = None\n ax_.xaxis.minor_tick_line_color = None\n ax_.xaxis.major_label_text_font_size = \"0pt\"\n\n if show:\n grid = gridplot([list(item) for item in ax], toolbar_location=\"above\")\n bkp.show(grid)\n\n return ax\n\n\ndef _violinplot(val, shade, bw, ax, **kwargs_shade):\n \"\"\"Auxiliary function to plot violinplots.\"\"\"\n density, low_b, up_b = _fast_kde(val, bw=bw)\n x = np.linspace(low_b, up_b, len(density))\n\n x = np.concatenate([x, x[::-1]])\n density = np.concatenate([-density, density[::-1]])\n\n ax.patch(density, x, fill_alpha=shade, line_width=0, **kwargs_shade)\n\n\ndef cat_hist(val, shade, ax, **kwargs_shade):\n \"\"\"Auxiliary function to plot discrete-violinplots.\"\"\"\n bins = get_bins(val)\n _, binned_d, _ = histogram(val, bins=bins)\n\n bin_edges = np.linspace(np.min(val), np.max(val), len(bins))\n centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1]\n heights = np.diff(bin_edges)\n\n lefts = -0.5 * binned_d\n\n ax.hbar(\n y=centers,\n left=lefts,\n right=-lefts,\n height=heights,\n fill_alpha=shade,\n line_alpha=shade,\n line_color=None,\n **kwargs_shade\n )\n",
"\"\"\"Matplotlib khatplot.\"\"\"\nimport warnings\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ...plot_utils import set_xticklabels\nfrom ....stats.stats_utils import histogram\n\n\ndef _plot_khat(\n hover_label,\n hover_format,\n ax,\n figsize,\n xdata,\n khats,\n rgba_c,\n kwargs,\n annotate,\n coord_labels,\n ax_labelsize,\n xt_labelsize,\n show_bins,\n linewidth,\n hlines_kwargs,\n xlabels,\n legend,\n color_mapping,\n cmap,\n color,\n n_data_points,\n bin_format,\n):\n if hover_label and mpl.get_backend() not in mpl.rcsetup.interactive_bk:\n hover_label = False\n warnings.warn(\n \"hover labels are only available with interactive backends. To switch to an \"\n \"interactive backend from ipython or jupyter, use `%matplotlib` there should be \"\n \"no need to restart the kernel. For other cases, see \"\n \"https://matplotlib.org/3.1.0/tutorials/introductory/usage.html#backends\",\n UserWarning,\n )\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize, constrained_layout=not xlabels)\n else:\n fig = ax.get_figure()\n\n sc_plot = ax.scatter(xdata, khats, c=rgba_c, **kwargs)\n\n if annotate:\n idxs = xdata[khats > 1]\n for idx in idxs:\n ax.text(\n idx,\n khats[idx],\n coord_labels[idx],\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n fontsize=0.8 * xt_labelsize,\n )\n\n xmin, xmax = ax.get_xlim()\n if show_bins:\n xmax += n_data_points / 12\n ylims1 = ax.get_ylim()\n ax.hlines([0, 0.5, 0.7, 1], xmin=xmin, xmax=xmax, linewidth=linewidth, **hlines_kwargs)\n ylims2 = ax.get_ylim()\n ymin = min(ylims1[0], ylims2[0])\n ymax = min(ylims1[1], ylims2[1])\n if show_bins:\n bin_edges = np.array([ymin, 0.5, 0.7, 1, ymax])\n bin_edges = bin_edges[(bin_edges >= ymin) & (bin_edges <= ymax)]\n _, hist, _ = histogram(khats, bin_edges)\n for idx, count in enumerate(hist):\n ax.text(\n (n_data_points - 1 + xmax) / 2,\n np.mean(bin_edges[idx : idx + 2]),\n bin_format.format(count, count / n_data_points * 100),\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n ax.set_ylim(ymin, ymax)\n ax.set_xlim(xmin, xmax)\n\n ax.set_xlabel(\"Data Point\", fontsize=ax_labelsize)\n ax.set_ylabel(r\"Shape parameter k\", fontsize=ax_labelsize)\n ax.tick_params(labelsize=xt_labelsize)\n if xlabels:\n set_xticklabels(ax, coord_labels)\n fig.autofmt_xdate()\n fig.tight_layout()\n if legend:\n ncols = len(color_mapping) // 6 + 1\n for label, float_color in color_mapping.items():\n ax.scatter([], [], c=[cmap(float_color)], label=label, **kwargs)\n ax.legend(ncol=ncols, title=color)\n\n if hover_label and mpl.get_backend() in mpl.rcsetup.interactive_bk:\n _make_hover_annotation(fig, ax, sc_plot, coord_labels, rgba_c, hover_format)\n\n return ax\n\n\ndef _make_hover_annotation(fig, ax, sc_plot, coord_labels, rgba_c, hover_format):\n \"\"\"Show data point label when hovering over it with mouse.\"\"\"\n annot = ax.annotate(\n \"\",\n xy=(0, 0),\n xytext=(0, 0),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\", alpha=0.4),\n arrowprops=dict(arrowstyle=\"->\"),\n )\n annot.set_visible(False)\n xmid = np.mean(ax.get_xlim())\n ymid = np.mean(ax.get_ylim())\n offset = 10\n\n def update_annot(ind):\n\n idx = ind[\"ind\"][0]\n pos = sc_plot.get_offsets()[idx]\n annot_text = hover_format.format(idx, coord_labels[idx])\n annot.xy = pos\n annot.set_position(\n (-offset if pos[0] > xmid else offset, -offset if pos[1] > ymid else offset)\n )\n annot.set_text(annot_text)\n annot.get_bbox_patch().set_facecolor(rgba_c[idx])\n 
annot.set_ha(\"right\" if pos[0] > xmid else \"left\")\n annot.set_va(\"top\" if pos[1] > ymid else \"bottom\")\n\n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc_plot.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n"
] | [
[
"numpy.min",
"numpy.percentile",
"numpy.atleast_1d",
"numpy.concatenate",
"numpy.max",
"numpy.diff",
"numpy.roll"
],
[
"matplotlib.get_backend",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jopasserat/he-transformer | [
"d2865be507d2e20568ca5289513925c813aa59e6"
] | [
"examples/pyclient_mnist.py"
] | [
"import time\nimport argparse\nimport numpy as np\nimport sys\nimport os\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport he_seal_client\n\nFLAGS = None\n\n\ndef test_mnist_cnn(FLAGS):\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n batch_size = FLAGS.batch_size\n x_test_batch = mnist.test.images[:batch_size]\n y_test_batch = mnist.test.labels[:batch_size]\n\n data = x_test_batch.flatten('F')\n print('Client batch size from FLAG: ', batch_size)\n\n complex_scale_factor = 1\n if ('NGRAPH_COMPLEX_PACK' in os.environ):\n complex_scale_factor = 2\n\n print('complex_scale_factor', complex_scale_factor)\n\n # TODO: support even batch sizes\n assert (batch_size % complex_scale_factor == 0)\n\n hostname = 'localhost'\n port = 34000\n\n new_batch_size = batch_size // complex_scale_factor\n print('new_batch_size', new_batch_size)\n\n client = he_seal_client.HESealClient(hostname, port, new_batch_size, data)\n\n print('Sleeping until client is done')\n while not client.is_done():\n time.sleep(1)\n\n results = client.get_results()\n results = np.round(results, 2)\n\n y_pred_reshape = np.array(results).reshape(10, batch_size)\n with np.printoptions(precision=3, suppress=True):\n print(y_pred_reshape.T)\n\n y_pred = y_pred_reshape.argmax(axis=0)\n print('y_pred', y_pred)\n y_true = y_test_batch.argmax(axis=1)\n\n correct = np.sum(np.equal(y_pred, y_true))\n acc = correct / float(batch_size)\n print('pred size', len(y_pred))\n print('correct', correct)\n print('Accuracy (batch size', batch_size, ') =', acc * 100., '%')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/tmp/tensorflow/mnist/input_data',\n help='Directory where input data is stored')\n parser.add_argument('--batch_size', type=int, default=1, help='Batch size')\n\n FLAGS, unparsed = parser.parse_known_args()\n\n test_mnist_cnn(FLAGS)\n"
] | [
[
"numpy.printoptions",
"numpy.round",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"numpy.equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
ajweiss/tensorflow | [
"2f4d4da52f0c488417d7e917edaf1b7569b5e408",
"2f4d4da52f0c488417d7e917edaf1b7569b5e408"
] | [
"tensorflow/python/autograph/pyct/ast_util.py",
"tensorflow/contrib/ignite/python/tests/ignite_dataset_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"AST manipulation utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ast\n\nimport gast\n\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.util import tf_inspect\n\n\nclass CleanCopier(object):\n \"\"\"NodeTransformer-like visitor that copies an AST.\"\"\"\n\n def __init__(self, preserve_annos):\n super(CleanCopier, self).__init__()\n self.preserve_annos = preserve_annos\n\n def copy(self, node):\n \"\"\"Returns a deep copy of node (excluding some fields, see copy_clean).\"\"\"\n\n if isinstance(node, list):\n return [self.copy(n) for n in node]\n elif isinstance(node, tuple):\n return tuple(self.copy(n) for n in node)\n elif not isinstance(node, (gast.AST, ast.AST)):\n # Assuming everything that's not an AST, list or tuple is a value type\n # and may simply be assigned.\n return node\n\n assert isinstance(node, (gast.AST, ast.AST))\n\n new_fields = {}\n for f in node._fields:\n if not f.startswith('__') and hasattr(node, f):\n new_fields[f] = self.copy(getattr(node, f))\n new_node = type(node)(**new_fields)\n\n if self.preserve_annos:\n for k in self.preserve_annos:\n anno.copyanno(node, new_node, k)\n return new_node\n\n\ndef copy_clean(node, preserve_annos=None):\n \"\"\"Creates a deep copy of an AST.\n\n The copy will not include fields that are prefixed by '__', with the\n exception of user-specified annotations.\n\n Args:\n node: ast.AST\n preserve_annos: Optional[Set[Hashable]], annotation keys to include in the\n copy\n Returns:\n ast.AST\n \"\"\"\n return CleanCopier(preserve_annos).copy(node)\n\n\nclass SymbolRenamer(gast.NodeTransformer):\n \"\"\"Transformer that can rename symbols to a simple names.\"\"\"\n\n def __init__(self, name_map):\n self.name_map = name_map\n\n def _process(self, node):\n qn = anno.getanno(node, anno.Basic.QN)\n if qn in self.name_map:\n new_node = gast.Name(str(self.name_map[qn]), node.ctx, None)\n # All annotations get carried over.\n for k in anno.keys(node):\n anno.copyanno(node, new_node, k)\n return new_node\n return self.generic_visit(node)\n\n def visit_Name(self, node):\n return self._process(node)\n\n def visit_Attribute(self, node):\n if anno.hasanno(node, anno.Basic.QN):\n return self._process(node)\n # Attributes of dynamic objects will not have a QN.\n return self.generic_visit(node)\n\n\ndef rename_symbols(node, name_map):\n \"\"\"Renames symbols in an AST. 
Requires qual_names annotations.\"\"\"\n renamer = SymbolRenamer(name_map)\n if isinstance(node, list):\n return [renamer.visit(n) for n in node]\n elif isinstance(node, tuple):\n return tuple(renamer.visit(n) for n in node)\n return renamer.visit(node)\n\n\ndef keywords_to_dict(keywords):\n \"\"\"Converts a list of ast.keyword objects to a dict.\"\"\"\n keys = []\n values = []\n for kw in keywords:\n keys.append(gast.Str(kw.arg))\n values.append(kw.value)\n return gast.Dict(keys=keys, values=values)\n\n\nclass PatternMatcher(gast.NodeVisitor):\n \"\"\"Matches a node against a pattern represented by a node.\"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n self.pattern_stack = []\n self.matches = True\n\n def compare_and_visit(self, node, pattern):\n self.pattern_stack.append(self.pattern)\n self.pattern = pattern\n self.generic_visit(node)\n self.pattern = self.pattern_stack.pop()\n\n def no_match(self):\n self.matches = False\n return False\n\n def is_wildcard(self, p):\n if isinstance(p, (list, tuple)) and len(p) == 1:\n p, = p\n if isinstance(p, gast.Name) and p.id == '_':\n return True\n if p == '_':\n return True\n return False\n\n def generic_visit(self, node):\n if not self.matches:\n return\n\n pattern = self.pattern\n for f in node._fields:\n if f.startswith('__'):\n continue\n\n if not hasattr(node, f):\n if hasattr(pattern, f) and getattr(pattern, f):\n return self.no_match()\n else:\n continue\n if not hasattr(pattern, f):\n return self.no_match()\n\n v = getattr(node, f)\n p = getattr(pattern, f)\n\n if self.is_wildcard(p):\n continue\n if isinstance(v, (list, tuple)):\n if not isinstance(p, (list, tuple)) or len(v) != len(p):\n return self.no_match()\n for v_item, p_item in zip(v, p):\n self.compare_and_visit(v_item, p_item)\n elif isinstance(v, (gast.AST, ast.AST)):\n if not isinstance(v, type(p)) and not isinstance(p, type(v)):\n return self.no_match()\n self.compare_and_visit(v, p)\n else:\n # Assume everything else is a value type.\n if v != p:\n return self.no_match()\n\n\ndef matches(node, pattern):\n \"\"\"Basic pattern matcher for AST.\n\n The pattern may contain wildcards represented by the symbol '_'. A node\n matches a pattern if for every node in the tree, either there is a node of\n the same type in pattern, or a Name node with id='_'.\n\n Args:\n node: ast.AST\n pattern: ast.AST\n Returns:\n bool\n \"\"\"\n if isinstance(pattern, str):\n pattern, = parser.parse_str(pattern).body\n\n matcher = PatternMatcher(pattern)\n matcher.visit(node)\n return matcher.matches\n\n\n# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.\ndef apply_to_single_assignments(targets, values, apply_fn):\n \"\"\"Applies a function to each individual assignment.\n\n This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.\n It tries to break down the unpacking if possible. 
In effect, it has the same\n effect as passing the assigned values in SSA form to apply_fn.\n\n Examples:\n\n The following will result in apply_fn(a, c), apply_fn(b, d):\n\n a, b = c, d\n\n The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):\n\n a, b = c\n\n The following will result in apply_fn(a, (b, c)):\n\n a = b, c\n\n It uses the visitor pattern to allow subclasses to process single\n assignments individually.\n\n Args:\n targets: Union[List[ast.AST, ...], Tuple[ast.AST, ...], ast.AST, should be\n used with the targets field of an ast.Assign node\n values: ast.AST\n apply_fn: Callable[[ast.AST, ast.AST], None], called with the\n respective nodes of each single assignment\n \"\"\"\n if not isinstance(targets, (list, tuple)):\n targets = (targets,)\n for target in targets:\n if isinstance(target, (gast.Tuple, gast.List)):\n for i in range(len(target.elts)):\n target_el = target.elts[i]\n if isinstance(values, (gast.Tuple, gast.List)):\n value_el = values.elts[i]\n else:\n idx = parser.parse_expression(str(i))\n value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())\n apply_to_single_assignments(target_el, value_el, apply_fn)\n else:\n apply_fn(target, values)\n\n\ndef parallel_walk(node, other):\n \"\"\"Walks two ASTs in parallel.\n\n The two trees must have identical structure.\n\n Args:\n node: Union[ast.AST, Iterable[ast.AST]]\n other: Union[ast.AST, Iterable[ast.AST]]\n Yields:\n Tuple[ast.AST, ast.AST]\n Raises:\n ValueError: if the two trees don't have identical structure.\n \"\"\"\n if isinstance(node, (list, tuple)):\n node_stack = list(node)\n else:\n node_stack = [node]\n\n if isinstance(other, (list, tuple)):\n other_stack = list(other)\n else:\n other_stack = [other]\n\n while node_stack and other_stack:\n assert len(node_stack) == len(other_stack)\n n = node_stack.pop()\n o = other_stack.pop()\n\n if (not isinstance(n, (ast.AST, gast.AST)) or\n not isinstance(o, (ast.AST, gast.AST)) or\n n.__class__.__name__ != o.__class__.__name__):\n raise ValueError('inconsistent nodes: {} and {}'.format(n, o))\n\n yield n, o\n\n for f in n._fields:\n n_child = getattr(n, f, None)\n o_child = getattr(o, f, None)\n if f.startswith('__') or n_child is None or o_child is None:\n continue\n\n if isinstance(n_child, (list, tuple)):\n if (not isinstance(o_child, (list, tuple)) or\n len(n_child) != len(o_child)):\n raise ValueError(\n 'inconsistent values for field {}: {} and {}'.format(\n f, n_child, o_child))\n node_stack.extend(n_child)\n other_stack.extend(o_child)\n\n elif isinstance(n_child, (gast.AST, ast.AST)):\n node_stack.append(n_child)\n other_stack.append(o_child)\n\n elif n_child != o_child:\n raise ValueError(\n 'inconsistent values for field {}: {} and {}'.format(\n f, n_child, o_child))\n\n\nclass FunctionDefMatcher(gast.NodeVisitor):\n \"\"\"Finds nodes that match a given function's signature.\"\"\"\n\n def __init__(self, fn):\n self.fn = fn\n self.matching_nodes = []\n\n def _arg_name(self, node):\n if node is None:\n return None\n if isinstance(node, gast.Name):\n return node.id\n assert isinstance(node, str)\n return node\n\n def _argspec_matches(self, node):\n arg_spec = tf_inspect.getfullargspec(self.fn)\n\n node_args = tuple(self._arg_name(arg) for arg in node.args.args)\n if node_args != tuple(arg_spec.args):\n return False\n\n if arg_spec.varargs != self._arg_name(node.args.vararg):\n return False\n\n if arg_spec.varkw != self._arg_name(node.args.kwarg):\n return False\n\n node_kwonlyargs = tuple(self._arg_name(arg) for arg in 
node.args.kwonlyargs)\n if node_kwonlyargs != tuple(arg_spec.kwonlyargs):\n return False\n\n return True\n\n def _argspec_compatible(self, node):\n arg_spec = tf_inspect.getfullargspec(self.fn)\n\n node_args = tuple(self._arg_name(arg) for arg in node.args.args)\n if len(node_args) != len(arg_spec.args) and node.args.vararg is None:\n return False\n\n if arg_spec.varargs is not None and node.args.vararg is None:\n return False\n\n if arg_spec.varkw is not None and node.args.kwarg is None:\n return False\n\n node_kwonlyargs = tuple(self._arg_name(arg) for arg in node.args.kwonlyargs)\n if (len(node_kwonlyargs) != len(arg_spec.kwonlyargs) and\n node.args.kwarg is None):\n return False\n\n return True\n\n def visit_Lambda(self, node):\n self.generic_visit(node)\n\n if self.fn.__name__ != '<lambda>':\n return\n if not self._argspec_matches(node):\n return\n\n self.matching_nodes.append(node)\n\n def visit_FunctionDef(self, node):\n self.generic_visit(node)\n\n if self.fn.__name__ != node.name:\n return\n\n # Decorators have the ability to modify a function's signature. They usually\n # claim that the result is indistinguishable from the original function,\n # but it's very difficult to fool this test. As a consequence, we relax the\n # verification and just check that the arguments are compatible.\n if node.decorator_list:\n if not self._argspec_compatible(node):\n return\n else:\n if not self._argspec_matches(node):\n return\n\n self.matching_nodes.append(node)\n\n\ndef find_matching_definitions(node, f):\n matcher = FunctionDefMatcher(f)\n matcher.visit(node)\n return tuple(matcher.matching_nodes)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n# ==============================================================================\n\"\"\"Tests for IgniteDataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.contrib.ignite import IgniteDataset\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import test\n\n\nclass IgniteDatasetTest(test.TestCase):\n \"\"\"The Apache Ignite servers have to setup before the test and tear down\n\n after the test manually. The docker engine has to be installed.\n\n To setup Apache Ignite servers:\n $ bash start_ignite.sh\n\n To tear down Apache Ignite servers:\n $ bash stop_ignite.sh\n \"\"\"\n\n def test_ignite_dataset_with_plain_client(self):\n \"\"\"Test Ignite Dataset with plain client.\n\n \"\"\"\n self._clear_env()\n ds = IgniteDataset(cache_name=\"SQL_PUBLIC_TEST_CACHE\", port=42300)\n self._check_dataset(ds)\n\n def _clear_env(self):\n \"\"\"Clears environment variables used by Ignite Dataset.\n\n \"\"\"\n if \"IGNITE_DATASET_USERNAME\" in os.environ:\n del os.environ[\"IGNITE_DATASET_USERNAME\"]\n if \"IGNITE_DATASET_PASSWORD\" in os.environ:\n del os.environ[\"IGNITE_DATASET_PASSWORD\"]\n if \"IGNITE_DATASET_CERTFILE\" in os.environ:\n del os.environ[\"IGNITE_DATASET_CERTFILE\"]\n if \"IGNITE_DATASET_CERT_PASSWORD\" in os.environ:\n del os.environ[\"IGNITE_DATASET_CERT_PASSWORD\"]\n\n def _check_dataset(self, dataset):\n \"\"\"Checks that dataset provides correct data.\"\"\"\n self.assertEqual(dtypes.int64, dataset.output_types[\"key\"])\n self.assertEqual(dtypes.string, dataset.output_types[\"val\"][\"NAME\"])\n self.assertEqual(dtypes.int64, dataset.output_types[\"val\"][\"VAL\"])\n\n it = dataset_ops.make_one_shot_iterator(dataset)\n ne = it.get_next()\n\n with session.Session() as sess:\n rows = [sess.run(ne), sess.run(ne), sess.run(ne)]\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(ne)\n\n self.assertEqual({\"key\": 1, \"val\": {\"NAME\": b\"TEST1\", \"VAL\": 42}}, rows[0])\n self.assertEqual({\"key\": 2, \"val\": {\"NAME\": b\"TEST2\", \"VAL\": 43}}, rows[1])\n self.assertEqual({\"key\": 3, \"val\": {\"NAME\": b\"TEST3\", \"VAL\": 44}}, rows[2])\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.autograph.pyct.anno.keys",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.autograph.pyct.anno.hasanno",
"tensorflow.python.autograph.pyct.anno.copyanno",
"tensorflow.python.autograph.pyct.anno.getanno",
"tensorflow.python.autograph.pyct.parser.parse_str"
],
[
"tensorflow.python.client.session.Session",
"tensorflow.python.platform.test.main",
"tensorflow.contrib.ignite.IgniteDataset",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
}
] |
ivis-kuwata/albert | [
"bd336ba2d324bb35b9d09599da3d635895bac379"
] | [
"export_to_tfhub.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Exports a minimal TF-Hub module for ALBERT models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nfrom absl import app\nfrom absl import flags\nfrom albert import modeling\nimport tensorflow.compat.v1 as tf\nimport tensorflow_hub as hub\n\nflags.DEFINE_string(\n \"albert_directory\", None,\n \"The config json file corresponding to the pre-trained ALBERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\n \"checkpoint_name\", \"model.ckpt-best\",\n \"Name of the checkpoint under albert_directory to be exported.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_bool(\n \"use_einsum\", True,\n \"Whether to use tf.einsum or tf.reshape+tf.matmul for dense layers. Must \"\n \"be set to False for TFLite compatibility.\")\n\nflags.DEFINE_string(\"export_path\", None, \"Path to the output TF-Hub module.\")\n\nFLAGS = flags.FLAGS\n\n\ndef gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor\n\n\ndef get_mlm_logits(model, albert_config, mlm_positions):\n \"\"\"From run_pretraining.py.\"\"\"\n input_tensor = gather_indexes(model.get_sequence_output(), mlm_positions)\n with tf.variable_scope(\"cls/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=albert_config.embedding_size,\n activation=modeling.get_activation(albert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n albert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[albert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(\n input_tensor, model.get_embedding_table(), transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n return logits\n\n\ndef get_sop_log_probs(model, albert_config):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n input_tensor = 
model.get_pooled_output()\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, albert_config.hidden_size],\n initializer=modeling.create_initializer(\n albert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n return log_probs\n\n\ndef module_fn(is_training):\n \"\"\"Module function.\"\"\"\n input_ids = tf.placeholder(tf.int32, [None, None], \"input_ids\")\n input_mask = tf.placeholder(tf.int32, [None, None], \"input_mask\")\n segment_ids = tf.placeholder(tf.int32, [None, None], \"segment_ids\")\n mlm_positions = tf.placeholder(tf.int32, [None, None], \"mlm_positions\")\n\n albert_config_path = os.path.join(\n FLAGS.albert_directory, \"albert_config.json\")\n albert_config = modeling.AlbertConfig.from_json_file(albert_config_path)\n model = modeling.AlbertModel(\n config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=False,\n use_einsum=FLAGS.use_einsum)\n\n mlm_logits = get_mlm_logits(model, albert_config, mlm_positions)\n sop_log_probs = get_sop_log_probs(model, albert_config)\n\n vocab_model_path = os.path.join(FLAGS.albert_directory, \"30k-clean.model\")\n vocab_file_path = os.path.join(FLAGS.albert_directory, \"30k-clean.vocab\")\n\n config_file = tf.constant(\n value=albert_config_path, dtype=tf.string, name=\"config_file\")\n vocab_model = tf.constant(\n value=vocab_model_path, dtype=tf.string, name=\"vocab_model\")\n # This is only for visualization purpose.\n vocab_file = tf.constant(\n value=vocab_file_path, dtype=tf.string, name=\"vocab_file\")\n\n # By adding `config_file, vocab_model and vocab_file`\n # to the ASSET_FILEPATHS collection, TF-Hub will\n # rewrite this tensor so that this asset is portable.\n tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, vocab_model)\n tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, vocab_file)\n\n hub.add_signature(\n name=\"tokens\",\n inputs=dict(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids),\n outputs=dict(\n sequence_output=model.get_sequence_output(),\n pooled_output=model.get_pooled_output()))\n\n hub.add_signature(\n name=\"sop\",\n inputs=dict(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids),\n outputs=dict(\n sequence_output=model.get_sequence_output(),\n pooled_output=model.get_pooled_output(),\n sop_log_probs=sop_log_probs))\n\n hub.add_signature(\n name=\"mlm\",\n inputs=dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n mlm_positions=mlm_positions),\n outputs=dict(\n sequence_output=model.get_sequence_output(),\n pooled_output=model.get_pooled_output(),\n mlm_logits=mlm_logits))\n\n hub.add_signature(\n name=\"tokenization_info\",\n inputs={},\n outputs=dict(\n vocab_file=vocab_model,\n do_lower_case=tf.constant(FLAGS.do_lower_case)))\n\n\ndef main(_):\n tags_and_args = []\n for is_training in (True, False):\n tags = set()\n if is_training:\n tags.add(\"train\")\n tags_and_args.append((tags, dict(is_training=is_training)))\n 
spec = hub.create_module_spec(module_fn, tags_and_args=tags_and_args)\n checkpoint_path = os.path.join(FLAGS.albert_directory, FLAGS.checkpoint_name)\n tf.logging.info(\"Using checkpoint {}\".format(checkpoint_path))\n spec.export(FLAGS.export_path, checkpoint_path=checkpoint_path)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"albert_directory\")\n flags.mark_flag_as_required(\"export_path\")\n app.run(main)\n"
] | [
[
"tensorflow.compat.v1.nn.log_softmax",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.add_to_collection",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.range",
"tensorflow.compat.v1.nn.bias_add",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
acrucetta/Chicago_COVI_WebApp | [
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"d73a6b7f68d7bab25d134d3f85c6b63a86c206c5",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913",
"a37c9f492a20dcd625f8647067394617988de913"
] | [
".venv/lib/python3.8/site-packages/pandas/core/reshape/tile.py",
".venv/lib/python3.8/site-packages/pandas/tests/extension/test_categorical.py",
".venv/lib/python3.8/site-packages/pandas/tests/indexes/multi/test_formats.py",
".venv/lib/python3.8/site-packages/pandas/plotting/_matplotlib/style.py",
"env/lib/python3.8/site-packages/pandas/tests/series/methods/test_quantile.py",
"env/lib/python3.8/site-packages/numpy/f2py/tests/test_mixed.py",
".venv/lib/python3.8/site-packages/pandas/tests/indexing/interval/test_interval_new.py",
"env/lib/python3.8/site-packages/numpy/lib/histograms.py",
".venv/lib/python3.8/site-packages/pandas/core/indexes/period.py",
"env/lib/python3.8/site-packages/pandas/tests/series/test_period.py",
"env/lib/python3.8/site-packages/pandas/tests/series/conftest.py",
".venv/lib/python3.8/site-packages/pandas/tests/util/test_hashing.py",
".venv/lib/python3.8/site-packages/pandas/io/json/_json.py",
"env/lib/python3.8/site-packages/numpy/distutils/log.py",
"env/lib/python3.8/site-packages/pandas/tests/indexes/datetimes/test_constructors.py",
"env/lib/python3.8/site-packages/numpy/core/tests/test_numeric.py",
".venv/lib/python3.8/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py",
"env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_round.py",
"env/lib/python3.8/site-packages/numpy/matrixlib/tests/test_interaction.py",
".venv/lib/python3.8/site-packages/pandas/core/window/indexers.py",
"env/lib/python3.8/site-packages/pandas/tests/test_register_accessor.py",
".venv/lib/python3.8/site-packages/pandas/tests/computation/test_eval.py",
".venv/lib/python3.8/site-packages/pandas/tests/io/parser/test_textreader.py",
"env/lib/python3.8/site-packages/pandas/tests/groupby/test_categorical.py",
".venv/lib/python3.8/site-packages/numpy/distutils/fcompiler/__init__.py",
"env/lib/python3.8/site-packages/pandas/tests/plotting/test_frame.py",
"env/lib/python3.8/site-packages/pandas/tests/indexes/datetimes/test_shift.py",
"env/lib/python3.8/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py",
".venv/lib/python3.8/site-packages/pandas/tests/io/excel/test_xlrd.py",
"env/lib/python3.8/site-packages/pandas/tests/series/methods/test_count.py",
"env/lib/python3.8/site-packages/pandas/tests/scalar/timestamp/test_timestamp.py",
"env/lib/python3.8/site-packages/numpy/random/__init__.py",
"env/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py",
".venv/lib/python3.8/site-packages/pandas/io/formats/printing.py"
] | [
"\"\"\"\nQuantilization functions and related stuff\n\"\"\"\nimport numpy as np\n\nfrom pandas._libs import Timedelta, Timestamp\nfrom pandas._libs.lib import infer_dtype\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n ensure_int64,\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_extension_array_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_scalar,\n is_timedelta64_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import Categorical, Index, IntervalIndex, to_datetime, to_timedelta\nimport pandas.core.algorithms as algos\nimport pandas.core.nanops as nanops\n\n\ndef cut(\n x,\n bins,\n right: bool = True,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n include_lowest: bool = False,\n duplicates: str = \"raise\",\n ordered: bool = True,\n):\n \"\"\"\n Bin values into discrete intervals.\n\n Use `cut` when you need to segment and sort data values into bins. This\n function is also useful for going from a continuous variable to a\n categorical variable. For example, `cut` could convert ages to groups of\n age ranges. Supports binning into an equal number of bins, or a\n pre-specified array of bins.\n\n Parameters\n ----------\n x : array-like\n The input array to be binned. Must be 1-dimensional.\n bins : int, sequence of scalars, or IntervalIndex\n The criteria to bin by.\n\n * int : Defines the number of equal-width bins in the range of `x`. The\n range of `x` is extended by .1% on each side to include the minimum\n and maximum values of `x`.\n * sequence of scalars : Defines the bin edges allowing for non-uniform\n width. No extension of the range of `x` is done.\n * IntervalIndex : Defines the exact bins to be used. Note that\n IntervalIndex for `bins` must be non-overlapping.\n\n right : bool, default True\n Indicates whether `bins` includes the rightmost edge or not. If\n ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``\n indicate (1,2], (2,3], (3,4]. This argument is ignored when\n `bins` is an IntervalIndex.\n labels : array or False, default None\n Specifies the labels for the returned bins. Must be the same length as\n the resulting bins. If False, returns only integer indicators of the\n bins. This affects the type of the output container (see below).\n This argument is ignored when `bins` is an IntervalIndex. If True,\n raises an error. When `ordered=False`, labels must be provided.\n retbins : bool, default False\n Whether to return the bins or not. Useful when bins is provided\n as a scalar.\n precision : int, default 3\n The precision at which to store and display the bins labels.\n include_lowest : bool, default False\n Whether the first interval should be left-inclusive or not.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n\n .. versionadded:: 0.23.0\n ordered : bool, default True\n Whether the labels are ordered or not. Applies to returned types\n Categorical and Series (with Categorical dtype). If True,\n the resulting categorical will be ordered. If False, the resulting\n categorical will be unordered (labels must be provided).\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n out : Categorical, Series, or ndarray\n An array-like object representing the respective bin for each value\n of `x`. 
The type depends on the value of `labels`.\n\n * True (default) : returns a Series for Series `x` or a\n Categorical for all other inputs. The values stored within\n are Interval dtype.\n\n * sequence of scalars : returns a Series for Series `x` or a\n Categorical for all other inputs. The values stored within\n are whatever the type in the sequence is.\n\n * False : returns an ndarray of integers.\n\n bins : numpy.ndarray or IntervalIndex.\n The computed or specified bins. Only returned when `retbins=True`.\n For scalar or sequence `bins`, this is an ndarray with the computed\n bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For\n an IntervalIndex `bins`, this is equal to `bins`.\n\n See Also\n --------\n qcut : Discretize variable into equal-sized buckets based on rank\n or based on sample quantiles.\n Categorical : Array type for storing data that come from a\n fixed set of values.\n Series : One-dimensional array with axis labels (including time series).\n IntervalIndex : Immutable Index implementing an ordered, sliceable set.\n\n Notes\n -----\n Any NA values will be NA in the result. Out of bounds values will be NA in\n the resulting Series or Categorical object.\n\n Examples\n --------\n Discretize into three equal-sized bins.\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)\n ... # doctest: +ELLIPSIS\n [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...\n Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)\n ... # doctest: +ELLIPSIS\n ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...\n Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...\n array([0.994, 3. , 5. , 7. ]))\n\n Discovers the same bins, but assign them specific labels. Notice that\n the returned Categorical's categories are `labels` and is ordered.\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),\n ... 3, labels=[\"bad\", \"medium\", \"good\"])\n ['bad', 'good', 'medium', 'medium', 'good', 'bad']\n Categories (3, object): ['bad' < 'medium' < 'good']\n\n ``ordered=False`` will result in unordered categories when labels are passed.\n This parameter can be used to allow non-unique labels:\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,\n ... labels=[\"B\", \"A\", \"B\"], ordered=False)\n ['B', 'B', 'A', 'A', 'B', 'B']\n Categories (2, object): ['A', 'B']\n\n ``labels=False`` implies you just want the bins back.\n\n >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)\n array([0, 1, 1, 3])\n\n Passing a Series as an input returns a Series with categorical dtype:\n\n >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> pd.cut(s, 3)\n ... # doctest: +ELLIPSIS\n a (1.992, 4.667]\n b (1.992, 4.667]\n c (4.667, 7.333]\n d (7.333, 10.0]\n e (7.333, 10.0]\n dtype: category\n Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...\n\n Passing a Series as an input returns a Series with mapping value.\n It is used to map numerically to intervals based on bins.\n\n >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)\n ... # doctest: +ELLIPSIS\n (a 1.0\n b 2.0\n c 3.0\n d 4.0\n e NaN\n dtype: float64,\n array([ 0, 2, 4, 6, 8, 10]))\n\n Use `drop` optional when bins is not unique\n\n >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,\n ... right=False, duplicates='drop')\n ... 
# doctest: +ELLIPSIS\n (a 1.0\n b 2.0\n c 3.0\n d 3.0\n e NaN\n dtype: float64,\n array([ 0, 2, 4, 6, 10]))\n\n Passing an IntervalIndex for `bins` results in those categories exactly.\n Notice that values not covered by the IntervalIndex are set to NaN. 0\n is to the left of the first bin (which is closed on the right), and 1.5\n falls between two bins.\n\n >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])\n >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)\n [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]]\n Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]\n \"\"\"\n # NOTE: this binning code is changed a bit from histogram for var(x) == 0\n\n original = x\n x = _preprocess_for_cut(x)\n x, dtype = _coerce_to_type(x)\n\n if not np.iterable(bins):\n if is_scalar(bins) and bins < 1:\n raise ValueError(\"`bins` should be a positive integer.\")\n\n try: # for array-like\n sz = x.size\n except AttributeError:\n x = np.asarray(x)\n sz = x.size\n\n if sz == 0:\n raise ValueError(\"Cannot cut empty array\")\n\n rng = (nanops.nanmin(x), nanops.nanmax(x))\n mn, mx = [mi + 0.0 for mi in rng]\n\n if np.isinf(mn) or np.isinf(mx):\n # GH 24314\n raise ValueError(\n \"cannot specify integer `bins` when input data contains infinity\"\n )\n elif mn == mx: # adjust end points before binning\n mn -= 0.001 * abs(mn) if mn != 0 else 0.001\n mx += 0.001 * abs(mx) if mx != 0 else 0.001\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n else: # adjust end points after binning\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n adj = (mx - mn) * 0.001 # 0.1% of the range\n if right:\n bins[0] -= adj\n else:\n bins[-1] += adj\n\n elif isinstance(bins, IntervalIndex):\n if bins.is_overlapping:\n raise ValueError(\"Overlapping IntervalIndex is not accepted.\")\n\n else:\n if is_datetime64tz_dtype(bins):\n bins = np.asarray(bins, dtype=DT64NS_DTYPE)\n else:\n bins = np.asarray(bins)\n bins = _convert_bin_to_numeric_type(bins, dtype)\n\n # GH 26045: cast to float64 to avoid an overflow\n if (np.diff(bins.astype(\"float64\")) < 0).any():\n raise ValueError(\"bins must increase monotonically.\")\n\n fac, bins = _bins_to_cuts(\n x,\n bins,\n right=right,\n labels=labels,\n precision=precision,\n include_lowest=include_lowest,\n dtype=dtype,\n duplicates=duplicates,\n ordered=ordered,\n )\n\n return _postprocess_for_cut(fac, bins, retbins, dtype, original)\n\n\ndef qcut(\n x,\n q,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n duplicates: str = \"raise\",\n):\n \"\"\"\n Quantile-based discretization function.\n\n Discretize variable into equal-sized buckets based on rank or based\n on sample quantiles. For example 1000 values for 10 quantiles would\n produce a Categorical object indicating quantile membership for each data point.\n\n Parameters\n ----------\n x : 1d ndarray or Series\n q : int or list-like of float\n Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately\n array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.\n labels : array or False, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, return only integer indicators of the\n bins. If True, raises an error.\n retbins : bool, optional\n Whether to return the (bins, labels) or not. 
Can be useful if bins\n is given as a scalar.\n precision : int, optional\n The precision at which to store and display the bins labels.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n\n Returns\n -------\n out : Categorical or Series or array of integers if labels is False\n The return type (Categorical or Series) depends on the input: a Series\n of type category if input is a Series else Categorical. Bins are\n represented as categories when categorical data is returned.\n bins : ndarray of floats\n Returned only if `retbins` is True.\n\n Notes\n -----\n Out of bounds values will be NA in the resulting Categorical object\n\n Examples\n --------\n >>> pd.qcut(range(5), 4)\n ... # doctest: +ELLIPSIS\n [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]\n Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...\n\n >>> pd.qcut(range(5), 3, labels=[\"good\", \"medium\", \"bad\"])\n ... # doctest: +SKIP\n [good, good, medium, bad, bad]\n Categories (3, object): [good < medium < bad]\n\n >>> pd.qcut(range(5), 4, labels=False)\n array([0, 0, 1, 2, 3])\n \"\"\"\n original = x\n x = _preprocess_for_cut(x)\n x, dtype = _coerce_to_type(x)\n\n if is_integer(q):\n quantiles = np.linspace(0, 1, q + 1)\n else:\n quantiles = q\n bins = algos.quantile(x, quantiles)\n fac, bins = _bins_to_cuts(\n x,\n bins,\n labels=labels,\n precision=precision,\n include_lowest=True,\n dtype=dtype,\n duplicates=duplicates,\n )\n\n return _postprocess_for_cut(fac, bins, retbins, dtype, original)\n\n\ndef _bins_to_cuts(\n x,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n dtype=None,\n duplicates: str = \"raise\",\n ordered: bool = True,\n):\n if not ordered and not labels:\n raise ValueError(\"'labels' must be provided if 'ordered = False'\")\n\n if duplicates not in [\"raise\", \"drop\"]:\n raise ValueError(\n \"invalid value for 'duplicates' parameter, valid options are: raise, drop\"\n )\n\n if isinstance(bins, IntervalIndex):\n # we have a fast-path here\n ids = bins.get_indexer(x)\n result = Categorical.from_codes(ids, categories=bins, ordered=True)\n return result, bins\n\n unique_bins = algos.unique(bins)\n if len(unique_bins) < len(bins) and len(bins) != 2:\n if duplicates == \"raise\":\n raise ValueError(\n f\"Bin edges must be unique: {repr(bins)}.\\n\"\n f\"You can drop duplicate edges by setting the 'duplicates' kwarg\"\n )\n else:\n bins = unique_bins\n\n side = \"left\" if right else \"right\"\n ids = ensure_int64(bins.searchsorted(x, side=side))\n\n if include_lowest:\n ids[x == bins[0]] = 1\n\n na_mask = isna(x) | (ids == len(bins)) | (ids == 0)\n has_nas = na_mask.any()\n\n if labels is not False:\n if not (labels is None or is_list_like(labels)):\n raise ValueError(\n \"Bin labels must either be False, None or passed in as a \"\n \"list-like argument\"\n )\n\n elif labels is None:\n labels = _format_labels(\n bins, precision, right=right, include_lowest=include_lowest, dtype=dtype\n )\n elif ordered and len(set(labels)) != len(labels):\n raise ValueError(\n \"labels must be unique if ordered=True; pass ordered=False for duplicate labels\" # noqa\n )\n else:\n if len(labels) != len(bins) - 1:\n raise ValueError(\n \"Bin labels must be one fewer than the number of bin edges\"\n )\n if not is_categorical_dtype(labels):\n labels = Categorical(\n labels,\n categories=labels if len(set(labels)) == len(labels) else None,\n ordered=ordered,\n )\n # TODO: handle 
mismatch between categorical label order and pandas.cut order.\n np.putmask(ids, na_mask, 0)\n result = algos.take_nd(labels, ids - 1)\n\n else:\n result = ids - 1\n if has_nas:\n result = result.astype(np.float64)\n np.putmask(result, na_mask, np.nan)\n\n return result, bins\n\n\ndef _coerce_to_type(x):\n \"\"\"\n if the passed data is of datetime/timedelta, bool or nullable int type,\n this method converts it to numeric so that cut or qcut method can\n handle it\n \"\"\"\n dtype = None\n\n if is_datetime64tz_dtype(x.dtype):\n dtype = x.dtype\n elif is_datetime64_dtype(x.dtype):\n x = to_datetime(x)\n dtype = np.dtype(\"datetime64[ns]\")\n elif is_timedelta64_dtype(x.dtype):\n x = to_timedelta(x)\n dtype = np.dtype(\"timedelta64[ns]\")\n elif is_bool_dtype(x.dtype):\n # GH 20303\n x = x.astype(np.int64)\n # To support cut and qcut for IntegerArray we convert to float dtype.\n # Will properly support in the future.\n # https://github.com/pandas-dev/pandas/pull/31290\n # https://github.com/pandas-dev/pandas/issues/31389\n elif is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype):\n x = x.to_numpy(dtype=np.float64, na_value=np.nan)\n\n if dtype is not None:\n # GH 19768: force NaT to NaN during integer conversion\n x = np.where(x.notna(), x.view(np.int64), np.nan)\n\n return x, dtype\n\n\ndef _convert_bin_to_numeric_type(bins, dtype):\n \"\"\"\n if the passed bin is of datetime/timedelta type,\n this method converts it to integer\n\n Parameters\n ----------\n bins : list-like of bins\n dtype : dtype of data\n\n Raises\n ------\n ValueError if bins are not of a compat dtype to dtype\n \"\"\"\n bins_dtype = infer_dtype(bins, skipna=False)\n if is_timedelta64_dtype(dtype):\n if bins_dtype in [\"timedelta\", \"timedelta64\"]:\n bins = to_timedelta(bins).view(np.int64)\n else:\n raise ValueError(\"bins must be of timedelta64 dtype\")\n elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):\n if bins_dtype in [\"datetime\", \"datetime64\"]:\n bins = to_datetime(bins).view(np.int64)\n else:\n raise ValueError(\"bins must be of datetime64 dtype\")\n\n return bins\n\n\ndef _convert_bin_to_datelike_type(bins, dtype):\n \"\"\"\n Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is\n datelike\n\n Parameters\n ----------\n bins : list-like of bins\n dtype : dtype of data\n\n Returns\n -------\n bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is\n datelike\n \"\"\"\n if is_datetime64tz_dtype(dtype):\n bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz)\n elif is_datetime_or_timedelta_dtype(dtype):\n bins = Index(bins.astype(np.int64), dtype=dtype)\n return bins\n\n\ndef _format_labels(\n bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None\n):\n \"\"\" based on the dtype, return our labels \"\"\"\n closed = \"right\" if right else \"left\"\n\n if is_datetime64tz_dtype(dtype):\n formatter = lambda x: Timestamp(x, tz=dtype.tz)\n adjust = lambda x: x - Timedelta(\"1ns\")\n elif is_datetime64_dtype(dtype):\n formatter = Timestamp\n adjust = lambda x: x - Timedelta(\"1ns\")\n elif is_timedelta64_dtype(dtype):\n formatter = Timedelta\n adjust = lambda x: x - Timedelta(\"1ns\")\n else:\n precision = _infer_precision(precision, bins)\n formatter = lambda x: _round_frac(x, precision)\n adjust = lambda x: x - 10 ** (-precision)\n\n breaks = [formatter(b) for b in bins]\n if right and include_lowest:\n # adjust lhs of first interval by precision to account for being right closed\n breaks[0] = 
adjust(breaks[0])\n\n return IntervalIndex.from_breaks(breaks, closed=closed)\n\n\ndef _preprocess_for_cut(x):\n \"\"\"\n handles preprocessing for cut where we convert passed\n input to array, strip the index information and store it\n separately\n \"\"\"\n # Check that the passed array is a Pandas or Numpy object\n # We don't want to strip away a Pandas data-type here (e.g. datetimetz)\n ndim = getattr(x, \"ndim\", None)\n if ndim is None:\n x = np.asarray(x)\n if x.ndim != 1:\n raise ValueError(\"Input array must be 1 dimensional\")\n\n return x\n\n\ndef _postprocess_for_cut(fac, bins, retbins: bool, dtype, original):\n \"\"\"\n handles post processing for the cut method where\n we combine the index information if the originally passed\n datatype was a series\n \"\"\"\n if isinstance(original, ABCSeries):\n fac = original._constructor(fac, index=original.index, name=original.name)\n\n if not retbins:\n return fac\n\n bins = _convert_bin_to_datelike_type(bins, dtype)\n\n return fac, bins\n\n\ndef _round_frac(x, precision: int):\n \"\"\"\n Round the fractional part of the given number\n \"\"\"\n if not np.isfinite(x) or x == 0:\n return x\n else:\n frac, whole = np.modf(x)\n if whole == 0:\n digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision\n else:\n digits = precision\n return np.around(x, digits)\n\n\ndef _infer_precision(base_precision: int, bins) -> int:\n \"\"\"\n Infer an appropriate precision for _round_frac\n \"\"\"\n for precision in range(base_precision, 20):\n levels = [_round_frac(b, precision) for b in bins]\n if algos.unique(levels).size == bins.size:\n return precision\n return base_precision # default\n",
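The tile.py source above implements pandas.cut and pandas.qcut. As a quick orientation, a minimal usage sketch (illustrative only, not part of the dumped file; assumes pandas >= 1.1 so the `ordered` keyword in the signature above exists):

import numpy as np
import pandas as pd

ages = pd.Series([6, 19, 23, 35, 47, 61, 79])

# Fixed-edge binning with labels (pandas.cut): each value falls into one interval.
groups = pd.cut(ages, bins=[0, 18, 35, 65, 100],
                labels=["child", "young", "adult", "senior"])

# Quantile-based binning into quartiles (pandas.qcut); non-unique edges are dropped.
quartiles = pd.qcut(ages, q=4, duplicates="drop")

print(groups.value_counts())
print(quartiles.cat.categories)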
"\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n\"\"\"\nimport string\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Categorical, CategoricalIndex, Timestamp\nimport pandas._testing as tm\nfrom pandas.api.types import CategoricalDtype\nfrom pandas.tests.extension import base\n\n\ndef make_data():\n while True:\n values = np.random.choice(list(string.ascii_letters), size=100)\n # ensure we meet the requirements\n # 1. first two not null\n # 2. first and second are different\n if values[0] != values[1]:\n break\n return values\n\n\[email protected]\ndef dtype():\n return CategoricalDtype()\n\n\[email protected]\ndef data():\n \"\"\"Length-100 array for this type.\n\n * data[0] and data[1] should both be non missing\n * data[0] and data[1] should not gbe equal\n \"\"\"\n return Categorical(make_data())\n\n\[email protected]\ndef data_missing():\n \"\"\"Length 2 array with [NA, Valid]\"\"\"\n return Categorical([np.nan, \"A\"])\n\n\[email protected]\ndef data_for_sorting():\n return Categorical([\"A\", \"B\", \"C\"], categories=[\"C\", \"A\", \"B\"], ordered=True)\n\n\[email protected]\ndef data_missing_for_sorting():\n return Categorical([\"A\", None, \"B\"], categories=[\"B\", \"A\"], ordered=True)\n\n\[email protected]\ndef na_value():\n return np.nan\n\n\[email protected]\ndef data_for_grouping():\n return Categorical([\"a\", \"a\", None, None, \"b\", \"b\", \"a\", \"c\"])\n\n\nclass TestDtype(base.BaseDtypeTests):\n pass\n\n\nclass TestInterface(base.BaseInterfaceTests):\n @pytest.mark.skip(reason=\"Memory usage doesn't match\")\n def test_memory_usage(self, data):\n # Is this deliberate?\n super().test_memory_usage(data)\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n pass\n\n\nclass TestReshaping(base.BaseReshapingTests):\n def test_concat_with_reindex(self, data):\n pytest.xfail(reason=\"Deliberately upcast to object?\")\n\n\nclass TestGetitem(base.BaseGetitemTests):\n @pytest.mark.skip(reason=\"Backwards compatibility\")\n def test_getitem_scalar(self, data):\n # CategoricalDtype.type isn't \"correct\" since it should\n # be a parent of the elements (object). 
But don't want\n # to break things by changing.\n super().test_getitem_scalar(data)\n\n\nclass TestSetitem(base.BaseSetitemTests):\n pass\n\n\nclass TestMissing(base.BaseMissingTests):\n @pytest.mark.skip(reason=\"Not implemented\")\n def test_fillna_limit_pad(self, data_missing):\n super().test_fillna_limit_pad(data_missing)\n\n @pytest.mark.skip(reason=\"Not implemented\")\n def test_fillna_limit_backfill(self, data_missing):\n super().test_fillna_limit_backfill(data_missing)\n\n\nclass TestReduce(base.BaseNoReduceTests):\n pass\n\n\nclass TestMethods(base.BaseMethodsTests):\n @pytest.mark.skip(reason=\"Unobserved categories included\")\n def test_value_counts(self, all_data, dropna):\n return super().test_value_counts(all_data, dropna)\n\n def test_combine_add(self, data_repeated):\n # GH 20825\n # When adding categoricals in combine, result is a string\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 + x2)\n expected = pd.Series(\n ([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))])\n )\n self.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 + x2)\n expected = pd.Series([a + val for a in list(orig_data1)])\n self.assert_series_equal(result, expected)\n\n @pytest.mark.skip(reason=\"Not Applicable\")\n def test_fillna_length_mismatch(self, data_missing):\n super().test_fillna_length_mismatch(data_missing)\n\n def test_searchsorted(self, data_for_sorting):\n if not data_for_sorting.ordered:\n raise pytest.skip(reason=\"searchsorted requires ordered data.\")\n\n\nclass TestCasting(base.BaseCastingTests):\n @pytest.mark.parametrize(\"cls\", [Categorical, CategoricalIndex])\n @pytest.mark.parametrize(\"values\", [[1, np.nan], [Timestamp(\"2000\"), pd.NaT]])\n def test_cast_nan_to_int(self, cls, values):\n # GH 28406\n s = cls(values)\n\n msg = \"Cannot (cast|convert)\"\n with pytest.raises((ValueError, TypeError), match=msg):\n s.astype(int)\n\n @pytest.mark.parametrize(\n \"expected\",\n [\n pd.Series([\"2019\", \"2020\"], dtype=\"datetime64[ns, UTC]\"),\n pd.Series([0, 0], dtype=\"timedelta64[ns]\"),\n pd.Series([pd.Period(\"2019\"), pd.Period(\"2020\")], dtype=\"period[A-DEC]\"),\n pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=\"interval\"),\n pd.Series([1, np.nan], dtype=\"Int64\"),\n ],\n )\n def test_cast_category_to_extension_dtype(self, expected):\n # GH 28668\n result = expected.astype(\"category\").astype(expected.dtype)\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype, expected\",\n [\n (\n \"datetime64[ns]\",\n np.array([\"2015-01-01T00:00:00.000000000\"], dtype=\"datetime64[ns]\"),\n ),\n (\n \"datetime64[ns, MET]\",\n pd.DatetimeIndex(\n [pd.Timestamp(\"2015-01-01 00:00:00+0100\", tz=\"MET\")]\n ).array,\n ),\n ],\n )\n def test_consistent_casting(self, dtype, expected):\n # GH 28448\n result = pd.Categorical(\"2015-01-01\").astype(dtype)\n assert result == expected\n\n\nclass TestArithmeticOps(base.BaseArithmeticOpsTests):\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):\n # frame & scalar\n op_name = all_arithmetic_operators\n if op_name != \"__rmod__\":\n super().test_arith_frame_with_scalar(data, all_arithmetic_operators)\n else:\n pytest.skip(\"rmod never called when string is first argument\")\n\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators):\n\n op_name = all_arithmetic_operators\n if op_name != \"__rmod__\":\n 
super().test_arith_series_with_scalar(data, op_name)\n else:\n pytest.skip(\"rmod never called when string is first argument\")\n\n def test_add_series_with_extension_array(self, data):\n ser = pd.Series(data)\n with pytest.raises(TypeError, match=\"cannot perform|unsupported operand\"):\n ser + data\n\n def test_divmod_series_array(self):\n # GH 23287\n # skipping because it is not implemented\n pass\n\n def _check_divmod_op(self, s, op, other, exc=NotImplementedError):\n return super()._check_divmod_op(s, op, other, exc=TypeError)\n\n\nclass TestComparisonOps(base.BaseComparisonOpsTests):\n def _compare_other(self, s, data, op_name, other):\n op = self.get_op_from_name(op_name)\n if op_name == \"__eq__\":\n result = op(s, other)\n expected = s.combine(other, lambda x, y: x == y)\n assert (result == expected).all()\n\n elif op_name == \"__ne__\":\n result = op(s, other)\n expected = s.combine(other, lambda x, y: x != y)\n assert (result == expected).all()\n\n else:\n msg = \"Unordered Categoricals can only compare equality or not\"\n with pytest.raises(TypeError, match=msg):\n op(data, other)\n\n @pytest.mark.parametrize(\n \"categories\",\n [[\"a\", \"b\"], [0, 1], [pd.Timestamp(\"2019\"), pd.Timestamp(\"2020\")]],\n )\n def test_not_equal_with_na(self, categories):\n # https://github.com/pandas-dev/pandas/issues/32276\n c1 = Categorical.from_codes([-1, 0], categories=categories)\n c2 = Categorical.from_codes([0, 1], categories=categories)\n\n result = c1 != c2\n\n assert result.all()\n\n\nclass TestParsing(base.BaseParsingTests):\n pass\n",
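For context on what `test_not_equal_with_na` in the module above exercises, a small standalone sketch (assumes only pandas itself; codes of -1 in `Categorical.from_codes` mark missing values):

from pandas import Categorical

# -1 codes become NaN; inequality comparisons treat NaN as not equal
c1 = Categorical.from_codes([-1, 0], categories=["a", "b"])
c2 = Categorical.from_codes([0, 1], categories=["a", "b"])
print((c1 != c2).all())  # True, matching the assertion in the test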
"import warnings\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Index, MultiIndex\nimport pandas._testing as tm\n\n\ndef test_format(idx):\n idx.format()\n idx[:0].format()\n\n\ndef test_format_integer_names():\n index = MultiIndex(\n levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]\n )\n index.format(names=True)\n\n\ndef test_format_sparse_config(idx):\n warn_filters = warnings.filters\n warnings.filterwarnings(\"ignore\", category=FutureWarning, module=\".*format\")\n # GH1538\n pd.set_option(\"display.multi_sparse\", False)\n\n result = idx.format()\n assert result[1] == \"foo two\"\n\n tm.reset_display_options()\n\n warnings.filters = warn_filters\n\n\ndef test_format_sparse_display():\n index = MultiIndex(\n levels=[[0, 1], [0, 1], [0, 1], [0]],\n codes=[\n [0, 0, 0, 1, 1, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0],\n ],\n )\n\n result = index.format()\n assert result[3] == \"1 0 0 0\"\n\n\ndef test_repr_with_unicode_data():\n with pd.option_context(\"display.encoding\", \"UTF-8\"):\n d = {\"a\": [\"\\u05d0\", 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n index = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n assert \"\\\\\" not in repr(index) # we don't want unicode-escaped\n\n\ndef test_repr_roundtrip_raises():\n mi = MultiIndex.from_product([list(\"ab\"), range(3)], names=[\"first\", \"second\"])\n msg = \"Must pass both levels and codes\"\n with pytest.raises(TypeError, match=msg):\n eval(repr(mi))\n\n\ndef test_unicode_string_with_unicode():\n d = {\"a\": [\"\\u05d0\", 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n str(idx)\n\n\ndef test_repr_max_seq_item_setting(idx):\n # GH10182\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n assert \"...\" not in str(idx)\n\n\nclass TestRepr:\n def test_unicode_repr_issues(self):\n levels = [Index([\"a/\\u03c3\", \"b/\\u03c3\", \"c/\\u03c3\"]), Index([0, 1])]\n codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]\n index = MultiIndex(levels=levels, codes=codes)\n\n repr(index.levels)\n\n # FIXME: dont leave commented-out\n # NumPy bug\n # repr(index.get_level_values(1))\n\n def test_repr(self, idx):\n result = idx[:1].__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one')],\n names=['first', 'second'])\"\"\"\n assert result == expected\n\n result = idx.__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one'),\n ('foo', 'two'),\n ('bar', 'one'),\n ('baz', 'two'),\n ('qux', 'one'),\n ('qux', 'two')],\n names=['first', 'second'])\"\"\"\n assert result == expected\n\n with pd.option_context(\"display.max_seq_items\", 5):\n result = idx.__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one'),\n ('foo', 'two'),\n ...\n ('qux', 'one'),\n ('qux', 'two')],\n names=['first', 'second'], length=6)\"\"\"\n assert result == expected\n\n def test_rjust(self, narrow_multi_index):\n mi = narrow_multi_index\n result = mi[:1].__repr__()\n expected = \"\"\"\\\nMultiIndex([('a', 9, '2000-01-01 00:00:00')],\n names=['a', 'b', 'dti'])\"\"\"\n assert result == expected\n\n result = mi[::500].__repr__()\n expected = \"\"\"\\\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00'),\n ( 'a', 9, '2000-01-01 00:08:20'),\n ('abc', 10, '2000-01-01 00:16:40'),\n ('abc', 10, '2000-01-01 00:25:00')],\n names=['a', 'b', 'dti'])\"\"\"\n assert result == expected\n\n result = mi.__repr__()\n expected = \"\"\"\\\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00'),\n ( 'a', 9, '2000-01-01 
00:00:01'),\n ( 'a', 9, '2000-01-01 00:00:02'),\n ( 'a', 9, '2000-01-01 00:00:03'),\n ( 'a', 9, '2000-01-01 00:00:04'),\n ( 'a', 9, '2000-01-01 00:00:05'),\n ( 'a', 9, '2000-01-01 00:00:06'),\n ( 'a', 9, '2000-01-01 00:00:07'),\n ( 'a', 9, '2000-01-01 00:00:08'),\n ( 'a', 9, '2000-01-01 00:00:09'),\n ...\n ('abc', 10, '2000-01-01 00:33:10'),\n ('abc', 10, '2000-01-01 00:33:11'),\n ('abc', 10, '2000-01-01 00:33:12'),\n ('abc', 10, '2000-01-01 00:33:13'),\n ('abc', 10, '2000-01-01 00:33:14'),\n ('abc', 10, '2000-01-01 00:33:15'),\n ('abc', 10, '2000-01-01 00:33:16'),\n ('abc', 10, '2000-01-01 00:33:17'),\n ('abc', 10, '2000-01-01 00:33:18'),\n ('abc', 10, '2000-01-01 00:33:19')],\n names=['a', 'b', 'dti'], length=2000)\"\"\"\n assert result == expected\n\n def test_tuple_width(self, wide_multi_index):\n mi = wide_multi_index\n result = mi[:1].__repr__()\n expected = \"\"\"MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])\"\"\"\n assert result == expected\n\n result = mi[:10].__repr__()\n expected = \"\"\"\\\nMultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),\n ('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),\n ('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),\n ('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),\n ('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),\n ('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),\n ('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),\n ('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),\n ('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),\n ('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])\"\"\"\n assert result == expected\n\n result = mi.__repr__()\n expected = \"\"\"\\\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),\n ( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),\n ( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),\n ( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),\n ( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),\n ( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),\n ( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),\n ( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),\n ( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),\n ( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),\n ...\n ('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),\n ('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),\n ('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),\n ('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),\n ('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),\n ('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),\n ('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),\n ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),\n ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),\n ('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)\"\"\" # noqa\n assert result == expected\n",
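To see the repr behaviour the tests above pin down, a short standalone sketch (exact column padding and truncation thresholds depend on the pandas version, so treat the comments as approximate):

import pandas as pd

mi = pd.MultiIndex.from_product([["foo", "bar"], ["one", "two"]],
                                names=["first", "second"])
print(mi)  # MultiIndex([('foo', 'one'), ('foo', 'two'), ...], names=['first', 'second'])

with pd.option_context("display.max_seq_items", 2):
    print(mi)  # truncated with '...' and a length= suffix, as in test_repr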
"# being a bit too dynamic\nimport warnings\n\nimport matplotlib.cm as cm\nimport matplotlib.colors\nimport numpy as np\n\nfrom pandas.core.dtypes.common import is_list_like\n\nimport pandas.core.common as com\n\n\ndef _get_standard_colors(\n num_colors=None, colormap=None, color_type=\"default\", color=None\n):\n import matplotlib.pyplot as plt\n\n if color is None and colormap is not None:\n if isinstance(colormap, str):\n cmap = colormap\n colormap = cm.get_cmap(colormap)\n if colormap is None:\n raise ValueError(f\"Colormap {cmap} is not recognized\")\n colors = [colormap(num) for num in np.linspace(0, 1, num=num_colors)]\n elif color is not None:\n if colormap is not None:\n warnings.warn(\n \"'color' and 'colormap' cannot be used simultaneously. Using 'color'\"\n )\n colors = (\n list(color)\n if is_list_like(color) and not isinstance(color, dict)\n else color\n )\n else:\n if color_type == \"default\":\n # need to call list() on the result to copy so we don't\n # modify the global rcParams below\n try:\n colors = [c[\"color\"] for c in list(plt.rcParams[\"axes.prop_cycle\"])]\n except KeyError:\n colors = list(plt.rcParams.get(\"axes.color_cycle\", list(\"bgrcmyk\")))\n if isinstance(colors, str):\n colors = list(colors)\n\n colors = colors[0:num_colors]\n elif color_type == \"random\":\n\n def random_color(column):\n \"\"\" Returns a random color represented as a list of length 3\"\"\"\n # GH17525 use common._random_state to avoid resetting the seed\n rs = com.random_state(column)\n return rs.rand(3).tolist()\n\n colors = [random_color(num) for num in range(num_colors)]\n else:\n raise ValueError(\"color_type must be either 'default' or 'random'\")\n\n if isinstance(colors, str):\n conv = matplotlib.colors.ColorConverter()\n\n def _maybe_valid_colors(colors):\n try:\n [conv.to_rgba(c) for c in colors]\n return True\n except ValueError:\n return False\n\n # check whether the string can be convertible to single color\n maybe_single_color = _maybe_valid_colors([colors])\n # check whether each character can be convertible to colors\n maybe_color_cycle = _maybe_valid_colors(list(colors))\n if maybe_single_color and maybe_color_cycle and len(colors) > 1:\n hex_color = [c[\"color\"] for c in list(plt.rcParams[\"axes.prop_cycle\"])]\n colors = [hex_color[int(colors[1])]]\n elif maybe_single_color:\n colors = [colors]\n else:\n # ``colors`` is regarded as color cycle.\n # mpl will raise error any of them is invalid\n pass\n\n # Append more colors by cycling if there is not enough color.\n # Extra colors will be ignored by matplotlib if there are more colors\n # than needed and nothing needs to be done here.\n if len(colors) < num_colors:\n try:\n multiple = num_colors // len(colors) - 1\n except ZeroDivisionError:\n raise ValueError(\"Invalid color argument: ''\")\n mod = num_colors % len(colors)\n\n colors += multiple * colors\n colors += colors[:mod]\n\n return colors\n",
"import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_integer\n\nimport pandas as pd\nfrom pandas import Index, Series\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimes import Timestamp\n\n\nclass TestSeriesQuantile:\n def test_quantile(self, datetime_series):\n\n q = datetime_series.quantile(0.1)\n assert q == np.percentile(datetime_series.dropna(), 10)\n\n q = datetime_series.quantile(0.9)\n assert q == np.percentile(datetime_series.dropna(), 90)\n\n # object dtype\n q = Series(datetime_series, dtype=object).quantile(0.9)\n assert q == np.percentile(datetime_series.dropna(), 90)\n\n # datetime64[ns] dtype\n dts = datetime_series.index.to_series()\n q = dts.quantile(0.2)\n assert q == Timestamp(\"2000-01-10 19:12:00\")\n\n # timedelta64[ns] dtype\n tds = dts.diff()\n q = tds.quantile(0.25)\n assert q == pd.to_timedelta(\"24:00:00\")\n\n # GH7661\n result = Series([np.timedelta64(\"NaT\")]).sum()\n assert result == pd.Timedelta(0)\n\n msg = \"percentiles should all be in the interval \\\\[0, 1\\\\]\"\n for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:\n with pytest.raises(ValueError, match=msg):\n datetime_series.quantile(invalid)\n\n def test_quantile_multi(self, datetime_series):\n\n qs = [0.1, 0.9]\n result = datetime_series.quantile(qs)\n expected = pd.Series(\n [\n np.percentile(datetime_series.dropna(), 10),\n np.percentile(datetime_series.dropna(), 90),\n ],\n index=qs,\n name=datetime_series.name,\n )\n tm.assert_series_equal(result, expected)\n\n dts = datetime_series.index.to_series()\n dts.name = \"xxx\"\n result = dts.quantile((0.2, 0.2))\n expected = Series(\n [Timestamp(\"2000-01-10 19:12:00\"), Timestamp(\"2000-01-10 19:12:00\")],\n index=[0.2, 0.2],\n name=\"xxx\",\n )\n tm.assert_series_equal(result, expected)\n\n result = datetime_series.quantile([])\n expected = pd.Series(\n [], name=datetime_series.name, index=Index([], dtype=float), dtype=\"float64\"\n )\n tm.assert_series_equal(result, expected)\n\n def test_quantile_interpolation(self, datetime_series):\n # see gh-10174\n\n # interpolation = linear (default case)\n q = datetime_series.quantile(0.1, interpolation=\"linear\")\n assert q == np.percentile(datetime_series.dropna(), 10)\n q1 = datetime_series.quantile(0.1)\n assert q1 == np.percentile(datetime_series.dropna(), 10)\n\n # test with and without interpolation keyword\n assert q == q1\n\n def test_quantile_interpolation_dtype(self):\n # GH #10174\n\n # interpolation = linear (default case)\n q = pd.Series([1, 3, 4]).quantile(0.5, interpolation=\"lower\")\n assert q == np.percentile(np.array([1, 3, 4]), 50)\n assert is_integer(q)\n\n q = pd.Series([1, 3, 4]).quantile(0.5, interpolation=\"higher\")\n assert q == np.percentile(np.array([1, 3, 4]), 50)\n assert is_integer(q)\n\n def test_quantile_nan(self):\n\n # GH 13098\n s = pd.Series([1, 2, 3, 4, np.nan])\n result = s.quantile(0.5)\n expected = 2.5\n assert result == expected\n\n # all nan/empty\n s1 = Series([], dtype=object)\n cases = [s1, Series([np.nan, np.nan])]\n\n for s in cases:\n res = s.quantile(0.5)\n assert np.isnan(res)\n\n res = s.quantile([0.5])\n tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))\n\n res = s.quantile([0.2, 0.3])\n tm.assert_series_equal(res, pd.Series([np.nan, np.nan], index=[0.2, 0.3]))\n\n @pytest.mark.parametrize(\n \"case\",\n [\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n ],\n [\n pd.Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-02\", 
tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-03\", tz=\"US/Eastern\"),\n ],\n [pd.Timedelta(\"1 days\"), pd.Timedelta(\"2 days\"), pd.Timedelta(\"3 days\")],\n # NaT\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.NaT,\n ],\n [\n pd.Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-03\", tz=\"US/Eastern\"),\n pd.NaT,\n ],\n [\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"3 days\"),\n pd.NaT,\n ],\n ],\n )\n def test_quantile_box(self, case):\n s = pd.Series(case, name=\"XXX\")\n res = s.quantile(0.5)\n assert res == case[1]\n\n res = s.quantile([0.5])\n exp = pd.Series([case[1]], index=[0.5], name=\"XXX\")\n tm.assert_series_equal(res, exp)\n\n def test_datetime_timedelta_quantiles(self):\n # covers #9694\n assert pd.isna(Series([], dtype=\"M8[ns]\").quantile(0.5))\n assert pd.isna(Series([], dtype=\"m8[ns]\").quantile(0.5))\n\n def test_quantile_nat(self):\n res = Series([pd.NaT, pd.NaT]).quantile(0.5)\n assert res is pd.NaT\n\n res = Series([pd.NaT, pd.NaT]).quantile([0.5])\n tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))\n\n @pytest.mark.parametrize(\n \"values, dtype\",\n [([0, 0, 0, 1, 2, 3], \"Sparse[int]\"), ([0.0, None, 1.0, 2.0], \"Sparse[float]\")],\n )\n def test_quantile_sparse(self, values, dtype):\n ser = pd.Series(values, dtype=dtype)\n result = ser.quantile([0.5])\n expected = pd.Series(np.asarray(ser)).quantile([0.5])\n tm.assert_series_equal(result, expected)\n\n def test_quantile_empty(self):\n\n # floats\n s = Series([], dtype=\"float64\")\n\n res = s.quantile(0.5)\n assert np.isnan(res)\n\n res = s.quantile([0.5])\n exp = Series([np.nan], index=[0.5])\n tm.assert_series_equal(res, exp)\n\n # int\n s = Series([], dtype=\"int64\")\n\n res = s.quantile(0.5)\n assert np.isnan(res)\n\n res = s.quantile([0.5])\n exp = Series([np.nan], index=[0.5])\n tm.assert_series_equal(res, exp)\n\n # datetime\n s = Series([], dtype=\"datetime64[ns]\")\n\n res = s.quantile(0.5)\n assert res is pd.NaT\n\n res = s.quantile([0.5])\n exp = Series([pd.NaT], index=[0.5])\n tm.assert_series_equal(res, exp)\n",
"from __future__ import division, absolute_import, print_function\n\nimport os\nimport textwrap\nimport pytest\n\nfrom numpy.testing import assert_, assert_equal\nfrom . import util\n\n\ndef _path(*a):\n return os.path.join(*((os.path.dirname(__file__),) + a))\n\n\nclass TestMixed(util.F2PyTest):\n sources = [_path('src', 'mixed', 'foo.f'),\n _path('src', 'mixed', 'foo_fixed.f90'),\n _path('src', 'mixed', 'foo_free.f90')]\n\n @pytest.mark.slow\n def test_all(self):\n assert_(self.module.bar11() == 11)\n assert_(self.module.foo_fixed.bar12() == 12)\n assert_(self.module.foo_free.bar13() == 13)\n\n @pytest.mark.slow\n def test_docstring(self):\n expected = textwrap.dedent(\"\"\"\\\n a = bar11()\n\n Wrapper for ``bar11``.\n\n Returns\n -------\n a : int\n \"\"\")\n assert_equal(self.module.bar11.__doc__, expected)\n",
"import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import Interval, IntervalIndex, Series\nimport pandas._testing as tm\n\n\nclass TestIntervalIndex:\n def setup_method(self, method):\n self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))\n\n def test_loc_with_interval(self):\n\n # loc with single label / list of labels:\n # - Intervals: only exact matches\n # - scalars: those that contain it\n\n s = self.s\n\n expected = 0\n result = s.loc[Interval(0, 1)]\n assert result == expected\n result = s[Interval(0, 1)]\n assert result == expected\n\n expected = s.iloc[3:5]\n result = s.loc[[Interval(3, 4), Interval(4, 5)]]\n tm.assert_series_equal(expected, result)\n result = s[[Interval(3, 4), Interval(4, 5)]]\n tm.assert_series_equal(expected, result)\n\n # missing or not exact\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='left')\")):\n s.loc[Interval(3, 5, closed=\"left\")]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='left')\")):\n s[Interval(3, 5, closed=\"left\")]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='right')\")):\n s[Interval(3, 5)]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='right')\")):\n s.loc[Interval(3, 5)]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='right')\")):\n s[Interval(3, 5)]\n\n with pytest.raises(\n KeyError, match=re.escape(\"Interval(-2, 0, closed='right')\")\n ):\n s.loc[Interval(-2, 0)]\n\n with pytest.raises(\n KeyError, match=re.escape(\"Interval(-2, 0, closed='right')\")\n ):\n s[Interval(-2, 0)]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(5, 6, closed='right')\")):\n s.loc[Interval(5, 6)]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(5, 6, closed='right')\")):\n s[Interval(5, 6)]\n\n def test_loc_with_scalar(self):\n\n # loc with single label / list of labels:\n # - Intervals: only exact matches\n # - scalars: those that contain it\n\n s = self.s\n\n assert s.loc[1] == 0\n assert s.loc[1.5] == 1\n assert s.loc[2] == 1\n\n assert s[1] == 0\n assert s[1.5] == 1\n assert s[2] == 1\n\n expected = s.iloc[1:4]\n tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]])\n tm.assert_series_equal(expected, s.loc[[2, 3, 4]])\n tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]])\n\n expected = s.iloc[[1, 1, 2, 1]]\n tm.assert_series_equal(expected, s.loc[[1.5, 2, 2.5, 1.5]])\n\n expected = s.iloc[2:5]\n tm.assert_series_equal(expected, s.loc[s >= 2])\n\n def test_loc_with_slices(self):\n\n # loc with slices:\n # - Interval objects: only works with exact matches\n # - scalars: only works for non-overlapping, monotonic intervals,\n # and start/stop select location based on the interval that\n # contains them:\n # (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop))\n\n s = self.s\n\n # slice of interval\n\n expected = s.iloc[:3]\n result = s.loc[Interval(0, 1) : Interval(2, 3)]\n tm.assert_series_equal(expected, result)\n result = s[Interval(0, 1) : Interval(2, 3)]\n tm.assert_series_equal(expected, result)\n\n expected = s.iloc[3:]\n result = s.loc[Interval(3, 4) :]\n tm.assert_series_equal(expected, result)\n result = s[Interval(3, 4) :]\n tm.assert_series_equal(expected, result)\n\n msg = \"Interval objects are not currently supported\"\n with pytest.raises(NotImplementedError, match=msg):\n s.loc[Interval(3, 6) :]\n\n with pytest.raises(NotImplementedError, match=msg):\n s[Interval(3, 6) :]\n\n with pytest.raises(NotImplementedError, match=msg):\n 
s.loc[Interval(3, 4, closed=\"left\") :]\n\n with pytest.raises(NotImplementedError, match=msg):\n s[Interval(3, 4, closed=\"left\") :]\n\n # slice of scalar\n\n expected = s.iloc[:3]\n tm.assert_series_equal(expected, s.loc[:3])\n tm.assert_series_equal(expected, s.loc[:2.5])\n tm.assert_series_equal(expected, s.loc[0.1:2.5])\n tm.assert_series_equal(expected, s.loc[-1:3])\n\n tm.assert_series_equal(expected, s[:3])\n tm.assert_series_equal(expected, s[:2.5])\n tm.assert_series_equal(expected, s[0.1:2.5])\n\n def test_slice_step_ne1(self):\n # GH#31658 slice of scalar with step != 1\n s = self.s\n expected = s.iloc[0:4:2]\n\n result = s[0:4:2]\n tm.assert_series_equal(result, expected)\n\n result2 = s[0:4][::2]\n tm.assert_series_equal(result2, expected)\n\n def test_slice_float_start_stop(self):\n # GH#31658 slicing with integers is positional, with floats is not\n # supported\n ser = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))\n\n msg = \"label-based slicing with step!=1 is not supported for IntervalIndex\"\n with pytest.raises(ValueError, match=msg):\n ser[1.5:9.5:2]\n\n def test_slice_interval_step(self):\n # GH#31658 allows for integer step!=1, not Interval step\n s = self.s\n msg = \"label-based slicing with step!=1 is not supported for IntervalIndex\"\n with pytest.raises(ValueError, match=msg):\n s[0 : 4 : Interval(0, 1)]\n\n def test_loc_with_overlap(self):\n\n idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])\n s = Series(range(len(idx)), index=idx)\n\n # scalar\n expected = s\n result = s.loc[4]\n tm.assert_series_equal(expected, result)\n\n result = s[4]\n tm.assert_series_equal(expected, result)\n\n result = s.loc[[4]]\n tm.assert_series_equal(expected, result)\n\n result = s[[4]]\n tm.assert_series_equal(expected, result)\n\n # interval\n expected = 0\n result = s.loc[Interval(1, 5)]\n result == expected\n\n result = s[Interval(1, 5)]\n result == expected\n\n expected = s\n result = s.loc[[Interval(1, 5), Interval(3, 7)]]\n tm.assert_series_equal(expected, result)\n\n result = s[[Interval(1, 5), Interval(3, 7)]]\n tm.assert_series_equal(expected, result)\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='right')\")):\n s.loc[Interval(3, 5)]\n\n with pytest.raises(KeyError, match=\"^$\"):\n s.loc[[Interval(3, 5)]]\n\n with pytest.raises(KeyError, match=re.escape(\"Interval(3, 5, closed='right')\")):\n s[Interval(3, 5)]\n\n with pytest.raises(KeyError, match=\"^$\"):\n s[[Interval(3, 5)]]\n\n # slices with interval (only exact matches)\n expected = s\n result = s.loc[Interval(1, 5) : Interval(3, 7)]\n tm.assert_series_equal(expected, result)\n\n result = s[Interval(1, 5) : Interval(3, 7)]\n tm.assert_series_equal(expected, result)\n\n msg = \"'can only get slices from an IntervalIndex if bounds are\"\n \" non-overlapping and all monotonic increasing or decreasing'\"\n with pytest.raises(KeyError, match=msg):\n s.loc[Interval(1, 6) : Interval(3, 8)]\n\n with pytest.raises(KeyError, match=msg):\n s[Interval(1, 6) : Interval(3, 8)]\n\n # slices with scalar raise for overlapping intervals\n # TODO KeyError is the appropriate error?\n with pytest.raises(KeyError, match=msg):\n s.loc[1:4]\n\n def test_non_unique(self):\n\n idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])\n s = Series(range(len(idx)), index=idx)\n\n result = s.loc[Interval(1, 3)]\n assert result == 0\n\n result = s.loc[[Interval(1, 3)]]\n expected = s.iloc[0:1]\n tm.assert_series_equal(expected, result)\n\n def test_non_unique_moar(self):\n\n idx = 
IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])\n s = Series(range(len(idx)), index=idx)\n\n expected = s.iloc[[0, 1]]\n result = s.loc[Interval(1, 3)]\n tm.assert_series_equal(expected, result)\n\n expected = s\n result = s.loc[Interval(1, 3) :]\n tm.assert_series_equal(expected, result)\n\n expected = s\n result = s[Interval(1, 3) :]\n tm.assert_series_equal(expected, result)\n\n expected = s.iloc[[0, 1]]\n result = s[[Interval(1, 3)]]\n tm.assert_series_equal(expected, result)\n",
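To make the indexing semantics in the tests above concrete: scalar labels select the interval that contains them, while Interval labels must match an index entry exactly. A small sketch mirroring the `setup_method` fixture:

import numpy as np
from pandas import Interval, IntervalIndex, Series

s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))

print(s.loc[1.5])             # 1 -- 1.5 falls in the (1, 2] bin
print(s.loc[Interval(0, 1)])  # 0 -- exact interval match
# s.loc[Interval(0, 2)] would raise KeyError: no exact match in the index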
"\"\"\"\nHistogram-related functions\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport contextlib\nimport functools\nimport operator\nimport warnings\n\nimport numpy as np\nfrom numpy.compat.py3k import basestring\nfrom numpy.core import overrides\n\n__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n# range is a keyword argument to many functions, so save the builtin so they can\n# use it.\n_range = range\n\n\ndef _ptp(x):\n \"\"\"Peak-to-peak value of x.\n\n This implementation avoids the problem of signed integer arrays having a\n peak-to-peak value that cannot be represented with the array's data type.\n This function returns an unsigned value for signed integer arrays.\n \"\"\"\n return _unsigned_subtract(x.max(), x.min())\n\n\ndef _hist_bin_sqrt(x, range):\n \"\"\"\n Square root histogram bin estimator.\n\n Bin width is inversely proportional to the data size. Used by many\n programs for its simplicity.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return _ptp(x) / np.sqrt(x.size)\n\n\ndef _hist_bin_sturges(x, range):\n \"\"\"\n Sturges histogram bin estimator.\n\n A very simplistic estimator based on the assumption of normality of\n the data. This estimator has poor performance for non-normal data,\n which becomes especially obvious for large data sets. The estimate\n depends only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return _ptp(x) / (np.log2(x.size) + 1.0)\n\n\ndef _hist_bin_rice(x, range):\n \"\"\"\n Rice histogram bin estimator.\n\n Another simple estimator with no normality assumption. It has better\n performance for large data than Sturges, but tends to overestimate\n the number of bins. The number of bins is proportional to the cube\n root of data size (asymptotically optimal). The estimate depends\n only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return _ptp(x) / (2.0 * x.size ** (1.0 / 3))\n\n\ndef _hist_bin_scott(x, range):\n \"\"\"\n Scott histogram bin estimator.\n\n The binwidth is proportional to the standard deviation of the data\n and inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. 
May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)\n\n\ndef _hist_bin_stone(x, range):\n \"\"\"\n Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).\n\n The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.\n The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.\n https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule\n\n This paper by Stone appears to be the origination of this rule.\n http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n range : (float, float)\n The lower and upper range of the bins.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n\n n = x.size\n ptp_x = _ptp(x)\n if n <= 1 or ptp_x == 0:\n return 0\n\n def jhat(nbins):\n hh = ptp_x / nbins\n p_k = np.histogram(x, bins=nbins, range=range)[0] / n\n return (2 - (n + 1) * p_k.dot(p_k)) / hh\n\n nbins_upper_bound = max(100, int(np.sqrt(n)))\n nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)\n if nbins == nbins_upper_bound:\n warnings.warn(\"The number of bins estimated may be suboptimal.\",\n RuntimeWarning, stacklevel=3)\n return ptp_x / nbins\n\n\ndef _hist_bin_doane(x, range):\n \"\"\"\n Doane's histogram bin estimator.\n\n Improved version of Sturges' formula which works better for\n non-normal data. See\n stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n if x.size > 2:\n sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))\n sigma = np.std(x)\n if sigma > 0.0:\n # These three operations add up to\n # g1 = np.mean(((x - np.mean(x)) / sigma)**3)\n # but use only one temp array instead of three\n temp = x - np.mean(x)\n np.true_divide(temp, sigma, temp)\n np.power(temp, 3, temp)\n g1 = np.mean(temp)\n return _ptp(x) / (1.0 + np.log2(x.size) +\n np.log2(1.0 + np.absolute(g1) / sg1))\n return 0.0\n\n\ndef _hist_bin_fd(x, range):\n \"\"\"\n The Freedman-Diaconis histogram bin estimator.\n\n The Freedman-Diaconis rule uses interquartile range (IQR) to\n estimate binwidth. It is considered a variation of the Scott rule\n with more robustness as the IQR is less affected by outliers than\n the standard deviation. However, the IQR depends on fewer points\n than the standard deviation, so it is less accurate, especially for\n long tailed distributions.\n\n If the IQR is 0, this function returns 1 for the number of bins.\n Binwidth is inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. 
May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n del range # unused\n iqr = np.subtract(*np.percentile(x, [75, 25]))\n return 2.0 * iqr * x.size ** (-1.0 / 3.0)\n\n\ndef _hist_bin_auto(x, range):\n \"\"\"\n Histogram bin estimator that uses the minimum width of the\n Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero\n and the Sturges estimator if the FD bandwidth is 0.\n\n The FD estimator is usually the most robust method, but its width\n estimate tends to be too large for small `x` and bad for data with limited\n variance. The Sturges estimator is quite good for small (<1000) datasets\n and is the default in the R language. This method gives good off the shelf\n behaviour.\n\n .. versionchanged:: 1.15.0\n If there is limited variance the IQR can be 0, which results in the\n FD bin width being 0 too. This is not a valid bin width, so\n ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.\n If the IQR is 0, it's unlikely any variance based estimators will be of\n use, so we revert to the sturges estimator, which only uses the size of the\n dataset in its calculation.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n\n See Also\n --------\n _hist_bin_fd, _hist_bin_sturges\n \"\"\"\n fd_bw = _hist_bin_fd(x, range)\n sturges_bw = _hist_bin_sturges(x, range)\n del range # unused\n if fd_bw:\n return min(fd_bw, sturges_bw)\n else:\n # limited variance, so we return a len dependent bw estimator\n return sturges_bw\n\n# Private dict initialized at module load time\n_hist_bin_selectors = {'stone': _hist_bin_stone,\n 'auto': _hist_bin_auto,\n 'doane': _hist_bin_doane,\n 'fd': _hist_bin_fd,\n 'rice': _hist_bin_rice,\n 'scott': _hist_bin_scott,\n 'sqrt': _hist_bin_sqrt,\n 'sturges': _hist_bin_sturges}\n\n\ndef _ravel_and_check_weights(a, weights):\n \"\"\" Check a and weights have matching shapes, and ravel both \"\"\"\n a = np.asarray(a)\n\n # Ensure that the array is a \"subtractable\" dtype\n if a.dtype == np.bool_:\n warnings.warn(\"Converting input from {} to {} for compatibility.\"\n .format(a.dtype, np.uint8),\n RuntimeWarning, stacklevel=3)\n a = a.astype(np.uint8)\n\n if weights is not None:\n weights = np.asarray(weights)\n if weights.shape != a.shape:\n raise ValueError(\n 'weights should have the same shape as a.')\n weights = weights.ravel()\n a = a.ravel()\n return a, weights\n\n\ndef _get_outer_edges(a, range):\n \"\"\"\n Determine the outer bin edges to use, from either the data or the range\n argument\n \"\"\"\n if range is not None:\n first_edge, last_edge = range\n if first_edge > last_edge:\n raise ValueError(\n 'max must be larger than min in range parameter.')\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n \"supplied range of [{}, {}] is not finite\".format(first_edge, last_edge))\n elif a.size == 0:\n # handle empty arrays. 
Can't determine range, so use 0-1.\n first_edge, last_edge = 0, 1\n else:\n first_edge, last_edge = a.min(), a.max()\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n \"autodetected range of [{}, {}] is not finite\".format(first_edge, last_edge))\n\n # expand empty range to avoid divide by zero\n if first_edge == last_edge:\n first_edge = first_edge - 0.5\n last_edge = last_edge + 0.5\n\n return first_edge, last_edge\n\n\ndef _unsigned_subtract(a, b):\n \"\"\"\n Subtract two values where a >= b, and produce an unsigned result\n\n This is needed when finding the difference between the upper and lower\n bound of an int16 histogram\n \"\"\"\n # coerce to a single type\n signed_to_unsigned = {\n np.byte: np.ubyte,\n np.short: np.ushort,\n np.intc: np.uintc,\n np.int_: np.uint,\n np.longlong: np.ulonglong\n }\n dt = np.result_type(a, b)\n try:\n dt = signed_to_unsigned[dt.type]\n except KeyError:\n return np.subtract(a, b, dtype=dt)\n else:\n # we know the inputs are integers, and we are deliberately casting\n # signed to unsigned\n return np.subtract(a, b, casting='unsafe', dtype=dt)\n\n\ndef _get_bin_edges(a, bins, range, weights):\n \"\"\"\n Computes the bins used internally by `histogram`.\n\n Parameters\n ==========\n a : ndarray\n Ravelled data array\n bins, range\n Forwarded arguments from `histogram`.\n weights : ndarray, optional\n Ravelled weights array, or None\n\n Returns\n =======\n bin_edges : ndarray\n Array of bin edges\n uniform_bins : (Number, Number, int):\n The upper bound, lowerbound, and number of bins, used in the optimized\n implementation of `histogram` that works on uniform bins.\n \"\"\"\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, basestring):\n bin_name = bins\n # if `bins` is a string for an automatic method,\n # this will replace it with the number of bins calculated\n if bin_name not in _hist_bin_selectors:\n raise ValueError(\n \"{!r} is not a valid estimator for `bins`\".format(bin_name))\n if weights is not None:\n raise TypeError(\"Automated estimation of the number of \"\n \"bins is not supported for weighted data\")\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n # truncate the range if needed\n if range is not None:\n keep = (a >= first_edge)\n keep &= (a <= last_edge)\n if not np.logical_and.reduce(keep):\n a = a[keep]\n\n if a.size == 0:\n n_equal_bins = 1\n else:\n # Do not call selectors on empty arrays\n width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))\n if width:\n n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))\n else:\n # Width can be zero for some estimators, e.g. FD when\n # the IQR of the data is zero.\n n_equal_bins = 1\n\n elif np.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError:\n raise TypeError(\n '`bins` must be an integer, a string, or an array')\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n elif np.ndim(bins) == 1:\n bin_edges = np.asarray(bins)\n if np.any(bin_edges[:-1] > bin_edges[1:]):\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # gh-10322 means that type resolution rules are dependent on array\n # shapes. 
To avoid this causing problems, we pick a type now and stick\n # with it throughout.\n bin_type = np.result_type(first_edge, last_edge, a)\n if np.issubdtype(bin_type, np.integer):\n bin_type = np.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = np.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n return bin_edges, (first_edge, last_edge, n_equal_bins)\n else:\n return bin_edges, None\n\n\ndef _search_sorted_inclusive(a, v):\n \"\"\"\n Like `searchsorted`, but where the last item in `v` is placed on the right.\n\n In the context of a histogram, this makes the last bin edge inclusive\n \"\"\"\n return np.concatenate((\n a.searchsorted(v[:-1], 'left'),\n a.searchsorted(v[-1:], 'right')\n ))\n\n\ndef _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):\n return (a, bins, weights)\n\n\n@array_function_dispatch(_histogram_bin_edges_dispatcher)\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n r\"\"\"\n Function to calculate only the edges of the bins used by the `histogram`\n function.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines the bin edges, including the rightmost\n edge, allowing for non-uniform bin widths.\n\n If `bins` is a string from the list below, `histogram_bin_edges` will use\n the method chosen to calculate the optimal bin width and\n consequently the number of bins (see `Notes` for more detail on\n the estimators) from the data that falls within the requested\n range. While the bin width will be optimal for the actual data\n in the range, the number of bins will be computed to fill the\n entire range, including the empty portions. For visualisation,\n using the 'auto' option is suggested. Weighted data is not\n supported for automated bin size selection.\n\n 'auto'\n Maximum of the 'sturges' and 'fd' estimators. Provides good\n all around performance.\n\n 'fd' (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into\n account data variability and data size.\n\n 'doane'\n An improved version of Sturges' estimator that works better\n with non-normal datasets.\n\n 'scott'\n Less robust estimator that that takes into account data\n variability and data size.\n\n 'stone'\n Estimator based on leave-one-out cross-validation estimate of\n the integrated squared error. Can be regarded as a generalization\n of Scott's rule.\n\n 'rice'\n Estimator does not take variability into account, only data\n size. Commonly overestimates number of bins required.\n\n 'sturges'\n R's default method, only accounts for data size. Only\n optimal for gaussian data and underestimates number of bins\n for large non-gaussian datasets.\n\n 'sqrt'\n Square root (of data size) estimator, used by Excel and\n other programs for its speed and simplicity.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. 
While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). This is currently not used by any of the bin estimators,\n but may be in the future.\n\n Returns\n -------\n bin_edges : array of dtype float\n The edges to pass into `histogram`\n\n See Also\n --------\n histogram\n\n Notes\n -----\n The methods to estimate the optimal number of bins are well founded\n in literature, and are inspired by the choices R provides for\n histogram visualisation. Note that having the number of bins\n proportional to :math:`n^{1/3}` is asymptotically optimal, which is\n why it appears in most estimators. These are simply plug-in methods\n that give good starting points for number of bins. In the equations\n below, :math:`h` is the binwidth and :math:`n_h` is the number of\n bins. All estimators that compute bin counts are recast to bin width\n using the `ptp` of the data. The final bin count is obtained from\n ``np.round(np.ceil(range / h))``.\n\n 'auto' (maximum of the 'sturges' and 'fd' estimators)\n A compromise to get a good value. For small datasets the Sturges\n value will usually be chosen, while larger datasets will usually\n default to FD. Avoids the overly conservative behaviour of FD\n and Sturges for small and large datasets respectively.\n Switchover point is usually :math:`a.size \\approx 1000`.\n\n 'fd' (Freedman Diaconis Estimator)\n .. math:: h = 2 \\frac{IQR}{n^{1/3}}\n\n The binwidth is proportional to the interquartile range (IQR)\n and inversely proportional to cube root of a.size. Can be too\n conservative for small datasets, but is quite good for large\n datasets. The IQR is very robust to outliers.\n\n 'scott'\n .. math:: h = \\sigma \\sqrt[3]{\\frac{24 * \\sqrt{\\pi}}{n}}\n\n The binwidth is proportional to the standard deviation of the\n data and inversely proportional to cube root of ``x.size``. Can\n be too conservative for small datasets, but is quite good for\n large datasets. The standard deviation is not very robust to\n outliers. Values are very similar to the Freedman-Diaconis\n estimator in the absence of outliers.\n\n 'rice'\n .. math:: n_h = 2n^{1/3}\n\n The number of bins is only proportional to cube root of\n ``a.size``. It tends to overestimate the number of bins and it\n does not take into account data variability.\n\n 'sturges'\n .. math:: n_h = \\log _{2}n+1\n\n The number of bins is the base 2 log of ``a.size``. This\n estimator assumes normality of data and is too conservative for\n larger, non-normal datasets. This is the default method in R's\n ``hist`` method.\n\n 'doane'\n .. math:: n_h = 1 + \\log_{2}(n) +\n \\log_{2}(1 + \\frac{|g_1|}{\\sigma_{g_1}})\n\n g_1 = mean[(\\frac{x - \\mu}{\\sigma})^3]\n\n \\sigma_{g_1} = \\sqrt{\\frac{6(n - 2)}{(n + 1)(n + 3)}}\n\n An improved version of Sturges' formula that produces better\n estimates for non-normal datasets. This estimator attempts to\n account for the skew of the data.\n\n 'sqrt'\n .. math:: n_h = \\sqrt n\n\n The simplest and fastest estimator. Only takes into account the\n data size.\n\n Examples\n --------\n >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])\n >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n >>> np.histogram_bin_edges(arr, bins=2)\n array([0. , 2.5, 5. 
])\n\n For consistency with histogram, an array of pre-computed bins is\n passed through unmodified:\n\n >>> np.histogram_bin_edges(arr, [1, 2])\n array([1, 2])\n\n This function allows one set of bins to be computed, and reused across\n multiple histograms:\n\n >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')\n >>> shared_bins\n array([0., 1., 2., 3., 4., 5.])\n\n >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])\n >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)\n >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)\n\n >>> hist_0; hist_1\n array([1, 1, 0, 1, 0])\n array([2, 0, 1, 1, 2])\n\n Which gives more easily comparable results than using separate bins for\n each histogram:\n\n >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')\n >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')\n >>> hist_0; hist_1\n array([1, 1, 1])\n array([2, 1, 1, 2])\n >>> bins_0; bins_1\n array([0., 1., 2., 3.])\n array([0. , 1.25, 2.5 , 3.75, 5. ])\n\n \"\"\"\n a, weights = _ravel_and_check_weights(a, weights)\n bin_edges, _ = _get_bin_edges(a, bins, range, weights)\n return bin_edges\n\n\ndef _histogram_dispatcher(\n a, bins=None, range=None, normed=None, weights=None, density=None):\n return (a, bins, weights)\n\n\n@array_function_dispatch(_histogram_dispatcher)\ndef histogram(a, bins=10, range=None, normed=None, weights=None,\n density=None):\n r\"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n\n .. versionadded:: 1.11.0\n\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n normed : bool, optional\n\n .. deprecated:: 1.6.0\n\n This is equivalent to the `density` argument, but produces incorrect\n results for unequal bin widths. It should not be used.\n\n .. versionchanged:: 1.15.0\n DeprecationWarnings are actually emitted.\n\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). If `density` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. 
Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n\n Overrides the ``normed`` keyword if given.\n\n Returns\n -------\n hist : array\n The values of the histogram. See `density` and `weights` for a\n description of the possible semantics.\n bin_edges : array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n\n See Also\n --------\n histogramdd, bincount, searchsorted, digitize, histogram_bin_edges\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words,\n if `bins` is::\n\n [1, 2, 3, 4]\n\n then the first bin is ``[1, 2)`` (including 1, but excluding 2) and\n the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which\n *includes* 4.\n\n\n Examples\n --------\n >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])\n (array([0, 2, 1]), array([0, 1, 2, 3]))\n >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)\n (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))\n >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])\n (array([1, 4, 1]), array([0, 1, 2, 3]))\n\n >>> a = np.arange(5)\n >>> hist, bin_edges = np.histogram(a, density=True)\n >>> hist\n array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])\n >>> hist.sum()\n 2.4999999999999996\n >>> np.sum(hist * np.diff(bin_edges))\n 1.0\n\n .. versionadded:: 1.11.0\n\n Automated Bin Selection Methods example, using 2 peak random data\n with 2000 points:\n\n >>> import matplotlib.pyplot as plt\n >>> rng = np.random.RandomState(10) # deterministic random data\n >>> a = np.hstack((rng.normal(size=1000),\n ... rng.normal(loc=5, scale=2, size=1000)))\n >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram\n >>> plt.title(\"Histogram with 'auto' bins\")\n Text(0.5, 1.0, \"Histogram with 'auto' bins\")\n >>> plt.show()\n\n \"\"\"\n a, weights = _ravel_and_check_weights(a, weights)\n\n bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)\n\n # Histogram is an integer or a float array depending on the weights.\n if weights is None:\n ntype = np.dtype(np.intp)\n else:\n ntype = weights.dtype\n\n # We set a block size, as this allows us to iterate over chunks when\n # computing histograms, to minimize memory usage.\n BLOCK = 65536\n\n # The fast path uses bincount, but that only works for certain types\n # of weight\n simple_weights = (\n weights is None or\n np.can_cast(weights.dtype, np.double) or\n np.can_cast(weights.dtype, complex)\n )\n\n if uniform_bins is not None and simple_weights:\n # Fast algorithm for equal bins\n # We now convert values of a to bin indices, under the assumption of\n # equal bin widths (which is valid here).\n first_edge, last_edge, n_equal_bins = uniform_bins\n\n # Initialize empty histogram\n n = np.zeros(n_equal_bins, ntype)\n\n # Pre-compute histogram scaling factor\n norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)\n\n # We iterate over blocks here for two reasons: the first is that for\n # large arrays, it is actually faster (for example for a 10^8 array it\n # is 2x as fast) and it results in a memory footprint 3x lower in the\n # limit of large arrays.\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i+BLOCK]\n if weights is None:\n tmp_w = None\n else:\n tmp_w = weights[i:i + BLOCK]\n\n # Only include values in the right range\n keep = (tmp_a >= first_edge)\n keep &= (tmp_a <= last_edge)\n if not np.logical_and.reduce(keep):\n tmp_a = tmp_a[keep]\n if tmp_w is not None:\n tmp_w = tmp_w[keep]\n\n # This 
cast ensures no type promotions occur below, which gh-10322\n # make unpredictable. Getting it wrong leads to precision errors\n # like gh-8123.\n tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)\n\n # Compute the bin indices, and for values that lie exactly on\n # last_edge we need to subtract one\n f_indices = _unsigned_subtract(tmp_a, first_edge) * norm\n indices = f_indices.astype(np.intp)\n indices[indices == n_equal_bins] -= 1\n\n # The index computation is not guaranteed to give exactly\n # consistent results within ~1 ULP of the bin edges.\n decrement = tmp_a < bin_edges[indices]\n indices[decrement] -= 1\n # The last bin includes the right edge. The other bins do not.\n increment = ((tmp_a >= bin_edges[indices + 1])\n & (indices != n_equal_bins - 1))\n indices[increment] += 1\n\n # We now compute the histogram using bincount\n if ntype.kind == 'c':\n n.real += np.bincount(indices, weights=tmp_w.real,\n minlength=n_equal_bins)\n n.imag += np.bincount(indices, weights=tmp_w.imag,\n minlength=n_equal_bins)\n else:\n n += np.bincount(indices, weights=tmp_w,\n minlength=n_equal_bins).astype(ntype)\n else:\n # Compute via cumulative histogram\n cum_n = np.zeros(bin_edges.shape, ntype)\n if weights is None:\n for i in _range(0, len(a), BLOCK):\n sa = np.sort(a[i:i+BLOCK])\n cum_n += _search_sorted_inclusive(sa, bin_edges)\n else:\n zero = np.zeros(1, dtype=ntype)\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i+BLOCK]\n tmp_w = weights[i:i+BLOCK]\n sorting_index = np.argsort(tmp_a)\n sa = tmp_a[sorting_index]\n sw = tmp_w[sorting_index]\n cw = np.concatenate((zero, sw.cumsum()))\n bin_index = _search_sorted_inclusive(sa, bin_edges)\n cum_n += cw[bin_index]\n\n n = np.diff(cum_n)\n\n # density overrides the normed keyword\n if density is not None:\n if normed is not None:\n # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)\n warnings.warn(\n \"The normed argument is ignored when density is provided. \"\n \"In future passing both will result in an error.\",\n DeprecationWarning, stacklevel=3)\n normed = None\n\n if density:\n db = np.array(np.diff(bin_edges), float)\n return n/db/n.sum(), bin_edges\n elif normed:\n # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)\n warnings.warn(\n \"Passing `normed=True` on non-uniform bins has always been \"\n \"broken, and computes neither the probability density \"\n \"function nor the probability mass function. \"\n \"The result is only correct if the bins are uniform, when \"\n \"density=True will produce the same result anyway. \"\n \"The argument will be removed in a future version of \"\n \"numpy.\",\n np.VisibleDeprecationWarning, stacklevel=3)\n\n # this normalization is incorrect, but\n db = np.array(np.diff(bin_edges), float)\n return n/(n*db).sum(), bin_edges\n else:\n if normed is not None:\n # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)\n warnings.warn(\n \"Passing normed=False is deprecated, and has no effect. 
\"\n \"Consider passing the density argument instead.\",\n DeprecationWarning, stacklevel=3)\n return n, bin_edges\n\n\ndef _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,\n weights=None, density=None):\n if hasattr(sample, 'shape'): # same condition as used in histogramdd\n yield sample\n else:\n yield from sample\n with contextlib.suppress(TypeError):\n yield from bins\n yield weights\n\n\n@array_function_dispatch(_histogramdd_dispatcher)\ndef histogramdd(sample, bins=10, range=None, normed=None, weights=None,\n density=None):\n \"\"\"\n Compute the multidimensional histogram of some data.\n\n Parameters\n ----------\n sample : (N, D) array, or (D, N) array_like\n The data to be histogrammed.\n\n Note the unusual interpretation of sample when an array_like:\n\n * When an array, each row is a coordinate in a D-dimensional space -\n such as ``histogramgramdd(np.array([p1, p2, p3]))``.\n * When an array_like, each element is the list of values for single\n coordinate - such as ``histogramgramdd((X, Y, Z))``.\n\n The first form should be preferred.\n\n bins : sequence or int, optional\n The bin specification:\n\n * A sequence of arrays describing the monotonically increasing bin\n edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... =bins)\n * The number of bins for all dimensions (nx=ny=...=bins).\n\n range : sequence, optional\n A sequence of length D, each an optional (lower, upper) tuple giving\n the outer bin edges to be used if the edges are not given explicitly in\n `bins`.\n An entry of None in the sequence results in the minimum and maximum\n values being used for the corresponding dimension.\n The default, None, is equivalent to passing a tuple of D None values.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_volume``.\n normed : bool, optional\n An alias for the density argument that behaves identically. To avoid\n confusion with the broken normed argument to `histogram`, `density`\n should be preferred.\n weights : (N,) array_like, optional\n An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.\n Weights are normalized to 1 if normed is True. If normed is False,\n the values of the returned histogram are equal to the sum of the\n weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray\n The multidimensional histogram of sample x. 
See normed and weights\n for the different possible semantics.\n edges : list\n A list of D arrays describing the bin edges for each dimension.\n\n See Also\n --------\n histogram: 1-D histogram\n histogram2d: 2-D histogram\n\n Examples\n --------\n >>> r = np.random.randn(100,3)\n >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))\n >>> H.shape, edges[0].size, edges[1].size, edges[2].size\n ((5, 8, 4), 6, 9, 5)\n\n \"\"\"\n\n try:\n # Sample is an ND-array.\n N, D = sample.shape\n except (AttributeError, ValueError):\n # Sample is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n N, D = sample.shape\n\n nbin = np.empty(D, int)\n edges = D*[None]\n dedges = D*[None]\n if weights is not None:\n weights = np.asarray(weights)\n\n try:\n M = len(bins)\n if M != D:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n ' sample x.')\n except TypeError:\n # bins is an integer\n bins = D*[bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * D\n elif len(range) != D:\n raise ValueError('range argument must have one entry per dimension')\n\n # Create edge arrays\n for i in _range(D):\n if np.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n '`bins[{}]` must be positive, when an integer'.format(i))\n smin, smax = _get_outer_edges(sample[:,i], range[i])\n edges[i] = np.linspace(smin, smax, bins[i] + 1)\n elif np.ndim(bins[i]) == 1:\n edges[i] = np.asarray(bins[i])\n if np.any(edges[i][:-1] > edges[i][1:]):\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when an array'\n .format(i))\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or 1d array'.format(i))\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = np.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n Ncount = tuple(\n # avoid np.digitize to work around gh-11022\n np.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(D)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(D):\n # Find which points are on the rightmost edge.\n on_edge = (sample[:, i] == edges[i][-1])\n # Shift these points one bin to the left.\n Ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = np.ravel_multi_index(Ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = np.bincount(xy, weights, minlength=nbin.prod())\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in gh-7845, for now.\n hist = hist.astype(float, casting='safe')\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = D*(slice(1, -1),)\n hist = hist[core]\n\n # handle the aliasing normed argument\n if normed is None:\n if density is None:\n density = False\n elif density is None:\n # an explicit normed argument was passed, alias it to the new name\n density = normed\n else:\n raise TypeError(\"Cannot specify both 'normed' and 'density'\")\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(D):\n shape = np.ones(D, int)\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if (hist.shape != nbin - 2).any():\n raise RuntimeError(\n \"Internal Shape Error\")\n return hist, 
edges\n",
"from datetime import datetime, timedelta\nfrom typing import Any\n\nimport numpy as np\n\nfrom pandas._libs import index as libindex\nfrom pandas._libs.lib import no_default\nfrom pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick\nfrom pandas._libs.tslibs.parsing import DateParseError, parse_time_string\nfrom pandas._typing import DtypeObj, Label\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import Appender, cache_readonly, doc\n\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_dtype_equal,\n is_float,\n is_integer,\n is_object_dtype,\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import PeriodDtype\n\nfrom pandas.core.arrays.period import (\n PeriodArray,\n period_array,\n raise_on_incompatible,\n validate_dtype_freq,\n)\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n _index_shared_docs,\n ensure_index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom pandas.core.indexes.datetimes import DatetimeIndex, Index\nfrom pandas.core.indexes.extension import inherit_names\nfrom pandas.core.indexes.numeric import Int64Index\nfrom pandas.core.ops import get_op_result_name\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(dict(target_klass=\"PeriodIndex or list of Periods\"))\n\n# --- Period index sketch\n\n\ndef _new_PeriodIndex(cls, **d):\n # GH13277 for unpickling\n values = d.pop(\"data\")\n if values.dtype == \"int64\":\n freq = d.pop(\"freq\", None)\n values = PeriodArray(values, freq=freq)\n return cls._simple_new(values, **d)\n else:\n return cls(values, **d)\n\n\n@inherit_names(\n [\"strftime\", \"to_timestamp\", \"start_time\", \"end_time\"] + PeriodArray._field_ops,\n PeriodArray,\n wrap=True,\n)\n@inherit_names([\"is_leap_year\", \"_format_native_types\"], PeriodArray)\nclass PeriodIndex(DatetimeIndexOpsMixin, Int64Index):\n \"\"\"\n Immutable ndarray holding ordinal values indicating regular periods in time.\n\n Index keys are boxed to Period objects which carries the metadata (eg,\n frequency information).\n\n Parameters\n ----------\n data : array-like (1d int np.ndarray or PeriodArray), optional\n Optional period-like data to construct index with.\n copy : bool\n Make a copy of input ndarray.\n freq : str or period object, optional\n One of pandas period strings or corresponding objects.\n year : int, array, or Series, default None\n month : int, array, or Series, default None\n quarter : int, array, or Series, default None\n day : int, array, or Series, default None\n hour : int, array, or Series, default None\n minute : int, array, or Series, default None\n second : int, array, or Series, default None\n tz : object, default None\n Timezone for converting datetime64 data to Periods.\n dtype : str or PeriodDtype, default None\n\n Attributes\n ----------\n day\n dayofweek\n dayofyear\n days_in_month\n daysinmonth\n end_time\n freq\n freqstr\n hour\n is_leap_year\n minute\n month\n quarter\n qyear\n second\n start_time\n week\n weekday\n weekofyear\n year\n\n Methods\n -------\n asfreq\n strftime\n to_timestamp\n\n See Also\n --------\n Index : The base pandas Index type.\n Period : Represents a period of time.\n DatetimeIndex : Index with datetime64 data.\n TimedeltaIndex : Index of timedelta64 data.\n period_range : Create a fixed-frequency PeriodIndex.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(year=[2000, 
2002], quarter=[1, 3])\n >>> idx\n PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]', freq='Q-DEC')\n \"\"\"\n\n _typ = \"periodindex\"\n _attributes = [\"name\", \"freq\"]\n\n # define my properties & methods for delegation\n _is_numeric_dtype = False\n\n _data: PeriodArray\n freq: BaseOffset\n\n _engine_type = libindex.PeriodEngine\n _supports_partial_string_indexing = True\n\n # --------------------------------------------------------------------\n # methods that dispatch to array and wrap result in PeriodIndex\n\n @doc(PeriodArray.asfreq)\n def asfreq(self, freq=None, how: str = \"E\") -> \"PeriodIndex\":\n arr = self._data.asfreq(freq, how)\n return type(self)._simple_new(arr, name=self.name)\n\n # ------------------------------------------------------------------------\n # Index Constructors\n\n def __new__(\n cls,\n data=None,\n ordinal=None,\n freq=None,\n tz=None,\n dtype=None,\n copy=False,\n name=None,\n **fields,\n ):\n\n valid_field_set = {\n \"year\",\n \"month\",\n \"day\",\n \"quarter\",\n \"hour\",\n \"minute\",\n \"second\",\n }\n\n if not set(fields).issubset(valid_field_set):\n argument = list(set(fields) - valid_field_set)[0]\n raise TypeError(f\"__new__() got an unexpected keyword argument {argument}\")\n\n name = maybe_extract_name(name, data, cls)\n\n if data is None and ordinal is None:\n # range-based.\n data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)\n # PeriodArray._generate range does validation that fields is\n # empty when really using the range-based constructor.\n freq = freq2\n\n data = PeriodArray(data, freq=freq)\n else:\n freq = validate_dtype_freq(dtype, freq)\n\n # PeriodIndex allow PeriodIndex(period_index, freq=different)\n # Let's not encourage that kind of behavior in PeriodArray.\n\n if freq and isinstance(data, cls) and data.freq != freq:\n # TODO: We can do some of these with no-copy / coercion?\n # e.g. D -> 2D seems to be OK\n data = data.asfreq(freq)\n\n if data is None and ordinal is not None:\n # we strangely ignore `ordinal` if data is passed.\n ordinal = np.asarray(ordinal, dtype=np.int64)\n data = PeriodArray(ordinal, freq)\n else:\n # don't pass copy here, since we copy later.\n data = period_array(data=data, freq=freq)\n\n if copy:\n data = data.copy()\n\n return cls._simple_new(data, name=name)\n\n @classmethod\n def _simple_new(cls, values: PeriodArray, name: Label = None):\n \"\"\"\n Create a new PeriodIndex.\n\n Parameters\n ----------\n values : PeriodArray\n Values that can be converted to a PeriodArray without inference\n or coercion.\n \"\"\"\n assert isinstance(values, PeriodArray), type(values)\n\n result = object.__new__(cls)\n result._data = values\n # For groupby perf. 
See note in indexes/base about _index_data\n result._index_data = values._data\n result.name = name\n result._cache = {}\n result._reset_identity()\n return result\n\n # ------------------------------------------------------------------------\n # Data\n\n @property\n def values(self):\n return np.asarray(self)\n\n @property\n def _has_complex_internals(self):\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n def _shallow_copy(self, values=None, name: Label = no_default):\n name = name if name is not no_default else self.name\n cache = self._cache.copy() if values is None else {}\n if values is None:\n values = self._data\n\n result = self._simple_new(values, name=name)\n result._cache = cache\n return result\n\n def _maybe_convert_timedelta(self, other):\n \"\"\"\n Convert timedelta-like input to an integer multiple of self.freq\n\n Parameters\n ----------\n other : timedelta, np.timedelta64, DateOffset, int, np.ndarray\n\n Returns\n -------\n converted : int, np.ndarray[int64]\n\n Raises\n ------\n IncompatibleFrequency : if the input cannot be written as a multiple\n of self.freq. Note IncompatibleFrequency subclasses ValueError.\n \"\"\"\n if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):\n if isinstance(self.freq, Tick):\n # _check_timedeltalike_freq_compat will raise if incompatible\n delta = self._data._check_timedeltalike_freq_compat(other)\n return delta\n elif isinstance(other, BaseOffset):\n if other.base == self.freq.base:\n return other.n\n\n raise raise_on_incompatible(self, other)\n elif is_integer(other):\n # integer is passed to .shift via\n # _add_datetimelike_methods basically\n # but ufunc may pass integer to _add_delta\n return other\n\n # raise when input doesn't have freq\n raise raise_on_incompatible(self, None)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n if not isinstance(dtype, PeriodDtype):\n return False\n return dtype.freq == self.freq\n\n # ------------------------------------------------------------------------\n # Rendering Methods\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return self.astype(object)._values\n\n @property\n def _formatter_func(self):\n return self.array._formatter(boxed=False)\n\n # ------------------------------------------------------------------------\n # Indexing\n\n @doc(Index.__contains__)\n def __contains__(self, key: Any) -> bool:\n if isinstance(key, Period):\n if key.freq != self.freq:\n return False\n else:\n return key.ordinal in self._engine\n else:\n hash(key)\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @cache_readonly\n def _int64index(self) -> Int64Index:\n return Int64Index._simple_new(self.asi8, name=self.name)\n\n # ------------------------------------------------------------------------\n # Index Methods\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc. 
Needs additional handling as\n PeriodIndex stores internal data as int dtype\n\n Replace this to __numpy_ufunc__ in future version\n \"\"\"\n if isinstance(context, tuple) and len(context) > 0:\n func = context[0]\n if func is np.add:\n pass\n elif func is np.subtract:\n name = self.name\n left = context[1][0]\n right = context[1][1]\n if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex):\n name = left.name if left.name == right.name else None\n return Index(result, name=name)\n elif isinstance(left, Period) or isinstance(right, Period):\n return Index(result, name=name)\n elif isinstance(func, np.ufunc):\n if \"M->M\" not in func.types:\n msg = f\"ufunc '{func.__name__}' not supported for the PeriodIndex\"\n # This should be TypeError, but TypeError cannot be raised\n # from here because numpy catches.\n raise ValueError(msg)\n\n if is_bool_dtype(result):\n return result\n # the result is object dtype array of Period\n # cannot pass _simple_new as it is\n return type(self)(result, freq=self.freq, name=self.name)\n\n def asof_locs(self, where, mask: np.ndarray) -> np.ndarray:\n \"\"\"\n where : array of timestamps\n mask : array of booleans where data is not NA\n \"\"\"\n where_idx = where\n if isinstance(where_idx, DatetimeIndex):\n where_idx = PeriodIndex(where_idx._values, freq=self.freq)\n elif not isinstance(where_idx, PeriodIndex):\n raise TypeError(\"asof_locs `where` must be DatetimeIndex or PeriodIndex\")\n elif where_idx.freq != self.freq:\n raise raise_on_incompatible(self, where_idx)\n\n locs = self.asi8[mask].searchsorted(where_idx.asi8, side=\"right\")\n\n locs = np.where(locs > 0, locs - 1, 0)\n result = np.arange(len(self))[mask].take(locs)\n\n first = mask.argmax()\n result[(locs == 0) & (where_idx.asi8 < self.asi8[first])] = -1\n\n return result\n\n @doc(Index.astype)\n def astype(self, dtype, copy=True, how=\"start\"):\n dtype = pandas_dtype(dtype)\n\n if is_datetime64_any_dtype(dtype):\n # 'how' is index-specific, isn't part of the EA interface.\n tz = getattr(dtype, \"tz\", None)\n return self.to_timestamp(how=how).tz_localize(tz)\n\n # TODO: should probably raise on `how` here, so we don't ignore it.\n return super().astype(dtype, copy=copy)\n\n @property\n def is_full(self) -> bool:\n \"\"\"\n Returns True if this PeriodIndex is range-like in that all Periods\n between start and end are present, in order.\n \"\"\"\n if len(self) == 0:\n return True\n if not self.is_monotonic:\n raise ValueError(\"Index is not monotonic\")\n values = self.asi8\n return ((values[1:] - values[:-1]) < 2).all()\n\n @property\n def inferred_type(self) -> str:\n # b/c data is represented as ints make sure we can't have ambiguous\n # indexing\n return \"period\"\n\n @Appender(_index_shared_docs[\"get_indexer\"] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n target = ensure_index(target)\n\n if isinstance(target, PeriodIndex):\n if target.freq != self.freq:\n # No matches\n no_matches = -1 * np.ones(self.shape, dtype=np.intp)\n return no_matches\n\n target = target.asi8\n self_index = self._int64index\n else:\n self_index = self\n\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, target)\n if self_index is not self:\n # convert tolerance to i8\n tolerance = self._maybe_convert_timedelta(tolerance)\n\n return Index.get_indexer(self_index, target, method, limit, tolerance)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n 
target = ensure_index(target)\n\n if not self._is_comparable_dtype(target.dtype):\n no_matches = -1 * np.ones(self.shape, dtype=np.intp)\n return no_matches, no_matches\n\n target = target.asi8\n\n indexer, missing = self._int64index.get_indexer_non_unique(target)\n return ensure_platform_int(indexer), missing\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label.\n\n Parameters\n ----------\n key : Period, NaT, str, or datetime\n String or datetime key must be parsable as Period.\n\n Returns\n -------\n loc : int or ndarray[int64]\n\n Raises\n ------\n KeyError\n Key is not present in the index.\n TypeError\n If key is listlike or otherwise not hashable.\n \"\"\"\n orig_key = key\n\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n if isinstance(key, str):\n\n try:\n loc = self._get_string_slice(key)\n return loc\n except (TypeError, ValueError):\n pass\n\n try:\n asdt, reso = parse_time_string(key, self.freq)\n except DateParseError as err:\n # A string with invalid format\n raise KeyError(f\"Cannot interpret '{key}' as period\") from err\n\n reso = Resolution.from_attrname(reso)\n grp = reso.freq_group\n freqn = self.dtype.freq_group\n\n # _get_string_slice will handle cases where grp < freqn\n assert grp >= freqn\n\n # BusinessDay is a bit strange. It has a *lower* code, but we never parse\n # a string as \"BusinessDay\" resolution, just Day.\n if grp == freqn or (\n reso == Resolution.RESO_DAY and self.dtype.freq.name == \"B\"\n ):\n key = Period(asdt, freq=self.freq)\n loc = self.get_loc(key, method=method, tolerance=tolerance)\n return loc\n elif method is None:\n raise KeyError(key)\n else:\n key = asdt\n\n elif is_integer(key):\n # Period constructor will cast to string, which we dont want\n raise KeyError(key)\n\n try:\n key = Period(key, freq=self.freq)\n except ValueError as err:\n # we cannot construct the Period\n raise KeyError(orig_key) from err\n\n try:\n return Index.get_loc(self, key, method, tolerance)\n except KeyError as err:\n raise KeyError(orig_key) from err\n\n def _maybe_cast_slice_bound(self, label, side: str, kind: str):\n \"\"\"\n If label is a string or a datetime, cast it to Period.ordinal according\n to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'}\n\n Returns\n -------\n bound : Period or object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n assert kind in [\"loc\", \"getitem\"]\n\n if isinstance(label, datetime):\n return Period(label, freq=self.freq)\n elif isinstance(label, str):\n try:\n parsed, reso = parse_time_string(label, self.freq)\n reso = Resolution.from_attrname(reso)\n bounds = self._parsed_string_to_bounds(reso, parsed)\n return bounds[0 if side == \"left\" else 1]\n except ValueError as err:\n # string cannot be parsed as datetime-like\n # TODO: we need tests for this case\n raise KeyError(label) from err\n elif is_integer(label) or is_float(label):\n self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):\n grp = reso.freq_group\n iv = Period(parsed, freq=grp)\n return (iv.asfreq(self.freq, how=\"start\"), iv.asfreq(self.freq, how=\"end\"))\n\n def _validate_partial_date_slice(self, reso: Resolution):\n assert isinstance(reso, Resolution), (type(reso), reso)\n grp = reso.freq_group\n freqn = self.dtype.freq_group\n\n if not grp < freqn:\n # TODO: we used to also check for\n # reso in [\"day\", 
\"hour\", \"minute\", \"second\"]\n # why is that check not needed?\n raise ValueError\n\n def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):\n # TODO: Check for non-True use_lhs/use_rhs\n parsed, reso = parse_time_string(key, self.freq)\n reso = Resolution.from_attrname(reso)\n try:\n return self._partial_date_slice(reso, parsed, use_lhs, use_rhs)\n except KeyError as err:\n raise KeyError(key) from err\n\n def insert(self, loc, item):\n if not isinstance(item, Period) or self.freq != item.freq:\n return self.astype(object).insert(loc, item)\n\n i8result = np.concatenate(\n (self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8)\n )\n arr = type(self._data)._simple_new(i8result, dtype=self.dtype)\n return type(self)._simple_new(arr, name=self.name)\n\n def join(self, other, how=\"left\", level=None, return_indexers=False, sort=False):\n \"\"\"\n See Index.join\n \"\"\"\n self._assert_can_do_setop(other)\n\n if not isinstance(other, PeriodIndex):\n return self.astype(object).join(\n other, how=how, level=level, return_indexers=return_indexers, sort=sort\n )\n\n # _assert_can_do_setop ensures we have matching dtype\n result = Int64Index.join(\n self,\n other,\n how=how,\n level=level,\n return_indexers=return_indexers,\n sort=sort,\n )\n return result\n\n # ------------------------------------------------------------------------\n # Set Operation Methods\n\n def _assert_can_do_setop(self, other):\n super()._assert_can_do_setop(other)\n\n # *Can't* use PeriodIndexes of different freqs\n # *Can* use PeriodIndex/DatetimeIndex\n if isinstance(other, PeriodIndex) and self.freq != other.freq:\n raise raise_on_incompatible(self, other)\n\n def _setop(self, other, sort, opname: str):\n \"\"\"\n Perform a set operation by dispatching to the Int64Index implementation.\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n res_name = get_op_result_name(self, other)\n other = ensure_index(other)\n\n i8self = Int64Index._simple_new(self.asi8)\n i8other = Int64Index._simple_new(other.asi8)\n i8result = getattr(i8self, opname)(i8other, sort=sort)\n\n parr = type(self._data)(np.asarray(i8result, dtype=np.int64), dtype=self.dtype)\n result = type(self)._simple_new(parr, name=res_name)\n return result\n\n def intersection(self, other, sort=False):\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if self.equals(other):\n return self._get_reconciled_name_object(other)\n\n elif is_object_dtype(other.dtype):\n return self.astype(\"O\").intersection(other, sort=sort)\n\n elif not is_dtype_equal(self.dtype, other.dtype):\n # We can infer that the intersection is empty.\n # assert_can_do_setop ensures that this is not just a mismatched freq\n this = self[:0].astype(\"O\")\n other = other[:0].astype(\"O\")\n return this.intersection(other, sort=sort)\n\n return self._setop(other, sort, opname=\"intersection\")\n\n def difference(self, other, sort=None):\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if self.equals(other):\n # pass an empty PeriodArray with the appropriate dtype\n return type(self)._simple_new(self._data[:0], name=self.name)\n\n if is_object_dtype(other):\n return self.astype(object).difference(other).astype(self.dtype)\n\n elif not is_dtype_equal(self.dtype, other.dtype):\n return self\n\n return self._setop(other, sort, opname=\"difference\")\n\n def _union(self, other, sort):\n if not len(other) or self.equals(other) or 
not len(self):\n return super()._union(other, sort=sort)\n\n # We are called by `union`, which is responsible for this validation\n assert isinstance(other, type(self))\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype(\"O\")\n other = other.astype(\"O\")\n return this._union(other, sort=sort)\n\n return self._setop(other, sort, opname=\"_union\")\n\n # ------------------------------------------------------------------------\n\n def memory_usage(self, deep=False):\n result = super().memory_usage(deep=deep)\n if hasattr(self, \"_cache\") and \"_int64index\" in self._cache:\n result += self._int64index.memory_usage(deep=deep)\n return result\n\n\nPeriodIndex._add_numeric_methods_disabled()\nPeriodIndex._add_logical_methods_disabled()\n\n\ndef period_range(\n start=None, end=None, periods=None, freq=None, name=None\n) -> PeriodIndex:\n \"\"\"\n Return a fixed frequency PeriodIndex.\n\n The day (calendar) is the default frequency.\n\n Parameters\n ----------\n start : str or period-like, default None\n Left bound for generating periods.\n end : str or period-like, default None\n Right bound for generating periods.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, optional\n Frequency alias. By default the freq is taken from `start` or `end`\n if those are Period objects. Otherwise, the default is ``\"D\"`` for\n daily frequency.\n name : str, default None\n Name of the resulting PeriodIndex.\n\n Returns\n -------\n PeriodIndex\n\n Notes\n -----\n Of the three parameters: ``start``, ``end``, and ``periods``, exactly two\n must be specified.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')\n PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',\n '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',\n '2018-01'],\n dtype='period[M]', freq='M')\n\n If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor\n endpoints for a ``PeriodIndex`` with frequency matching that of the\n ``period_range`` constructor.\n\n >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),\n ... end=pd.Period('2017Q2', freq='Q'), freq='M')\n PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],\n dtype='period[M]', freq='M')\n \"\"\"\n if com.count_not_none(start, end, periods) != 2:\n raise ValueError(\n \"Of the three parameters: start, end, and periods, \"\n \"exactly two must be specified\"\n )\n if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):\n freq = \"D\"\n\n data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})\n data = PeriodArray(data, freq=freq)\n return PeriodIndex(data, name=name)\n",
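The row above is the pandas PeriodIndex implementation. As a hedged sketch (the dates and frequencies are illustrative), the constructors and conversions its docstrings describe can be exercised like this:

    import pandas as pd

    # Field-based construction, as in the class docstring example.
    idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
    # PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]', freq='Q-DEC')

    # period_range: exactly two of start/end/periods must be supplied.
    pr = pd.period_range(start='2017-01', end='2017-06', freq='M')

    # asfreq and to_timestamp dispatch to the underlying PeriodArray.
    print(pr.asfreq('D', how='end'))
    print(pr.to_timestamp())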
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, Period, Series, period_range\nimport pandas._testing as tm\nfrom pandas.core.arrays import PeriodArray\n\n\nclass TestSeriesPeriod:\n def setup_method(self, method):\n self.series = Series(period_range(\"2000-01-01\", periods=10, freq=\"D\"))\n\n def test_auto_conversion(self):\n series = Series(list(period_range(\"2000-01-01\", periods=10, freq=\"D\")))\n assert series.dtype == \"Period[D]\"\n\n series = pd.Series(\n [pd.Period(\"2011-01-01\", freq=\"D\"), pd.Period(\"2011-02-01\", freq=\"D\")]\n )\n assert series.dtype == \"Period[D]\"\n\n def test_getitem(self):\n assert self.series[1] == pd.Period(\"2000-01-02\", freq=\"D\")\n\n result = self.series[[2, 4]]\n exp = pd.Series(\n [pd.Period(\"2000-01-03\", freq=\"D\"), pd.Period(\"2000-01-05\", freq=\"D\")],\n index=[2, 4],\n dtype=\"Period[D]\",\n )\n tm.assert_series_equal(result, exp)\n assert result.dtype == \"Period[D]\"\n\n def test_isna(self):\n # GH 13737\n s = Series([pd.Period(\"2011-01\", freq=\"M\"), pd.Period(\"NaT\", freq=\"M\")])\n tm.assert_series_equal(s.isna(), Series([False, True]))\n tm.assert_series_equal(s.notna(), Series([True, False]))\n\n def test_fillna(self):\n # GH 13737\n s = Series([pd.Period(\"2011-01\", freq=\"M\"), pd.Period(\"NaT\", freq=\"M\")])\n\n res = s.fillna(pd.Period(\"2012-01\", freq=\"M\"))\n exp = Series([pd.Period(\"2011-01\", freq=\"M\"), pd.Period(\"2012-01\", freq=\"M\")])\n tm.assert_series_equal(res, exp)\n assert res.dtype == \"Period[M]\"\n\n def test_dropna(self):\n # GH 13737\n s = Series([pd.Period(\"2011-01\", freq=\"M\"), pd.Period(\"NaT\", freq=\"M\")])\n tm.assert_series_equal(s.dropna(), Series([pd.Period(\"2011-01\", freq=\"M\")]))\n\n def test_between(self):\n left, right = self.series[[2, 7]]\n result = self.series.between(left, right)\n expected = (self.series >= left) & (self.series <= right)\n tm.assert_series_equal(result, expected)\n\n # ---------------------------------------------------------------------\n # NaT support\n\n @pytest.mark.xfail(reason=\"PeriodDtype Series not supported yet\")\n def test_NaT_scalar(self):\n series = Series([0, 1000, 2000, pd._libs.iNaT], dtype=\"period[D]\")\n\n val = series[3]\n assert pd.isna(val)\n\n series[2] = val\n assert pd.isna(series[2])\n\n def test_NaT_cast(self):\n result = Series([np.nan]).astype(\"period[D]\")\n expected = Series([pd.NaT], dtype=\"period[D]\")\n tm.assert_series_equal(result, expected)\n\n def test_set_none(self):\n self.series[3] = None\n assert self.series[3] is pd.NaT\n\n self.series[3:5] = None\n assert self.series[4] is pd.NaT\n\n def test_set_nan(self):\n # Do we want to allow this?\n self.series[5] = np.nan\n assert self.series[5] is pd.NaT\n\n self.series[5:7] = np.nan\n assert self.series[6] is pd.NaT\n\n def test_intercept_astype_object(self):\n expected = self.series.astype(\"object\")\n\n df = DataFrame({\"a\": self.series, \"b\": np.random.randn(len(self.series))})\n\n result = df.values.squeeze()\n assert (result[:, 0] == expected.values).all()\n\n df = DataFrame({\"a\": self.series, \"b\": [\"foo\"] * len(self.series)})\n\n result = df.values.squeeze()\n assert (result[:, 0] == expected.values).all()\n\n def test_align_series(self, join_type):\n rng = period_range(\"1/1/2000\", \"1/1/2010\", freq=\"A\")\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n ts.align(ts[::2], join=join_type)\n\n def test_truncate(self):\n # GH 17717\n idx1 = pd.PeriodIndex(\n [pd.Period(\"2017-09-02\"), 
pd.Period(\"2017-09-02\"), pd.Period(\"2017-09-03\")]\n )\n series1 = pd.Series([1, 2, 3], index=idx1)\n result1 = series1.truncate(after=\"2017-09-02\")\n\n expected_idx1 = pd.PeriodIndex(\n [pd.Period(\"2017-09-02\"), pd.Period(\"2017-09-02\")]\n )\n tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1))\n\n idx2 = pd.PeriodIndex(\n [pd.Period(\"2017-09-03\"), pd.Period(\"2017-09-02\"), pd.Period(\"2017-09-03\")]\n )\n series2 = pd.Series([1, 2, 3], index=idx2)\n result2 = series2.sort_index().truncate(after=\"2017-09-02\")\n\n expected_idx2 = pd.PeriodIndex([pd.Period(\"2017-09-02\")])\n tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2))\n\n @pytest.mark.parametrize(\n \"input_vals\",\n [\n [Period(\"2016-01\", freq=\"M\"), Period(\"2016-02\", freq=\"M\")],\n [Period(\"2016-01-01\", freq=\"D\"), Period(\"2016-01-02\", freq=\"D\")],\n [\n Period(\"2016-01-01 00:00:00\", freq=\"H\"),\n Period(\"2016-01-01 01:00:00\", freq=\"H\"),\n ],\n [\n Period(\"2016-01-01 00:00:00\", freq=\"M\"),\n Period(\"2016-01-01 00:01:00\", freq=\"M\"),\n ],\n [\n Period(\"2016-01-01 00:00:00\", freq=\"S\"),\n Period(\"2016-01-01 00:00:01\", freq=\"S\"),\n ],\n ],\n )\n def test_end_time_timevalues(self, input_vals):\n # GH 17157\n # Check that the time part of the Period is adjusted by end_time\n # when using the dt accessor on a Series\n input_vals = PeriodArray._from_sequence(np.asarray(input_vals))\n\n s = Series(input_vals)\n result = s.dt.end_time\n expected = s.apply(lambda x: x.end_time)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"input_vals\", [(\"2001\"), (\"NaT\")])\n def test_to_period(self, input_vals):\n # GH 21205\n expected = Series([input_vals], dtype=\"Period[D]\")\n result = Series([input_vals], dtype=\"datetime64[ns]\").dt.to_period(\"D\")\n tm.assert_series_equal(result, expected)\n",
"import pytest\n\nimport pandas._testing as tm\n\n\[email protected]\ndef datetime_series():\n \"\"\"\n Fixture for Series of floats with DatetimeIndex\n \"\"\"\n s = tm.makeTimeSeries()\n s.name = \"ts\"\n return s\n\n\[email protected]\ndef string_series():\n \"\"\"\n Fixture for Series of floats with Index of unique strings\n \"\"\"\n s = tm.makeStringSeries()\n s.name = \"series\"\n return s\n\n\[email protected]\ndef object_series():\n \"\"\"\n Fixture for Series of dtype object with Index of unique strings\n \"\"\"\n s = tm.makeObjectSeries()\n s.name = \"objects\"\n return s\n",
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, MultiIndex, Series\nimport pandas._testing as tm\nfrom pandas.core.util.hashing import hash_tuples\nfrom pandas.util import hash_array, hash_pandas_object\n\n\[email protected](\n params=[\n Series([1, 2, 3] * 3, dtype=\"int32\"),\n Series([None, 2.5, 3.5] * 3, dtype=\"float32\"),\n Series([\"a\", \"b\", \"c\"] * 3, dtype=\"category\"),\n Series([\"d\", \"e\", \"f\"] * 3),\n Series([True, False, True] * 3),\n Series(pd.date_range(\"20130101\", periods=9)),\n Series(pd.date_range(\"20130101\", periods=9, tz=\"US/Eastern\")),\n Series(pd.timedelta_range(\"2000\", periods=9)),\n ]\n)\ndef series(request):\n return request.param\n\n\[email protected](params=[True, False])\ndef index(request):\n return request.param\n\n\ndef _check_equal(obj, **kwargs):\n \"\"\"\n Check that hashing an objects produces the same value each time.\n\n Parameters\n ----------\n obj : object\n The object to hash.\n kwargs : kwargs\n Keyword arguments to pass to the hashing function.\n \"\"\"\n a = hash_pandas_object(obj, **kwargs)\n b = hash_pandas_object(obj, **kwargs)\n tm.assert_series_equal(a, b)\n\n\ndef _check_not_equal_with_index(obj):\n \"\"\"\n Check the hash of an object with and without its index is not the same.\n\n Parameters\n ----------\n obj : object\n The object to hash.\n \"\"\"\n if not isinstance(obj, Index):\n a = hash_pandas_object(obj, index=True)\n b = hash_pandas_object(obj, index=False)\n\n if len(obj):\n assert not (a == b).all()\n\n\ndef test_consistency():\n # Check that our hash doesn't change because of a mistake\n # in the actual code; this is the ground truth.\n result = hash_pandas_object(Index([\"foo\", \"bar\", \"baz\"]))\n expected = Series(\n np.array(\n [3600424527151052760, 1374399572096150070, 477881037637427054],\n dtype=\"uint64\",\n ),\n index=[\"foo\", \"bar\", \"baz\"],\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_hash_array(series):\n arr = series.values\n tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr))\n\n\[email protected](\n \"arr2\", [np.array([3, 4, \"All\"]), np.array([3, 4, \"All\"], dtype=object)]\n)\ndef test_hash_array_mixed(arr2):\n result1 = hash_array(np.array([\"3\", \"4\", \"All\"]))\n result2 = hash_array(arr2)\n\n tm.assert_numpy_array_equal(result1, result2)\n\n\[email protected](\"val\", [5, \"foo\", pd.Timestamp(\"20130101\")])\ndef test_hash_array_errors(val):\n msg = \"must pass a ndarray-like\"\n with pytest.raises(TypeError, match=msg):\n hash_array(val)\n\n\ndef test_hash_tuples():\n tuples = [(1, \"one\"), (1, \"two\"), (2, \"one\")]\n result = hash_tuples(tuples)\n\n expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values\n tm.assert_numpy_array_equal(result, expected)\n\n result = hash_tuples(tuples[0])\n assert result == expected[0]\n\n\[email protected](\"val\", [5, \"foo\", pd.Timestamp(\"20130101\")])\ndef test_hash_tuples_err(val):\n msg = \"must be convertible to a list-of-tuples\"\n with pytest.raises(TypeError, match=msg):\n hash_tuples(val)\n\n\ndef test_multiindex_unique():\n mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)])\n assert mi.is_unique is True\n\n result = hash_pandas_object(mi)\n assert result.is_unique is True\n\n\ndef test_multiindex_objects():\n mi = MultiIndex(\n levels=[[\"b\", \"d\", \"a\"], [1, 2, 3]],\n codes=[[0, 1, 0, 2], [2, 0, 0, 1]],\n names=[\"col1\", \"col2\"],\n )\n recons = mi._sort_levels_monotonic()\n\n # These are equal.\n assert 
mi.equals(recons)\n assert Index(mi.values).equals(Index(recons.values))\n\n\[email protected](\n \"obj\",\n [\n Series([1, 2, 3]),\n Series([1.0, 1.5, 3.2]),\n Series([1.0, 1.5, np.nan]),\n Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n Series([\"a\", \"b\", \"c\"]),\n Series([\"a\", np.nan, \"c\"]),\n Series([\"a\", None, \"c\"]),\n Series([True, False, True]),\n Series(dtype=object),\n Index([1, 2, 3]),\n Index([True, False, True]),\n DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}),\n DataFrame(),\n tm.makeMissingDataframe(),\n tm.makeMixedDataFrame(),\n tm.makeTimeDataFrame(),\n tm.makeTimeSeries(),\n tm.makeTimedeltaIndex(),\n tm.makePeriodIndex(),\n Series(tm.makePeriodIndex()),\n Series(pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\")),\n MultiIndex.from_product(\n [range(5), [\"foo\", \"bar\", \"baz\"], pd.date_range(\"20130101\", periods=2)]\n ),\n MultiIndex.from_product([pd.CategoricalIndex(list(\"aabc\")), range(3)]),\n ],\n)\ndef test_hash_pandas_object(obj, index):\n _check_equal(obj, index=index)\n _check_not_equal_with_index(obj)\n\n\ndef test_hash_pandas_object2(series, index):\n _check_equal(series, index=index)\n _check_not_equal_with_index(series)\n\n\[email protected](\n \"obj\", [Series([], dtype=\"float64\"), Series([], dtype=\"object\"), Index([])]\n)\ndef test_hash_pandas_empty_object(obj, index):\n # These are by-definition the same with\n # or without the index as the data is empty.\n _check_equal(obj, index=index)\n\n\[email protected](\n \"s1\",\n [\n Series([\"a\", \"b\", \"c\", \"d\"]),\n Series([1000, 2000, 3000, 4000]),\n Series(pd.date_range(0, periods=4)),\n ],\n)\[email protected](\"categorize\", [True, False])\ndef test_categorical_consistency(s1, categorize):\n # see gh-15143\n #\n # Check that categoricals hash consistent with their values,\n # not codes. 
This should work for categoricals of any dtype.\n s2 = s1.astype(\"category\").cat.set_categories(s1)\n s3 = s2.cat.set_categories(list(reversed(s1)))\n\n # These should all hash identically.\n h1 = hash_pandas_object(s1, categorize=categorize)\n h2 = hash_pandas_object(s2, categorize=categorize)\n h3 = hash_pandas_object(s3, categorize=categorize)\n\n tm.assert_series_equal(h1, h2)\n tm.assert_series_equal(h1, h3)\n\n\ndef test_categorical_with_nan_consistency():\n c = pd.Categorical.from_codes(\n [-1, 0, 1, 2, 3, 4], categories=pd.date_range(\"2012-01-01\", periods=5, name=\"B\")\n )\n expected = hash_array(c, categorize=False)\n\n c = pd.Categorical.from_codes([-1, 0], categories=[pd.Timestamp(\"2012-01-01\")])\n result = hash_array(c, categorize=False)\n\n assert result[0] in expected\n assert result[1] in expected\n\n\[email protected](\"obj\", [pd.Timestamp(\"20130101\")])\ndef test_pandas_errors(obj):\n msg = \"Unexpected type for hashing\"\n with pytest.raises(TypeError, match=msg):\n hash_pandas_object(obj)\n\n\ndef test_hash_keys():\n # Using different hash keys, should have\n # different hashes for the same data.\n #\n # This only matters for object dtypes.\n obj = Series(list(\"abc\"))\n\n a = hash_pandas_object(obj, hash_key=\"9876543210123456\")\n b = hash_pandas_object(obj, hash_key=\"9876543210123465\")\n\n assert (a != b).all()\n\n\ndef test_invalid_key():\n # This only matters for object dtypes.\n msg = \"key should be a 16-byte string encoded\"\n\n with pytest.raises(ValueError, match=msg):\n hash_pandas_object(Series(list(\"abc\")), hash_key=\"foo\")\n\n\ndef test_already_encoded(index):\n # If already encoded, then ok.\n obj = Series(list(\"abc\")).str.encode(\"utf8\")\n _check_equal(obj, index=index)\n\n\ndef test_alternate_encoding(index):\n obj = Series(list(\"abc\"))\n _check_equal(obj, index=index, encoding=\"ascii\")\n\n\[email protected](\"l_exp\", range(8))\[email protected](\"l_add\", [0, 1])\ndef test_same_len_hash_collisions(l_exp, l_add):\n length = 2 ** (l_exp + 8) + l_add\n s = tm.rands_array(length, 2)\n\n result = hash_array(s, \"utf8\")\n assert not result[0] == result[1]\n\n\ndef test_hash_collisions():\n # Hash collisions are bad.\n #\n # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726\n hashes = [\n \"Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9\", # noqa: E501\n \"Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe\", # noqa: E501\n ]\n\n # These should be different.\n result1 = hash_array(np.asarray(hashes[0:1], dtype=object), \"utf8\")\n expected1 = np.array([14963968704024874985], dtype=np.uint64)\n 
tm.assert_numpy_array_equal(result1, expected1)\n\n result2 = hash_array(np.asarray(hashes[1:2], dtype=object), \"utf8\")\n expected2 = np.array([16428432627716348016], dtype=np.uint64)\n tm.assert_numpy_array_equal(result2, expected2)\n\n result = hash_array(np.asarray(hashes, dtype=object), \"utf8\")\n tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))\n\n\ndef test_hash_with_tuple():\n # GH#28969 array containing a tuple raises on call to arr.astype(str)\n # apparently a numpy bug github.com/numpy/numpy/issues/9441\n\n df = pd.DataFrame({\"data\": [tuple(\"1\"), tuple(\"2\")]})\n result = hash_pandas_object(df)\n expected = pd.Series([10345501319357378243, 8331063931016360761], dtype=np.uint64)\n tm.assert_series_equal(result, expected)\n\n df2 = pd.DataFrame({\"data\": [tuple([1]), tuple([2])]})\n result = hash_pandas_object(df2)\n expected = pd.Series([9408946347443669104, 3278256261030523334], dtype=np.uint64)\n tm.assert_series_equal(result, expected)\n\n # require that the elements of such tuples are themselves hashable\n\n df3 = pd.DataFrame({\"data\": [tuple([1, []]), tuple([2, {}])]})\n with pytest.raises(TypeError, match=\"unhashable type: 'list'\"):\n hash_pandas_object(df3)\n\n\ndef test_hash_object_none_key():\n # https://github.com/pandas-dev/pandas/issues/30887\n result = pd.util.hash_pandas_object(pd.Series([\"a\", \"b\"]), hash_key=None)\n expected = pd.Series([4578374827886788867, 17338122309987883691], dtype=\"uint64\")\n tm.assert_series_equal(result, expected)\n",
"from collections import abc\nimport functools\nfrom io import BytesIO, StringIO\nfrom itertools import islice\nimport os\nfrom typing import Any, Callable, Optional, Type\n\nimport numpy as np\n\nimport pandas._libs.json as json\nfrom pandas._libs.tslibs import iNaT\nfrom pandas._typing import JSONSerializable\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import deprecate_kwarg, deprecate_nonkeyword_arguments\n\nfrom pandas.core.dtypes.common import ensure_str, is_period_dtype\n\nfrom pandas import DataFrame, MultiIndex, Series, isna, to_datetime\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.reshape.concat import concat\n\nfrom pandas.io.common import get_filepath_or_buffer, get_handle, infer_compression\nfrom pandas.io.json._normalize import convert_to_line_delimits\nfrom pandas.io.json._table_schema import build_table_schema, parse_table_schema\nfrom pandas.io.parsers import _validate_integer\n\nloads = json.loads\ndumps = json.dumps\n\nTABLE_SCHEMA_VERSION = \"0.20.0\"\n\n\n# interface to/from\ndef to_json(\n path_or_buf,\n obj,\n orient: Optional[str] = None,\n date_format: str = \"epoch\",\n double_precision: int = 10,\n force_ascii: bool = True,\n date_unit: str = \"ms\",\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n lines: bool = False,\n compression: Optional[str] = \"infer\",\n index: bool = True,\n indent: int = 0,\n):\n\n if not index and orient not in [\"split\", \"table\"]:\n raise ValueError(\n \"'index=False' is only valid when 'orient' is 'split' or 'table'\"\n )\n\n if path_or_buf is not None:\n path_or_buf, _, _, _ = get_filepath_or_buffer(\n path_or_buf, compression=compression, mode=\"w\"\n )\n\n if lines and orient != \"records\":\n raise ValueError(\"'lines' keyword only valid when 'orient' is records\")\n\n if orient == \"table\" and isinstance(obj, Series):\n obj = obj.to_frame(name=obj.name or \"values\")\n\n writer: Type[\"Writer\"]\n if orient == \"table\" and isinstance(obj, DataFrame):\n writer = JSONTableWriter\n elif isinstance(obj, Series):\n writer = SeriesWriter\n elif isinstance(obj, DataFrame):\n writer = FrameWriter\n else:\n raise NotImplementedError(\"'obj' should be a Series or a DataFrame\")\n\n s = writer(\n obj,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n ensure_ascii=force_ascii,\n date_unit=date_unit,\n default_handler=default_handler,\n index=index,\n indent=indent,\n ).write()\n\n if lines:\n s = convert_to_line_delimits(s)\n\n if isinstance(path_or_buf, str):\n fh, handles = get_handle(path_or_buf, \"w\", compression=compression)\n try:\n fh.write(s)\n finally:\n fh.close()\n elif path_or_buf is None:\n return s\n else:\n path_or_buf.write(s)\n\n\nclass Writer:\n def __init__(\n self,\n obj,\n orient: Optional[str],\n date_format: str,\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n index: bool,\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n indent: int = 0,\n ):\n self.obj = obj\n\n if orient is None:\n orient = self._default_orient # type: ignore\n\n self.orient = orient\n self.date_format = date_format\n self.double_precision = double_precision\n self.ensure_ascii = ensure_ascii\n self.date_unit = date_unit\n self.default_handler = default_handler\n self.index = index\n self.indent = indent\n\n self.is_copy = None\n self._format_axes()\n\n def _format_axes(self):\n raise AbstractMethodError(self)\n\n def write(self):\n return self._write(\n self.obj,\n 
self.orient,\n self.double_precision,\n self.ensure_ascii,\n self.date_unit,\n self.date_format == \"iso\",\n self.default_handler,\n self.indent,\n )\n\n def _write(\n self,\n obj,\n orient: Optional[str],\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n iso_dates: bool,\n default_handler: Optional[Callable[[Any], JSONSerializable]],\n indent: int,\n ):\n return dumps(\n obj,\n orient=orient,\n double_precision=double_precision,\n ensure_ascii=ensure_ascii,\n date_unit=date_unit,\n iso_dates=iso_dates,\n default_handler=default_handler,\n indent=indent,\n )\n\n\nclass SeriesWriter(Writer):\n _default_orient = \"index\"\n\n def _format_axes(self):\n if not self.obj.index.is_unique and self.orient == \"index\":\n raise ValueError(f\"Series index must be unique for orient='{self.orient}'\")\n\n def _write(\n self,\n obj,\n orient: Optional[str],\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n iso_dates: bool,\n default_handler: Optional[Callable[[Any], JSONSerializable]],\n indent: int,\n ):\n if not self.index and orient == \"split\":\n obj = {\"name\": obj.name, \"data\": obj.values}\n return super()._write(\n obj,\n orient,\n double_precision,\n ensure_ascii,\n date_unit,\n iso_dates,\n default_handler,\n indent,\n )\n\n\nclass FrameWriter(Writer):\n _default_orient = \"columns\"\n\n def _format_axes(self):\n \"\"\"\n Try to format axes if they are datelike.\n \"\"\"\n if not self.obj.index.is_unique and self.orient in (\"index\", \"columns\"):\n raise ValueError(\n f\"DataFrame index must be unique for orient='{self.orient}'.\"\n )\n if not self.obj.columns.is_unique and self.orient in (\n \"index\",\n \"columns\",\n \"records\",\n ):\n raise ValueError(\n f\"DataFrame columns must be unique for orient='{self.orient}'.\"\n )\n\n def _write(\n self,\n obj,\n orient: Optional[str],\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n iso_dates: bool,\n default_handler: Optional[Callable[[Any], JSONSerializable]],\n indent: int,\n ):\n if not self.index and orient == \"split\":\n obj = obj.to_dict(orient=\"split\")\n del obj[\"index\"]\n return super()._write(\n obj,\n orient,\n double_precision,\n ensure_ascii,\n date_unit,\n iso_dates,\n default_handler,\n indent,\n )\n\n\nclass JSONTableWriter(FrameWriter):\n _default_orient = \"records\"\n\n def __init__(\n self,\n obj,\n orient: Optional[str],\n date_format: str,\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n index: bool,\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n indent: int = 0,\n ):\n \"\"\"\n Adds a `schema` attribute with the Table Schema, resets\n the index (can't do in caller, because the schema inference needs\n to know what the index is, forces orient to records, and forces\n date_format to 'iso'.\n \"\"\"\n super().__init__(\n obj,\n orient,\n date_format,\n double_precision,\n ensure_ascii,\n date_unit,\n index,\n default_handler=default_handler,\n indent=indent,\n )\n\n if date_format != \"iso\":\n msg = (\n \"Trying to write with `orient='table'` and \"\n f\"`date_format='{date_format}'`. 
Table Schema requires dates \"\n \"to be formatted with `date_format='iso'`\"\n )\n raise ValueError(msg)\n\n self.schema = build_table_schema(obj, index=self.index)\n\n # NotImplemented on a column MultiIndex\n if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):\n raise NotImplementedError(\"orient='table' is not supported for MultiIndex\")\n\n # TODO: Do this timedelta properly in objToJSON.c See GH #15137\n if (\n (obj.ndim == 1)\n and (obj.name in set(obj.index.names))\n or len(obj.columns & obj.index.names)\n ):\n msg = \"Overlapping names between the index and columns\"\n raise ValueError(msg)\n\n obj = obj.copy()\n timedeltas = obj.select_dtypes(include=[\"timedelta\"]).columns\n if len(timedeltas):\n obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())\n # Convert PeriodIndex to datetimes before serializing\n if is_period_dtype(obj.index.dtype):\n obj.index = obj.index.to_timestamp()\n\n # exclude index from obj if index=False\n if not self.index:\n self.obj = obj.reset_index(drop=True)\n else:\n self.obj = obj.reset_index(drop=False)\n self.date_format = \"iso\"\n self.orient = \"records\"\n self.index = index\n\n def _write(\n self,\n obj,\n orient,\n double_precision,\n ensure_ascii,\n date_unit,\n iso_dates,\n default_handler,\n indent,\n ):\n table_obj = {\"schema\": self.schema, \"data\": obj}\n serialized = super()._write(\n table_obj,\n orient,\n double_precision,\n ensure_ascii,\n date_unit,\n iso_dates,\n default_handler,\n indent,\n )\n\n return serialized\n\n\n@deprecate_kwarg(old_arg_name=\"numpy\", new_arg_name=None)\n@deprecate_nonkeyword_arguments(\n version=\"2.0\", allowed_args=[\"path_or_buf\"], stacklevel=3\n)\ndef read_json(\n path_or_buf=None,\n orient=None,\n typ=\"frame\",\n dtype=None,\n convert_axes=None,\n convert_dates=True,\n keep_default_dates: bool = True,\n numpy: bool = False,\n precise_float: bool = False,\n date_unit=None,\n encoding=None,\n lines: bool = False,\n chunksize: Optional[int] = None,\n compression=\"infer\",\n nrows: Optional[int] = None,\n):\n \"\"\"\n Convert a JSON string to pandas object.\n\n Parameters\n ----------\n path_or_buf : a valid JSON str, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be:\n ``file://localhost/path/to/table.json``.\n\n If you want to pass in a path object, pandas accepts any\n ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method,\n such as a file handler (e.g. via builtin ``open`` function)\n or ``StringIO``.\n orient : str\n Indication of expected JSON string format.\n Compatible JSON strings can be produced by ``to_json()`` with a\n corresponding orient value.\n The set of possible orients is:\n\n - ``'split'`` : dict like\n ``{index -> [index], columns -> [columns], data -> [values]}``\n - ``'records'`` : list like\n ``[{column -> value}, ... 
, {column -> value}]``\n - ``'index'`` : dict like ``{index -> {column -> value}}``\n - ``'columns'`` : dict like ``{column -> {index -> value}}``\n - ``'values'`` : just the values array\n\n The allowed and default values depend on the value\n of the `typ` parameter.\n\n * when ``typ == 'series'``,\n\n - allowed orients are ``{'split','records','index'}``\n - default is ``'index'``\n - The Series index must be unique for orient ``'index'``.\n\n * when ``typ == 'frame'``,\n\n - allowed orients are ``{'split','records','index',\n 'columns','values', 'table'}``\n - default is ``'columns'``\n - The DataFrame index must be unique for orients ``'index'`` and\n ``'columns'``.\n - The DataFrame columns must be unique for orients ``'index'``,\n ``'columns'``, and ``'records'``.\n\n .. versionadded:: 0.23.0\n 'table' as an allowed value for the ``orient`` argument\n\n typ : {'frame', 'series'}, default 'frame'\n The type of object to recover.\n\n dtype : bool or dict, default None\n If True, infer dtypes; if a dict of column to dtype, then use those;\n if False, then don't infer dtypes at all, applies only to the data.\n\n For all ``orient`` values except ``'table'``, default is True.\n\n .. versionchanged:: 0.25.0\n\n Not applicable for ``orient='table'``.\n\n convert_axes : bool, default None\n Try to convert the axes to the proper dtypes.\n\n For all ``orient`` values except ``'table'``, default is True.\n\n .. versionchanged:: 0.25.0\n\n Not applicable for ``orient='table'``.\n\n convert_dates : bool or list of str, default True\n If True then default datelike columns may be converted (depending on\n keep_default_dates).\n If False, no dates will be converted.\n If a list of column names, then those columns will be converted and\n default datelike columns may also be converted (depending on\n keep_default_dates).\n\n keep_default_dates : bool, default True\n If parsing dates (convert_dates is not False), then try to parse the\n default datelike columns.\n A column label is datelike if\n\n * it ends with ``'_at'``,\n\n * it ends with ``'_time'``,\n\n * it begins with ``'timestamp'``,\n\n * it is ``'modified'``, or\n\n * it is ``'date'``.\n\n numpy : bool, default False\n Direct decoding to numpy arrays. Supports numeric data only, but\n non-numeric column and index labels are supported. Note also that the\n JSON ordering MUST be the same for each term if numpy=True.\n\n .. deprecated:: 1.0.0\n\n precise_float : bool, default False\n Set to enable usage of higher precision (strtod) function when\n decoding string to double values. Default (False) is to use fast but\n less precise builtin functionality.\n\n date_unit : str, default None\n The timestamp unit to detect if converting dates. 
The default behaviour\n is to try and detect the correct precision, but if this is not desired\n then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,\n milliseconds, microseconds or nanoseconds respectively.\n\n encoding : str, default is 'utf-8'\n The encoding to use to decode py3 bytes.\n\n lines : bool, default False\n Read the file as a json object per line.\n\n chunksize : int, optional\n Return JsonReader object for iteration.\n See the `line-delimited json docs\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_\n for more information on ``chunksize``.\n This can only be passed if `lines=True`.\n If this is None, the file will be read into memory all at once.\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n For on-the-fly decompression of on-disk data. If 'infer', then use\n gzip, bz2, zip or xz if path_or_buf is a string ending in\n '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression\n otherwise. If using 'zip', the ZIP file must contain only one data\n file to be read in. Set to None for no decompression.\n\n nrows : int, optional\n The number of lines from the line-delimited jsonfile that has to be read.\n This can only be passed if `lines=True`.\n If this is None, all the rows will be returned.\n\n .. versionadded:: 1.1\n\n Returns\n -------\n Series or DataFrame\n The type returned depends on the value of `typ`.\n\n See Also\n --------\n DataFrame.to_json : Convert a DataFrame to a JSON string.\n Series.to_json : Convert a Series to a JSON string.\n\n Notes\n -----\n Specific to ``orient='table'``, if a :class:`DataFrame` with a literal\n :class:`Index` name of `index` gets written with :func:`to_json`, the\n subsequent read operation will incorrectly set the :class:`Index` name to\n ``None``. This is because `index` is also used by :func:`DataFrame.to_json`\n to denote a missing :class:`Index` name, and the subsequent\n :func:`read_json` operation cannot distinguish between the two. The same\n limitation is encountered with a :class:`MultiIndex` and any names\n beginning with ``'level_'``.\n\n Examples\n --------\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... 
columns=['col 1', 'col 2'])\n\n Encoding/decoding a Dataframe using ``'split'`` formatted JSON:\n\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n >>> pd.read_json(_, orient='split')\n col 1 col 2\n row 1 a b\n row 2 c d\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n >>> pd.read_json(_, orient='index')\n col 1 col 2\n row 1 a b\n row 2 c d\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n >>> pd.read_json(_, orient='records')\n col 1 col 2\n 0 a b\n 1 c d\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n if orient == \"table\" and dtype:\n raise ValueError(\"cannot pass both dtype and orient='table'\")\n if orient == \"table\" and convert_axes:\n raise ValueError(\"cannot pass both convert_axes and orient='table'\")\n\n if dtype is None and orient != \"table\":\n dtype = True\n if convert_axes is None and orient != \"table\":\n convert_axes = True\n if encoding is None:\n encoding = \"utf-8\"\n\n compression = infer_compression(path_or_buf, compression)\n filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(\n path_or_buf, encoding=encoding, compression=compression\n )\n\n json_reader = JsonReader(\n filepath_or_buffer,\n orient=orient,\n typ=typ,\n dtype=dtype,\n convert_axes=convert_axes,\n convert_dates=convert_dates,\n keep_default_dates=keep_default_dates,\n numpy=numpy,\n precise_float=precise_float,\n date_unit=date_unit,\n encoding=encoding,\n lines=lines,\n chunksize=chunksize,\n compression=compression,\n nrows=nrows,\n )\n\n if chunksize:\n return json_reader\n\n result = json_reader.read()\n if should_close:\n filepath_or_buffer.close()\n\n return result\n\n\nclass JsonReader(abc.Iterator):\n \"\"\"\n JsonReader provides an interface for reading in a JSON file.\n\n If initialized with ``lines=True`` and ``chunksize``, can be iterated over\n ``chunksize`` lines at a time. 
Otherwise, calling ``read`` reads in the\n whole document.\n \"\"\"\n\n def __init__(\n self,\n filepath_or_buffer,\n orient,\n typ,\n dtype,\n convert_axes,\n convert_dates,\n keep_default_dates: bool,\n numpy: bool,\n precise_float: bool,\n date_unit,\n encoding,\n lines: bool,\n chunksize: Optional[int],\n compression,\n nrows: Optional[int],\n ):\n\n self.orient = orient\n self.typ = typ\n self.dtype = dtype\n self.convert_axes = convert_axes\n self.convert_dates = convert_dates\n self.keep_default_dates = keep_default_dates\n self.numpy = numpy\n self.precise_float = precise_float\n self.date_unit = date_unit\n self.encoding = encoding\n self.compression = compression\n self.lines = lines\n self.chunksize = chunksize\n self.nrows_seen = 0\n self.should_close = False\n self.nrows = nrows\n\n if self.chunksize is not None:\n self.chunksize = _validate_integer(\"chunksize\", self.chunksize, 1)\n if not self.lines:\n raise ValueError(\"chunksize can only be passed if lines=True\")\n if self.nrows is not None:\n self.nrows = _validate_integer(\"nrows\", self.nrows, 0)\n if not self.lines:\n raise ValueError(\"nrows can only be passed if lines=True\")\n\n data = self._get_data_from_filepath(filepath_or_buffer)\n self.data = self._preprocess_data(data)\n\n def _preprocess_data(self, data):\n \"\"\"\n At this point, the data either has a `read` attribute (e.g. a file\n object or a StringIO) or is a string that is a JSON document.\n\n If self.chunksize, we prepare the data for the `__next__` method.\n Otherwise, we read it into memory for the `read` method.\n \"\"\"\n if hasattr(data, \"read\") and (not self.chunksize or not self.nrows):\n data = data.read()\n if not hasattr(data, \"read\") and (self.chunksize or self.nrows):\n data = StringIO(data)\n\n return data\n\n def _get_data_from_filepath(self, filepath_or_buffer):\n \"\"\"\n The function read_json accepts three input types:\n 1. filepath (string-like)\n 2. file-like object (e.g. open file object, StringIO)\n 3. 
JSON string\n\n This method turns (1) into (2) to simplify the rest of the processing.\n It returns input types (2) and (3) unchanged.\n \"\"\"\n data = filepath_or_buffer\n\n exists = False\n if isinstance(data, str):\n try:\n exists = os.path.exists(filepath_or_buffer)\n # gh-5874: if the filepath is too long will raise here\n except (TypeError, ValueError):\n pass\n\n if exists or self.compression is not None:\n data, _ = get_handle(\n filepath_or_buffer,\n \"r\",\n encoding=self.encoding,\n compression=self.compression,\n )\n self.should_close = True\n self.open_stream = data\n\n if isinstance(data, BytesIO):\n data = data.getvalue().decode()\n\n return data\n\n def _combine_lines(self, lines) -> str:\n \"\"\"\n Combines a list of JSON objects into one JSON object.\n \"\"\"\n lines = filter(None, map(lambda x: x.strip(), lines))\n return \"[\" + \",\".join(lines) + \"]\"\n\n def read(self):\n \"\"\"\n Read the whole JSON input into a pandas object.\n \"\"\"\n if self.lines:\n if self.chunksize:\n obj = concat(self)\n elif self.nrows:\n lines = list(islice(self.data, self.nrows))\n lines_json = self._combine_lines(lines)\n obj = self._get_object_parser(lines_json)\n else:\n data = ensure_str(self.data)\n data = data.split(\"\\n\")\n obj = self._get_object_parser(self._combine_lines(data))\n else:\n obj = self._get_object_parser(self.data)\n self.close()\n return obj\n\n def _get_object_parser(self, json):\n \"\"\"\n Parses a json document into a pandas object.\n \"\"\"\n typ = self.typ\n dtype = self.dtype\n kwargs = {\n \"orient\": self.orient,\n \"dtype\": self.dtype,\n \"convert_axes\": self.convert_axes,\n \"convert_dates\": self.convert_dates,\n \"keep_default_dates\": self.keep_default_dates,\n \"numpy\": self.numpy,\n \"precise_float\": self.precise_float,\n \"date_unit\": self.date_unit,\n }\n obj = None\n if typ == \"frame\":\n obj = FrameParser(json, **kwargs).parse()\n\n if typ == \"series\" or obj is None:\n if not isinstance(dtype, bool):\n kwargs[\"dtype\"] = dtype\n obj = SeriesParser(json, **kwargs).parse()\n\n return obj\n\n def close(self):\n \"\"\"\n If we opened a stream earlier, in _get_data_from_filepath, we should\n close it.\n\n If an open stream or file was passed, we leave it open.\n \"\"\"\n if self.should_close:\n try:\n self.open_stream.close()\n except (IOError, AttributeError):\n pass\n\n def __next__(self):\n if self.nrows:\n if self.nrows_seen >= self.nrows:\n self.close()\n raise StopIteration\n\n lines = list(islice(self.data, self.chunksize))\n if lines:\n lines_json = self._combine_lines(lines)\n obj = self._get_object_parser(lines_json)\n\n # Make sure that the returned objects have the right index.\n obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))\n self.nrows_seen += len(obj)\n\n return obj\n\n self.close()\n raise StopIteration\n\n\nclass Parser:\n\n _STAMP_UNITS = (\"s\", \"ms\", \"us\", \"ns\")\n _MIN_STAMPS = {\n \"s\": 31536000,\n \"ms\": 31536000000,\n \"us\": 31536000000000,\n \"ns\": 31536000000000000,\n }\n\n def __init__(\n self,\n json,\n orient,\n dtype=None,\n convert_axes=True,\n convert_dates=True,\n keep_default_dates=False,\n numpy=False,\n precise_float=False,\n date_unit=None,\n ):\n self.json = json\n\n if orient is None:\n orient = self._default_orient\n self.orient = orient\n\n self.dtype = dtype\n\n if orient == \"split\":\n numpy = False\n\n if date_unit is not None:\n date_unit = date_unit.lower()\n if date_unit not in self._STAMP_UNITS:\n raise ValueError(f\"date_unit must be one of {self._STAMP_UNITS}\")\n 
self.min_stamp = self._MIN_STAMPS[date_unit]\n else:\n self.min_stamp = self._MIN_STAMPS[\"s\"]\n\n self.numpy = numpy\n self.precise_float = precise_float\n self.convert_axes = convert_axes\n self.convert_dates = convert_dates\n self.date_unit = date_unit\n self.keep_default_dates = keep_default_dates\n self.obj = None\n\n def check_keys_split(self, decoded):\n \"\"\"\n Checks that dict has only the appropriate keys for orient='split'.\n \"\"\"\n bad_keys = set(decoded.keys()).difference(set(self._split_keys))\n if bad_keys:\n bad_keys = \", \".join(bad_keys)\n raise ValueError(f\"JSON data had unexpected key(s): {bad_keys}\")\n\n def parse(self):\n\n # try numpy\n numpy = self.numpy\n if numpy:\n self._parse_numpy()\n\n else:\n self._parse_no_numpy()\n\n if self.obj is None:\n return None\n if self.convert_axes:\n self._convert_axes()\n self._try_convert_types()\n return self.obj\n\n def _convert_axes(self):\n \"\"\"\n Try to convert axes.\n \"\"\"\n for axis_name in self.obj._AXIS_ORDERS:\n new_axis, result = self._try_convert_data(\n name=axis_name,\n data=self.obj._get_axis(axis_name),\n use_dtypes=False,\n convert_dates=True,\n )\n if result:\n setattr(self.obj, axis_name, new_axis)\n\n def _try_convert_types(self):\n raise AbstractMethodError(self)\n\n def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):\n \"\"\"\n Try to parse a ndarray like into a column by inferring dtype.\n \"\"\"\n # don't try to coerce, unless a force conversion\n if use_dtypes:\n if not self.dtype:\n return data, False\n elif self.dtype is True:\n pass\n else:\n # dtype to force\n dtype = (\n self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype\n )\n if dtype is not None:\n try:\n dtype = np.dtype(dtype)\n return data.astype(dtype), True\n except (TypeError, ValueError):\n return data, False\n\n if convert_dates:\n new_data, result = self._try_convert_to_date(data)\n if result:\n return new_data, True\n\n result = False\n\n if data.dtype == \"object\":\n\n # try float\n try:\n data = data.astype(\"float64\")\n result = True\n except (TypeError, ValueError):\n pass\n\n if data.dtype.kind == \"f\":\n\n if data.dtype != \"float64\":\n\n # coerce floats to 64\n try:\n data = data.astype(\"float64\")\n result = True\n except (TypeError, ValueError):\n pass\n\n # don't coerce 0-len data\n if len(data) and (data.dtype == \"float\" or data.dtype == \"object\"):\n\n # coerce ints if we can\n try:\n new_data = data.astype(\"int64\")\n if (new_data == data).all():\n data = new_data\n result = True\n except (TypeError, ValueError, OverflowError):\n pass\n\n # coerce ints to 64\n if data.dtype == \"int\":\n\n # coerce floats to 64\n try:\n data = data.astype(\"int64\")\n result = True\n except (TypeError, ValueError):\n pass\n\n return data, result\n\n def _try_convert_to_date(self, data):\n \"\"\"\n Try to parse a ndarray like into a date column.\n\n Try to coerce object in epoch/iso formats and integer/float in epoch\n formats. 
Return a boolean if parsing was successful.\n \"\"\"\n # no conversion on empty\n if not len(data):\n return data, False\n\n new_data = data\n if new_data.dtype == \"object\":\n try:\n new_data = data.astype(\"int64\")\n except (TypeError, ValueError, OverflowError):\n pass\n\n # ignore numbers that are out of range\n if issubclass(new_data.dtype.type, np.number):\n in_range = (\n isna(new_data._values)\n | (new_data > self.min_stamp)\n | (new_data._values == iNaT)\n )\n if not in_range.all():\n return data, False\n\n date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS\n for date_unit in date_units:\n try:\n new_data = to_datetime(new_data, errors=\"raise\", unit=date_unit)\n except (ValueError, OverflowError, TypeError):\n continue\n return new_data, True\n return data, False\n\n def _try_convert_dates(self):\n raise AbstractMethodError(self)\n\n\nclass SeriesParser(Parser):\n _default_orient = \"index\"\n _split_keys = (\"name\", \"index\", \"data\")\n\n def _parse_no_numpy(self):\n data = loads(self.json, precise_float=self.precise_float)\n\n if self.orient == \"split\":\n decoded = {str(k): v for k, v in data.items()}\n self.check_keys_split(decoded)\n self.obj = create_series_with_explicit_dtype(**decoded)\n else:\n self.obj = create_series_with_explicit_dtype(data, dtype_if_empty=object)\n\n def _parse_numpy(self):\n load_kwargs = {\n \"dtype\": None,\n \"numpy\": True,\n \"precise_float\": self.precise_float,\n }\n if self.orient in [\"columns\", \"index\"]:\n load_kwargs[\"labelled\"] = True\n loads_ = functools.partial(loads, **load_kwargs)\n data = loads_(self.json)\n\n if self.orient == \"split\":\n decoded = {str(k): v for k, v in data.items()}\n self.check_keys_split(decoded)\n self.obj = create_series_with_explicit_dtype(**decoded)\n elif self.orient in [\"columns\", \"index\"]:\n self.obj = create_series_with_explicit_dtype(*data, dtype_if_empty=object)\n else:\n self.obj = create_series_with_explicit_dtype(data, dtype_if_empty=object)\n\n def _try_convert_types(self):\n if self.obj is None:\n return\n obj, result = self._try_convert_data(\n \"data\", self.obj, convert_dates=self.convert_dates\n )\n if result:\n self.obj = obj\n\n\nclass FrameParser(Parser):\n _default_orient = \"columns\"\n _split_keys = (\"columns\", \"index\", \"data\")\n\n def _parse_numpy(self):\n\n json = self.json\n orient = self.orient\n\n if orient == \"columns\":\n args = loads(\n json,\n dtype=None,\n numpy=True,\n labelled=True,\n precise_float=self.precise_float,\n )\n if len(args):\n args = (args[0].T, args[2], args[1])\n self.obj = DataFrame(*args)\n elif orient == \"split\":\n decoded = loads(\n json, dtype=None, numpy=True, precise_float=self.precise_float\n )\n decoded = {str(k): v for k, v in decoded.items()}\n self.check_keys_split(decoded)\n self.obj = DataFrame(**decoded)\n elif orient == \"values\":\n self.obj = DataFrame(\n loads(json, dtype=None, numpy=True, precise_float=self.precise_float)\n )\n else:\n self.obj = DataFrame(\n *loads(\n json,\n dtype=None,\n numpy=True,\n labelled=True,\n precise_float=self.precise_float,\n )\n )\n\n def _parse_no_numpy(self):\n\n json = self.json\n orient = self.orient\n\n if orient == \"columns\":\n self.obj = DataFrame(\n loads(json, precise_float=self.precise_float), dtype=None\n )\n elif orient == \"split\":\n decoded = {\n str(k): v\n for k, v in loads(json, precise_float=self.precise_float).items()\n }\n self.check_keys_split(decoded)\n self.obj = DataFrame(dtype=None, **decoded)\n elif orient == \"index\":\n self.obj = 
DataFrame.from_dict(\n loads(json, precise_float=self.precise_float),\n dtype=None,\n orient=\"index\",\n )\n elif orient == \"table\":\n self.obj = parse_table_schema(json, precise_float=self.precise_float)\n else:\n self.obj = DataFrame(\n loads(json, precise_float=self.precise_float), dtype=None\n )\n\n def _process_converter(self, f, filt=None):\n \"\"\"\n Take a conversion function and possibly recreate the frame.\n \"\"\"\n if filt is None:\n filt = lambda col, c: True\n\n needs_new_obj = False\n new_obj = dict()\n for i, (col, c) in enumerate(self.obj.items()):\n if filt(col, c):\n new_data, result = f(col, c)\n if result:\n c = new_data\n needs_new_obj = True\n new_obj[i] = c\n\n if needs_new_obj:\n\n # possibly handle dup columns\n new_obj = DataFrame(new_obj, index=self.obj.index)\n new_obj.columns = self.obj.columns\n self.obj = new_obj\n\n def _try_convert_types(self):\n if self.obj is None:\n return\n if self.convert_dates:\n self._try_convert_dates()\n\n self._process_converter(\n lambda col, c: self._try_convert_data(col, c, convert_dates=False)\n )\n\n def _try_convert_dates(self):\n if self.obj is None:\n return\n\n # our columns to parse\n convert_dates = self.convert_dates\n if convert_dates is True:\n convert_dates = []\n convert_dates = set(convert_dates)\n\n def is_ok(col) -> bool:\n \"\"\"\n Return if this col is ok to try for a date parse.\n \"\"\"\n if not isinstance(col, str):\n return False\n\n col_lower = col.lower()\n if (\n col_lower.endswith(\"_at\")\n or col_lower.endswith(\"_time\")\n or col_lower == \"modified\"\n or col_lower == \"date\"\n or col_lower == \"datetime\"\n or col_lower.startswith(\"timestamp\")\n ):\n return True\n return False\n\n self._process_converter(\n lambda col, c: self._try_convert_to_date(c),\n lambda col, c: (\n (self.keep_default_dates and is_ok(col)) or col in convert_dates\n ),\n )\n",
"# Colored log, requires Python 2.3 or up.\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nfrom distutils.log import *\nfrom distutils.log import Log as old_Log\nfrom distutils.log import _global_log\n\nif sys.version_info[0] < 3:\n from .misc_util import (red_text, default_text, cyan_text, green_text,\n is_sequence, is_string)\nelse:\n from numpy.distutils.misc_util import (red_text, default_text, cyan_text,\n green_text, is_sequence, is_string)\n\n\ndef _fix_args(args,flag=1):\n if is_string(args):\n return args.replace('%', '%%')\n if flag and is_sequence(args):\n return tuple([_fix_args(a, flag=0) for a in args])\n return args\n\n\nclass Log(old_Log):\n def _log(self, level, msg, args):\n if level >= self.threshold:\n if args:\n msg = msg % _fix_args(args)\n if 0:\n if msg.startswith('copying ') and msg.find(' -> ') != -1:\n return\n if msg.startswith('byte-compiling '):\n return\n print(_global_color_map[level](msg))\n sys.stdout.flush()\n\n def good(self, msg, *args):\n \"\"\"\n If we log WARN messages, log this message as a 'nice' anti-warn\n message.\n\n \"\"\"\n if WARN >= self.threshold:\n if args:\n print(green_text(msg % _fix_args(args)))\n else:\n print(green_text(msg))\n sys.stdout.flush()\n\n\n_global_log.__class__ = Log\n\ngood = _global_log.good\n\ndef set_threshold(level, force=False):\n prev_level = _global_log.threshold\n if prev_level > DEBUG or force:\n # If we're running at DEBUG, don't change the threshold, as there's\n # likely a good reason why we're running at this level.\n _global_log.threshold = level\n if level <= DEBUG:\n info('set_threshold: setting threshold to DEBUG level,'\n ' it can be changed only with force argument')\n else:\n info('set_threshold: not changing threshold from DEBUG level'\n ' %s to %s' % (prev_level, level))\n return prev_level\n\ndef get_threshold():\n\treturn _global_log.threshold\n\ndef set_verbosity(v, force=False):\n prev_level = _global_log.threshold\n if v < 0:\n set_threshold(ERROR, force)\n elif v == 0:\n set_threshold(WARN, force)\n elif v == 1:\n set_threshold(INFO, force)\n elif v >= 2:\n set_threshold(DEBUG, force)\n return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1)\n\n\n_global_color_map = {\n DEBUG:cyan_text,\n INFO:default_text,\n WARN:red_text,\n ERROR:red_text,\n FATAL:red_text\n}\n\n# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.\nset_verbosity(0, force=True)\n",
"from datetime import datetime, timedelta\nfrom functools import partial\nfrom operator import attrgetter\n\nimport dateutil\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import OutOfBoundsDatetime, conversion\n\nimport pandas as pd\nfrom pandas import DatetimeIndex, Index, Timestamp, date_range, offsets, to_datetime\nimport pandas._testing as tm\nfrom pandas.core.arrays import DatetimeArray, period_array\n\n\nclass TestDatetimeIndex:\n @pytest.mark.parametrize(\"dt_cls\", [DatetimeIndex, DatetimeArray._from_sequence])\n def test_freq_validation_with_nat(self, dt_cls):\n # GH#11587 make sure we get a useful error message when generate_range\n # raises\n msg = (\n \"Inferred frequency None from passed values does not conform \"\n \"to passed frequency D\"\n )\n with pytest.raises(ValueError, match=msg):\n dt_cls([pd.NaT, pd.Timestamp(\"2011-01-01\")], freq=\"D\")\n with pytest.raises(ValueError, match=msg):\n dt_cls([pd.NaT, pd.Timestamp(\"2011-01-01\").value], freq=\"D\")\n\n # TODO: better place for tests shared by DTI/TDI?\n @pytest.mark.parametrize(\n \"index\",\n [\n pd.date_range(\"2016-01-01\", periods=5, tz=\"US/Pacific\"),\n pd.timedelta_range(\"1 Day\", periods=5),\n ],\n )\n def test_shallow_copy_inherits_array_freq(self, index):\n # If we pass a DTA/TDA to shallow_copy and dont specify a freq,\n # we should inherit the array's freq, not our own.\n array = index._data\n\n arr = array[[0, 3, 2, 4, 1]]\n assert arr.freq is None\n\n result = index._shallow_copy(arr)\n assert result.freq is None\n\n def test_categorical_preserves_tz(self):\n # GH#18664 retain tz when going DTI-->Categorical-->DTI\n # TODO: parametrize over DatetimeIndex/DatetimeArray\n # once CategoricalIndex(DTA) works\n\n dti = pd.DatetimeIndex(\n [pd.NaT, \"2015-01-01\", \"1999-04-06 15:14:13\", \"2015-01-01\"], tz=\"US/Eastern\"\n )\n\n ci = pd.CategoricalIndex(dti)\n carr = pd.Categorical(dti)\n cser = pd.Series(ci)\n\n for obj in [ci, carr, cser]:\n result = pd.DatetimeIndex(obj)\n tm.assert_index_equal(result, dti)\n\n def test_dti_with_period_data_raises(self):\n # GH#23675\n data = pd.PeriodIndex([\"2016Q1\", \"2016Q2\"], freq=\"Q\")\n\n with pytest.raises(TypeError, match=\"PeriodDtype data is invalid\"):\n DatetimeIndex(data)\n\n with pytest.raises(TypeError, match=\"PeriodDtype data is invalid\"):\n to_datetime(data)\n\n with pytest.raises(TypeError, match=\"PeriodDtype data is invalid\"):\n DatetimeIndex(period_array(data))\n\n with pytest.raises(TypeError, match=\"PeriodDtype data is invalid\"):\n to_datetime(period_array(data))\n\n def test_dti_with_timedelta64_data_raises(self):\n # GH#23675 deprecated, enforrced in GH#29794\n data = np.array([0], dtype=\"m8[ns]\")\n msg = r\"timedelta64\\[ns\\] cannot be converted to datetime64\"\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(data)\n\n with pytest.raises(TypeError, match=msg):\n to_datetime(data)\n\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(pd.TimedeltaIndex(data))\n\n with pytest.raises(TypeError, match=msg):\n to_datetime(pd.TimedeltaIndex(data))\n\n def test_construction_caching(self):\n\n df = pd.DataFrame(\n {\n \"dt\": pd.date_range(\"20130101\", periods=3),\n \"dttz\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"dt_with_null\": [\n pd.Timestamp(\"20130101\"),\n pd.NaT,\n pd.Timestamp(\"20130103\"),\n ],\n \"dtns\": pd.date_range(\"20130101\", periods=3, freq=\"ns\"),\n }\n )\n assert df.dttz.dtype.tz.zone == \"US/Eastern\"\n\n @pytest.mark.parametrize(\n 
\"kwargs\",\n [{\"tz\": \"dtype.tz\"}, {\"dtype\": \"dtype\"}, {\"dtype\": \"dtype\", \"tz\": \"dtype.tz\"}],\n )\n def test_construction_with_alt(self, kwargs, tz_aware_fixture):\n tz = tz_aware_fixture\n i = pd.date_range(\"20130101\", periods=5, freq=\"H\", tz=tz)\n kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}\n result = DatetimeIndex(i, **kwargs)\n tm.assert_index_equal(i, result)\n\n @pytest.mark.parametrize(\n \"kwargs\",\n [{\"tz\": \"dtype.tz\"}, {\"dtype\": \"dtype\"}, {\"dtype\": \"dtype\", \"tz\": \"dtype.tz\"}],\n )\n def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):\n tz = tz_aware_fixture\n i = pd.date_range(\"20130101\", periods=5, freq=\"H\", tz=tz)\n kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}\n\n if \"tz\" in kwargs:\n result = DatetimeIndex(i.asi8, tz=\"UTC\").tz_convert(kwargs[\"tz\"])\n\n expected = DatetimeIndex(i, **kwargs)\n tm.assert_index_equal(result, expected)\n\n # localize into the provided tz\n i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=\"UTC\")\n expected = i.tz_localize(None).tz_localize(\"UTC\")\n tm.assert_index_equal(i2, expected)\n\n # incompat tz/dtype\n msg = \"cannot supply both a tz and a dtype with a tz\"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz=\"US/Pacific\")\n\n def test_construction_index_with_mixed_timezones(self):\n # gh-11488: no tz results in DatetimeIndex\n result = Index([Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")], name=\"idx\")\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")], name=\"idx\"\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # same tz results in DatetimeIndex\n result = Index(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"Asia/Tokyo\"),\n ],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01 10:00\"), Timestamp(\"2011-01-02 10:00\")],\n tz=\"Asia/Tokyo\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # same tz results in DatetimeIndex (DST)\n result = Index(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"US/Eastern\"),\n Timestamp(\"2011-08-01 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01 10:00\"), Timestamp(\"2011-08-01 10:00\")],\n tz=\"US/Eastern\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # Different tz results in Index(dtype=object)\n result = Index(\n [\n Timestamp(\"2011-01-01 10:00\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = Index(\n [\n Timestamp(\"2011-01-01 10:00\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n dtype=\"object\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n result = Index(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = Index(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n dtype=\"object\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n 
assert not isinstance(result, DatetimeIndex)\n\n # length = 1\n result = Index([Timestamp(\"2011-01-01\")], name=\"idx\")\n exp = DatetimeIndex([Timestamp(\"2011-01-01\")], name=\"idx\")\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # length = 1 with tz\n result = Index([Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\")], name=\"idx\")\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01 10:00\")], tz=\"Asia/Tokyo\", name=\"idx\"\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n def test_construction_index_with_mixed_timezones_with_NaT(self):\n # see gh-11488\n result = Index(\n [pd.NaT, Timestamp(\"2011-01-01\"), pd.NaT, Timestamp(\"2011-01-02\")],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [pd.NaT, Timestamp(\"2011-01-01\"), pd.NaT, Timestamp(\"2011-01-02\")],\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # Same tz results in DatetimeIndex\n result = Index(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\", tz=\"Asia/Tokyo\"),\n ],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\"),\n ],\n tz=\"Asia/Tokyo\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # same tz results in DatetimeIndex (DST)\n result = Index(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2011-08-01 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01 10:00\"), pd.NaT, Timestamp(\"2011-08-01 10:00\")],\n tz=\"US/Eastern\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # different tz results in Index(dtype=object)\n result = Index(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = Index(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n dtype=\"object\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n result = Index(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = Index(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n dtype=\"object\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n # all NaT\n result = Index([pd.NaT, pd.NaT], name=\"idx\")\n exp = DatetimeIndex([pd.NaT, pd.NaT], name=\"idx\")\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # all NaT with tz\n result = Index([pd.NaT, pd.NaT], tz=\"Asia/Tokyo\", name=\"idx\")\n exp = DatetimeIndex([pd.NaT, pd.NaT], tz=\"Asia/Tokyo\", name=\"idx\")\n\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, 
DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n def test_construction_dti_with_mixed_timezones(self):\n # GH 11488 (not changed, added explicit tests)\n\n # no tz results in DatetimeIndex\n result = DatetimeIndex(\n [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")], name=\"idx\"\n )\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")], name=\"idx\"\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # same tz results in DatetimeIndex\n result = DatetimeIndex(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"Asia/Tokyo\"),\n ],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01 10:00\"), Timestamp(\"2011-01-02 10:00\")],\n tz=\"Asia/Tokyo\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # same tz results in DatetimeIndex (DST)\n result = DatetimeIndex(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"US/Eastern\"),\n Timestamp(\"2011-08-01 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n exp = DatetimeIndex(\n [Timestamp(\"2011-01-01 10:00\"), Timestamp(\"2011-08-01 10:00\")],\n tz=\"US/Eastern\",\n name=\"idx\",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # tz mismatch affecting to tz-aware raises TypeError/ValueError\n\n with pytest.raises(ValueError):\n DatetimeIndex(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n name=\"idx\",\n )\n\n msg = \"cannot be converted to datetime64\"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(\n [\n Timestamp(\"2011-01-01 10:00\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n tz=\"Asia/Tokyo\",\n name=\"idx\",\n )\n\n with pytest.raises(ValueError):\n DatetimeIndex(\n [\n Timestamp(\"2011-01-01 10:00\", tz=\"Asia/Tokyo\"),\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n tz=\"US/Eastern\",\n name=\"idx\",\n )\n\n with pytest.raises(ValueError, match=msg):\n # passing tz should results in DatetimeIndex, then mismatch raises\n # TypeError\n Index(\n [\n pd.NaT,\n Timestamp(\"2011-01-01 10:00\"),\n pd.NaT,\n Timestamp(\"2011-01-02 10:00\", tz=\"US/Eastern\"),\n ],\n tz=\"Asia/Tokyo\",\n name=\"idx\",\n )\n\n def test_construction_base_constructor(self):\n arr = [pd.Timestamp(\"2011-01-01\"), pd.NaT, pd.Timestamp(\"2011-01-03\")]\n tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)), pd.DatetimeIndex(np.array(arr)))\n\n arr = [np.nan, pd.NaT, pd.Timestamp(\"2011-01-03\")]\n tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)), pd.DatetimeIndex(np.array(arr)))\n\n def test_construction_outofbounds(self):\n # GH 13663\n dates = [\n datetime(3000, 1, 1),\n datetime(4000, 1, 1),\n datetime(5000, 1, 1),\n datetime(6000, 1, 1),\n ]\n exp = Index(dates, dtype=object)\n # coerces to object\n tm.assert_index_equal(Index(dates), exp)\n\n with pytest.raises(OutOfBoundsDatetime):\n # can't create DatetimeIndex\n DatetimeIndex(dates)\n\n def test_construction_with_ndarray(self):\n # GH 5152\n dates = [datetime(2013, 10, 7), datetime(2013, 10, 8), datetime(2013, 10, 9)]\n data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values\n result = DatetimeIndex(data, freq=pd.offsets.BDay())\n expected = DatetimeIndex([\"2013-10-07\", \"2013-10-08\", 
\"2013-10-09\"], freq=\"B\")\n tm.assert_index_equal(result, expected)\n\n def test_integer_values_and_tz_interpreted_as_utc(self):\n # GH-24559\n val = np.datetime64(\"2000-01-01 00:00:00\", \"ns\")\n values = np.array([val.view(\"i8\")])\n\n result = DatetimeIndex(values).tz_localize(\"US/Central\")\n\n expected = pd.DatetimeIndex([\"2000-01-01T00:00:00\"], tz=\"US/Central\")\n tm.assert_index_equal(result, expected)\n\n # but UTC is *not* deprecated.\n with tm.assert_produces_warning(None):\n result = DatetimeIndex(values, tz=\"UTC\")\n expected = pd.DatetimeIndex([\"2000-01-01T00:00:00\"], tz=\"US/Central\")\n\n def test_constructor_coverage(self):\n rng = date_range(\"1/1/2000\", periods=10.5)\n exp = date_range(\"1/1/2000\", periods=10)\n tm.assert_index_equal(rng, exp)\n\n msg = \"periods must be a number, got foo\"\n with pytest.raises(TypeError, match=msg):\n date_range(start=\"1/1/2000\", periods=\"foo\", freq=\"D\")\n\n with pytest.raises(TypeError):\n DatetimeIndex(\"1/1/2000\")\n\n # generator expression\n gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))\n result = DatetimeIndex(gen)\n expected = DatetimeIndex(\n [datetime(2000, 1, 1) + timedelta(i) for i in range(10)]\n )\n tm.assert_index_equal(result, expected)\n\n # NumPy string array\n strings = np.array([\"2000-01-01\", \"2000-01-02\", \"2000-01-03\"])\n result = DatetimeIndex(strings)\n expected = DatetimeIndex(strings.astype(\"O\"))\n tm.assert_index_equal(result, expected)\n\n from_ints = DatetimeIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # string with NaT\n strings = np.array([\"2000-01-01\", \"2000-01-02\", \"NaT\"])\n result = DatetimeIndex(strings)\n expected = DatetimeIndex(strings.astype(\"O\"))\n tm.assert_index_equal(result, expected)\n\n from_ints = DatetimeIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # non-conforming\n msg = (\n \"Inferred frequency None from passed values does not conform \"\n \"to passed frequency D\"\n )\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex([\"2000-01-01\", \"2000-01-02\", \"2000-01-04\"], freq=\"D\")\n\n msg = (\n \"Of the four parameters: start, end, periods, and freq, exactly \"\n \"three must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n date_range(start=\"2011-01-01\", freq=\"b\")\n with pytest.raises(ValueError, match=msg):\n date_range(end=\"2011-01-01\", freq=\"B\")\n with pytest.raises(ValueError, match=msg):\n date_range(periods=10, freq=\"D\")\n\n @pytest.mark.parametrize(\"freq\", [\"AS\", \"W-SUN\"])\n def test_constructor_datetime64_tzformat(self, freq):\n # see GH#6572: ISO 8601 format results in pytz.FixedOffset\n idx = date_range(\n \"2013-01-01T00:00:00-05:00\", \"2016-01-01T23:59:59-05:00\", freq=freq\n )\n expected = date_range(\n \"2013-01-01T00:00:00\",\n \"2016-01-01T23:59:59\",\n freq=freq,\n tz=pytz.FixedOffset(-300),\n )\n tm.assert_index_equal(idx, expected)\n # Unable to use `US/Eastern` because of DST\n expected_i8 = date_range(\n \"2013-01-01T00:00:00\", \"2016-01-01T23:59:59\", freq=freq, tz=\"America/Lima\"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n idx = date_range(\n \"2013-01-01T00:00:00+09:00\", \"2016-01-01T23:59:59+09:00\", freq=freq\n )\n expected = date_range(\n \"2013-01-01T00:00:00\",\n \"2016-01-01T23:59:59\",\n freq=freq,\n tz=pytz.FixedOffset(540),\n )\n tm.assert_index_equal(idx, expected)\n expected_i8 = date_range(\n \"2013-01-01T00:00:00\", \"2016-01-01T23:59:59\", freq=freq, tz=\"Asia/Tokyo\"\n )\n 
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n # Non ISO 8601 format results in dateutil.tz.tzoffset\n idx = date_range(\"2013/1/1 0:00:00-5:00\", \"2016/1/1 23:59:59-5:00\", freq=freq)\n expected = date_range(\n \"2013-01-01T00:00:00\",\n \"2016-01-01T23:59:59\",\n freq=freq,\n tz=pytz.FixedOffset(-300),\n )\n tm.assert_index_equal(idx, expected)\n # Unable to use `US/Eastern` because of DST\n expected_i8 = date_range(\n \"2013-01-01T00:00:00\", \"2016-01-01T23:59:59\", freq=freq, tz=\"America/Lima\"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n idx = date_range(\"2013/1/1 0:00:00+9:00\", \"2016/1/1 23:59:59+09:00\", freq=freq)\n expected = date_range(\n \"2013-01-01T00:00:00\",\n \"2016-01-01T23:59:59\",\n freq=freq,\n tz=pytz.FixedOffset(540),\n )\n tm.assert_index_equal(idx, expected)\n expected_i8 = date_range(\n \"2013-01-01T00:00:00\", \"2016-01-01T23:59:59\", freq=freq, tz=\"Asia/Tokyo\"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n def test_constructor_dtype(self):\n\n # passing a dtype with a tz should localize\n idx = DatetimeIndex(\n [\"2013-01-01\", \"2013-01-02\"], dtype=\"datetime64[ns, US/Eastern]\"\n )\n expected = DatetimeIndex([\"2013-01-01\", \"2013-01-02\"]).tz_localize(\"US/Eastern\")\n tm.assert_index_equal(idx, expected)\n\n idx = DatetimeIndex([\"2013-01-01\", \"2013-01-02\"], tz=\"US/Eastern\")\n tm.assert_index_equal(idx, expected)\n\n # if we already have a tz and its not the same, then raise\n idx = DatetimeIndex(\n [\"2013-01-01\", \"2013-01-02\"], dtype=\"datetime64[ns, US/Eastern]\"\n )\n\n msg = (\n \"cannot supply both a tz and a timezone-naive dtype\"\n r\" \\(i\\.e\\. datetime64\\[ns\\]\\)\"\n )\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(idx, dtype=\"datetime64[ns]\")\n\n # this is effectively trying to convert tz's\n msg = \"data is already tz-aware US/Eastern, unable to set specified tz: CET\"\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(idx, dtype=\"datetime64[ns, CET]\")\n msg = \"cannot supply both a tz and a dtype with a tz\"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(idx, tz=\"CET\", dtype=\"datetime64[ns, US/Eastern]\")\n\n result = DatetimeIndex(idx, dtype=\"datetime64[ns, US/Eastern]\")\n tm.assert_index_equal(idx, result)\n\n @pytest.mark.parametrize(\"dtype\", [object, np.int32, np.int64])\n def test_constructor_invalid_dtype_raises(self, dtype):\n # GH 23986\n with pytest.raises(ValueError):\n DatetimeIndex([1, 2], dtype=dtype)\n\n def test_constructor_name(self):\n idx = date_range(start=\"2000-01-01\", periods=1, freq=\"A\", name=\"TEST\")\n assert idx.name == \"TEST\"\n\n def test_000constructor_resolution(self):\n # 2252\n t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)\n idx = DatetimeIndex([t1])\n\n assert idx.nanosecond[0] == t1.nanosecond\n\n def test_disallow_setting_tz(self):\n # GH 3746\n dti = DatetimeIndex([\"2010\"], tz=\"UTC\")\n with pytest.raises(AttributeError):\n dti.tz = pytz.timezone(\"US/Pacific\")\n\n @pytest.mark.parametrize(\n \"tz\",\n [\n None,\n \"America/Los_Angeles\",\n pytz.timezone(\"America/Los_Angeles\"),\n Timestamp(\"2000\", tz=\"America/Los_Angeles\").tz,\n ],\n )\n def test_constructor_start_end_with_tz(self, tz):\n # GH 18595\n start = Timestamp(\"2013-01-01 06:00:00\", tz=\"America/Los_Angeles\")\n end = Timestamp(\"2013-01-02 06:00:00\", tz=\"America/Los_Angeles\")\n result = date_range(freq=\"D\", start=start, end=end, tz=tz)\n expected = DatetimeIndex(\n [\"2013-01-01 06:00:00\", 
\"2013-01-02 06:00:00\"], tz=\"America/Los_Angeles\"\n )\n tm.assert_index_equal(result, expected)\n # Especially assert that the timezone is consistent for pytz\n assert pytz.timezone(\"America/Los_Angeles\") is result.tz\n\n @pytest.mark.parametrize(\"tz\", [\"US/Pacific\", \"US/Eastern\", \"Asia/Tokyo\"])\n def test_constructor_with_non_normalized_pytz(self, tz):\n # GH 18595\n non_norm_tz = Timestamp(\"2010\", tz=tz).tz\n result = DatetimeIndex([\"2010\"], tz=non_norm_tz)\n assert pytz.timezone(tz) is result.tz\n\n def test_constructor_timestamp_near_dst(self):\n # GH 20854\n ts = [\n Timestamp(\"2016-10-30 03:00:00+0300\", tz=\"Europe/Helsinki\"),\n Timestamp(\"2016-10-30 03:00:00+0200\", tz=\"Europe/Helsinki\"),\n ]\n result = DatetimeIndex(ts)\n expected = DatetimeIndex([ts[0].to_pydatetime(), ts[1].to_pydatetime()])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\"klass\", [Index, DatetimeIndex])\n @pytest.mark.parametrize(\"box\", [np.array, partial(np.array, dtype=object), list])\n @pytest.mark.parametrize(\n \"tz, dtype\",\n [(\"US/Pacific\", \"datetime64[ns, US/Pacific]\"), (None, \"datetime64[ns]\")],\n )\n def test_constructor_with_int_tz(self, klass, box, tz, dtype):\n # GH 20997, 20964\n ts = Timestamp(\"2018-01-01\", tz=tz)\n result = klass(box([ts.value]), dtype=dtype)\n expected = klass([ts])\n assert result == expected\n\n def test_construction_int_rountrip(self, tz_naive_fixture):\n # GH 12619, GH#24559\n tz = tz_naive_fixture\n\n result = 1293858000000000000\n expected = DatetimeIndex([result], tz=tz).asi8[0]\n assert result == expected\n\n def test_construction_from_replaced_timestamps_with_dst(self):\n # GH 18785\n index = pd.date_range(\n pd.Timestamp(2000, 1, 1),\n pd.Timestamp(2005, 1, 1),\n freq=\"MS\",\n tz=\"Australia/Melbourne\",\n )\n test = pd.DataFrame({\"data\": range(len(index))}, index=index)\n test = test.resample(\"Y\").mean()\n result = pd.DatetimeIndex([x.replace(month=6, day=1) for x in test.index])\n expected = pd.DatetimeIndex(\n [\n \"2000-06-01 00:00:00\",\n \"2001-06-01 00:00:00\",\n \"2002-06-01 00:00:00\",\n \"2003-06-01 00:00:00\",\n \"2004-06-01 00:00:00\",\n \"2005-06-01 00:00:00\",\n ],\n tz=\"Australia/Melbourne\",\n )\n tm.assert_index_equal(result, expected)\n\n def test_construction_with_tz_and_tz_aware_dti(self):\n # GH 23579\n dti = date_range(\"2016-01-01\", periods=3, tz=\"US/Central\")\n with pytest.raises(TypeError):\n DatetimeIndex(dti, tz=\"Asia/Tokyo\")\n\n def test_construction_with_nat_and_tzlocal(self):\n tz = dateutil.tz.tzlocal()\n result = DatetimeIndex([\"2018\", \"NaT\"], tz=tz)\n expected = DatetimeIndex([Timestamp(\"2018\", tz=tz), pd.NaT])\n tm.assert_index_equal(result, expected)\n\n def test_constructor_no_precision_raises(self):\n # GH-24753, GH-24739\n\n msg = \"with no precision is not allowed\"\n with pytest.raises(ValueError, match=msg):\n pd.DatetimeIndex([\"2000\"], dtype=\"datetime64\")\n\n with pytest.raises(ValueError, match=msg):\n pd.Index([\"2000\"], dtype=\"datetime64\")\n\n def test_constructor_wrong_precision_raises(self):\n with pytest.raises(ValueError):\n pd.DatetimeIndex([\"2000\"], dtype=\"datetime64[us]\")\n\n def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self):\n # GH 27011\n result = Index(np.array([Timestamp(\"2019\", tz=\"UTC\"), np.nan], dtype=object))\n expected = DatetimeIndex([Timestamp(\"2019\", tz=\"UTC\"), pd.NaT])\n tm.assert_index_equal(result, expected)\n\n\nclass TestTimeSeries:\n def 
test_dti_constructor_preserve_dti_freq(self):\n rng = date_range(\"1/1/2000\", \"1/2/2000\", freq=\"5min\")\n\n rng2 = DatetimeIndex(rng)\n assert rng.freq == rng2.freq\n\n def test_dti_constructor_years_only(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH 6961\n rng1 = date_range(\"2014\", \"2015\", freq=\"M\", tz=tz)\n expected1 = date_range(\"2014-01-31\", \"2014-12-31\", freq=\"M\", tz=tz)\n\n rng2 = date_range(\"2014\", \"2015\", freq=\"MS\", tz=tz)\n expected2 = date_range(\"2014-01-01\", \"2015-01-01\", freq=\"MS\", tz=tz)\n\n rng3 = date_range(\"2014\", \"2020\", freq=\"A\", tz=tz)\n expected3 = date_range(\"2014-12-31\", \"2019-12-31\", freq=\"A\", tz=tz)\n\n rng4 = date_range(\"2014\", \"2020\", freq=\"AS\", tz=tz)\n expected4 = date_range(\"2014-01-01\", \"2020-01-01\", freq=\"AS\", tz=tz)\n\n for rng, expected in [\n (rng1, expected1),\n (rng2, expected2),\n (rng3, expected3),\n (rng4, expected4),\n ]:\n tm.assert_index_equal(rng, expected)\n\n def test_dti_constructor_small_int(self, any_int_dtype):\n # see gh-13721\n exp = DatetimeIndex(\n [\n \"1970-01-01 00:00:00.00000000\",\n \"1970-01-01 00:00:00.00000001\",\n \"1970-01-01 00:00:00.00000002\",\n ]\n )\n\n arr = np.array([0, 10, 20], dtype=any_int_dtype)\n tm.assert_index_equal(DatetimeIndex(arr), exp)\n\n def test_ctor_str_intraday(self):\n rng = DatetimeIndex([\"1-1-2000 00:00:01\"])\n assert rng[0].second == 1\n\n def test_is_(self):\n dti = date_range(start=\"1/1/2005\", end=\"12/1/2005\", freq=\"M\")\n assert dti.is_(dti)\n assert dti.is_(dti.view())\n assert not dti.is_(dti.copy())\n\n def test_index_cast_datetime64_other_units(self):\n arr = np.arange(0, 100, 10, dtype=np.int64).view(\"M8[D]\")\n idx = Index(arr)\n\n assert (idx.values == conversion.ensure_datetime64ns(arr)).all()\n\n def test_constructor_int64_nocopy(self):\n # GH#1624\n arr = np.arange(1000, dtype=np.int64)\n index = DatetimeIndex(arr)\n\n arr[50:100] = -1\n assert (index.asi8[50:100] == -1).all()\n\n arr = np.arange(1000, dtype=np.int64)\n index = DatetimeIndex(arr, copy=True)\n\n arr[50:100] = -1\n assert (index.asi8[50:100] != -1).all()\n\n @pytest.mark.parametrize(\n \"freq\", [\"M\", \"Q\", \"A\", \"D\", \"B\", \"BH\", \"T\", \"S\", \"L\", \"U\", \"H\", \"N\", \"C\"]\n )\n def test_from_freq_recreate_from_data(self, freq):\n org = date_range(start=\"2001/02/01 09:00\", freq=freq, periods=1)\n idx = DatetimeIndex(org, freq=freq)\n tm.assert_index_equal(idx, org)\n\n org = date_range(\n start=\"2001/02/01 09:00\", freq=freq, tz=\"US/Pacific\", periods=1\n )\n idx = DatetimeIndex(org, freq=freq, tz=\"US/Pacific\")\n tm.assert_index_equal(idx, org)\n\n def test_datetimeindex_constructor_misc(self):\n arr = [\"1/1/2005\", \"1/2/2005\", \"Jn 3, 2005\", \"2005-01-04\"]\n msg = r\"(\\(')?Unknown string format(:', 'Jn 3, 2005'\\))?\"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(arr)\n\n arr = [\"1/1/2005\", \"1/2/2005\", \"1/3/2005\", \"2005-01-04\"]\n idx1 = DatetimeIndex(arr)\n\n arr = [datetime(2005, 1, 1), \"1/2/2005\", \"1/3/2005\", \"2005-01-04\"]\n idx2 = DatetimeIndex(arr)\n\n arr = [Timestamp(datetime(2005, 1, 1)), \"1/2/2005\", \"1/3/2005\", \"2005-01-04\"]\n idx3 = DatetimeIndex(arr)\n\n arr = np.array([\"1/1/2005\", \"1/2/2005\", \"1/3/2005\", \"2005-01-04\"], dtype=\"O\")\n idx4 = DatetimeIndex(arr)\n\n arr = to_datetime([\"1/1/2005\", \"1/2/2005\", \"1/3/2005\", \"2005-01-04\"])\n idx5 = DatetimeIndex(arr)\n\n arr = to_datetime([\"1/1/2005\", \"1/2/2005\", \"Jan 3, 2005\", \"2005-01-04\"])\n idx6 = 
DatetimeIndex(arr)\n\n idx7 = DatetimeIndex([\"12/05/2007\", \"25/01/2008\"], dayfirst=True)\n idx8 = DatetimeIndex(\n [\"2007/05/12\", \"2008/01/25\"], dayfirst=False, yearfirst=True\n )\n tm.assert_index_equal(idx7, idx8)\n\n for other in [idx2, idx3, idx4, idx5, idx6]:\n assert (idx1.values == other.values).all()\n\n sdate = datetime(1999, 12, 25)\n edate = datetime(2000, 1, 1)\n idx = date_range(start=sdate, freq=\"1B\", periods=20)\n assert len(idx) == 20\n assert idx[0] == sdate + 0 * offsets.BDay()\n assert idx.freq == \"B\"\n\n idx = date_range(end=edate, freq=(\"D\", 5), periods=20)\n assert len(idx) == 20\n assert idx[-1] == edate\n assert idx.freq == \"5D\"\n\n idx1 = date_range(start=sdate, end=edate, freq=\"W-SUN\")\n idx2 = date_range(start=sdate, end=edate, freq=offsets.Week(weekday=6))\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n\n idx1 = date_range(start=sdate, end=edate, freq=\"QS\")\n idx2 = date_range(\n start=sdate, end=edate, freq=offsets.QuarterBegin(startingMonth=1)\n )\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n\n idx1 = date_range(start=sdate, end=edate, freq=\"BQ\")\n idx2 = date_range(\n start=sdate, end=edate, freq=offsets.BQuarterEnd(startingMonth=12)\n )\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n",
"from __future__ import division, absolute_import, print_function\n\nimport sys\nimport warnings\nimport itertools\nimport platform\nimport pytest\nfrom decimal import Decimal\n\nimport numpy as np\nfrom numpy.core import umath\nfrom numpy.random import rand, randint, randn\nfrom numpy.testing import (\n assert_, assert_equal, assert_raises, assert_raises_regex,\n assert_array_equal, assert_almost_equal, assert_array_almost_equal,\n assert_warns, HAS_REFCOUNT\n )\n\n\nclass TestResize(object):\n def test_copies(self):\n A = np.array([[1, 2], [3, 4]])\n Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])\n assert_equal(np.resize(A, (2, 4)), Ar1)\n\n Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])\n assert_equal(np.resize(A, (4, 2)), Ar2)\n\n Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])\n assert_equal(np.resize(A, (4, 3)), Ar3)\n\n def test_zeroresize(self):\n A = np.array([[1, 2], [3, 4]])\n Ar = np.resize(A, (0,))\n assert_array_equal(Ar, np.array([]))\n assert_equal(A.dtype, Ar.dtype)\n\n Ar = np.resize(A, (0, 2))\n assert_equal(Ar.shape, (0, 2))\n\n Ar = np.resize(A, (2, 0))\n assert_equal(Ar.shape, (2, 0))\n\n def test_reshape_from_zero(self):\n # See also gh-6740\n A = np.zeros(0, dtype=[('a', np.float32)])\n Ar = np.resize(A, (2, 1))\n assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))\n assert_equal(A.dtype, Ar.dtype)\n\n\nclass TestNonarrayArgs(object):\n # check that non-array arguments to functions wrap them in arrays\n def test_choose(self):\n choices = [[0, 1, 2],\n [3, 4, 5],\n [5, 6, 7]]\n tgt = [5, 1, 5]\n a = [2, 0, 1]\n\n out = np.choose(a, choices)\n assert_equal(out, tgt)\n\n def test_clip(self):\n arr = [-1, 5, 2, 3, 10, -4, -9]\n out = np.clip(arr, 2, 7)\n tgt = [2, 5, 2, 3, 7, 2, 2]\n assert_equal(out, tgt)\n\n def test_compress(self):\n arr = [[0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9]]\n tgt = [[5, 6, 7, 8, 9]]\n out = np.compress([0, 1], arr, axis=0)\n assert_equal(out, tgt)\n\n def test_count_nonzero(self):\n arr = [[0, 1, 7, 0, 0],\n [3, 0, 0, 2, 19]]\n tgt = np.array([2, 3])\n out = np.count_nonzero(arr, axis=1)\n assert_equal(out, tgt)\n\n def test_cumproduct(self):\n A = [[1, 2, 3], [4, 5, 6]]\n assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))\n\n def test_diagonal(self):\n a = [[0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11]]\n out = np.diagonal(a)\n tgt = [0, 5, 10]\n\n assert_equal(out, tgt)\n\n def test_mean(self):\n A = [[1, 2, 3], [4, 5, 6]]\n assert_(np.mean(A) == 3.5)\n assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))\n assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_(np.isnan(np.mean([])))\n assert_(w[0].category is RuntimeWarning)\n\n def test_ptp(self):\n a = [3, 4, 5, 10, -3, -5, 6.0]\n assert_equal(np.ptp(a, axis=0), 15.0)\n\n def test_prod(self):\n arr = [[1, 2, 3, 4],\n [5, 6, 7, 9],\n [10, 3, 4, 5]]\n tgt = [24, 1890, 600]\n\n assert_equal(np.prod(arr, axis=-1), tgt)\n\n def test_ravel(self):\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n assert_equal(np.ravel(a), tgt)\n\n def test_repeat(self):\n a = [1, 2, 3]\n tgt = [1, 1, 2, 2, 3, 3]\n\n out = np.repeat(a, 2)\n assert_equal(out, tgt)\n\n def test_reshape(self):\n arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]\n assert_equal(np.reshape(arr, (2, 6)), tgt)\n\n def test_round(self):\n arr = [1.56, 72.54, 6.35, 3.25]\n tgt = 
[1.6, 72.5, 6.4, 3.2]\n assert_equal(np.around(arr, decimals=1), tgt)\n\n def test_searchsorted(self):\n arr = [-8, -5, -1, 3, 6, 10]\n out = np.searchsorted(arr, 0)\n assert_equal(out, 3)\n\n def test_size(self):\n A = [[1, 2, 3], [4, 5, 6]]\n assert_(np.size(A) == 6)\n assert_(np.size(A, 0) == 2)\n assert_(np.size(A, 1) == 3)\n\n def test_squeeze(self):\n A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]\n assert_equal(np.squeeze(A).shape, (3, 3))\n assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))\n assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))\n assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))\n assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))\n assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))\n assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))\n assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))\n assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))\n\n def test_std(self):\n A = [[1, 2, 3], [4, 5, 6]]\n assert_almost_equal(np.std(A), 1.707825127659933)\n assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))\n assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_(np.isnan(np.std([])))\n assert_(w[0].category is RuntimeWarning)\n\n def test_swapaxes(self):\n tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]\n a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]\n out = np.swapaxes(a, 0, 2)\n assert_equal(out, tgt)\n\n def test_sum(self):\n m = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n tgt = [[6], [15], [24]]\n out = np.sum(m, axis=1, keepdims=True)\n\n assert_equal(tgt, out)\n\n def test_take(self):\n tgt = [2, 3, 5]\n indices = [1, 2, 4]\n a = [1, 2, 3, 4, 5]\n\n out = np.take(a, indices)\n assert_equal(out, tgt)\n\n def test_trace(self):\n c = [[1, 2], [3, 4], [5, 6]]\n assert_equal(np.trace(c), 5)\n\n def test_transpose(self):\n arr = [[1, 2], [3, 4], [5, 6]]\n tgt = [[1, 3, 5], [2, 4, 6]]\n assert_equal(np.transpose(arr, (1, 0)), tgt)\n\n def test_var(self):\n A = [[1, 2, 3], [4, 5, 6]]\n assert_almost_equal(np.var(A), 2.9166666666666665)\n assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))\n assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_(np.isnan(np.var([])))\n assert_(w[0].category is RuntimeWarning)\n\n B = np.array([None, 0])\n B[0] = 1j\n assert_almost_equal(np.var(B), 0.25)\n\nclass TestIsscalar(object):\n def test_isscalar(self):\n assert_(np.isscalar(3.1))\n assert_(np.isscalar(np.int16(12345)))\n assert_(np.isscalar(False))\n assert_(np.isscalar('numpy'))\n assert_(not np.isscalar([3.1]))\n assert_(not np.isscalar(None))\n\n # PEP 3141\n from fractions import Fraction\n assert_(np.isscalar(Fraction(5, 17)))\n from numbers import Number\n assert_(np.isscalar(Number()))\n\n\nclass TestBoolScalar(object):\n def test_logical(self):\n f = np.False_\n t = np.True_\n s = \"xyz\"\n assert_((t and s) is s)\n assert_((f and s) is f)\n\n def test_bitwise_or(self):\n f = np.False_\n t = np.True_\n assert_((t | t) is t)\n assert_((f | t) is t)\n assert_((t | f) is t)\n assert_((f | f) is f)\n\n def test_bitwise_and(self):\n f = np.False_\n t = np.True_\n assert_((t & t) is t)\n assert_((f & t) is f)\n assert_((t & f) is f)\n assert_((f & f) is f)\n\n def test_bitwise_xor(self):\n f = np.False_\n t = 
np.True_\n assert_((t ^ t) is f)\n assert_((f ^ t) is t)\n assert_((t ^ f) is t)\n assert_((f ^ f) is f)\n\n\nclass TestBoolArray(object):\n def setup(self):\n # offset for simd tests\n self.t = np.array([True] * 41, dtype=bool)[1::]\n self.f = np.array([False] * 41, dtype=bool)[1::]\n self.o = np.array([False] * 42, dtype=bool)[2::]\n self.nm = self.f.copy()\n self.im = self.t.copy()\n self.nm[3] = True\n self.nm[-2] = True\n self.im[3] = False\n self.im[-2] = False\n\n def test_all_any(self):\n assert_(self.t.all())\n assert_(self.t.any())\n assert_(not self.f.all())\n assert_(not self.f.any())\n assert_(self.nm.any())\n assert_(self.im.any())\n assert_(not self.nm.all())\n assert_(not self.im.all())\n # check bad element in all positions\n for i in range(256 - 7):\n d = np.array([False] * 256, dtype=bool)[7::]\n d[i] = True\n assert_(np.any(d))\n e = np.array([True] * 256, dtype=bool)[7::]\n e[i] = False\n assert_(not np.all(e))\n assert_array_equal(e, ~d)\n # big array test for blocked libc loops\n for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:\n d = np.array([False] * 100043, dtype=bool)\n d[i] = True\n assert_(np.any(d), msg=\"%r\" % i)\n e = np.array([True] * 100043, dtype=bool)\n e[i] = False\n assert_(not np.all(e), msg=\"%r\" % i)\n\n def test_logical_not_abs(self):\n assert_array_equal(~self.t, self.f)\n assert_array_equal(np.abs(~self.t), self.f)\n assert_array_equal(np.abs(~self.f), self.t)\n assert_array_equal(np.abs(self.f), self.f)\n assert_array_equal(~np.abs(self.f), self.t)\n assert_array_equal(~np.abs(self.t), self.f)\n assert_array_equal(np.abs(~self.nm), self.im)\n np.logical_not(self.t, out=self.o)\n assert_array_equal(self.o, self.f)\n np.abs(self.t, out=self.o)\n assert_array_equal(self.o, self.t)\n\n def test_logical_and_or_xor(self):\n assert_array_equal(self.t | self.t, self.t)\n assert_array_equal(self.f | self.f, self.f)\n assert_array_equal(self.t | self.f, self.t)\n assert_array_equal(self.f | self.t, self.t)\n np.logical_or(self.t, self.t, out=self.o)\n assert_array_equal(self.o, self.t)\n assert_array_equal(self.t & self.t, self.t)\n assert_array_equal(self.f & self.f, self.f)\n assert_array_equal(self.t & self.f, self.f)\n assert_array_equal(self.f & self.t, self.f)\n np.logical_and(self.t, self.t, out=self.o)\n assert_array_equal(self.o, self.t)\n assert_array_equal(self.t ^ self.t, self.f)\n assert_array_equal(self.f ^ self.f, self.f)\n assert_array_equal(self.t ^ self.f, self.t)\n assert_array_equal(self.f ^ self.t, self.t)\n np.logical_xor(self.t, self.t, out=self.o)\n assert_array_equal(self.o, self.f)\n\n assert_array_equal(self.nm & self.t, self.nm)\n assert_array_equal(self.im & self.f, False)\n assert_array_equal(self.nm & True, self.nm)\n assert_array_equal(self.im & False, self.f)\n assert_array_equal(self.nm | self.t, self.t)\n assert_array_equal(self.im | self.f, self.im)\n assert_array_equal(self.nm | True, self.t)\n assert_array_equal(self.im | False, self.im)\n assert_array_equal(self.nm ^ self.t, self.im)\n assert_array_equal(self.im ^ self.f, self.im)\n assert_array_equal(self.nm ^ True, self.im)\n assert_array_equal(self.im ^ False, self.im)\n\n\nclass TestBoolCmp(object):\n def setup(self):\n self.f = np.ones(256, dtype=np.float32)\n self.ef = np.ones(self.f.size, dtype=bool)\n self.d = np.ones(128, dtype=np.float64)\n self.ed = np.ones(self.d.size, dtype=bool)\n # generate values for all permutation of 256bit simd vectors\n s = 0\n for i in range(32):\n self.f[s:s+8] = [i & 2**x for x in range(8)]\n self.ef[s:s+8] = [(i & 
2**x) != 0 for x in range(8)]\n s += 8\n s = 0\n for i in range(16):\n self.d[s:s+4] = [i & 2**x for x in range(4)]\n self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]\n s += 4\n\n self.nf = self.f.copy()\n self.nd = self.d.copy()\n self.nf[self.ef] = np.nan\n self.nd[self.ed] = np.nan\n\n self.inff = self.f.copy()\n self.infd = self.d.copy()\n self.inff[::3][self.ef[::3]] = np.inf\n self.infd[::3][self.ed[::3]] = np.inf\n self.inff[1::3][self.ef[1::3]] = -np.inf\n self.infd[1::3][self.ed[1::3]] = -np.inf\n self.inff[2::3][self.ef[2::3]] = np.nan\n self.infd[2::3][self.ed[2::3]] = np.nan\n self.efnonan = self.ef.copy()\n self.efnonan[2::3] = False\n self.ednonan = self.ed.copy()\n self.ednonan[2::3] = False\n\n self.signf = self.f.copy()\n self.signd = self.d.copy()\n self.signf[self.ef] *= -1.\n self.signd[self.ed] *= -1.\n self.signf[1::6][self.ef[1::6]] = -np.inf\n self.signd[1::6][self.ed[1::6]] = -np.inf\n self.signf[3::6][self.ef[3::6]] = -np.nan\n self.signd[3::6][self.ed[3::6]] = -np.nan\n self.signf[4::6][self.ef[4::6]] = -0.\n self.signd[4::6][self.ed[4::6]] = -0.\n\n def test_float(self):\n # offset for alignment test\n for i in range(4):\n assert_array_equal(self.f[i:] > 0, self.ef[i:])\n assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])\n assert_array_equal(self.f[i:] == 0, ~self.ef[i:])\n assert_array_equal(-self.f[i:] < 0, self.ef[i:])\n assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])\n r = self.f[i:] != 0\n assert_array_equal(r, self.ef[i:])\n r2 = self.f[i:] != np.zeros_like(self.f[i:])\n r3 = 0 != self.f[i:]\n assert_array_equal(r, r2)\n assert_array_equal(r, r3)\n # check bool == 0x1\n assert_array_equal(r.view(np.int8), r.astype(np.int8))\n assert_array_equal(r2.view(np.int8), r2.astype(np.int8))\n assert_array_equal(r3.view(np.int8), r3.astype(np.int8))\n\n # isnan on amd64 takes the same code path\n assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])\n assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])\n assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])\n assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])\n assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])\n\n def test_double(self):\n # offset for alignment test\n for i in range(2):\n assert_array_equal(self.d[i:] > 0, self.ed[i:])\n assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])\n assert_array_equal(self.d[i:] == 0, ~self.ed[i:])\n assert_array_equal(-self.d[i:] < 0, self.ed[i:])\n assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])\n r = self.d[i:] != 0\n assert_array_equal(r, self.ed[i:])\n r2 = self.d[i:] != np.zeros_like(self.d[i:])\n r3 = 0 != self.d[i:]\n assert_array_equal(r, r2)\n assert_array_equal(r, r3)\n # check bool == 0x1\n assert_array_equal(r.view(np.int8), r.astype(np.int8))\n assert_array_equal(r2.view(np.int8), r2.astype(np.int8))\n assert_array_equal(r3.view(np.int8), r3.astype(np.int8))\n\n # isnan on amd64 takes the same code path\n assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])\n assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])\n assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])\n assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])\n assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])\n\n\nclass TestSeterr(object):\n def test_default(self):\n err = np.geterr()\n assert_equal(err,\n dict(divide='warn',\n invalid='warn',\n over='warn',\n under='ignore')\n )\n\n def test_set(self):\n with np.errstate():\n err = np.seterr()\n old = np.seterr(divide='print')\n assert_(err == old)\n new = 
np.seterr()\n assert_(new['divide'] == 'print')\n np.seterr(over='raise')\n assert_(np.geterr()['over'] == 'raise')\n assert_(new['divide'] == 'print')\n np.seterr(**old)\n assert_(np.geterr() == old)\n\n @pytest.mark.skipif(platform.machine() == \"armv5tel\", reason=\"See gh-413.\")\n def test_divide_err(self):\n with np.errstate(divide='raise'):\n with assert_raises(FloatingPointError):\n np.array([1.]) / np.array([0.])\n\n np.seterr(divide='ignore')\n np.array([1.]) / np.array([0.])\n\n def test_errobj(self):\n olderrobj = np.geterrobj()\n self.called = 0\n try:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n with np.errstate(divide='warn'):\n np.seterrobj([20000, 1, None])\n np.array([1.]) / np.array([0.])\n assert_equal(len(w), 1)\n\n def log_err(*args):\n self.called += 1\n extobj_err = args\n assert_(len(extobj_err) == 2)\n assert_(\"divide\" in extobj_err[0])\n\n with np.errstate(divide='ignore'):\n np.seterrobj([20000, 3, log_err])\n np.array([1.]) / np.array([0.])\n assert_equal(self.called, 1)\n\n np.seterrobj(olderrobj)\n with np.errstate(divide='ignore'):\n np.divide(1., 0., extobj=[20000, 3, log_err])\n assert_equal(self.called, 2)\n finally:\n np.seterrobj(olderrobj)\n del self.called\n\n def test_errobj_noerrmask(self):\n # errmask = 0 has a special code path for the default\n olderrobj = np.geterrobj()\n try:\n # set errobj to something non default\n np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,\n umath.ERR_DEFAULT + 1, None])\n # call a ufunc\n np.isnan(np.array([6]))\n # same with the default, lots of times to get rid of possible\n # pre-existing stack in the code\n for i in range(10000):\n np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,\n None])\n np.isnan(np.array([6]))\n finally:\n np.seterrobj(olderrobj)\n\n\nclass TestFloatExceptions(object):\n def assert_raises_fpe(self, fpeerr, flop, x, y):\n ftype = type(x)\n try:\n flop(x, y)\n assert_(False,\n \"Type %s did not raise fpe error '%s'.\" % (ftype, fpeerr))\n except FloatingPointError as exc:\n assert_(str(exc).find(fpeerr) >= 0,\n \"Type %s raised wrong fpe error '%s'.\" % (ftype, exc))\n\n def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):\n # Check that fpe exception is raised.\n #\n # Given a floating operation `flop` and two scalar values, check that\n # the operation raises the floating point exception specified by\n # `fpeerr`. 
Tests all variants with 0-d array scalars as well.\n\n self.assert_raises_fpe(fpeerr, flop, sc1, sc2)\n self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)\n self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])\n self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])\n\n def test_floating_exceptions(self):\n # Test basic arithmetic function errors\n with np.errstate(all='raise'):\n # Test for all real and complex float types\n for typecode in np.typecodes['AllFloat']:\n ftype = np.obj2sctype(typecode)\n if np.dtype(ftype).kind == 'f':\n # Get some extreme values for the type\n fi = np.finfo(ftype)\n ft_tiny = fi.tiny\n ft_max = fi.max\n ft_eps = fi.eps\n underflow = 'underflow'\n divbyzero = 'divide by zero'\n else:\n # 'c', complex, corresponding real dtype\n rtype = type(ftype(0).real)\n fi = np.finfo(rtype)\n ft_tiny = ftype(fi.tiny)\n ft_max = ftype(fi.max)\n ft_eps = ftype(fi.eps)\n # The complex types raise different exceptions\n underflow = ''\n divbyzero = ''\n overflow = 'overflow'\n invalid = 'invalid'\n\n self.assert_raises_fpe(underflow,\n lambda a, b: a/b, ft_tiny, ft_max)\n self.assert_raises_fpe(underflow,\n lambda a, b: a*b, ft_tiny, ft_tiny)\n self.assert_raises_fpe(overflow,\n lambda a, b: a*b, ft_max, ftype(2))\n self.assert_raises_fpe(overflow,\n lambda a, b: a/b, ft_max, ftype(0.5))\n self.assert_raises_fpe(overflow,\n lambda a, b: a+b, ft_max, ft_max*ft_eps)\n self.assert_raises_fpe(overflow,\n lambda a, b: a-b, -ft_max, ft_max*ft_eps)\n self.assert_raises_fpe(overflow,\n np.power, ftype(2), ftype(2**fi.nexp))\n self.assert_raises_fpe(divbyzero,\n lambda a, b: a/b, ftype(1), ftype(0))\n self.assert_raises_fpe(invalid,\n lambda a, b: a/b, ftype(np.inf), ftype(np.inf))\n self.assert_raises_fpe(invalid,\n lambda a, b: a/b, ftype(0), ftype(0))\n self.assert_raises_fpe(invalid,\n lambda a, b: a-b, ftype(np.inf), ftype(np.inf))\n self.assert_raises_fpe(invalid,\n lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))\n self.assert_raises_fpe(invalid,\n lambda a, b: a*b, ftype(0), ftype(np.inf))\n\n def test_warnings(self):\n # test warning code path\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n with np.errstate(all=\"warn\"):\n np.divide(1, 0.)\n assert_equal(len(w), 1)\n assert_(\"divide by zero\" in str(w[0].message))\n np.array(1e300) * np.array(1e300)\n assert_equal(len(w), 2)\n assert_(\"overflow\" in str(w[-1].message))\n np.array(np.inf) - np.array(np.inf)\n assert_equal(len(w), 3)\n assert_(\"invalid value\" in str(w[-1].message))\n np.array(1e-300) * np.array(1e-300)\n assert_equal(len(w), 4)\n assert_(\"underflow\" in str(w[-1].message))\n\n\nclass TestTypes(object):\n def check_promotion_cases(self, promote_func):\n # tests that the scalars get coerced correctly.\n b = np.bool_(0)\n i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)\n u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)\n f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)\n c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)\n\n # coercion within the same kind\n assert_equal(promote_func(i8, i16), np.dtype(np.int16))\n assert_equal(promote_func(i32, i8), np.dtype(np.int32))\n assert_equal(promote_func(i16, i64), np.dtype(np.int64))\n assert_equal(promote_func(u8, u32), np.dtype(np.uint32))\n assert_equal(promote_func(f32, f64), np.dtype(np.float64))\n assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))\n assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))\n 
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))\n assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))\n assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))\n\n # coercion between kinds\n assert_equal(promote_func(b, i32), np.dtype(np.int32))\n assert_equal(promote_func(b, u8), np.dtype(np.uint8))\n assert_equal(promote_func(i8, u8), np.dtype(np.int16))\n assert_equal(promote_func(u8, i32), np.dtype(np.int32))\n assert_equal(promote_func(i64, u32), np.dtype(np.int64))\n assert_equal(promote_func(u64, i32), np.dtype(np.float64))\n assert_equal(promote_func(i32, f32), np.dtype(np.float64))\n assert_equal(promote_func(i64, f32), np.dtype(np.float64))\n assert_equal(promote_func(f32, i16), np.dtype(np.float32))\n assert_equal(promote_func(f32, u32), np.dtype(np.float64))\n assert_equal(promote_func(f32, c64), np.dtype(np.complex64))\n assert_equal(promote_func(c128, f32), np.dtype(np.complex128))\n assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))\n\n # coercion between scalars and 1-D arrays\n assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))\n assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))\n assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))\n assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))\n assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))\n assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))\n assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))\n assert_equal(promote_func(np.int32(-1), np.array([u64])),\n np.dtype(np.float64))\n assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))\n assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))\n assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))\n assert_equal(promote_func(fld, np.array([c64])),\n np.dtype(np.complex64))\n assert_equal(promote_func(c64, np.array([f64])),\n np.dtype(np.complex128))\n assert_equal(promote_func(np.complex64(3j), np.array([f64])),\n np.dtype(np.complex128))\n\n # coercion between scalars and 1-D arrays, where\n # the scalar has greater kind than the array\n assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))\n assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))\n assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))\n assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))\n assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))\n\n # uint and int are treated as the same \"kind\" for\n # the purposes of array-scalar promotion.\n assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))\n\n # float and complex are treated as the same \"kind\" for\n # the purposes of array-scalar promotion, so that you can do\n # (0j + float32array) to get a complex64 array instead of\n # a complex128 array.\n assert_equal(promote_func(np.array([f32]), c128),\n np.dtype(np.complex64))\n\n def test_coercion(self):\n def res_type(a, b):\n return np.add(a, b).dtype\n\n self.check_promotion_cases(res_type)\n\n # Use-case: float/complex scalar * bool/int8 array\n # shouldn't narrow the float/complex type\n for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:\n b = 1.234 * a\n assert_equal(b.dtype, np.dtype('f8'), \"array type %s\" % a.dtype)\n b = np.longdouble(1.234) * a\n assert_equal(b.dtype, np.dtype(np.longdouble),\n \"array type %s\" % a.dtype)\n b = np.float64(1.234) * a\n assert_equal(b.dtype, 
np.dtype('f8'), \"array type %s\" % a.dtype)\n b = np.float32(1.234) * a\n assert_equal(b.dtype, np.dtype('f4'), \"array type %s\" % a.dtype)\n b = np.float16(1.234) * a\n assert_equal(b.dtype, np.dtype('f2'), \"array type %s\" % a.dtype)\n\n b = 1.234j * a\n assert_equal(b.dtype, np.dtype('c16'), \"array type %s\" % a.dtype)\n b = np.clongdouble(1.234j) * a\n assert_equal(b.dtype, np.dtype(np.clongdouble),\n \"array type %s\" % a.dtype)\n b = np.complex128(1.234j) * a\n assert_equal(b.dtype, np.dtype('c16'), \"array type %s\" % a.dtype)\n b = np.complex64(1.234j) * a\n assert_equal(b.dtype, np.dtype('c8'), \"array type %s\" % a.dtype)\n\n # The following use-case is problematic, and to resolve its\n # tricky side-effects requires more changes.\n #\n # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is\n # a float32, shouldn't promote to float64\n #\n # a = np.array([1.0, 1.5], dtype=np.float32)\n # t = np.array([True, False])\n # b = t*a\n # assert_equal(b, [1.0, 0.0])\n # assert_equal(b.dtype, np.dtype('f4'))\n # b = (1-t)*a\n # assert_equal(b, [0.0, 1.5])\n # assert_equal(b.dtype, np.dtype('f4'))\n #\n # Probably ~t (bitwise negation) is more proper to use here,\n # but this is arguably less intuitive to understand at a glance, and\n # would fail if 't' is actually an integer array instead of boolean:\n #\n # b = (~t)*a\n # assert_equal(b, [0.0, 1.5])\n # assert_equal(b.dtype, np.dtype('f4'))\n\n def test_result_type(self):\n self.check_promotion_cases(np.result_type)\n assert_(np.result_type(None) == np.dtype(None))\n\n def test_promote_types_endian(self):\n # promote_types should always return native-endian types\n assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))\n assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))\n\n assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))\n assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))\n assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))\n assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))\n\n assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))\n assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))\n assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))\n assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))\n assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))\n assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))\n\n assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))\n assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))\n assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))\n assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))\n\n def test_promote_types_strings(self):\n assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))\n assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))\n assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))\n assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))\n assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))\n assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))\n assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))\n assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))\n assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))\n assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))\n assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))\n assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))\n assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))\n assert_equal(np.promote_types('u2', 'U'), 
np.dtype('U5'))\n assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))\n assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))\n assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))\n assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))\n assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))\n assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))\n assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))\n assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))\n assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))\n assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))\n assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))\n assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))\n assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))\n assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))\n assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))\n assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))\n assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))\n assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))\n\n def test_can_cast(self):\n assert_(np.can_cast(np.int32, np.int64))\n assert_(np.can_cast(np.float64, complex))\n assert_(not np.can_cast(complex, float))\n\n assert_(np.can_cast('i8', 'f8'))\n assert_(not np.can_cast('i8', 'f4'))\n assert_(np.can_cast('i4', 'S11'))\n\n assert_(np.can_cast('i8', 'i8', 'no'))\n assert_(not np.can_cast('<i8', '>i8', 'no'))\n\n assert_(np.can_cast('<i8', '>i8', 'equiv'))\n assert_(not np.can_cast('<i4', '>i8', 'equiv'))\n\n assert_(np.can_cast('<i4', '>i8', 'safe'))\n assert_(not np.can_cast('<i8', '>i4', 'safe'))\n\n assert_(np.can_cast('<i8', '>i4', 'same_kind'))\n assert_(not np.can_cast('<i8', '>u4', 'same_kind'))\n\n assert_(np.can_cast('<i8', '>u4', 'unsafe'))\n\n assert_(np.can_cast('bool', 'S5'))\n assert_(not np.can_cast('bool', 'S4'))\n\n assert_(np.can_cast('b', 'S4'))\n assert_(not np.can_cast('b', 'S3'))\n\n assert_(np.can_cast('u1', 'S3'))\n assert_(not np.can_cast('u1', 'S2'))\n assert_(np.can_cast('u2', 'S5'))\n assert_(not np.can_cast('u2', 'S4'))\n assert_(np.can_cast('u4', 'S10'))\n assert_(not np.can_cast('u4', 'S9'))\n assert_(np.can_cast('u8', 'S20'))\n assert_(not np.can_cast('u8', 'S19'))\n\n assert_(np.can_cast('i1', 'S4'))\n assert_(not np.can_cast('i1', 'S3'))\n assert_(np.can_cast('i2', 'S6'))\n assert_(not np.can_cast('i2', 'S5'))\n assert_(np.can_cast('i4', 'S11'))\n assert_(not np.can_cast('i4', 'S10'))\n assert_(np.can_cast('i8', 'S21'))\n assert_(not np.can_cast('i8', 'S20'))\n\n assert_(np.can_cast('bool', 'S5'))\n assert_(not np.can_cast('bool', 'S4'))\n\n assert_(np.can_cast('b', 'U4'))\n assert_(not np.can_cast('b', 'U3'))\n\n assert_(np.can_cast('u1', 'U3'))\n assert_(not np.can_cast('u1', 'U2'))\n assert_(np.can_cast('u2', 'U5'))\n assert_(not np.can_cast('u2', 'U4'))\n assert_(np.can_cast('u4', 'U10'))\n assert_(not np.can_cast('u4', 'U9'))\n assert_(np.can_cast('u8', 'U20'))\n assert_(not np.can_cast('u8', 'U19'))\n\n assert_(np.can_cast('i1', 'U4'))\n assert_(not np.can_cast('i1', 'U3'))\n assert_(np.can_cast('i2', 'U6'))\n assert_(not np.can_cast('i2', 'U5'))\n assert_(np.can_cast('i4', 'U11'))\n assert_(not np.can_cast('i4', 'U10'))\n assert_(np.can_cast('i8', 'U21'))\n assert_(not np.can_cast('i8', 'U20'))\n\n assert_raises(TypeError, np.can_cast, 'i4', None)\n assert_raises(TypeError, np.can_cast, None, 'i4')\n\n # Also test keyword arguments\n assert_(np.can_cast(from_=np.int32, to=np.int64))\n\n def 
test_can_cast_simple_to_structured(self):\n # Non-structured can only be cast to structured in 'unsafe' mode.\n assert_(not np.can_cast('i4', 'i4,i4'))\n assert_(not np.can_cast('i4', 'i4,i2'))\n assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))\n assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))\n # Even if there is just a single field which is OK.\n assert_(not np.can_cast('i2', [('f1', 'i4')]))\n assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))\n assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))\n # It should be the same for recursive structured or subarrays.\n assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))\n assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))\n assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))\n assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))\n\n def test_can_cast_structured_to_simple(self):\n # Need unsafe casting for structured to simple.\n assert_(not np.can_cast([('f1', 'i4')], 'i4'))\n assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))\n assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))\n # Since it is unclear what is being cast, multiple fields to\n # single should not work even for unsafe casting.\n assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))\n # But a single field inside a single field is OK.\n assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))\n assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))\n # And a subarray is fine too - it will just take the first element\n # (arguably not very consistently; might also take the first field).\n assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))\n assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))\n # But a structured subarray with multiple fields should fail.\n assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',\n casting='unsafe'))\n\n def test_can_cast_values(self):\n # gh-5917\n for dt in np.sctypes['int'] + np.sctypes['uint']:\n ii = np.iinfo(dt)\n assert_(np.can_cast(ii.min, dt))\n assert_(np.can_cast(ii.max, dt))\n assert_(not np.can_cast(ii.min - 1, dt))\n assert_(not np.can_cast(ii.max + 1, dt))\n\n for dt in np.sctypes['float']:\n fi = np.finfo(dt)\n assert_(np.can_cast(fi.min, dt))\n assert_(np.can_cast(fi.max, dt))\n\n\n# Custom exception class to test exception propagation in fromiter\nclass NIterError(Exception):\n pass\n\n\nclass TestFromiter(object):\n def makegen(self):\n for x in range(24):\n yield x**2\n\n def test_types(self):\n ai32 = np.fromiter(self.makegen(), np.int32)\n ai64 = np.fromiter(self.makegen(), np.int64)\n af = np.fromiter(self.makegen(), float)\n assert_(ai32.dtype == np.dtype(np.int32))\n assert_(ai64.dtype == np.dtype(np.int64))\n assert_(af.dtype == np.dtype(float))\n\n def test_lengths(self):\n expected = np.array(list(self.makegen()))\n a = np.fromiter(self.makegen(), int)\n a20 = np.fromiter(self.makegen(), int, 20)\n assert_(len(a) == len(expected))\n assert_(len(a20) == 20)\n assert_raises(ValueError, np.fromiter,\n self.makegen(), int, len(expected) + 10)\n\n def test_values(self):\n expected = np.array(list(self.makegen()))\n a = np.fromiter(self.makegen(), int)\n a20 = np.fromiter(self.makegen(), int, 20)\n assert_(np.alltrue(a == expected, axis=0))\n assert_(np.alltrue(a20 == expected[:20], axis=0))\n\n def load_data(self, n, eindex):\n # Utility method for the issue 2592 tests.\n # Raise an exception at the desired index in the iterator.\n for e in range(n):\n if e == eindex:\n raise NIterError('error at index %s' % 
eindex)\n yield e\n\n def test_2592(self):\n # Test iteration exceptions are correctly raised.\n count, eindex = 10, 5\n assert_raises(NIterError, np.fromiter,\n self.load_data(count, eindex), dtype=int, count=count)\n\n def test_2592_edge(self):\n # Test iter. exceptions, edge case (exception at end of iterator).\n count = 10\n eindex = count-1\n assert_raises(NIterError, np.fromiter,\n self.load_data(count, eindex), dtype=int, count=count)\n\n\nclass TestNonzero(object):\n def test_nonzero_trivial(self):\n assert_equal(np.count_nonzero(np.array([])), 0)\n assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)\n assert_equal(np.nonzero(np.array([])), ([],))\n\n assert_equal(np.count_nonzero(np.array([0])), 0)\n assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)\n assert_equal(np.nonzero(np.array([0])), ([],))\n\n assert_equal(np.count_nonzero(np.array([1])), 1)\n assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)\n assert_equal(np.nonzero(np.array([1])), ([0],))\n\n def test_nonzero_zerod(self):\n assert_equal(np.count_nonzero(np.array(0)), 0)\n assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)\n with assert_warns(DeprecationWarning):\n assert_equal(np.nonzero(np.array(0)), ([],))\n\n assert_equal(np.count_nonzero(np.array(1)), 1)\n assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)\n with assert_warns(DeprecationWarning):\n assert_equal(np.nonzero(np.array(1)), ([0],))\n\n def test_nonzero_onedim(self):\n x = np.array([1, 0, 2, -1, 0, 0, 8])\n assert_equal(np.count_nonzero(x), 4)\n assert_equal(np.count_nonzero(x), 4)\n assert_equal(np.nonzero(x), ([0, 2, 3, 6],))\n\n x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],\n dtype=[('a', 'i4'), ('b', 'i2')])\n assert_equal(np.count_nonzero(x['a']), 3)\n assert_equal(np.count_nonzero(x['b']), 4)\n assert_equal(np.nonzero(x['a']), ([0, 2, 3],))\n assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))\n\n def test_nonzero_twodim(self):\n x = np.array([[0, 1, 0], [2, 0, 3]])\n assert_equal(np.count_nonzero(x), 3)\n assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))\n\n x = np.eye(3)\n assert_equal(np.count_nonzero(x), 3)\n assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))\n\n x = np.array([[(0, 1), (0, 0), (1, 11)],\n [(1, 1), (1, 0), (0, 0)],\n [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])\n assert_equal(np.count_nonzero(x['a']), 4)\n assert_equal(np.count_nonzero(x['b']), 5)\n assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))\n assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))\n\n assert_(not x['a'].T.flags.aligned)\n assert_equal(np.count_nonzero(x['a'].T), 4)\n assert_equal(np.count_nonzero(x['b'].T), 5)\n assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))\n assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))\n\n def test_sparse(self):\n # test special sparse condition boolean code path\n for i in range(20):\n c = np.zeros(200, dtype=bool)\n c[i::20] = True\n assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))\n\n c = np.zeros(400, dtype=bool)\n c[10 + i:20 + i] = True\n c[20 + i*2] = True\n assert_equal(np.nonzero(c)[0],\n np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))\n\n def test_return_type(self):\n class C(np.ndarray):\n pass\n\n for view in (C, np.ndarray):\n for nd in range(1, 4):\n shape = tuple(range(2, 2+nd))\n x = np.arange(np.prod(shape)).reshape(shape).view(view)\n for nzx in (np.nonzero(x), x.nonzero()):\n for nzx_i in nzx:\n assert_(type(nzx_i) is np.ndarray)\n 
assert_(nzx_i.flags.writeable)\n\n def test_count_nonzero_axis(self):\n # Basic check of functionality\n m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])\n\n expected = np.array([1, 1, 1, 1, 1])\n assert_equal(np.count_nonzero(m, axis=0), expected)\n\n expected = np.array([2, 3])\n assert_equal(np.count_nonzero(m, axis=1), expected)\n\n assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))\n assert_raises(TypeError, np.count_nonzero, m, axis='foo')\n assert_raises(np.AxisError, np.count_nonzero, m, axis=3)\n assert_raises(TypeError, np.count_nonzero,\n m, axis=np.array([[1], [2]]))\n\n def test_count_nonzero_axis_all_dtypes(self):\n # More thorough test that the axis argument is respected\n # for all dtypes and responds correctly when presented with\n # either integer or tuple arguments for axis\n msg = \"Mismatch for dtype: %s\"\n\n def assert_equal_w_dt(a, b, err_msg):\n assert_equal(a.dtype, b.dtype, err_msg=err_msg)\n assert_equal(a, b, err_msg=err_msg)\n\n for dt in np.typecodes['All']:\n err_msg = msg % (np.dtype(dt).name,)\n\n if dt != 'V':\n if dt != 'M':\n m = np.zeros((3, 3), dtype=dt)\n n = np.ones(1, dtype=dt)\n\n m[0, 0] = n[0]\n m[1, 0] = n[0]\n\n else: # np.zeros doesn't work for np.datetime64\n m = np.array(['1970-01-01'] * 9)\n m = m.reshape((3, 3))\n\n m[0, 0] = '1970-01-12'\n m[1, 0] = '1970-01-12'\n m = m.astype(dt)\n\n expected = np.array([2, 0, 0], dtype=np.intp)\n assert_equal_w_dt(np.count_nonzero(m, axis=0),\n expected, err_msg=err_msg)\n\n expected = np.array([1, 1, 0], dtype=np.intp)\n assert_equal_w_dt(np.count_nonzero(m, axis=1),\n expected, err_msg=err_msg)\n\n expected = np.array(2)\n assert_equal(np.count_nonzero(m, axis=(0, 1)),\n expected, err_msg=err_msg)\n assert_equal(np.count_nonzero(m, axis=None),\n expected, err_msg=err_msg)\n assert_equal(np.count_nonzero(m),\n expected, err_msg=err_msg)\n\n if dt == 'V':\n # There are no 'nonzero' objects for np.void, so the testing\n # setup is slightly different for this dtype\n m = np.array([np.void(1)] * 6).reshape((2, 3))\n\n expected = np.array([0, 0, 0], dtype=np.intp)\n assert_equal_w_dt(np.count_nonzero(m, axis=0),\n expected, err_msg=err_msg)\n\n expected = np.array([0, 0], dtype=np.intp)\n assert_equal_w_dt(np.count_nonzero(m, axis=1),\n expected, err_msg=err_msg)\n\n expected = np.array(0)\n assert_equal(np.count_nonzero(m, axis=(0, 1)),\n expected, err_msg=err_msg)\n assert_equal(np.count_nonzero(m, axis=None),\n expected, err_msg=err_msg)\n assert_equal(np.count_nonzero(m),\n expected, err_msg=err_msg)\n\n def test_count_nonzero_axis_consistent(self):\n # Check that the axis behaviour for valid axes in\n # non-special cases is consistent (and therefore\n # correct) by checking it against an integer array\n # that is then casted to the generic object dtype\n from itertools import combinations, permutations\n\n axis = (0, 1, 2, 3)\n size = (5, 5, 5, 5)\n msg = \"Mismatch for axis: %s\"\n\n rng = np.random.RandomState(1234)\n m = rng.randint(-100, 100, size=size)\n n = m.astype(object)\n\n for length in range(len(axis)):\n for combo in combinations(axis, length):\n for perm in permutations(combo):\n assert_equal(\n np.count_nonzero(m, axis=perm),\n np.count_nonzero(n, axis=perm),\n err_msg=msg % (perm,))\n\n def test_countnonzero_axis_empty(self):\n a = np.array([[0, 0, 1], [1, 0, 1]])\n assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))\n\n def test_array_method(self):\n # Tests that the array method\n # call to nonzero works\n m = np.array([[1, 0, 0], [4, 0, 6]])\n tgt = [[0, 1, 1], [0, 
0, 2]]\n\n assert_equal(m.nonzero(), tgt)\n\n def test_nonzero_invalid_object(self):\n # gh-9295\n a = np.array([np.array([1, 2]), 3])\n assert_raises(ValueError, np.nonzero, a)\n\n class BoolErrors:\n def __bool__(self):\n raise ValueError(\"Not allowed\")\n def __nonzero__(self):\n raise ValueError(\"Not allowed\")\n\n assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))\n\n def test_nonzero_sideeffect_safety(self):\n # gh-13631\n class FalseThenTrue:\n _val = False\n def __bool__(self):\n try:\n return self._val\n finally:\n self._val = True\n\n class TrueThenFalse:\n _val = True\n def __bool__(self):\n try:\n return self._val\n finally:\n self._val = False\n\n # result grows on the second pass\n a = np.array([True, FalseThenTrue()])\n assert_raises(RuntimeError, np.nonzero, a)\n\n a = np.array([[True], [FalseThenTrue()]])\n assert_raises(RuntimeError, np.nonzero, a)\n\n # result shrinks on the second pass\n a = np.array([False, TrueThenFalse()])\n assert_raises(RuntimeError, np.nonzero, a)\n\n a = np.array([[False], [TrueThenFalse()]])\n assert_raises(RuntimeError, np.nonzero, a)\n\n def test_nonzero_exception_safe(self):\n # gh-13930\n\n class ThrowsAfter:\n def __init__(self, iters):\n self.iters_left = iters\n\n def __bool__(self):\n if self.iters_left == 0:\n raise ValueError(\"called `iters` times\")\n\n self.iters_left -= 1\n return True\n\n \"\"\"\n Test that a ValueError is raised instead of a SystemError\n\n If the __bool__ function is called after the error state is set,\n Python (cpython) will raise a SystemError.\n \"\"\"\n\n # assert that an exception in first pass is handled correctly\n a = np.array([ThrowsAfter(5)]*10)\n assert_raises(ValueError, np.nonzero, a)\n\n # raise exception in second pass for 1-dimensional loop\n a = np.array([ThrowsAfter(15)]*10)\n assert_raises(ValueError, np.nonzero, a)\n\n # raise exception in second pass for n-dimensional loop\n a = np.array([[ThrowsAfter(15)]]*10)\n assert_raises(ValueError, np.nonzero, a)\n\n\nclass TestIndex(object):\n def test_boolean(self):\n a = rand(3, 5, 8)\n V = rand(5, 8)\n g1 = randint(0, 5, size=15)\n g2 = randint(0, 8, size=15)\n V[g1, g2] = -V[g1, g2]\n assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())\n\n def test_boolean_edgecase(self):\n a = np.array([], dtype='int32')\n b = np.array([], dtype='bool')\n c = a[b]\n assert_equal(c, [])\n assert_equal(c.dtype, np.dtype('int32'))\n\n\nclass TestBinaryRepr(object):\n def test_zero(self):\n assert_equal(np.binary_repr(0), '0')\n\n def test_positive(self):\n assert_equal(np.binary_repr(10), '1010')\n assert_equal(np.binary_repr(12522),\n '11000011101010')\n assert_equal(np.binary_repr(10736848),\n '101000111101010011010000')\n\n def test_negative(self):\n assert_equal(np.binary_repr(-1), '-1')\n assert_equal(np.binary_repr(-10), '-1010')\n assert_equal(np.binary_repr(-12522),\n '-11000011101010')\n assert_equal(np.binary_repr(-10736848),\n '-101000111101010011010000')\n\n def test_sufficient_width(self):\n assert_equal(np.binary_repr(0, width=5), '00000')\n assert_equal(np.binary_repr(10, width=7), '0001010')\n assert_equal(np.binary_repr(-5, width=7), '1111011')\n\n def test_neg_width_boundaries(self):\n # see gh-8670\n\n # Ensure that the example in the issue does not\n # break before proceeding to a more thorough test.\n assert_equal(np.binary_repr(-128, width=8), '10000000')\n\n for width in range(1, 11):\n num = -2**(width - 1)\n exp = '1' + (width - 1) * '0'\n assert_equal(np.binary_repr(num, width=width), exp)\n\n 
def test_large_neg_int64(self):\n # See gh-14289.\n assert_equal(np.binary_repr(np.int64(-2**62), width=64),\n '11' + '0'*62)\n\n\nclass TestBaseRepr(object):\n def test_base3(self):\n assert_equal(np.base_repr(3**5, 3), '100000')\n\n def test_positive(self):\n assert_equal(np.base_repr(12, 10), '12')\n assert_equal(np.base_repr(12, 10, 4), '000012')\n assert_equal(np.base_repr(12, 4), '30')\n assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')\n\n def test_negative(self):\n assert_equal(np.base_repr(-12, 10), '-12')\n assert_equal(np.base_repr(-12, 10, 4), '-000012')\n assert_equal(np.base_repr(-12, 4), '-30')\n\n def test_base_range(self):\n with assert_raises(ValueError):\n np.base_repr(1, 1)\n with assert_raises(ValueError):\n np.base_repr(1, 37)\n\n\nclass TestArrayComparisons(object):\n def test_array_equal(self):\n res = np.array_equal(np.array([1, 2]), np.array([1, 2]))\n assert_(res)\n assert_(type(res) is bool)\n res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equal(np.array([1, 2]), np.array([3, 4]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equal(np.array([1, 2]), np.array([1, 3]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))\n assert_(res)\n assert_(type(res) is bool)\n res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),\n np.array([('a', 1)], dtype='S1,u4'))\n assert_(res)\n assert_(type(res) is bool)\n\n def test_none_compares_elementwise(self):\n a = np.array([None, 1, None], dtype=object)\n assert_equal(a == None, [True, False, True])\n assert_equal(a != None, [False, True, False])\n\n a = np.ones(3)\n assert_equal(a == None, [False, False, False])\n assert_equal(a != None, [True, True, True])\n\n def test_array_equiv(self):\n res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))\n assert_(res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))\n assert_(not res)\n assert_(type(res) is bool)\n\n res = np.array_equiv(np.array([1, 1]), np.array([1]))\n assert_(res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))\n assert_(res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 2]), np.array([2]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))\n assert_(not res)\n assert_(type(res) is bool)\n res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))\n assert_(not res)\n assert_(type(res) is bool)\n\n\ndef assert_array_strict_equal(x, y):\n assert_array_equal(x, y)\n # Check flags, 32 bit arches typically don't provide 16 byte alignment\n if ((x.dtype.alignment <= 8 or\n np.intp().dtype.itemsize != 4) and\n sys.platform != 'win32'):\n assert_(x.flags == y.flags)\n else:\n assert_(x.flags.owndata == y.flags.owndata)\n assert_(x.flags.writeable == y.flags.writeable)\n assert_(x.flags.c_contiguous == y.flags.c_contiguous)\n assert_(x.flags.f_contiguous == y.flags.f_contiguous)\n assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)\n # check endianness\n assert_(x.dtype.isnative == y.dtype.isnative)\n\n\nclass TestClip(object):\n def setup(self):\n self.nr = 5\n self.nc = 3\n\n 
def fastclip(self, a, m, M, out=None, casting=None):\n if out is None:\n if casting is None:\n return a.clip(m, M)\n else:\n return a.clip(m, M, casting=casting)\n else:\n if casting is None:\n return a.clip(m, M, out)\n else:\n return a.clip(m, M, out, casting=casting)\n\n def clip(self, a, m, M, out=None):\n # use slow-clip\n selector = np.less(a, m) + 2*np.greater(a, M)\n return selector.choose((a, m, M), out=out)\n\n # Handy functions\n def _generate_data(self, n, m):\n return randn(n, m)\n\n def _generate_data_complex(self, n, m):\n return randn(n, m) + 1.j * rand(n, m)\n\n def _generate_flt_data(self, n, m):\n return (randn(n, m)).astype(np.float32)\n\n def _neg_byteorder(self, a):\n a = np.asarray(a)\n if sys.byteorder == 'little':\n a = a.astype(a.dtype.newbyteorder('>'))\n else:\n a = a.astype(a.dtype.newbyteorder('<'))\n return a\n\n def _generate_non_native_data(self, n, m):\n data = randn(n, m)\n data = self._neg_byteorder(data)\n assert_(not data.dtype.isnative)\n return data\n\n def _generate_int_data(self, n, m):\n return (10 * rand(n, m)).astype(np.int64)\n\n def _generate_int32_data(self, n, m):\n return (10 * rand(n, m)).astype(np.int32)\n\n # Now the real test cases\n\n @pytest.mark.parametrize(\"dtype\", '?bhilqpBHILQPefdgFDGO')\n def test_ones_pathological(self, dtype):\n # for preservation of behavior described in\n # gh-12519; amin > amax behavior may still change\n # in the future\n arr = np.ones(10, dtype=dtype)\n expected = np.zeros(10, dtype=dtype)\n actual = np.clip(arr, 1, 0)\n if dtype == 'O':\n assert actual.tolist() == expected.tolist()\n else:\n assert_equal(actual, expected)\n\n def test_simple_double(self):\n # Test native double input with scalar min/max.\n a = self._generate_data(self.nr, self.nc)\n m = 0.1\n M = 0.6\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_simple_int(self):\n # Test native int input with scalar min/max.\n a = self._generate_int_data(self.nr, self.nc)\n a = a.astype(int)\n m = -2\n M = 4\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_array_double(self):\n # Test native double input with array min/max.\n a = self._generate_data(self.nr, self.nc)\n m = np.zeros(a.shape)\n M = m + 0.5\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_simple_nonnative(self):\n # Test non native double input with scalar min/max.\n # Test native double input with non native double scalar min/max.\n a = self._generate_non_native_data(self.nr, self.nc)\n m = -0.5\n M = 0.6\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_equal(ac, act)\n\n # Test native double input with non native double scalar min/max.\n a = self._generate_data(self.nr, self.nc)\n m = -0.5\n M = self._neg_byteorder(0.6)\n assert_(not M.dtype.isnative)\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_equal(ac, act)\n\n def test_simple_complex(self):\n # Test native complex input with native double scalar min/max.\n # Test native input with complex double scalar min/max.\n a = 3 * self._generate_data_complex(self.nr, self.nc)\n m = -0.5\n M = 1.\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n # Test native input with complex double scalar min/max.\n a = 3 * self._generate_data(self.nr, self.nc)\n m = -0.5 + 1.j\n M = 1. 
+ 2.j\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_clip_complex(self):\n # Address Issue gh-5354 for clipping complex arrays\n # Test native complex input without explicit min/max\n # ie, either min=None or max=None\n a = np.ones(10, dtype=complex)\n m = a.min()\n M = a.max()\n am = self.fastclip(a, m, None)\n aM = self.fastclip(a, None, M)\n assert_array_strict_equal(am, a)\n assert_array_strict_equal(aM, a)\n\n def test_clip_non_contig(self):\n # Test clip for non contiguous native input and native scalar min/max.\n a = self._generate_data(self.nr * 2, self.nc * 3)\n a = a[::2, ::3]\n assert_(not a.flags['F_CONTIGUOUS'])\n assert_(not a.flags['C_CONTIGUOUS'])\n ac = self.fastclip(a, -1.6, 1.7)\n act = self.clip(a, -1.6, 1.7)\n assert_array_strict_equal(ac, act)\n\n def test_simple_out(self):\n # Test native double input with scalar min/max.\n a = self._generate_data(self.nr, self.nc)\n m = -0.5\n M = 0.6\n ac = np.zeros(a.shape)\n act = np.zeros(a.shape)\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n @pytest.mark.parametrize(\"casting\", [None, \"unsafe\"])\n def test_simple_int32_inout(self, casting):\n # Test native int32 input with double min/max and int32 out.\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.float64(0)\n M = np.float64(2)\n ac = np.zeros(a.shape, dtype=np.int32)\n act = ac.copy()\n if casting is None:\n with assert_warns(DeprecationWarning):\n # NumPy 1.17.0, 2018-02-24 - casting is unsafe\n self.fastclip(a, m, M, ac, casting=casting)\n else:\n # explicitly passing \"unsafe\" will silence warning\n self.fastclip(a, m, M, ac, casting=casting)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_simple_int64_out(self):\n # Test native int32 input with int32 scalar min/max and int64 out.\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.int32(-1)\n M = np.int32(1)\n ac = np.zeros(a.shape, dtype=np.int64)\n act = ac.copy()\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_simple_int64_inout(self):\n # Test native int32 input with double array min/max and int32 out.\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.zeros(a.shape, np.float64)\n M = np.float64(1)\n ac = np.zeros(a.shape, dtype=np.int32)\n act = ac.copy()\n with assert_warns(DeprecationWarning):\n # NumPy 1.17.0, 2018-02-24 - casting is unsafe\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_simple_int32_out(self):\n # Test native double input with scalar min/max and int out.\n a = self._generate_data(self.nr, self.nc)\n m = -1.0\n M = 2.0\n ac = np.zeros(a.shape, dtype=np.int32)\n act = ac.copy()\n with assert_warns(DeprecationWarning):\n # NumPy 1.17.0, 2018-02-24 - casting is unsafe\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_simple_inplace_01(self):\n # Test native double input with array min/max in-place.\n a = self._generate_data(self.nr, self.nc)\n ac = a.copy()\n m = np.zeros(a.shape)\n M = 1.0\n self.fastclip(a, m, M, a)\n self.clip(a, m, M, ac)\n assert_array_strict_equal(a, ac)\n\n def test_simple_inplace_02(self):\n # Test native double input with scalar min/max in-place.\n a = self._generate_data(self.nr, self.nc)\n ac = a.copy()\n m = -0.5\n M = 0.6\n self.fastclip(a, m, M, a)\n self.clip(ac, m, M, ac)\n assert_array_strict_equal(a, ac)\n\n def 
test_noncontig_inplace(self):\n # Test non contiguous double input with double scalar min/max in-place.\n a = self._generate_data(self.nr * 2, self.nc * 3)\n a = a[::2, ::3]\n assert_(not a.flags['F_CONTIGUOUS'])\n assert_(not a.flags['C_CONTIGUOUS'])\n ac = a.copy()\n m = -0.5\n M = 0.6\n self.fastclip(a, m, M, a)\n self.clip(ac, m, M, ac)\n assert_array_equal(a, ac)\n\n def test_type_cast_01(self):\n # Test native double input with scalar min/max.\n a = self._generate_data(self.nr, self.nc)\n m = -0.5\n M = 0.6\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_02(self):\n # Test native int32 input with int32 scalar min/max.\n a = self._generate_int_data(self.nr, self.nc)\n a = a.astype(np.int32)\n m = -2\n M = 4\n ac = self.fastclip(a, m, M)\n act = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_03(self):\n # Test native int32 input with float64 scalar min/max.\n a = self._generate_int32_data(self.nr, self.nc)\n m = -2\n M = 4\n ac = self.fastclip(a, np.float64(m), np.float64(M))\n act = self.clip(a, np.float64(m), np.float64(M))\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_04(self):\n # Test native int32 input with float32 scalar min/max.\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.float32(-2)\n M = np.float32(4)\n act = self.fastclip(a, m, M)\n ac = self.clip(a, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_05(self):\n # Test native int32 with double arrays min/max.\n a = self._generate_int_data(self.nr, self.nc)\n m = -0.5\n M = 1.\n ac = self.fastclip(a, m * np.zeros(a.shape), M)\n act = self.clip(a, m * np.zeros(a.shape), M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_06(self):\n # Test native with NON native scalar min/max.\n a = self._generate_data(self.nr, self.nc)\n m = 0.5\n m_s = self._neg_byteorder(m)\n M = 1.\n act = self.clip(a, m_s, M)\n ac = self.fastclip(a, m_s, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_07(self):\n # Test NON native with native array min/max.\n a = self._generate_data(self.nr, self.nc)\n m = -0.5 * np.ones(a.shape)\n M = 1.\n a_s = self._neg_byteorder(a)\n assert_(not a_s.dtype.isnative)\n act = a_s.clip(m, M)\n ac = self.fastclip(a_s, m, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_08(self):\n # Test NON native with native scalar min/max.\n a = self._generate_data(self.nr, self.nc)\n m = -0.5\n M = 1.\n a_s = self._neg_byteorder(a)\n assert_(not a_s.dtype.isnative)\n ac = self.fastclip(a_s, m, M)\n act = a_s.clip(m, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_09(self):\n # Test native with NON native array min/max.\n a = self._generate_data(self.nr, self.nc)\n m = -0.5 * np.ones(a.shape)\n M = 1.\n m_s = self._neg_byteorder(m)\n assert_(not m_s.dtype.isnative)\n ac = self.fastclip(a, m_s, M)\n act = self.clip(a, m_s, M)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_10(self):\n # Test native int32 with float min/max and float out for output argument.\n a = self._generate_int_data(self.nr, self.nc)\n b = np.zeros(a.shape, dtype=np.float32)\n m = np.float32(-0.5)\n M = np.float32(1)\n act = self.clip(a, m, M, out=b)\n ac = self.fastclip(a, m, M, out=b)\n assert_array_strict_equal(ac, act)\n\n def test_type_cast_11(self):\n # Test non native with native scalar, min/max, out non native\n a = self._generate_non_native_data(self.nr, self.nc)\n b = a.copy()\n b = b.astype(b.dtype.newbyteorder('>'))\n bt = b.copy()\n m = -0.5\n M 
= 1.\n self.fastclip(a, m, M, out=b)\n self.clip(a, m, M, out=bt)\n assert_array_strict_equal(b, bt)\n\n def test_type_cast_12(self):\n # Test native int32 input and min/max and float out\n a = self._generate_int_data(self.nr, self.nc)\n b = np.zeros(a.shape, dtype=np.float32)\n m = np.int32(0)\n M = np.int32(1)\n act = self.clip(a, m, M, out=b)\n ac = self.fastclip(a, m, M, out=b)\n assert_array_strict_equal(ac, act)\n\n def test_clip_with_out_simple(self):\n # Test native double input with scalar min/max\n a = self._generate_data(self.nr, self.nc)\n m = -0.5\n M = 0.6\n ac = np.zeros(a.shape)\n act = np.zeros(a.shape)\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_clip_with_out_simple2(self):\n # Test native int32 input with double min/max and int32 out\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.float64(0)\n M = np.float64(2)\n ac = np.zeros(a.shape, dtype=np.int32)\n act = ac.copy()\n with assert_warns(DeprecationWarning):\n # NumPy 1.17.0, 2018-02-24 - casting is unsafe\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_clip_with_out_simple_int32(self):\n # Test native int32 input with int32 scalar min/max and int64 out\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.int32(-1)\n M = np.int32(1)\n ac = np.zeros(a.shape, dtype=np.int64)\n act = ac.copy()\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_clip_with_out_array_int32(self):\n # Test native int32 input with double array min/max and int32 out\n a = self._generate_int32_data(self.nr, self.nc)\n m = np.zeros(a.shape, np.float64)\n M = np.float64(1)\n ac = np.zeros(a.shape, dtype=np.int32)\n act = ac.copy()\n with assert_warns(DeprecationWarning):\n # NumPy 1.17.0, 2018-02-24 - casting is unsafe\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_clip_with_out_array_outint32(self):\n # Test native double input with scalar min/max and int out\n a = self._generate_data(self.nr, self.nc)\n m = -1.0\n M = 2.0\n ac = np.zeros(a.shape, dtype=np.int32)\n act = ac.copy()\n with assert_warns(DeprecationWarning):\n # NumPy 1.17.0, 2018-02-24 - casting is unsafe\n self.fastclip(a, m, M, ac)\n self.clip(a, m, M, act)\n assert_array_strict_equal(ac, act)\n\n def test_clip_with_out_transposed(self):\n # Test that the out argument works when tranposed\n a = np.arange(16).reshape(4, 4)\n out = np.empty_like(a).T\n a.clip(4, 10, out=out)\n expected = self.clip(a, 4, 10)\n assert_array_equal(out, expected)\n\n def test_clip_with_out_memory_overlap(self):\n # Test that the out argument works when it has memory overlap\n a = np.arange(16).reshape(4, 4)\n ac = a.copy()\n a[:-1].clip(4, 10, out=a[1:])\n expected = self.clip(ac[:-1], 4, 10)\n assert_array_equal(a[1:], expected)\n\n def test_clip_inplace_array(self):\n # Test native double input with array min/max\n a = self._generate_data(self.nr, self.nc)\n ac = a.copy()\n m = np.zeros(a.shape)\n M = 1.0\n self.fastclip(a, m, M, a)\n self.clip(a, m, M, ac)\n assert_array_strict_equal(a, ac)\n\n def test_clip_inplace_simple(self):\n # Test native double input with scalar min/max\n a = self._generate_data(self.nr, self.nc)\n ac = a.copy()\n m = -0.5\n M = 0.6\n self.fastclip(a, m, M, a)\n self.clip(a, m, M, ac)\n assert_array_strict_equal(a, ac)\n\n def test_clip_func_takes_out(self):\n # Ensure that the clip() function takes an out=argument.\n a = 
self._generate_data(self.nr, self.nc)\n ac = a.copy()\n m = -0.5\n M = 0.6\n a2 = np.clip(a, m, M, out=a)\n self.clip(a, m, M, ac)\n assert_array_strict_equal(a2, ac)\n assert_(a2 is a)\n\n def test_clip_nan(self):\n d = np.arange(7.)\n with assert_warns(DeprecationWarning):\n assert_equal(d.clip(min=np.nan), d)\n with assert_warns(DeprecationWarning):\n assert_equal(d.clip(max=np.nan), d)\n with assert_warns(DeprecationWarning):\n assert_equal(d.clip(min=np.nan, max=np.nan), d)\n with assert_warns(DeprecationWarning):\n assert_equal(d.clip(min=-2, max=np.nan), d)\n with assert_warns(DeprecationWarning):\n assert_equal(d.clip(min=np.nan, max=10), d)\n\n def test_object_clip(self):\n a = np.arange(10, dtype=object)\n actual = np.clip(a, 1, 5)\n expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])\n assert actual.tolist() == expected.tolist()\n\n def test_clip_all_none(self):\n a = np.arange(10, dtype=object)\n with assert_raises_regex(ValueError, 'max or min'):\n np.clip(a, None, None)\n\n def test_clip_invalid_casting(self):\n a = np.arange(10, dtype=object)\n with assert_raises_regex(ValueError,\n 'casting must be one of'):\n self.fastclip(a, 1, 8, casting=\"garbage\")\n\n @pytest.mark.parametrize(\"amin, amax\", [\n # two scalars\n (1, 0),\n # mix scalar and array\n (1, np.zeros(10)),\n # two arrays\n (np.ones(10), np.zeros(10)),\n ])\n def test_clip_value_min_max_flip(self, amin, amax):\n a = np.arange(10, dtype=np.int64)\n # requirement from ufunc_docstrings.py\n expected = np.minimum(np.maximum(a, amin), amax)\n actual = np.clip(a, amin, amax)\n assert_equal(actual, expected)\n\n @pytest.mark.parametrize(\"arr, amin, amax, exp\", [\n # for a bug in npy_ObjectClip, based on a\n # case produced by hypothesis\n (np.zeros(10, dtype=np.int64),\n 0,\n -2**64+1,\n np.full(10, -2**64+1, dtype=object)),\n # for bugs in NPY_TIMEDELTA_MAX, based on a case\n # produced by hypothesis\n (np.zeros(10, dtype='m8') - 1,\n 0,\n 0,\n np.zeros(10, dtype='m8')),\n ])\n def test_clip_problem_cases(self, arr, amin, amax, exp):\n actual = np.clip(arr, amin, amax)\n assert_equal(actual, exp)\n\n @pytest.mark.xfail(reason=\"no scalar nan propagation yet\")\n @pytest.mark.parametrize(\"arr, amin, amax\", [\n # problematic scalar nan case from hypothesis\n (np.zeros(10, dtype=np.int64),\n np.array(np.nan),\n np.zeros(10, dtype=np.int32)),\n ])\n def test_clip_scalar_nan_propagation(self, arr, amin, amax):\n # enforcement of scalar nan propagation for comparisons\n # called through clip()\n expected = np.minimum(np.maximum(a, amin), amax)\n with assert_warns(DeprecationWarning):\n actual = np.clip(arr, amin, amax)\n assert_equal(actual, expected)\n\n @pytest.mark.xfail(reason=\"propagation doesn't match spec\")\n @pytest.mark.parametrize(\"arr, amin, amax\", [\n (np.array([1] * 10, dtype='m8'),\n np.timedelta64('NaT'),\n np.zeros(10, dtype=np.int32)),\n ])\n def test_NaT_propagation(self, arr, amin, amax):\n # NOTE: the expected function spec doesn't\n # propagate NaT, but clip() now does\n expected = np.minimum(np.maximum(a, amin), amax)\n actual = np.clip(arr, amin, amax)\n assert_equal(actual, expected)\n\n\nclass TestAllclose(object):\n rtol = 1e-5\n atol = 1e-8\n\n def setup(self):\n self.olderr = np.seterr(invalid='ignore')\n\n def teardown(self):\n np.seterr(**self.olderr)\n\n def tst_allclose(self, x, y):\n assert_(np.allclose(x, y), \"%s and %s not close\" % (x, y))\n\n def tst_not_allclose(self, x, y):\n assert_(not np.allclose(x, y), \"%s and %s shouldn't be close\" % (x, y))\n\n def 
test_ip_allclose(self):\n # Parametric test factory.\n arr = np.array([100, 1000])\n aran = np.arange(125).reshape((5, 5, 5))\n\n atol = self.atol\n rtol = self.rtol\n\n data = [([1, 0], [1, 0]),\n ([atol], [0]),\n ([1], [1+rtol+atol]),\n (arr, arr + arr*rtol),\n (arr, arr + arr*rtol + atol*2),\n (aran, aran + aran*rtol),\n (np.inf, np.inf),\n (np.inf, [np.inf])]\n\n for (x, y) in data:\n self.tst_allclose(x, y)\n\n def test_ip_not_allclose(self):\n # Parametric test factory.\n aran = np.arange(125).reshape((5, 5, 5))\n\n atol = self.atol\n rtol = self.rtol\n\n data = [([np.inf, 0], [1, np.inf]),\n ([np.inf, 0], [1, 0]),\n ([np.inf, np.inf], [1, np.inf]),\n ([np.inf, np.inf], [1, 0]),\n ([-np.inf, 0], [np.inf, 0]),\n ([np.nan, 0], [np.nan, 0]),\n ([atol*2], [0]),\n ([1], [1+rtol+atol*2]),\n (aran, aran + aran*atol + atol*2),\n (np.array([np.inf, 1]), np.array([0, np.inf]))]\n\n for (x, y) in data:\n self.tst_not_allclose(x, y)\n\n def test_no_parameter_modification(self):\n x = np.array([np.inf, 1])\n y = np.array([0, np.inf])\n np.allclose(x, y)\n assert_array_equal(x, np.array([np.inf, 1]))\n assert_array_equal(y, np.array([0, np.inf]))\n\n def test_min_int(self):\n # Could make problems because of abs(min_int) == min_int\n min_int = np.iinfo(np.int_).min\n a = np.array([min_int], dtype=np.int_)\n assert_(np.allclose(a, a))\n\n def test_equalnan(self):\n x = np.array([1.0, np.nan])\n assert_(np.allclose(x, x, equal_nan=True))\n\n def test_return_class_is_ndarray(self):\n # Issue gh-6475\n # Check that allclose does not preserve subtypes\n class Foo(np.ndarray):\n def __new__(cls, *args, **kwargs):\n return np.array(*args, **kwargs).view(cls)\n\n a = Foo([1])\n assert_(type(np.allclose(a, a)) is bool)\n\n\nclass TestIsclose(object):\n rtol = 1e-5\n atol = 1e-8\n\n def setup(self):\n atol = self.atol\n rtol = self.rtol\n arr = np.array([100, 1000])\n aran = np.arange(125).reshape((5, 5, 5))\n\n self.all_close_tests = [\n ([1, 0], [1, 0]),\n ([atol], [0]),\n ([1], [1 + rtol + atol]),\n (arr, arr + arr*rtol),\n (arr, arr + arr*rtol + atol),\n (aran, aran + aran*rtol),\n (np.inf, np.inf),\n (np.inf, [np.inf]),\n ([np.inf, -np.inf], [np.inf, -np.inf]),\n ]\n self.none_close_tests = [\n ([np.inf, 0], [1, np.inf]),\n ([np.inf, -np.inf], [1, 0]),\n ([np.inf, np.inf], [1, -np.inf]),\n ([np.inf, np.inf], [1, 0]),\n ([np.nan, 0], [np.nan, -np.inf]),\n ([atol*2], [0]),\n ([1], [1 + rtol + atol*2]),\n (aran, aran + rtol*1.1*aran + atol*1.1),\n (np.array([np.inf, 1]), np.array([0, np.inf])),\n ]\n self.some_close_tests = [\n ([np.inf, 0], [np.inf, atol*2]),\n ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),\n (np.arange(3), [0, 1, 2.1]),\n (np.nan, [np.nan, np.nan, np.nan]),\n ([0], [atol, np.inf, -np.inf, np.nan]),\n (0, [atol, np.inf, -np.inf, np.nan]),\n ]\n self.some_close_results = [\n [True, False],\n [True, False, False],\n [True, True, False],\n [False, False, False],\n [True, False, False, False],\n [True, False, False, False],\n ]\n\n def test_ip_isclose(self):\n self.setup()\n tests = self.some_close_tests\n results = self.some_close_results\n for (x, y), result in zip(tests, results):\n assert_array_equal(np.isclose(x, y), result)\n\n def tst_all_isclose(self, x, y):\n assert_(np.all(np.isclose(x, y)), \"%s and %s not close\" % (x, y))\n\n def tst_none_isclose(self, x, y):\n msg = \"%s and %s shouldn't be close\"\n assert_(not np.any(np.isclose(x, y)), msg % (x, y))\n\n def tst_isclose_allclose(self, x, y):\n msg = \"isclose.all() and allclose aren't same for %s and %s\"\n msg2 = 
\"isclose and allclose aren't same for %s and %s\"\n if np.isscalar(x) and np.isscalar(y):\n assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))\n else:\n assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))\n\n def test_ip_all_isclose(self):\n self.setup()\n for (x, y) in self.all_close_tests:\n self.tst_all_isclose(x, y)\n\n def test_ip_none_isclose(self):\n self.setup()\n for (x, y) in self.none_close_tests:\n self.tst_none_isclose(x, y)\n\n def test_ip_isclose_allclose(self):\n self.setup()\n tests = (self.all_close_tests + self.none_close_tests +\n self.some_close_tests)\n for (x, y) in tests:\n self.tst_isclose_allclose(x, y)\n\n def test_equal_nan(self):\n assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])\n arr = np.array([1.0, np.nan])\n assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])\n\n def test_masked_arrays(self):\n # Make sure to test the output type when arguments are interchanged.\n\n x = np.ma.masked_where([True, True, False], np.arange(3))\n assert_(type(x) is type(np.isclose(2, x)))\n assert_(type(x) is type(np.isclose(x, 2)))\n\n x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])\n assert_(type(x) is type(np.isclose(np.inf, x)))\n assert_(type(x) is type(np.isclose(x, np.inf)))\n\n x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])\n y = np.isclose(np.nan, x, equal_nan=True)\n assert_(type(x) is type(y))\n # Ensure that the mask isn't modified...\n assert_array_equal([True, True, False], y.mask)\n y = np.isclose(x, np.nan, equal_nan=True)\n assert_(type(x) is type(y))\n # Ensure that the mask isn't modified...\n assert_array_equal([True, True, False], y.mask)\n\n x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])\n y = np.isclose(x, x, equal_nan=True)\n assert_(type(x) is type(y))\n # Ensure that the mask isn't modified...\n assert_array_equal([True, True, False], y.mask)\n\n def test_scalar_return(self):\n assert_(np.isscalar(np.isclose(1, 1)))\n\n def test_no_parameter_modification(self):\n x = np.array([np.inf, 1])\n y = np.array([0, np.inf])\n np.isclose(x, y)\n assert_array_equal(x, np.array([np.inf, 1]))\n assert_array_equal(y, np.array([0, np.inf]))\n\n def test_non_finite_scalar(self):\n # GH7014, when two scalars are compared the output should also be a\n # scalar\n assert_(np.isclose(np.inf, -np.inf) is np.False_)\n assert_(np.isclose(0, np.inf) is np.False_)\n assert_(type(np.isclose(0, np.inf)) is np.bool_)\n\n\nclass TestStdVar(object):\n def setup(self):\n self.A = np.array([1, -1, 1, -1])\n self.real_var = 1\n\n def test_basic(self):\n assert_almost_equal(np.var(self.A), self.real_var)\n assert_almost_equal(np.std(self.A)**2, self.real_var)\n\n def test_scalars(self):\n assert_equal(np.var(1), 0)\n assert_equal(np.std(1), 0)\n\n def test_ddof1(self):\n assert_almost_equal(np.var(self.A, ddof=1),\n self.real_var*len(self.A)/float(len(self.A)-1))\n assert_almost_equal(np.std(self.A, ddof=1)**2,\n self.real_var*len(self.A)/float(len(self.A)-1))\n\n def test_ddof2(self):\n assert_almost_equal(np.var(self.A, ddof=2),\n self.real_var*len(self.A)/float(len(self.A)-2))\n assert_almost_equal(np.std(self.A, ddof=2)**2,\n self.real_var*len(self.A)/float(len(self.A)-2))\n\n def test_out_scalar(self):\n d = np.arange(10)\n out = np.array(0.)\n r = np.std(d, out=out)\n assert_(r is out)\n assert_array_equal(r, out)\n r = np.var(d, out=out)\n assert_(r is out)\n assert_array_equal(r, out)\n r = np.mean(d, out=out)\n assert_(r is out)\n 
assert_array_equal(r, out)\n\n\nclass TestStdVarComplex(object):\n def test_basic(self):\n A = np.array([1, 1.j, -1, -1.j])\n real_var = 1\n assert_almost_equal(np.var(A), real_var)\n assert_almost_equal(np.std(A)**2, real_var)\n\n def test_scalars(self):\n assert_equal(np.var(1j), 0)\n assert_equal(np.std(1j), 0)\n\n\nclass TestCreationFuncs(object):\n # Test ones, zeros, empty and full.\n\n def setup(self):\n dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}\n # void, bytes, str\n variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}\n self.dtypes = sorted(dtypes - variable_sized |\n {np.dtype(tp.str.replace(\"0\", str(i)))\n for tp in variable_sized for i in range(1, 10)},\n key=lambda dtype: dtype.str)\n self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}\n self.ndims = 10\n\n def check_function(self, func, fill_value=None):\n par = ((0, 1, 2),\n range(self.ndims),\n self.orders,\n self.dtypes)\n fill_kwarg = {}\n if fill_value is not None:\n fill_kwarg = {'fill_value': fill_value}\n\n for size, ndims, order, dtype in itertools.product(*par):\n shape = ndims * [size]\n\n # do not fill void type\n if fill_kwarg and dtype.str.startswith('|V'):\n continue\n\n arr = func(shape, order=order, dtype=dtype,\n **fill_kwarg)\n\n assert_equal(arr.dtype, dtype)\n assert_(getattr(arr.flags, self.orders[order]))\n\n if fill_value is not None:\n if dtype.str.startswith('|S'):\n val = str(fill_value)\n else:\n val = fill_value\n assert_equal(arr, dtype.type(val))\n\n def test_zeros(self):\n self.check_function(np.zeros)\n\n def test_ones(self):\n self.check_function(np.zeros)\n\n def test_empty(self):\n self.check_function(np.empty)\n\n def test_full(self):\n self.check_function(np.full, 0)\n self.check_function(np.full, 1)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_for_reference_leak(self):\n # Make sure we have an object for reference\n dim = 1\n beg = sys.getrefcount(dim)\n np.zeros([dim]*10)\n assert_(sys.getrefcount(dim) == beg)\n np.ones([dim]*10)\n assert_(sys.getrefcount(dim) == beg)\n np.empty([dim]*10)\n assert_(sys.getrefcount(dim) == beg)\n np.full([dim]*10, 0)\n assert_(sys.getrefcount(dim) == beg)\n\n\nclass TestLikeFuncs(object):\n '''Test ones_like, zeros_like, empty_like and full_like'''\n\n def setup(self):\n self.data = [\n # Array scalars\n (np.array(3.), None),\n (np.array(3), 'f8'),\n # 1D arrays\n (np.arange(6, dtype='f4'), None),\n (np.arange(6), 'c16'),\n # 2D C-layout arrays\n (np.arange(6).reshape(2, 3), None),\n (np.arange(6).reshape(3, 2), 'i1'),\n # 2D F-layout arrays\n (np.arange(6).reshape((2, 3), order='F'), None),\n (np.arange(6).reshape((3, 2), order='F'), 'i1'),\n # 3D C-layout arrays\n (np.arange(24).reshape(2, 3, 4), None),\n (np.arange(24).reshape(4, 3, 2), 'f4'),\n # 3D F-layout arrays\n (np.arange(24).reshape((2, 3, 4), order='F'), None),\n (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),\n # 3D non-C/F-layout arrays\n (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),\n (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),\n ]\n self.shapes = [(5,), (5,6,), (5,6,7,)]\n\n def compare_array_value(self, dz, value, fill_value):\n if value is not None:\n if fill_value:\n try:\n z = dz.dtype.type(value)\n except OverflowError:\n pass\n else:\n assert_(np.all(dz == z))\n else:\n assert_(np.all(dz == value))\n\n def check_like_function(self, like_function, value, fill_value=False):\n if fill_value:\n fill_kwarg = {'fill_value': value}\n else:\n fill_kwarg = {}\n for d, dtype in 
self.data:\n # default (K) order, dtype\n dz = like_function(d, dtype=dtype, **fill_kwarg)\n assert_equal(dz.shape, d.shape)\n assert_equal(np.array(dz.strides)*d.dtype.itemsize,\n np.array(d.strides)*dz.dtype.itemsize)\n assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)\n assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)\n if dtype is None:\n assert_equal(dz.dtype, d.dtype)\n else:\n assert_equal(dz.dtype, np.dtype(dtype))\n self.compare_array_value(dz, value, fill_value)\n\n # C order, default dtype\n dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)\n assert_equal(dz.shape, d.shape)\n assert_(dz.flags.c_contiguous)\n if dtype is None:\n assert_equal(dz.dtype, d.dtype)\n else:\n assert_equal(dz.dtype, np.dtype(dtype))\n self.compare_array_value(dz, value, fill_value)\n\n # F order, default dtype\n dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)\n assert_equal(dz.shape, d.shape)\n assert_(dz.flags.f_contiguous)\n if dtype is None:\n assert_equal(dz.dtype, d.dtype)\n else:\n assert_equal(dz.dtype, np.dtype(dtype))\n self.compare_array_value(dz, value, fill_value)\n\n # A order\n dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)\n assert_equal(dz.shape, d.shape)\n if d.flags.f_contiguous:\n assert_(dz.flags.f_contiguous)\n else:\n assert_(dz.flags.c_contiguous)\n if dtype is None:\n assert_equal(dz.dtype, d.dtype)\n else:\n assert_equal(dz.dtype, np.dtype(dtype))\n self.compare_array_value(dz, value, fill_value)\n\n # Test the 'shape' parameter\n for s in self.shapes:\n for o in 'CFA':\n sz = like_function(d, dtype=dtype, shape=s, order=o,\n **fill_kwarg)\n assert_equal(sz.shape, s)\n if dtype is None:\n assert_equal(sz.dtype, d.dtype)\n else:\n assert_equal(sz.dtype, np.dtype(dtype))\n if o == 'C' or (o == 'A' and d.flags.c_contiguous):\n assert_(sz.flags.c_contiguous)\n elif o == 'F' or (o == 'A' and d.flags.f_contiguous):\n assert_(sz.flags.f_contiguous)\n self.compare_array_value(sz, value, fill_value)\n\n if (d.ndim != len(s)):\n assert_equal(np.argsort(like_function(d, dtype=dtype,\n shape=s, order='K',\n **fill_kwarg).strides),\n np.argsort(np.empty(s, dtype=dtype,\n order='C').strides))\n else:\n assert_equal(np.argsort(like_function(d, dtype=dtype,\n shape=s, order='K',\n **fill_kwarg).strides),\n np.argsort(d.strides))\n\n # Test the 'subok' parameter\n class MyNDArray(np.ndarray):\n pass\n\n a = np.array([[1, 2], [3, 4]]).view(MyNDArray)\n\n b = like_function(a, **fill_kwarg)\n assert_(type(b) is MyNDArray)\n\n b = like_function(a, subok=False, **fill_kwarg)\n assert_(type(b) is not MyNDArray)\n\n def test_ones_like(self):\n self.check_like_function(np.ones_like, 1)\n\n def test_zeros_like(self):\n self.check_like_function(np.zeros_like, 0)\n\n def test_empty_like(self):\n self.check_like_function(np.empty_like, None)\n\n def test_filled_like(self):\n self.check_like_function(np.full_like, 0, True)\n self.check_like_function(np.full_like, 1, True)\n self.check_like_function(np.full_like, 1000, True)\n self.check_like_function(np.full_like, 123.456, True)\n self.check_like_function(np.full_like, np.inf, True)\n\n\nclass TestCorrelate(object):\n def _setup(self, dt):\n self.x = np.array([1, 2, 3, 4, 5], dtype=dt)\n self.xs = np.arange(1, 20)[::3]\n self.y = np.array([-1, -2, -3], dtype=dt)\n self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt)\n self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)\n self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)\n self.z2 = np.array([-5., -14., 
-26., -20., -14., -8., -3.], dtype=dt)\n self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)\n self.zs = np.array([-3., -14., -30., -48., -66., -84.,\n -102., -54., -19.], dtype=dt)\n\n def test_float(self):\n self._setup(float)\n z = np.correlate(self.x, self.y, 'full')\n assert_array_almost_equal(z, self.z1)\n z = np.correlate(self.x, self.y[:-1], 'full')\n assert_array_almost_equal(z, self.z1_4)\n z = np.correlate(self.y, self.x, 'full')\n assert_array_almost_equal(z, self.z2)\n z = np.correlate(self.x[::-1], self.y, 'full')\n assert_array_almost_equal(z, self.z1r)\n z = np.correlate(self.y, self.x[::-1], 'full')\n assert_array_almost_equal(z, self.z2r)\n z = np.correlate(self.xs, self.y, 'full')\n assert_array_almost_equal(z, self.zs)\n\n def test_object(self):\n self._setup(Decimal)\n z = np.correlate(self.x, self.y, 'full')\n assert_array_almost_equal(z, self.z1)\n z = np.correlate(self.y, self.x, 'full')\n assert_array_almost_equal(z, self.z2)\n\n def test_no_overwrite(self):\n d = np.ones(100)\n k = np.ones(3)\n np.correlate(d, k)\n assert_array_equal(d, np.ones(100))\n assert_array_equal(k, np.ones(3))\n\n def test_complex(self):\n x = np.array([1, 2, 3, 4+1j], dtype=complex)\n y = np.array([-1, -2j, 3+1j], dtype=complex)\n r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)\n r_z = r_z[::-1].conjugate()\n z = np.correlate(y, x, mode='full')\n assert_array_almost_equal(z, r_z)\n\n def test_zero_size(self):\n with pytest.raises(ValueError):\n np.correlate(np.array([]), np.ones(1000), mode='full')\n with pytest.raises(ValueError):\n np.correlate(np.ones(1000), np.array([]), mode='full')\n\nclass TestConvolve(object):\n def test_object(self):\n d = [1.] * 100\n k = [1.] * 3\n assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))\n\n def test_no_overwrite(self):\n d = np.ones(100)\n k = np.ones(3)\n np.convolve(d, k)\n assert_array_equal(d, np.ones(100))\n assert_array_equal(k, np.ones(3))\n\n\nclass TestArgwhere(object):\n\n @pytest.mark.parametrize('nd', [0, 1, 2])\n def test_nd(self, nd):\n # get an nd array with multiple elements in every dimension\n x = np.empty((2,)*nd, bool)\n\n # none\n x[...] = False\n assert_equal(np.argwhere(x).shape, (0, nd))\n\n # only one\n x[...] = False\n x.flat[0] = True\n assert_equal(np.argwhere(x).shape, (1, nd))\n\n # all but one\n x[...] = True\n x.flat[0] = False\n assert_equal(np.argwhere(x).shape, (x.size - 1, nd))\n\n # all\n x[...] 
= True\n assert_equal(np.argwhere(x).shape, (x.size, nd))\n\n def test_2D(self):\n x = np.arange(6).reshape((2, 3))\n assert_array_equal(np.argwhere(x > 1),\n [[0, 2],\n [1, 0],\n [1, 1],\n [1, 2]])\n\n def test_list(self):\n assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])\n\n\nclass TestStringFunction(object):\n\n def test_set_string_function(self):\n a = np.array([1])\n np.set_string_function(lambda x: \"FOO\", repr=True)\n assert_equal(repr(a), \"FOO\")\n np.set_string_function(None, repr=True)\n assert_equal(repr(a), \"array([1])\")\n\n np.set_string_function(lambda x: \"FOO\", repr=False)\n assert_equal(str(a), \"FOO\")\n np.set_string_function(None, repr=False)\n assert_equal(str(a), \"[1]\")\n\n\nclass TestRoll(object):\n def test_roll1d(self):\n x = np.arange(10)\n xr = np.roll(x, 2)\n assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))\n\n def test_roll2d(self):\n x2 = np.reshape(np.arange(10), (2, 5))\n x2r = np.roll(x2, 1)\n assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))\n\n x2r = np.roll(x2, 1, axis=0)\n assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))\n\n x2r = np.roll(x2, 1, axis=1)\n assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))\n\n # Roll multiple axes at once.\n x2r = np.roll(x2, 1, axis=(0, 1))\n assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))\n\n x2r = np.roll(x2, (1, 0), axis=(0, 1))\n assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))\n\n x2r = np.roll(x2, (-1, 0), axis=(0, 1))\n assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))\n\n x2r = np.roll(x2, (0, 1), axis=(0, 1))\n assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))\n\n x2r = np.roll(x2, (0, -1), axis=(0, 1))\n assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))\n\n x2r = np.roll(x2, (1, 1), axis=(0, 1))\n assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))\n\n x2r = np.roll(x2, (-1, -1), axis=(0, 1))\n assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))\n\n # Roll the same axis multiple times.\n x2r = np.roll(x2, 1, axis=(0, 0))\n assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))\n\n x2r = np.roll(x2, 1, axis=(1, 1))\n assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))\n\n # Roll more than one turn in either direction.\n x2r = np.roll(x2, 6, axis=1)\n assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))\n\n x2r = np.roll(x2, -4, axis=1)\n assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))\n\n def test_roll_empty(self):\n x = np.array([])\n assert_equal(np.roll(x, 1), np.array([]))\n\n\nclass TestRollaxis(object):\n\n # expected shape indexed by (axis, start) for array of\n # shape (1, 2, 3, 4)\n tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),\n (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),\n (0, 4): (2, 3, 4, 1),\n (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),\n (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),\n (1, 4): (1, 3, 4, 2),\n (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),\n (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),\n (2, 4): (1, 2, 4, 3),\n (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),\n (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),\n (3, 4): (1, 2, 3, 4)}\n\n def test_exceptions(self):\n a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)\n assert_raises(np.AxisError, np.rollaxis, a, -5, 0)\n assert_raises(np.AxisError, np.rollaxis, a, 0, -5)\n assert_raises(np.AxisError, np.rollaxis, a, 4, 0)\n assert_raises(np.AxisError, np.rollaxis, a, 0, 5)\n\n def test_results(self):\n a = 
np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()\n aind = np.indices(a.shape)\n assert_(a.flags['OWNDATA'])\n for (i, j) in self.tgtshape:\n # positive axis, positive start\n res = np.rollaxis(a, axis=i, start=j)\n i0, i1, i2, i3 = aind[np.array(res.shape) - 1]\n assert_(np.all(res[i0, i1, i2, i3] == a))\n assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))\n assert_(not res.flags['OWNDATA'])\n\n # negative axis, positive start\n ip = i + 1\n res = np.rollaxis(a, axis=-ip, start=j)\n i0, i1, i2, i3 = aind[np.array(res.shape) - 1]\n assert_(np.all(res[i0, i1, i2, i3] == a))\n assert_(res.shape == self.tgtshape[(4 - ip, j)])\n assert_(not res.flags['OWNDATA'])\n\n # positive axis, negative start\n jp = j + 1 if j < 4 else j\n res = np.rollaxis(a, axis=i, start=-jp)\n i0, i1, i2, i3 = aind[np.array(res.shape) - 1]\n assert_(np.all(res[i0, i1, i2, i3] == a))\n assert_(res.shape == self.tgtshape[(i, 4 - jp)])\n assert_(not res.flags['OWNDATA'])\n\n # negative axis, negative start\n ip = i + 1\n jp = j + 1 if j < 4 else j\n res = np.rollaxis(a, axis=-ip, start=-jp)\n i0, i1, i2, i3 = aind[np.array(res.shape) - 1]\n assert_(np.all(res[i0, i1, i2, i3] == a))\n assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])\n assert_(not res.flags['OWNDATA'])\n\n\nclass TestMoveaxis(object):\n def test_move_to_end(self):\n x = np.random.randn(5, 6, 7)\n for source, expected in [(0, (6, 7, 5)),\n (1, (5, 7, 6)),\n (2, (5, 6, 7)),\n (-1, (5, 6, 7))]:\n actual = np.moveaxis(x, source, -1).shape\n assert_(actual, expected)\n\n def test_move_new_position(self):\n x = np.random.randn(1, 2, 3, 4)\n for source, destination, expected in [\n (0, 1, (2, 1, 3, 4)),\n (1, 2, (1, 3, 2, 4)),\n (1, -1, (1, 3, 4, 2)),\n ]:\n actual = np.moveaxis(x, source, destination).shape\n assert_(actual, expected)\n\n def test_preserve_order(self):\n x = np.zeros((1, 2, 3, 4))\n for source, destination in [\n (0, 0),\n (3, -1),\n (-1, 3),\n ([0, -1], [0, -1]),\n ([2, 0], [2, 0]),\n (range(4), range(4)),\n ]:\n actual = np.moveaxis(x, source, destination).shape\n assert_(actual, (1, 2, 3, 4))\n\n def test_move_multiples(self):\n x = np.zeros((0, 1, 2, 3))\n for source, destination, expected in [\n ([0, 1], [2, 3], (2, 3, 0, 1)),\n ([2, 3], [0, 1], (2, 3, 0, 1)),\n ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),\n ([3, 0], [1, 0], (0, 3, 1, 2)),\n ([0, 3], [0, 1], (0, 3, 1, 2)),\n ]:\n actual = np.moveaxis(x, source, destination).shape\n assert_(actual, expected)\n\n def test_errors(self):\n x = np.random.randn(1, 2, 3)\n assert_raises_regex(np.AxisError, 'source.*out of bounds',\n np.moveaxis, x, 3, 0)\n assert_raises_regex(np.AxisError, 'source.*out of bounds',\n np.moveaxis, x, -4, 0)\n assert_raises_regex(np.AxisError, 'destination.*out of bounds',\n np.moveaxis, x, 0, 5)\n assert_raises_regex(ValueError, 'repeated axis in `source`',\n np.moveaxis, x, [0, 0], [0, 1])\n assert_raises_regex(ValueError, 'repeated axis in `destination`',\n np.moveaxis, x, [0, 1], [1, 1])\n assert_raises_regex(ValueError, 'must have the same number',\n np.moveaxis, x, 0, [0, 1])\n assert_raises_regex(ValueError, 'must have the same number',\n np.moveaxis, x, [0, 1], [0])\n\n def test_array_likes(self):\n x = np.ma.zeros((1, 2, 3))\n result = np.moveaxis(x, 0, 0)\n assert_(x.shape, result.shape)\n assert_(isinstance(result, np.ma.MaskedArray))\n\n x = [1, 2, 3]\n result = np.moveaxis(x, 0, 0)\n assert_(x, list(result))\n assert_(isinstance(result, np.ndarray))\n\n\nclass TestCross(object):\n def test_2x2(self):\n u = [1, 2]\n v = [3, 4]\n z = -2\n cp = np.cross(u, 
v)\n assert_equal(cp, z)\n cp = np.cross(v, u)\n assert_equal(cp, -z)\n\n def test_2x3(self):\n u = [1, 2]\n v = [3, 4, 5]\n z = np.array([10, -5, -2])\n cp = np.cross(u, v)\n assert_equal(cp, z)\n cp = np.cross(v, u)\n assert_equal(cp, -z)\n\n def test_3x3(self):\n u = [1, 2, 3]\n v = [4, 5, 6]\n z = np.array([-3, 6, -3])\n cp = np.cross(u, v)\n assert_equal(cp, z)\n cp = np.cross(v, u)\n assert_equal(cp, -z)\n\n def test_broadcasting(self):\n # Ticket #2624 (Trac #2032)\n u = np.tile([1, 2], (11, 1))\n v = np.tile([3, 4], (11, 1))\n z = -2\n assert_equal(np.cross(u, v), z)\n assert_equal(np.cross(v, u), -z)\n assert_equal(np.cross(u, u), 0)\n\n u = np.tile([1, 2], (11, 1)).T\n v = np.tile([3, 4, 5], (11, 1))\n z = np.tile([10, -5, -2], (11, 1))\n assert_equal(np.cross(u, v, axisa=0), z)\n assert_equal(np.cross(v, u.T), -z)\n assert_equal(np.cross(v, v), 0)\n\n u = np.tile([1, 2, 3], (11, 1)).T\n v = np.tile([3, 4], (11, 1)).T\n z = np.tile([-12, 9, -2], (11, 1))\n assert_equal(np.cross(u, v, axisa=0, axisb=0), z)\n assert_equal(np.cross(v.T, u.T), -z)\n assert_equal(np.cross(u.T, u.T), 0)\n\n u = np.tile([1, 2, 3], (5, 1))\n v = np.tile([4, 5, 6], (5, 1)).T\n z = np.tile([-3, 6, -3], (5, 1))\n assert_equal(np.cross(u, v, axisb=0), z)\n assert_equal(np.cross(v.T, u), -z)\n assert_equal(np.cross(u, u), 0)\n\n def test_broadcasting_shapes(self):\n u = np.ones((2, 1, 3))\n v = np.ones((5, 3))\n assert_equal(np.cross(u, v).shape, (2, 5, 3))\n u = np.ones((10, 3, 5))\n v = np.ones((2, 5))\n assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))\n assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)\n assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)\n u = np.ones((10, 3, 5, 7))\n v = np.ones((5, 7, 2))\n assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))\n assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)\n assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)\n # gh-5885\n u = np.ones((3, 4, 2))\n for axisc in range(-2, 2):\n assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))\n\n\ndef test_outer_out_param():\n arr1 = np.ones((5,))\n arr2 = np.ones((2,))\n arr3 = np.linspace(-2, 2, 5)\n out1 = np.ndarray(shape=(5,5))\n out2 = np.ndarray(shape=(2, 5))\n res1 = np.outer(arr1, arr3, out1)\n assert_equal(res1, out1)\n assert_equal(np.outer(arr2, arr3, out2), out2)\n\n\nclass TestIndices(object):\n\n def test_simple(self):\n [x, y] = np.indices((4, 3))\n assert_array_equal(x, np.array([[0, 0, 0],\n [1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]]))\n assert_array_equal(y, np.array([[0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2]]))\n\n def test_single_input(self):\n [x] = np.indices((4,))\n assert_array_equal(x, np.array([0, 1, 2, 3]))\n\n [x] = np.indices((4,), sparse=True)\n assert_array_equal(x, np.array([0, 1, 2, 3]))\n\n def test_scalar_input(self):\n assert_array_equal([], np.indices(()))\n assert_array_equal([], np.indices((), sparse=True))\n assert_array_equal([[]], np.indices((0,)))\n assert_array_equal([[]], np.indices((0,), sparse=True))\n\n def test_sparse(self):\n [x, y] = np.indices((4,3), sparse=True)\n assert_array_equal(x, np.array([[0], [1], [2], [3]]))\n assert_array_equal(y, np.array([[0, 1, 2]]))\n\n @pytest.mark.parametrize(\"dtype\", [np.int32, np.int64, np.float32, np.float64])\n @pytest.mark.parametrize(\"dims\", [(), (0,), (4, 3)])\n def test_return_type(self, dtype, dims):\n inds = np.indices(dims, dtype=dtype)\n assert_(inds.dtype == dtype)\n\n for arr in np.indices(dims, dtype=dtype, sparse=True):\n 
assert_(arr.dtype == dtype)\n\n\nclass TestRequire(object):\n flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',\n 'F', 'F_CONTIGUOUS', 'FORTRAN',\n 'A', 'ALIGNED',\n 'W', 'WRITEABLE',\n 'O', 'OWNDATA']\n\n def generate_all_false(self, dtype):\n arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])\n arr.setflags(write=False)\n a = arr['a']\n assert_(not a.flags['C'])\n assert_(not a.flags['F'])\n assert_(not a.flags['O'])\n assert_(not a.flags['W'])\n assert_(not a.flags['A'])\n return a\n\n def set_and_check_flag(self, flag, dtype, arr):\n if dtype is None:\n dtype = arr.dtype\n b = np.require(arr, dtype, [flag])\n assert_(b.flags[flag])\n assert_(b.dtype == dtype)\n\n # a further call to np.require ought to return the same array\n # unless OWNDATA is specified.\n c = np.require(b, None, [flag])\n if flag[0] != 'O':\n assert_(c is b)\n else:\n assert_(c.flags[flag])\n\n def test_require_each(self):\n\n id = ['f8', 'i4']\n fd = [None, 'f8', 'c16']\n for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):\n a = self.generate_all_false(idtype)\n self.set_and_check_flag(flag, fdtype, a)\n\n def test_unknown_requirement(self):\n a = self.generate_all_false('f8')\n assert_raises(KeyError, np.require, a, None, 'Q')\n\n def test_non_array_input(self):\n a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])\n assert_(a.flags['O'])\n assert_(a.flags['C'])\n assert_(a.flags['A'])\n assert_(a.dtype == 'i4')\n assert_equal(a, [1, 2, 3, 4])\n\n def test_C_and_F_simul(self):\n a = self.generate_all_false('f8')\n assert_raises(ValueError, np.require, a, None, ['C', 'F'])\n\n def test_ensure_array(self):\n class ArraySubclass(np.ndarray):\n pass\n\n a = ArraySubclass((2, 2))\n b = np.require(a, None, ['E'])\n assert_(type(b) is np.ndarray)\n\n def test_preserve_subtype(self):\n class ArraySubclass(np.ndarray):\n pass\n\n for flag in self.flag_names:\n a = ArraySubclass((2, 2))\n self.set_and_check_flag(flag, None, a)\n\n\nclass TestBroadcast(object):\n def test_broadcast_in_args(self):\n # gh-5881\n arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),\n np.empty((5, 1, 7))]\n mits = [np.broadcast(*arrs),\n np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),\n np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),\n np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),\n np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]\n for mit in mits:\n assert_equal(mit.shape, (5, 6, 7))\n assert_equal(mit.ndim, 3)\n assert_equal(mit.nd, 3)\n assert_equal(mit.numiter, 4)\n for a, ia in zip(arrs, mit.iters):\n assert_(a is ia.base)\n\n def test_broadcast_single_arg(self):\n # gh-6899\n arrs = [np.empty((5, 6, 7))]\n mit = np.broadcast(*arrs)\n assert_equal(mit.shape, (5, 6, 7))\n assert_equal(mit.ndim, 3)\n assert_equal(mit.nd, 3)\n assert_equal(mit.numiter, 1)\n assert_(arrs[0] is mit.iters[0].base)\n\n def test_number_of_arguments(self):\n arr = np.empty((5,))\n for j in range(35):\n arrs = [arr] * j\n if j > 32:\n assert_raises(ValueError, np.broadcast, *arrs)\n else:\n mit = np.broadcast(*arrs)\n assert_equal(mit.numiter, j)\n\n def test_broadcast_error_kwargs(self):\n #gh-13455\n arrs = [np.empty((5, 6, 7))]\n mit = np.broadcast(*arrs)\n mit2 = np.broadcast(*arrs, **{})\n assert_equal(mit.shape, mit2.shape)\n assert_equal(mit.ndim, mit2.ndim)\n assert_equal(mit.nd, mit2.nd)\n assert_equal(mit.numiter, mit2.numiter)\n assert_(mit.iters[0].base is mit2.iters[0].base)\n\n assert_raises(ValueError, np.broadcast, 1, **{'x': 1})\n\nclass 
TestKeepdims(object):\n\n class sub_array(np.ndarray):\n def sum(self, axis=None, dtype=None, out=None):\n return np.ndarray.sum(self, axis, dtype, out, keepdims=True)\n\n def test_raise(self):\n sub_class = self.sub_array\n x = np.arange(30).view(sub_class)\n assert_raises(TypeError, np.sum, x, keepdims=True)\n\n\nclass TestTensordot(object):\n\n def test_zero_dimension(self):\n # Test resolution to issue #5663\n a = np.ndarray((3,0))\n b = np.ndarray((0,4))\n td = np.tensordot(a, b, (1, 0))\n assert_array_equal(td, np.dot(a, b))\n assert_array_equal(td, np.einsum('ij,jk', a, b))\n\n def test_zero_dimensional(self):\n # gh-12130\n arr_0d = np.array(1)\n ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined\n assert_array_equal(ret, arr_0d)\n",
"import numpy as np\nimport pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\n\[email protected]\ndef df1():\n return DataFrame(\n dict(\n outer=[1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4],\n inner=[1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2],\n v1=np.linspace(0, 1, 11),\n )\n )\n\n\[email protected]\ndef df2():\n return DataFrame(\n dict(\n outer=[1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3],\n inner=[1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3],\n v2=np.linspace(10, 11, 12),\n )\n )\n\n\[email protected](params=[[], [\"outer\"], [\"outer\", \"inner\"]])\ndef left_df(request, df1):\n \"\"\" Construct left test DataFrame with specified levels\n (any of 'outer', 'inner', and 'v1')\n \"\"\"\n levels = request.param\n if levels:\n df1 = df1.set_index(levels)\n\n return df1\n\n\[email protected](params=[[], [\"outer\"], [\"outer\", \"inner\"]])\ndef right_df(request, df2):\n \"\"\" Construct right test DataFrame with specified levels\n (any of 'outer', 'inner', and 'v2')\n \"\"\"\n levels = request.param\n\n if levels:\n df2 = df2.set_index(levels)\n\n return df2\n\n\ndef compute_expected(df_left, df_right, on=None, left_on=None, right_on=None, how=None):\n \"\"\"\n Compute the expected merge result for the test case.\n\n This method computes the expected result of merging two DataFrames on\n a combination of their columns and index levels. It does so by\n explicitly dropping/resetting their named index levels, performing a\n merge on their columns, and then finally restoring the appropriate\n index in the result.\n\n Parameters\n ----------\n df_left : DataFrame\n The left DataFrame (may have zero or more named index levels)\n df_right : DataFrame\n The right DataFrame (may have zero or more named index levels)\n on : list of str\n The on parameter to the merge operation\n left_on : list of str\n The left_on parameter to the merge operation\n right_on : list of str\n The right_on parameter to the merge operation\n how : str\n The how parameter to the merge operation\n\n Returns\n -------\n DataFrame\n The expected merge result\n \"\"\"\n # Handle on param if specified\n if on is not None:\n left_on, right_on = on, on\n\n # Compute input named index levels\n left_levels = [n for n in df_left.index.names if n is not None]\n right_levels = [n for n in df_right.index.names if n is not None]\n\n # Compute output named index levels\n output_levels = [i for i in left_on if i in right_levels and i in left_levels]\n\n # Drop index levels that aren't involved in the merge\n drop_left = [n for n in left_levels if n not in left_on]\n if drop_left:\n df_left = df_left.reset_index(drop_left, drop=True)\n\n drop_right = [n for n in right_levels if n not in right_on]\n if drop_right:\n df_right = df_right.reset_index(drop_right, drop=True)\n\n # Convert remaining index levels to columns\n reset_left = [n for n in left_levels if n in left_on]\n if reset_left:\n df_left = df_left.reset_index(level=reset_left)\n\n reset_right = [n for n in right_levels if n in right_on]\n if reset_right:\n df_right = df_right.reset_index(level=reset_right)\n\n # Perform merge\n expected = df_left.merge(df_right, left_on=left_on, right_on=right_on, how=how)\n\n # Restore index levels\n if output_levels:\n expected = expected.set_index(output_levels)\n\n return expected\n\n\[email protected](\n \"on,how\",\n [\n ([\"outer\"], \"inner\"),\n ([\"inner\"], \"left\"),\n ([\"outer\", \"inner\"], \"right\"),\n ([\"inner\", \"outer\"], \"outer\"),\n ],\n)\ndef test_merge_indexes_and_columns_on(left_df, right_df, on, how):\n\n # Construct expected 
result\n expected = compute_expected(left_df, right_df, on=on, how=how)\n\n # Perform merge\n result = left_df.merge(right_df, on=on, how=how)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\[email protected](\n \"left_on,right_on,how\",\n [\n ([\"outer\"], [\"outer\"], \"inner\"),\n ([\"inner\"], [\"inner\"], \"right\"),\n ([\"outer\", \"inner\"], [\"outer\", \"inner\"], \"left\"),\n ([\"inner\", \"outer\"], [\"inner\", \"outer\"], \"outer\"),\n ],\n)\ndef test_merge_indexes_and_columns_lefton_righton(\n left_df, right_df, left_on, right_on, how\n):\n\n # Construct expected result\n expected = compute_expected(\n left_df, right_df, left_on=left_on, right_on=right_on, how=how\n )\n\n # Perform merge\n result = left_df.merge(right_df, left_on=left_on, right_on=right_on, how=how)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\[email protected](\"left_index\", [\"inner\", [\"inner\", \"outer\"]])\ndef test_join_indexes_and_columns_on(df1, df2, left_index, join_type):\n\n # Construct left_df\n left_df = df1.set_index(left_index)\n\n # Construct right_df\n right_df = df2.set_index([\"outer\", \"inner\"])\n\n # Result\n expected = (\n left_df.reset_index()\n .join(\n right_df, on=[\"outer\", \"inner\"], how=join_type, lsuffix=\"_x\", rsuffix=\"_y\"\n )\n .set_index(left_index)\n )\n\n # Perform join\n result = left_df.join(\n right_df, on=[\"outer\", \"inner\"], how=join_type, lsuffix=\"_x\", rsuffix=\"_y\"\n )\n\n tm.assert_frame_equal(result, expected, check_like=True)\n",
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, Series, date_range\nimport pandas._testing as tm\n\n\nclass TestDataFrameRound:\n def test_round(self):\n # GH#2665\n\n # Test that rounding an empty DataFrame does nothing\n df = DataFrame()\n tm.assert_frame_equal(df, df.round())\n\n # Here's the test frame we'll be working with\n df = DataFrame({\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.234, 2.234, 3.234]})\n\n # Default round to integer (i.e. decimals=0)\n expected_rounded = DataFrame({\"col1\": [1.0, 2.0, 3.0], \"col2\": [1.0, 2.0, 3.0]})\n tm.assert_frame_equal(df.round(), expected_rounded)\n\n # Round with an integer\n decimals = 2\n expected_rounded = DataFrame(\n {\"col1\": [1.12, 2.12, 3.12], \"col2\": [1.23, 2.23, 3.23]}\n )\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\n\n # This should also work with np.round (since np.round dispatches to\n # df.round)\n tm.assert_frame_equal(np.round(df, decimals), expected_rounded)\n\n # Round with a list\n round_list = [1, 2]\n with pytest.raises(TypeError):\n df.round(round_list)\n\n # Round with a dictionary\n expected_rounded = DataFrame(\n {\"col1\": [1.1, 2.1, 3.1], \"col2\": [1.23, 2.23, 3.23]}\n )\n round_dict = {\"col1\": 1, \"col2\": 2}\n tm.assert_frame_equal(df.round(round_dict), expected_rounded)\n\n # Incomplete dict\n expected_partially_rounded = DataFrame(\n {\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.2, 2.2, 3.2]}\n )\n partial_round_dict = {\"col2\": 1}\n tm.assert_frame_equal(df.round(partial_round_dict), expected_partially_rounded)\n\n # Dict with unknown elements\n wrong_round_dict = {\"col3\": 2, \"col2\": 1}\n tm.assert_frame_equal(df.round(wrong_round_dict), expected_partially_rounded)\n\n # float input to `decimals`\n non_int_round_dict = {\"col1\": 1, \"col2\": 0.5}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n # String input\n non_int_round_dict = {\"col1\": 1, \"col2\": \"foo\"}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # List input\n non_int_round_dict = {\"col1\": 1, \"col2\": [1, 2]}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # Non integer Series inputs\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # Negative numbers\n negative_round_dict = {\"col1\": -1, \"col2\": -2}\n big_df = df * 100\n expected_neg_rounded = DataFrame(\n {\"col1\": [110.0, 210, 310], \"col2\": [100.0, 200, 300]}\n )\n tm.assert_frame_equal(big_df.round(negative_round_dict), expected_neg_rounded)\n\n # nan in Series round\n nan_round_Series = Series({\"col1\": np.nan, \"col2\": 1})\n\n # TODO(wesm): unused?\n expected_nan_round = DataFrame( # noqa\n {\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.2, 2.2, 3.2]}\n )\n\n with pytest.raises(TypeError):\n df.round(nan_round_Series)\n\n # Make sure this doesn't break existing Series.round\n tm.assert_series_equal(df[\"col1\"].round(1), expected_rounded[\"col1\"])\n\n # named columns\n # GH#11986\n decimals = 2\n expected_rounded = DataFrame(\n {\"col1\": [1.12, 2.12, 3.12], \"col2\": [1.23, 2.23, 3.23]}\n )\n df.columns.name = \"cols\"\n 
expected_rounded.columns.name = \"cols\"\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\n\n # interaction of named columns & series\n tm.assert_series_equal(df[\"col1\"].round(decimals), expected_rounded[\"col1\"])\n tm.assert_series_equal(df.round(decimals)[\"col1\"], expected_rounded[\"col1\"])\n\n def test_round_numpy(self):\n # GH#12600\n df = DataFrame([[1.53, 1.36], [0.06, 7.01]])\n out = np.round(df, decimals=0)\n expected = DataFrame([[2.0, 1.0], [0.0, 7.0]])\n tm.assert_frame_equal(out, expected)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.round(df, decimals=0, out=df)\n\n def test_round_numpy_with_nan(self):\n # See GH#14197\n df = Series([1.53, np.nan, 0.06]).to_frame()\n with tm.assert_produces_warning(None):\n result = df.round()\n expected = Series([2.0, np.nan, 0.0]).to_frame()\n tm.assert_frame_equal(result, expected)\n\n def test_round_mixed_type(self):\n # GH#11885\n df = DataFrame(\n {\n \"col1\": [1.1, 2.2, 3.3, 4.4],\n \"col2\": [\"1\", \"a\", \"c\", \"f\"],\n \"col3\": date_range(\"20111111\", periods=4),\n }\n )\n round_0 = DataFrame(\n {\n \"col1\": [1.0, 2.0, 3.0, 4.0],\n \"col2\": [\"1\", \"a\", \"c\", \"f\"],\n \"col3\": date_range(\"20111111\", periods=4),\n }\n )\n tm.assert_frame_equal(df.round(), round_0)\n tm.assert_frame_equal(df.round(1), df)\n tm.assert_frame_equal(df.round({\"col1\": 1}), df)\n tm.assert_frame_equal(df.round({\"col1\": 0}), round_0)\n tm.assert_frame_equal(df.round({\"col1\": 0, \"col2\": 1}), round_0)\n tm.assert_frame_equal(df.round({\"col3\": 1}), df)\n\n def test_round_with_duplicate_columns(self):\n # GH#11611\n\n df = pd.DataFrame(\n np.random.random([3, 3]),\n columns=[\"A\", \"B\", \"C\"],\n index=[\"first\", \"second\", \"third\"],\n )\n\n dfs = pd.concat((df, df), axis=1)\n rounded = dfs.round()\n tm.assert_index_equal(rounded.index, dfs.index)\n\n decimals = pd.Series([1, 0, 2], index=[\"A\", \"B\", \"A\"])\n msg = \"Index of decimals must be unique\"\n with pytest.raises(ValueError, match=msg):\n df.round(decimals)\n\n def test_round_builtin(self):\n # GH#11763\n # Here's the test frame we'll be working with\n df = DataFrame({\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.234, 2.234, 3.234]})\n\n # Default round to integer (i.e. decimals=0)\n expected_rounded = DataFrame({\"col1\": [1.0, 2.0, 3.0], \"col2\": [1.0, 2.0, 3.0]})\n tm.assert_frame_equal(round(df), expected_rounded)\n\n def test_round_nonunique_categorical(self):\n # See GH#21809\n idx = pd.CategoricalIndex([\"low\"] * 3 + [\"hi\"] * 3)\n df = pd.DataFrame(np.random.rand(6, 3), columns=list(\"abc\"))\n\n expected = df.round(3)\n expected.index = idx\n\n df_categorical = df.copy().set_index(idx)\n assert df_categorical.shape == (6, 3)\n result = df_categorical.round(3)\n assert result.shape == (6, 3)\n\n tm.assert_frame_equal(result, expected)\n\n def test_round_interval_category_columns(self):\n # GH#30063\n columns = pd.CategoricalIndex(pd.interval_range(0, 2))\n df = DataFrame([[0.66, 1.1], [0.3, 0.25]], columns=columns)\n\n result = df.round()\n expected = DataFrame([[1.0, 1.0], [0.0, 0.0]], columns=columns)\n tm.assert_frame_equal(result, expected)\n",
"\"\"\"Tests of interaction of matrix with other parts of numpy.\n\nNote that tests with MaskedArray and linalg are done in separate files.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport pytest\n\nimport textwrap\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import (assert_, assert_equal, assert_raises,\n assert_raises_regex, assert_array_equal,\n assert_almost_equal, assert_array_almost_equal)\n\n\ndef test_fancy_indexing():\n # The matrix class messes with the shape. While this is always\n # weird (getitem is not used, it does not have setitem nor knows\n # about fancy indexing), this tests gh-3110\n # 2018-04-29: moved here from core.tests.test_index.\n m = np.matrix([[1, 2], [3, 4]])\n\n assert_(isinstance(m[[0, 1, 0], :], np.matrix))\n\n # gh-3110. Note the transpose currently because matrices do *not*\n # support dimension fixing for fancy indexing correctly.\n x = np.asmatrix(np.arange(50).reshape(5, 10))\n assert_equal(x[:2, np.array(-1)], x[:2, -1].T)\n\n\ndef test_polynomial_mapdomain():\n # test that polynomial preserved matrix subtype.\n # 2018-04-29: moved here from polynomial.tests.polyutils.\n dom1 = [0, 4]\n dom2 = [1, 3]\n x = np.matrix([dom1, dom1])\n res = np.polynomial.polyutils.mapdomain(x, dom1, dom2)\n assert_(isinstance(res, np.matrix))\n\n\ndef test_sort_matrix_none():\n # 2018-04-29: moved here from core.tests.test_multiarray\n a = np.matrix([[2, 1, 0]])\n actual = np.sort(a, axis=None)\n expected = np.matrix([[0, 1, 2]])\n assert_equal(actual, expected)\n assert_(type(expected) is np.matrix)\n\n\ndef test_partition_matrix_none():\n # gh-4301\n # 2018-04-29: moved here from core.tests.test_multiarray\n a = np.matrix([[2, 1, 0]])\n actual = np.partition(a, 1, axis=None)\n expected = np.matrix([[0, 1, 2]])\n assert_equal(actual, expected)\n assert_(type(expected) is np.matrix)\n\n\ndef test_dot_scalar_and_matrix_of_objects():\n # Ticket #2469\n # 2018-04-29: moved here from core.tests.test_multiarray\n arr = np.matrix([1, 2], dtype=object)\n desired = np.matrix([[3, 6]], dtype=object)\n assert_equal(np.dot(arr, 3), desired)\n assert_equal(np.dot(3, arr), desired)\n\n\ndef test_inner_scalar_and_matrix():\n # 2018-04-29: moved here from core.tests.test_multiarray\n for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':\n sca = np.array(3, dtype=dt)[()]\n arr = np.matrix([[1, 2], [3, 4]], dtype=dt)\n desired = np.matrix([[3, 6], [9, 12]], dtype=dt)\n assert_equal(np.inner(arr, sca), desired)\n assert_equal(np.inner(sca, arr), desired)\n\n\ndef test_inner_scalar_and_matrix_of_objects():\n # Ticket #4482\n # 2018-04-29: moved here from core.tests.test_multiarray\n arr = np.matrix([1, 2], dtype=object)\n desired = np.matrix([[3, 6]], dtype=object)\n assert_equal(np.inner(arr, 3), desired)\n assert_equal(np.inner(3, arr), desired)\n\n\ndef test_iter_allocate_output_subtype():\n # Make sure that the subtype with priority wins\n # 2018-04-29: moved here from core.tests.test_nditer, given the\n # matrix specific shape test.\n\n # matrix vs ndarray\n a = np.matrix([[1, 2], [3, 4]])\n b = np.arange(4).reshape(2, 2).T\n i = np.nditer([a, b, None], [],\n [['readonly'], ['readonly'], ['writeonly', 'allocate']])\n assert_(type(i.operands[2]) is np.matrix)\n assert_(type(i.operands[2]) is not np.ndarray)\n assert_equal(i.operands[2].shape, (2, 2))\n\n # matrix always wants things to be 2D\n b = np.arange(4).reshape(1, 2, 2)\n assert_raises(RuntimeError, np.nditer, [a, b, None], [],\n [['readonly'], ['readonly'], 
['writeonly', 'allocate']])\n # but if subtypes are disabled, the result can still work\n i = np.nditer([a, b, None], [],\n [['readonly'], ['readonly'],\n ['writeonly', 'allocate', 'no_subtype']])\n assert_(type(i.operands[2]) is np.ndarray)\n assert_(type(i.operands[2]) is not np.matrix)\n assert_equal(i.operands[2].shape, (1, 2, 2))\n\n\ndef like_function():\n # 2018-04-29: moved here from core.tests.test_numeric\n a = np.matrix([[1, 2], [3, 4]])\n for like_function in np.zeros_like, np.ones_like, np.empty_like:\n b = like_function(a)\n assert_(type(b) is np.matrix)\n\n c = like_function(a, subok=False)\n assert_(type(c) is not np.matrix)\n\n\ndef test_array_astype():\n # 2018-04-29: copied here from core.tests.test_api\n # subok=True passes through a matrix\n a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')\n b = a.astype('f4', subok=True, copy=False)\n assert_(a is b)\n\n # subok=True is default, and creates a subtype on a cast\n b = a.astype('i4', copy=False)\n assert_equal(a, b)\n assert_equal(type(b), np.matrix)\n\n # subok=False never returns a matrix\n b = a.astype('f4', subok=False, copy=False)\n assert_equal(a, b)\n assert_(not (a is b))\n assert_(type(b) is not np.matrix)\n\n\ndef test_stack():\n # 2018-04-29: copied here from core.tests.test_shape_base\n # check np.matrix cannot be stacked\n m = np.matrix([[1, 2], [3, 4]])\n assert_raises_regex(ValueError, 'shape too large to be a matrix',\n np.stack, [m, m])\n\n\ndef test_object_scalar_multiply():\n # Tickets #2469 and #4482\n # 2018-04-29: moved here from core.tests.test_ufunc\n arr = np.matrix([1, 2], dtype=object)\n desired = np.matrix([[3, 6]], dtype=object)\n assert_equal(np.multiply(arr, 3), desired)\n assert_equal(np.multiply(3, arr), desired)\n\n\ndef test_nanfunctions_matrices():\n # Check that it works and that type and\n # shape are preserved\n # 2018-04-29: moved here from core.tests.test_nanfunctions\n mat = np.matrix(np.eye(3))\n for f in [np.nanmin, np.nanmax]:\n res = f(mat, axis=0)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (1, 3))\n res = f(mat, axis=1)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (3, 1))\n res = f(mat)\n assert_(np.isscalar(res))\n # check that rows of nan are dealt with for subclasses (#4628)\n mat[1] = np.nan\n for f in [np.nanmin, np.nanmax]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(mat, axis=0)\n assert_(isinstance(res, np.matrix))\n assert_(not np.any(np.isnan(res)))\n assert_(len(w) == 0)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(mat, axis=1)\n assert_(isinstance(res, np.matrix))\n assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])\n and not np.isnan(res[2, 0]))\n assert_(len(w) == 1, 'no warning raised')\n assert_(issubclass(w[0].category, RuntimeWarning))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(mat)\n assert_(np.isscalar(res))\n assert_(res != np.nan)\n assert_(len(w) == 0)\n\n\ndef test_nanfunctions_matrices_general():\n # Check that it works and that type and\n # shape are preserved\n # 2018-04-29: moved here from core.tests.test_nanfunctions\n mat = np.matrix(np.eye(3))\n for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,\n np.nanmean, np.nanvar, np.nanstd):\n res = f(mat, axis=0)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (1, 3))\n res = f(mat, axis=1)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (3, 1))\n res = f(mat)\n 
assert_(np.isscalar(res))\n\n for f in np.nancumsum, np.nancumprod:\n res = f(mat, axis=0)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (3, 3))\n res = f(mat, axis=1)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (3, 3))\n res = f(mat)\n assert_(isinstance(res, np.matrix))\n assert_(res.shape == (1, 3*3))\n\n\ndef test_average_matrix():\n # 2018-04-29: moved here from core.tests.test_function_base.\n y = np.matrix(np.random.rand(5, 5))\n assert_array_equal(y.mean(0), np.average(y, 0))\n\n a = np.matrix([[1, 2], [3, 4]])\n w = np.matrix([[1, 2], [3, 4]])\n\n r = np.average(a, axis=0, weights=w)\n assert_equal(type(r), np.matrix)\n assert_equal(r, [[2.5, 10.0/3]])\n\n\ndef test_trapz_matrix():\n # Test to make sure matrices give the same answer as ndarrays\n # 2018-04-29: moved here from core.tests.test_function_base.\n x = np.linspace(0, 5)\n y = x * x\n r = np.trapz(y, x)\n mx = np.matrix(x)\n my = np.matrix(y)\n mr = np.trapz(my, mx)\n assert_almost_equal(mr, r)\n\n\ndef test_ediff1d_matrix():\n # 2018-04-29: moved here from core.tests.test_arraysetops.\n assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix))\n assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))\n\n\ndef test_apply_along_axis_matrix():\n # this test is particularly malicious because matrix\n # refuses to become 1d\n # 2018-04-29: moved here from core.tests.test_shape_base.\n def double(row):\n return row * 2\n\n m = np.matrix([[0, 1], [2, 3]])\n expected = np.matrix([[0, 2], [4, 6]])\n\n result = np.apply_along_axis(double, 0, m)\n assert_(isinstance(result, np.matrix))\n assert_array_equal(result, expected)\n\n result = np.apply_along_axis(double, 1, m)\n assert_(isinstance(result, np.matrix))\n assert_array_equal(result, expected)\n\n\ndef test_kron_matrix():\n # 2018-04-29: moved here from core.tests.test_shape_base.\n a = np.ones([2, 2])\n m = np.asmatrix(a)\n assert_equal(type(np.kron(a, a)), np.ndarray)\n assert_equal(type(np.kron(m, m)), np.matrix)\n assert_equal(type(np.kron(a, m)), np.matrix)\n assert_equal(type(np.kron(m, a)), np.matrix)\n\n\nclass TestConcatenatorMatrix(object):\n # 2018-04-29: moved here from core.tests.test_index_tricks.\n def test_matrix(self):\n a = [1, 2]\n b = [3, 4]\n\n ab_r = np.r_['r', a, b]\n ab_c = np.r_['c', a, b]\n\n assert_equal(type(ab_r), np.matrix)\n assert_equal(type(ab_c), np.matrix)\n\n assert_equal(np.array(ab_r), [[1, 2, 3, 4]])\n assert_equal(np.array(ab_c), [[1], [2], [3], [4]])\n\n assert_raises(ValueError, lambda: np.r_['rc', a, b])\n\n def test_matrix_scalar(self):\n r = np.r_['r', [1, 2], 3]\n assert_equal(type(r), np.matrix)\n assert_equal(np.array(r), [[1, 2, 3]])\n\n def test_matrix_builder(self):\n a = np.array([1])\n b = np.array([2])\n c = np.array([3])\n d = np.array([4])\n actual = np.r_['a, b; c, d']\n expected = np.bmat([[a, b], [c, d]])\n\n assert_equal(actual, expected)\n assert_equal(type(actual), type(expected))\n\n\ndef test_array_equal_error_message_matrix():\n # 2018-04-29: moved here from testing.tests.test_utils.\n try:\n assert_equal(np.array([1, 2]), np.matrix([1, 2]))\n except AssertionError as e:\n msg = str(e)\n msg2 = msg.replace(\"shapes (2L,), (1L, 2L)\", \"shapes (2,), (1, 2)\")\n msg_reference = textwrap.dedent(\"\"\"\\\n\n Arrays are not equal\n\n (shapes (2,), (1, 2) mismatch)\n x: array([1, 2])\n y: matrix([[1, 2]])\"\"\")\n try:\n assert_equal(msg, msg_reference)\n except AssertionError:\n assert_equal(msg2, msg_reference)\n else:\n raise AssertionError(\"Did not raise\")\n\n\ndef 
test_array_almost_equal_matrix():\n # Matrix slicing keeps things 2-D, while array does not necessarily.\n # See gh-8452.\n # 2018-04-29: moved here from testing.tests.test_utils.\n m1 = np.matrix([[1., 2.]])\n m2 = np.matrix([[1., np.nan]])\n m3 = np.matrix([[1., -np.inf]])\n m4 = np.matrix([[np.nan, np.inf]])\n m5 = np.matrix([[1., 2.], [np.nan, np.inf]])\n for assert_func in assert_array_almost_equal, assert_almost_equal:\n for m in m1, m2, m3, m4, m5:\n assert_func(m, m)\n a = np.array(m)\n assert_func(a, m)\n assert_func(m, a)\n",
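A short standalone sketch of the subclass-preservation behaviour asserted by the matrix-interaction tests above (np.matrix is a legacy class, so this only illustrates those checks):

import numpy as np

mat = np.matrix(np.eye(3))

# nan-aware reductions keep the matrix type and remain 2-D.
res = np.nanmax(mat, axis=0)
assert isinstance(res, np.matrix) and res.shape == (1, 3)

# np.r_ with an 'r'/'c' directive builds row/column matrices.
row = np.r_['r', [1, 2], [3, 4]]
col = np.r_['c', [1, 2], [3, 4]]
assert type(row) is np.matrix and row.shape == (1, 4)
assert type(col) is np.matrix and col.shape == (4, 1)

# np.kron returns a matrix as soon as either operand is one.
a = np.ones([2, 2])
assert type(np.kron(a, np.asmatrix(a))) is np.matrix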
"\"\"\"Indexer objects for computing start/end window bounds for rolling operations\"\"\"\nfrom datetime import timedelta\nfrom typing import Dict, Optional, Tuple, Type, Union\n\nimport numpy as np\n\nfrom pandas._libs.window.indexers import calculate_variable_window_bounds\nfrom pandas.util._decorators import Appender\n\nfrom pandas.tseries.offsets import Nano\n\nget_window_bounds_doc = \"\"\"\nComputes the bounds of a window.\n\nParameters\n----------\nnum_values : int, default 0\n number of values that will be aggregated over\nwindow_size : int, default 0\n the number of rows in a window\nmin_periods : int, default None\n min_periods passed from the top level rolling API\ncenter : bool, default None\n center passed from the top level rolling API\nclosed : str, default None\n closed passed from the top level rolling API\nwin_type : str, default None\n win_type passed from the top level rolling API\n\nReturns\n-------\nA tuple of ndarray[int64]s, indicating the boundaries of each\nwindow\n\"\"\"\n\n\nclass BaseIndexer:\n \"\"\"Base class for window bounds calculations.\"\"\"\n\n def __init__(\n self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs,\n ):\n \"\"\"\n Parameters\n ----------\n **kwargs :\n keyword arguments that will be available when get_window_bounds is called\n \"\"\"\n self.index_array = index_array\n self.window_size = window_size\n # Set user defined kwargs as attributes that can be used in get_window_bounds\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n raise NotImplementedError\n\n\nclass FixedWindowIndexer(BaseIndexer):\n \"\"\"Creates window boundaries that are of fixed length.\"\"\"\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n start_s = np.zeros(self.window_size, dtype=\"int64\")\n start_e = (\n np.arange(self.window_size, num_values, dtype=\"int64\")\n - self.window_size\n + 1\n )\n start = np.concatenate([start_s, start_e])[:num_values]\n\n end_s = np.arange(self.window_size, dtype=\"int64\") + 1\n end_e = start_e + self.window_size\n end = np.concatenate([end_s, end_e])[:num_values]\n return start, end\n\n\nclass VariableWindowIndexer(BaseIndexer):\n \"\"\"Creates window boundaries that are of variable length, namely for time series.\"\"\"\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n return calculate_variable_window_bounds(\n num_values, self.window_size, min_periods, center, closed, self.index_array,\n )\n\n\nclass VariableOffsetWindowIndexer(BaseIndexer):\n \"\"\"Calculate window boundaries based on a non-fixed offset such as a BusinessDay\"\"\"\n\n def __init__(\n self,\n index_array: Optional[np.ndarray] = None,\n window_size: int = 0,\n index=None,\n offset=None,\n **kwargs,\n ):\n super().__init__(index_array, window_size, **kwargs)\n self.index = index\n self.offset = offset\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n 
center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n # if windows is variable, default is 'right', otherwise default is 'both'\n if closed is None:\n closed = \"right\" if self.index is not None else \"both\"\n\n right_closed = closed in [\"right\", \"both\"]\n left_closed = closed in [\"left\", \"both\"]\n\n if self.index[num_values - 1] < self.index[0]:\n index_growth_sign = -1\n else:\n index_growth_sign = 1\n\n start = np.empty(num_values, dtype=\"int64\")\n start.fill(-1)\n end = np.empty(num_values, dtype=\"int64\")\n end.fill(-1)\n\n start[0] = 0\n\n # right endpoint is closed\n if right_closed:\n end[0] = 1\n # right endpoint is open\n else:\n end[0] = 0\n\n # start is start of slice interval (including)\n # end is end of slice interval (not including)\n for i in range(1, num_values):\n end_bound = self.index[i]\n start_bound = self.index[i] - index_growth_sign * self.offset\n\n # left endpoint is closed\n if left_closed:\n start_bound -= Nano(1)\n\n # advance the start bound until we are\n # within the constraint\n start[i] = i\n for j in range(start[i - 1], i):\n if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):\n start[i] = j\n break\n\n # end bound is previous end\n # or current index\n if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):\n end[i] = i + 1\n else:\n end[i] = end[i - 1]\n\n # right endpoint is open\n if not right_closed:\n end[i] -= 1\n\n return start, end\n\n\nclass ExpandingIndexer(BaseIndexer):\n \"\"\"Calculate expanding window bounds, mimicking df.expanding()\"\"\"\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n return (\n np.zeros(num_values, dtype=np.int64),\n np.arange(1, num_values + 1, dtype=np.int64),\n )\n\n\nclass FixedForwardWindowIndexer(BaseIndexer):\n \"\"\"\n Creates window boundaries for fixed-length windows that include the\n current row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)\n >>> df.rolling(window=indexer, min_periods=1).sum()\n B\n 0 1.0\n 1 3.0\n 2 2.0\n 3 4.0\n 4 4.0\n \"\"\"\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n if center:\n raise ValueError(\"Forward-looking windows can't have center=True\")\n if closed is not None:\n raise ValueError(\n \"Forward-looking windows don't support setting the closed argument\"\n )\n\n start = np.arange(num_values, dtype=\"int64\")\n end_s = start[: -self.window_size] + self.window_size\n end_e = np.full(self.window_size, num_values, dtype=\"int64\")\n end = np.concatenate([end_s, end_e])\n\n return start, end\n\n\nclass GroupbyRollingIndexer(BaseIndexer):\n \"\"\"Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()\"\"\"\n\n def __init__(\n self,\n index_array: Optional[np.ndarray],\n window_size: int,\n groupby_indicies: Dict,\n rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]],\n **kwargs,\n ):\n \"\"\"\n Parameters\n ----------\n **kwargs :\n keyword arguments that will be available when get_window_bounds is 
called\n \"\"\"\n self.groupby_indicies = groupby_indicies\n self.rolling_indexer = rolling_indexer\n super().__init__(index_array, window_size, **kwargs)\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(\n self,\n num_values: int = 0,\n min_periods: Optional[int] = None,\n center: Optional[bool] = None,\n closed: Optional[str] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n # 1) For each group, get the indices that belong to the group\n # 2) Use the indices to calculate the start & end bounds of the window\n # 3) Append the window bounds in group order\n start_arrays = []\n end_arrays = []\n window_indicies_start = 0\n for key, indicies in self.groupby_indicies.items():\n if self.index_array is not None:\n index_array = self.index_array.take(indicies)\n else:\n index_array = self.index_array\n indexer = self.rolling_indexer(\n index_array=index_array, window_size=self.window_size,\n )\n start, end = indexer.get_window_bounds(\n len(indicies), min_periods, center, closed\n )\n start = start.astype(np.int64)\n end = end.astype(np.int64)\n # Cannot use groupby_indicies as they might not be monotonic with the object\n # we're rolling over\n window_indicies = np.arange(\n window_indicies_start, window_indicies_start + len(indicies),\n )\n window_indicies_start += len(indicies)\n # Extend as we'll be slicing window like [start, end)\n window_indicies = np.append(\n window_indicies, [window_indicies[-1] + 1]\n ).astype(np.int64)\n start_arrays.append(window_indicies.take(start))\n end_arrays.append(window_indicies.take(end))\n start = np.concatenate(start_arrays)\n end = np.concatenate(end_arrays)\n return start, end\n",
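The indexer module above defines the BaseIndexer contract: get_window_bounds returns matching int64 start/end arrays that df.rolling consumes. A minimal usage sketch, based on the FixedForwardWindowIndexer docstring above plus a hypothetical custom subclass written against the signature defined in this module (later pandas versions add a step argument):

import numpy as np
import pandas as pd
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer

df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})

# Forward-looking fixed window: each row aggregates itself and the next row.
indexer = FixedForwardWindowIndexer(window_size=2)
print(df.rolling(window=indexer, min_periods=1).sum())

# A custom indexer only has to return (start, end) bounds; this one mimics
# the ExpandingIndexer defined above and is purely illustrative.
class ExpandingLikeIndexer(BaseIndexer):
    def get_window_bounds(self, num_values=0, min_periods=None, center=None, closed=None):
        start = np.zeros(num_values, dtype=np.int64)
        end = np.arange(1, num_values + 1, dtype=np.int64)
        return start, end

print(df.rolling(window=ExpandingLikeIndexer(), min_periods=1).sum())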
"import contextlib\n\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\[email protected]\ndef ensure_removed(obj, attr):\n \"\"\"Ensure that an attribute added to 'obj' during the test is\n removed when we're done\"\"\"\n try:\n yield\n finally:\n try:\n delattr(obj, attr)\n except AttributeError:\n pass\n obj._accessors.discard(attr)\n\n\nclass MyAccessor:\n def __init__(self, obj):\n self.obj = obj\n self.item = \"item\"\n\n @property\n def prop(self):\n return self.item\n\n def method(self):\n return self.item\n\n\[email protected](\n \"obj, registrar\",\n [\n (pd.Series, pd.api.extensions.register_series_accessor),\n (pd.DataFrame, pd.api.extensions.register_dataframe_accessor),\n (pd.Index, pd.api.extensions.register_index_accessor),\n ],\n)\ndef test_register(obj, registrar):\n with ensure_removed(obj, \"mine\"):\n before = set(dir(obj))\n registrar(\"mine\")(MyAccessor)\n o = obj([]) if obj is not pd.Series else obj([], dtype=object)\n assert o.mine.prop == \"item\"\n after = set(dir(obj))\n assert (before ^ after) == {\"mine\"}\n assert \"mine\" in obj._accessors\n\n\ndef test_accessor_works():\n with ensure_removed(pd.Series, \"mine\"):\n pd.api.extensions.register_series_accessor(\"mine\")(MyAccessor)\n\n s = pd.Series([1, 2])\n assert s.mine.obj is s\n\n assert s.mine.prop == \"item\"\n assert s.mine.method() == \"item\"\n\n\ndef test_overwrite_warns():\n # Need to restore mean\n mean = pd.Series.mean\n try:\n with tm.assert_produces_warning(UserWarning) as w:\n pd.api.extensions.register_series_accessor(\"mean\")(MyAccessor)\n s = pd.Series([1, 2])\n assert s.mean.prop == \"item\"\n msg = str(w[0].message)\n assert \"mean\" in msg\n assert \"MyAccessor\" in msg\n assert \"Series\" in msg\n finally:\n pd.Series.mean = mean\n\n\ndef test_raises_attribute_error():\n\n with ensure_removed(pd.Series, \"bad\"):\n\n @pd.api.extensions.register_series_accessor(\"bad\")\n class Bad:\n def __init__(self, data):\n raise AttributeError(\"whoops\")\n\n with pytest.raises(AttributeError, match=\"whoops\"):\n pd.Series([], dtype=object).bad\n",
"from distutils.version import LooseVersion\nfrom functools import reduce\nfrom itertools import product\nimport operator\nfrom typing import Dict, Type\nimport warnings\n\nimport numpy as np\nfrom numpy.random import rand, randint, randn\nimport pytest\n\nfrom pandas.errors import PerformanceWarning\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.common import is_bool, is_list_like, is_scalar\n\nimport pandas as pd\nfrom pandas import DataFrame, Series, compat, date_range\nimport pandas._testing as tm\nfrom pandas.core.computation import pytables\nfrom pandas.core.computation.check import _NUMEXPR_VERSION\nfrom pandas.core.computation.engines import NumExprClobberingError, _engines\nimport pandas.core.computation.expr as expr\nfrom pandas.core.computation.expr import (\n BaseExprVisitor,\n PandasExprVisitor,\n PythonExprVisitor,\n)\nfrom pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR\nfrom pandas.core.computation.ops import (\n _arith_ops_syms,\n _binary_math_ops,\n _binary_ops_dict,\n _special_case_arith_ops_syms,\n _unary_math_ops,\n)\n\n\[email protected](\n params=(\n pytest.param(\n engine,\n marks=pytest.mark.skipif(\n engine == \"numexpr\" and not _USE_NUMEXPR,\n reason=f\"numexpr enabled->{_USE_NUMEXPR}, \"\n f\"installed->{_NUMEXPR_INSTALLED}\",\n ),\n )\n for engine in _engines\n )\n) # noqa\ndef engine(request):\n return request.param\n\n\[email protected](params=expr._parsers)\ndef parser(request):\n return request.param\n\n\[email protected]\ndef ne_lt_2_6_9():\n if _NUMEXPR_INSTALLED and _NUMEXPR_VERSION >= LooseVersion(\"2.6.9\"):\n pytest.skip(\"numexpr is >= 2.6.9\")\n return \"numexpr\"\n\n\[email protected]\ndef unary_fns_for_ne():\n if _NUMEXPR_INSTALLED:\n if _NUMEXPR_VERSION >= LooseVersion(\"2.6.9\"):\n return _unary_math_ops\n else:\n return tuple(x for x in _unary_math_ops if x not in (\"floor\", \"ceil\"))\n else:\n pytest.skip(\"numexpr is not present\")\n\n\ndef engine_has_neg_frac(engine):\n return _engines[engine].has_neg_frac\n\n\ndef _eval_single_bin(lhs, cmp1, rhs, engine):\n c = _binary_ops_dict[cmp1]\n if engine_has_neg_frac(engine):\n try:\n return c(lhs, rhs)\n except ValueError as e:\n if str(e).startswith(\n \"negative number cannot be raised to a fractional power\"\n ):\n return np.nan\n raise\n return c(lhs, rhs)\n\n\ndef _series_and_2d_ndarray(lhs, rhs):\n return (\n isinstance(lhs, Series) and isinstance(rhs, np.ndarray) and rhs.ndim > 1\n ) or (isinstance(rhs, Series) and isinstance(lhs, np.ndarray) and lhs.ndim > 1)\n\n\ndef _series_and_frame(lhs, rhs):\n return (isinstance(lhs, Series) and isinstance(rhs, DataFrame)) or (\n isinstance(rhs, Series) and isinstance(lhs, DataFrame)\n )\n\n\ndef _bool_and_frame(lhs, rhs):\n return isinstance(lhs, bool) and isinstance(rhs, pd.core.generic.NDFrame)\n\n\ndef _is_py3_complex_incompat(result, expected):\n return isinstance(expected, (complex, np.complexfloating)) and np.isnan(result)\n\n\n_good_arith_ops = set(_arith_ops_syms).difference(_special_case_arith_ops_syms)\n\n\[email protected]_if_no_ne\nclass TestEvalNumexprPandas:\n @classmethod\n def setup_class(cls):\n import numexpr as ne\n\n cls.ne = ne\n cls.engine = \"numexpr\"\n cls.parser = \"pandas\"\n\n @classmethod\n def teardown_class(cls):\n del cls.engine, cls.parser\n if hasattr(cls, \"ne\"):\n del cls.ne\n\n def setup_data(self):\n nan_df1 = DataFrame(rand(10, 5))\n nan_df1[nan_df1 > 0.5] = np.nan\n nan_df2 = DataFrame(rand(10, 5))\n nan_df2[nan_df2 > 0.5] = np.nan\n\n self.pandas_lhses = 
(\n DataFrame(randn(10, 5)),\n Series(randn(5)),\n Series([1, 2, np.nan, np.nan, 5]),\n nan_df1,\n )\n self.pandas_rhses = (\n DataFrame(randn(10, 5)),\n Series(randn(5)),\n Series([1, 2, np.nan, np.nan, 5]),\n nan_df2,\n )\n self.scalar_lhses = (randn(),)\n self.scalar_rhses = (randn(),)\n\n self.lhses = self.pandas_lhses + self.scalar_lhses\n self.rhses = self.pandas_rhses + self.scalar_rhses\n\n def setup_ops(self):\n self.cmp_ops = expr._cmp_ops_syms\n self.cmp2_ops = self.cmp_ops[::-1]\n self.bin_ops = expr._bool_ops_syms\n self.special_case_ops = _special_case_arith_ops_syms\n self.arith_ops = _good_arith_ops\n self.unary_ops = \"-\", \"~\", \"not \"\n\n def setup_method(self, method):\n self.setup_ops()\n self.setup_data()\n self.current_engines = filter(lambda x: x != self.engine, _engines)\n\n def teardown_method(self, method):\n del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses\n del self.pandas_rhses, self.pandas_lhses, self.current_engines\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"cmp1\",\n [\"!=\", \"==\", \"<=\", \">=\", \"<\", \">\"],\n ids=[\"ne\", \"eq\", \"le\", \"ge\", \"lt\", \"gt\"],\n )\n @pytest.mark.parametrize(\"cmp2\", [\">\", \"<\"], ids=[\"gt\", \"lt\"])\n def test_complex_cmp_ops(self, cmp1, cmp2):\n for lhs, rhs, binop in product(self.lhses, self.rhses, self.bin_ops):\n lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine)\n rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)\n expected = _eval_single_bin(lhs_new, binop, rhs_new, self.engine)\n\n ex = f\"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)\"\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n self.check_equal(result, expected)\n\n def test_simple_cmp_ops(self):\n bool_lhses = (\n DataFrame(tm.randbool(size=(10, 5))),\n Series(tm.randbool((5,))),\n tm.randbool(),\n )\n bool_rhses = (\n DataFrame(tm.randbool(size=(10, 5))),\n Series(tm.randbool((5,))),\n tm.randbool(),\n )\n for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops):\n self.check_simple_cmp_op(lhs, cmp_op, rhs)\n\n @pytest.mark.slow\n def test_binary_arith_ops(self):\n for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses):\n self.check_binary_arith_op(lhs, op, rhs)\n\n def test_modulus(self):\n for lhs, rhs in product(self.lhses, self.rhses):\n self.check_modulus(lhs, \"%\", rhs)\n\n def test_floor_division(self):\n for lhs, rhs in product(self.lhses, self.rhses):\n self.check_floor_division(lhs, \"//\", rhs)\n\n @td.skip_if_windows\n def test_pow(self):\n # odd failure on win32 platform, so skip\n for lhs, rhs in product(self.lhses, self.rhses):\n self.check_pow(lhs, \"**\", rhs)\n\n @pytest.mark.slow\n def test_single_invert_op(self):\n for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):\n self.check_single_invert_op(lhs, op, rhs)\n\n @pytest.mark.slow\n def test_compound_invert_op(self):\n for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):\n self.check_compound_invert_op(lhs, op, rhs)\n\n @pytest.mark.slow\n def test_chained_cmp_op(self):\n mids = self.lhses\n cmp_ops = \"<\", \">\"\n for lhs, cmp1, mid, cmp2, rhs in product(\n self.lhses, cmp_ops, mids, cmp_ops, self.rhses\n ):\n self.check_chained_cmp_op(lhs, cmp1, mid, cmp2, rhs)\n\n def check_equal(self, result, expected):\n if isinstance(result, DataFrame):\n tm.assert_frame_equal(result, expected)\n elif isinstance(result, Series):\n tm.assert_series_equal(result, expected)\n elif isinstance(result, np.ndarray):\n tm.assert_numpy_array_equal(result, expected)\n else:\n assert 
result == expected\n\n def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):\n def check_operands(left, right, cmp_op):\n return _eval_single_bin(left, cmp_op, right, self.engine)\n\n lhs_new = check_operands(lhs, mid, cmp1)\n rhs_new = check_operands(mid, rhs, cmp2)\n\n if lhs_new is not None and rhs_new is not None:\n ex1 = f\"lhs {cmp1} mid {cmp2} rhs\"\n ex2 = f\"lhs {cmp1} mid and mid {cmp2} rhs\"\n ex3 = f\"(lhs {cmp1} mid) & (mid {cmp2} rhs)\"\n expected = _eval_single_bin(lhs_new, \"&\", rhs_new, self.engine)\n\n for ex in (ex1, ex2, ex3):\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n\n tm.assert_almost_equal(result, expected)\n\n def check_simple_cmp_op(self, lhs, cmp1, rhs):\n ex = f\"lhs {cmp1} rhs\"\n msg = (\n r\"only list-like( or dict-like)? objects are allowed to be \"\n r\"passed to (DataFrame\\.)?isin\\(\\), you passed a \"\n r\"(\\[|')bool(\\]|')|\"\n \"argument of type 'bool' is not iterable\"\n )\n if cmp1 in (\"in\", \"not in\") and not is_list_like(rhs):\n with pytest.raises(TypeError, match=msg):\n pd.eval(\n ex,\n engine=self.engine,\n parser=self.parser,\n local_dict={\"lhs\": lhs, \"rhs\": rhs},\n )\n else:\n expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n self.check_equal(result, expected)\n\n def check_binary_arith_op(self, lhs, arith1, rhs):\n ex = f\"lhs {arith1} rhs\"\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = _eval_single_bin(lhs, arith1, rhs, self.engine)\n\n tm.assert_almost_equal(result, expected)\n ex = f\"lhs {arith1} rhs {arith1} rhs\"\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n nlhs = _eval_single_bin(lhs, arith1, rhs, self.engine)\n self.check_alignment(result, nlhs, rhs, arith1)\n\n def check_alignment(self, result, nlhs, ghs, op):\n try:\n nlhs, ghs = nlhs.align(ghs)\n except (ValueError, TypeError, AttributeError):\n # ValueError: series frame or frame series align\n # TypeError, AttributeError: series or frame with scalar align\n pass\n else:\n\n # direct numpy comparison\n expected = self.ne.evaluate(f\"nlhs {op} ghs\")\n tm.assert_numpy_array_equal(result.values, expected)\n\n # modulus, pow, and floor division require special casing\n\n def check_modulus(self, lhs, arith1, rhs):\n ex = f\"lhs {arith1} rhs\"\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = lhs % rhs\n\n tm.assert_almost_equal(result, expected)\n expected = self.ne.evaluate(f\"expected {arith1} rhs\")\n if isinstance(result, (DataFrame, Series)):\n tm.assert_almost_equal(result.values, expected)\n else:\n tm.assert_almost_equal(result, expected.item())\n\n def check_floor_division(self, lhs, arith1, rhs):\n ex = f\"lhs {arith1} rhs\"\n\n if self.engine == \"python\":\n res = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = lhs // rhs\n self.check_equal(res, expected)\n else:\n msg = (\n r\"unsupported operand type\\(s\\) for //: 'VariableNode' and \"\n \"'VariableNode'\"\n )\n with pytest.raises(TypeError, match=msg):\n pd.eval(\n ex,\n local_dict={\"lhs\": lhs, \"rhs\": rhs},\n engine=self.engine,\n parser=self.parser,\n )\n\n def get_expected_pow_result(self, lhs, rhs):\n try:\n expected = _eval_single_bin(lhs, \"**\", rhs, self.engine)\n except ValueError as e:\n if str(e).startswith(\n \"negative number cannot be raised to a fractional power\"\n ):\n if self.engine == \"python\":\n pytest.skip(str(e))\n else:\n expected = np.nan\n else:\n raise\n return expected\n\n def 
check_pow(self, lhs, arith1, rhs):\n ex = f\"lhs {arith1} rhs\"\n expected = self.get_expected_pow_result(lhs, rhs)\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n\n if (\n is_scalar(lhs)\n and is_scalar(rhs)\n and _is_py3_complex_incompat(result, expected)\n ):\n msg = \"(DataFrame.columns|numpy array) are different\"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_numpy_array_equal(result, expected)\n else:\n tm.assert_almost_equal(result, expected)\n\n ex = f\"(lhs {arith1} rhs) {arith1} rhs\"\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n expected = self.get_expected_pow_result(\n self.get_expected_pow_result(lhs, rhs), rhs\n )\n tm.assert_almost_equal(result, expected)\n\n def check_single_invert_op(self, lhs, cmp1, rhs):\n # simple\n for el in (lhs, rhs):\n try:\n elb = el.astype(bool)\n except AttributeError:\n elb = np.array([bool(el)])\n expected = ~elb\n result = pd.eval(\"~elb\", engine=self.engine, parser=self.parser)\n tm.assert_almost_equal(expected, result)\n\n for engine in self.current_engines:\n tm.assert_almost_equal(\n result, pd.eval(\"~elb\", engine=engine, parser=self.parser)\n )\n\n def check_compound_invert_op(self, lhs, cmp1, rhs):\n skip_these = [\"in\", \"not in\"]\n ex = f\"~(lhs {cmp1} rhs)\"\n\n msg = (\n r\"only list-like( or dict-like)? objects are allowed to be \"\n r\"passed to (DataFrame\\.)?isin\\(\\), you passed a \"\n r\"(\\[|')float(\\]|')|\"\n \"argument of type 'float' is not iterable\"\n )\n if is_scalar(rhs) and cmp1 in skip_these:\n with pytest.raises(TypeError, match=msg):\n pd.eval(\n ex,\n engine=self.engine,\n parser=self.parser,\n local_dict={\"lhs\": lhs, \"rhs\": rhs},\n )\n else:\n # compound\n if is_scalar(lhs) and is_scalar(rhs):\n lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs))\n expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)\n if is_scalar(expected):\n expected = not expected\n else:\n expected = ~expected\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n tm.assert_almost_equal(expected, result)\n\n # make sure the other engines work the same as this one\n for engine in self.current_engines:\n ev = pd.eval(ex, engine=self.engine, parser=self.parser)\n tm.assert_almost_equal(ev, result)\n\n def ex(self, op, var_name=\"lhs\"):\n return f\"{op}{var_name}\"\n\n def test_frame_invert(self):\n expr = self.ex(\"~\")\n\n # ~ ##\n # frame\n # float always raises\n lhs = DataFrame(randn(5, 2))\n if self.engine == \"numexpr\":\n msg = \"couldn't find matching opcode for 'invert_dd'\"\n with pytest.raises(NotImplementedError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n msg = \"ufunc 'invert' not supported for the input types\"\n with pytest.raises(TypeError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n # int raises on numexpr\n lhs = DataFrame(randint(5, size=(5, 2)))\n if self.engine == \"numexpr\":\n msg = \"couldn't find matching opcode for 'invert\"\n with pytest.raises(NotImplementedError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(expect, result)\n\n # bool always works\n lhs = DataFrame(rand(5, 2) > 0.5)\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(expect, result)\n\n # object raises\n lhs = DataFrame({\"b\": [\"a\", 1, 2.0], \"c\": rand(3) > 0.5})\n if self.engine == \"numexpr\":\n 
with pytest.raises(ValueError, match=\"unknown type object\"):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n msg = \"bad operand type for unary ~: 'str'\"\n with pytest.raises(TypeError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n def test_series_invert(self):\n # ~ ####\n expr = self.ex(\"~\")\n\n # series\n # float raises\n lhs = Series(randn(5))\n if self.engine == \"numexpr\":\n msg = \"couldn't find matching opcode for 'invert_dd'\"\n with pytest.raises(NotImplementedError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n msg = \"ufunc 'invert' not supported for the input types\"\n with pytest.raises(TypeError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n # int raises on numexpr\n lhs = Series(randint(5, size=5))\n if self.engine == \"numexpr\":\n msg = \"couldn't find matching opcode for 'invert\"\n with pytest.raises(NotImplementedError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_series_equal(expect, result)\n\n # bool\n lhs = Series(rand(5) > 0.5)\n expect = ~lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_series_equal(expect, result)\n\n # float\n # int\n # bool\n\n # object\n lhs = Series([\"a\", 1, 2.0])\n if self.engine == \"numexpr\":\n with pytest.raises(ValueError, match=\"unknown type object\"):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n msg = \"bad operand type for unary ~: 'str'\"\n with pytest.raises(TypeError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n\n def test_frame_negate(self):\n expr = self.ex(\"-\")\n\n # float\n lhs = DataFrame(randn(5, 2))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(expect, result)\n\n # int\n lhs = DataFrame(randint(5, size=(5, 2)))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(expect, result)\n\n # bool doesn't work with numexpr but works elsewhere\n lhs = DataFrame(rand(5, 2) > 0.5)\n if self.engine == \"numexpr\":\n msg = \"couldn't find matching opcode for 'neg_bb'\"\n with pytest.raises(NotImplementedError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(expect, result)\n\n def test_series_negate(self):\n expr = self.ex(\"-\")\n\n # float\n lhs = Series(randn(5))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_series_equal(expect, result)\n\n # int\n lhs = Series(randint(5, size=5))\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_series_equal(expect, result)\n\n # bool doesn't work with numexpr but works elsewhere\n lhs = Series(rand(5) > 0.5)\n if self.engine == \"numexpr\":\n msg = \"couldn't find matching opcode for 'neg_bb'\"\n with pytest.raises(NotImplementedError, match=msg):\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n else:\n expect = -lhs\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_series_equal(expect, result)\n\n @pytest.mark.parametrize(\n \"lhs\",\n [\n # Float\n DataFrame(randn(5, 2)),\n # Int\n DataFrame(randint(5, size=(5, 2))),\n # bool doesn't work with 
numexpr but works elsewhere\n DataFrame(rand(5, 2) > 0.5),\n ],\n )\n def test_frame_pos(self, lhs):\n expr = self.ex(\"+\")\n expect = lhs\n\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(expect, result)\n\n @pytest.mark.parametrize(\n \"lhs\",\n [\n # Float\n Series(randn(5)),\n # Int\n Series(randint(5, size=5)),\n # bool doesn't work with numexpr but works elsewhere\n Series(rand(5) > 0.5),\n ],\n )\n def test_series_pos(self, lhs):\n expr = self.ex(\"+\")\n expect = lhs\n\n result = pd.eval(expr, engine=self.engine, parser=self.parser)\n tm.assert_series_equal(expect, result)\n\n def test_scalar_unary(self):\n msg = \"bad operand type for unary ~: 'float'\"\n with pytest.raises(TypeError, match=msg):\n pd.eval(\"~1.0\", engine=self.engine, parser=self.parser)\n\n assert pd.eval(\"-1.0\", parser=self.parser, engine=self.engine) == -1.0\n assert pd.eval(\"+1.0\", parser=self.parser, engine=self.engine) == +1.0\n assert pd.eval(\"~1\", parser=self.parser, engine=self.engine) == ~1\n assert pd.eval(\"-1\", parser=self.parser, engine=self.engine) == -1\n assert pd.eval(\"+1\", parser=self.parser, engine=self.engine) == +1\n assert pd.eval(\"~True\", parser=self.parser, engine=self.engine) == ~True\n assert pd.eval(\"~False\", parser=self.parser, engine=self.engine) == ~False\n assert pd.eval(\"-True\", parser=self.parser, engine=self.engine) == -True\n assert pd.eval(\"-False\", parser=self.parser, engine=self.engine) == -False\n assert pd.eval(\"+True\", parser=self.parser, engine=self.engine) == +True\n assert pd.eval(\"+False\", parser=self.parser, engine=self.engine) == +False\n\n def test_unary_in_array(self):\n # GH 11235\n tm.assert_numpy_array_equal(\n pd.eval(\n \"[-True, True, ~True, +True,\"\n \"-False, False, ~False, +False,\"\n \"-37, 37, ~37, +37]\"\n ),\n np.array(\n [\n -True,\n True,\n ~True,\n +True,\n -False,\n False,\n ~False,\n +False,\n -37,\n 37,\n ~37,\n +37,\n ],\n dtype=np.object_,\n ),\n )\n\n @pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])\n def test_float_comparison_bin_op(self, dtype):\n # GH 16363\n df = pd.DataFrame({\"x\": np.array([0], dtype=dtype)})\n res = df.eval(\"x < -0.1\")\n assert res.values == np.array([False])\n\n res = df.eval(\"-5 > x\")\n assert res.values == np.array([False])\n\n def test_disallow_scalar_bool_ops(self):\n exprs = \"1 or 2\", \"1 and 2\"\n exprs += \"a and b\", \"a or b\"\n exprs += (\"1 or 2 and (3 + 2) > 3\",)\n exprs += (\"2 * x > 2 or 1 and 2\",)\n exprs += (\"2 * df > 3 and 1 or a\",)\n\n x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2)) # noqa\n for ex in exprs:\n msg = \"cannot evaluate scalar only bool ops|'BoolOp' nodes are not\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex, engine=self.engine, parser=self.parser)\n\n def test_identical(self):\n # see gh-10546\n x = 1\n result = pd.eval(\"x\", engine=self.engine, parser=self.parser)\n assert result == 1\n assert is_scalar(result)\n\n x = 1.5\n result = pd.eval(\"x\", engine=self.engine, parser=self.parser)\n assert result == 1.5\n assert is_scalar(result)\n\n x = False\n result = pd.eval(\"x\", engine=self.engine, parser=self.parser)\n assert not result\n assert is_bool(result)\n assert is_scalar(result)\n\n x = np.array([1])\n result = pd.eval(\"x\", engine=self.engine, parser=self.parser)\n tm.assert_numpy_array_equal(result, np.array([1]))\n assert result.shape == (1,)\n\n x = np.array([1.5])\n result = pd.eval(\"x\", engine=self.engine, parser=self.parser)\n 
tm.assert_numpy_array_equal(result, np.array([1.5]))\n assert result.shape == (1,)\n\n x = np.array([False]) # noqa\n result = pd.eval(\"x\", engine=self.engine, parser=self.parser)\n tm.assert_numpy_array_equal(result, np.array([False]))\n assert result.shape == (1,)\n\n def test_line_continuation(self):\n # GH 11149\n exp = \"\"\"1 + 2 * \\\n 5 - 1 + 2 \"\"\"\n result = pd.eval(exp, engine=self.engine, parser=self.parser)\n assert result == 12\n\n def test_float_truncation(self):\n # GH 14241\n exp = \"1000000000.006\"\n result = pd.eval(exp, engine=self.engine, parser=self.parser)\n expected = np.float64(exp)\n assert result == expected\n\n df = pd.DataFrame({\"A\": [1000000000.0009, 1000000000.0011, 1000000000.0015]})\n cutoff = 1000000000.0006\n result = df.query(f\"A < {cutoff:.4f}\")\n assert result.empty\n\n cutoff = 1000000000.0010\n result = df.query(f\"A > {cutoff:.4f}\")\n expected = df.loc[[1, 2], :]\n tm.assert_frame_equal(expected, result)\n\n exact = 1000000000.0011\n result = df.query(f\"A == {exact:.4f}\")\n expected = df.loc[[1], :]\n tm.assert_frame_equal(expected, result)\n\n def test_disallow_python_keywords(self):\n # GH 18221\n df = pd.DataFrame([[0, 0, 0]], columns=[\"foo\", \"bar\", \"class\"])\n msg = \"Python keyword not valid identifier in numexpr query\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"class == 0\")\n\n df = pd.DataFrame()\n df.index.name = \"lambda\"\n with pytest.raises(SyntaxError, match=msg):\n df.query(\"lambda == 0\")\n\n\[email protected]_if_no_ne\nclass TestEvalNumexprPython(TestEvalNumexprPandas):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n import numexpr as ne\n\n cls.ne = ne\n cls.engine = \"numexpr\"\n cls.parser = \"python\"\n\n def setup_ops(self):\n self.cmp_ops = list(\n filter(lambda x: x not in (\"in\", \"not in\"), expr._cmp_ops_syms)\n )\n self.cmp2_ops = self.cmp_ops[::-1]\n self.bin_ops = [s for s in expr._bool_ops_syms if s not in (\"and\", \"or\")]\n self.special_case_ops = _special_case_arith_ops_syms\n self.arith_ops = _good_arith_ops\n self.unary_ops = \"+\", \"-\", \"~\"\n\n def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):\n ex1 = f\"lhs {cmp1} mid {cmp2} rhs\"\n msg = \"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex1, engine=self.engine, parser=self.parser)\n\n\nclass TestEvalPythonPython(TestEvalNumexprPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"python\"\n cls.parser = \"python\"\n\n def check_modulus(self, lhs, arith1, rhs):\n ex = f\"lhs {arith1} rhs\"\n result = pd.eval(ex, engine=self.engine, parser=self.parser)\n\n expected = lhs % rhs\n tm.assert_almost_equal(result, expected)\n\n expected = _eval_single_bin(expected, arith1, rhs, self.engine)\n tm.assert_almost_equal(result, expected)\n\n def check_alignment(self, result, nlhs, ghs, op):\n try:\n nlhs, ghs = nlhs.align(ghs)\n except (ValueError, TypeError, AttributeError):\n # ValueError: series frame or frame series align\n # TypeError, AttributeError: series or frame with scalar align\n pass\n else:\n expected = eval(f\"nlhs {op} ghs\")\n tm.assert_almost_equal(result, expected)\n\n\nclass TestEvalPythonPandas(TestEvalPythonPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"python\"\n cls.parser = \"pandas\"\n\n def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):\n TestEvalNumexprPandas.check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs)\n\n\nf = lambda *args, 
**kwargs: np.random.randn()\n\n\n# -------------------------------------\n# gh-12388: Typecasting rules consistency with python\n\n\nclass TestTypeCasting:\n @pytest.mark.parametrize(\"op\", [\"+\", \"-\", \"*\", \"**\", \"/\"])\n # maybe someday... numexpr has too many upcasting rules now\n # chain(*(np.sctypes[x] for x in ['uint', 'int', 'float']))\n @pytest.mark.parametrize(\"dt\", [np.float32, np.float64])\n def test_binop_typecasting(self, engine, parser, op, dt):\n df = tm.makeCustomDataframe(5, 3, data_gen_f=f, dtype=dt)\n s = f\"df {op} 3\"\n res = pd.eval(s, engine=engine, parser=parser)\n assert df.values.dtype == dt\n assert res.values.dtype == dt\n tm.assert_frame_equal(res, eval(s))\n\n s = f\"3 {op} df\"\n res = pd.eval(s, engine=engine, parser=parser)\n assert df.values.dtype == dt\n assert res.values.dtype == dt\n tm.assert_frame_equal(res, eval(s))\n\n\n# -------------------------------------\n# Basic and complex alignment\n\n\ndef _is_datetime(x):\n return issubclass(x.dtype.type, np.datetime64)\n\n\ndef should_warn(*args):\n not_mono = not any(map(operator.attrgetter(\"is_monotonic\"), args))\n only_one_dt = reduce(operator.xor, map(_is_datetime, args))\n return not_mono and only_one_dt\n\n\nclass TestAlignment:\n\n index_types = \"i\", \"u\", \"dt\"\n lhs_index_types = index_types + (\"s\",) # 'p'\n\n def test_align_nested_unary_op(self, engine, parser):\n s = \"df * ~2\"\n df = tm.makeCustomDataframe(5, 3, data_gen_f=f)\n res = pd.eval(s, engine=engine, parser=parser)\n tm.assert_frame_equal(res, df * ~2)\n\n def test_basic_frame_alignment(self, engine, parser):\n args = product(self.lhs_index_types, self.index_types, self.index_types)\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\", RuntimeWarning)\n for lr_idx_type, rr_idx_type, c_idx_type in args:\n df = tm.makeCustomDataframe(\n 10, 10, data_gen_f=f, r_idx_type=lr_idx_type, c_idx_type=c_idx_type\n )\n df2 = tm.makeCustomDataframe(\n 20, 10, data_gen_f=f, r_idx_type=rr_idx_type, c_idx_type=c_idx_type\n )\n # only warns if not monotonic and not sortable\n if should_warn(df.index, df2.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval(\"df + df2\", engine=engine, parser=parser)\n else:\n res = pd.eval(\"df + df2\", engine=engine, parser=parser)\n tm.assert_frame_equal(res, df + df2)\n\n def test_frame_comparison(self, engine, parser):\n args = product(self.lhs_index_types, repeat=2)\n for r_idx_type, c_idx_type in args:\n df = tm.makeCustomDataframe(\n 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type\n )\n res = pd.eval(\"df < 2\", engine=engine, parser=parser)\n tm.assert_frame_equal(res, df < 2)\n\n df3 = DataFrame(randn(*df.shape), index=df.index, columns=df.columns)\n res = pd.eval(\"df < df3\", engine=engine, parser=parser)\n tm.assert_frame_equal(res, df < df3)\n\n @pytest.mark.slow\n def test_medium_complex_frame_alignment(self, engine, parser):\n args = product(\n self.lhs_index_types, self.index_types, self.index_types, self.index_types\n )\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\", RuntimeWarning)\n\n for r1, c1, r2, c2 in args:\n df = tm.makeCustomDataframe(\n 3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1\n )\n df2 = tm.makeCustomDataframe(\n 4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2\n )\n df3 = tm.makeCustomDataframe(\n 5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2\n )\n if should_warn(df.index, df2.index, df3.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = 
pd.eval(\"df + df2 + df3\", engine=engine, parser=parser)\n else:\n res = pd.eval(\"df + df2 + df3\", engine=engine, parser=parser)\n tm.assert_frame_equal(res, df + df2 + df3)\n\n def test_basic_frame_series_alignment(self, engine, parser):\n def testit(r_idx_type, c_idx_type, index_name):\n df = tm.makeCustomDataframe(\n 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type\n )\n index = getattr(df, index_name)\n s = Series(np.random.randn(5), index[:5])\n\n if should_warn(df.index, s.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval(\"df + s\", engine=engine, parser=parser)\n else:\n res = pd.eval(\"df + s\", engine=engine, parser=parser)\n\n if r_idx_type == \"dt\" or c_idx_type == \"dt\":\n expected = df.add(s) if engine == \"numexpr\" else df + s\n else:\n expected = df + s\n tm.assert_frame_equal(res, expected)\n\n args = product(self.lhs_index_types, self.index_types, (\"index\", \"columns\"))\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\", RuntimeWarning)\n for r_idx_type, c_idx_type, index_name in args:\n testit(r_idx_type, c_idx_type, index_name)\n\n def test_basic_series_frame_alignment(self, engine, parser):\n def testit(r_idx_type, c_idx_type, index_name):\n df = tm.makeCustomDataframe(\n 10, 7, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type\n )\n index = getattr(df, index_name)\n s = Series(np.random.randn(5), index[:5])\n if should_warn(s.index, df.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval(\"s + df\", engine=engine, parser=parser)\n else:\n res = pd.eval(\"s + df\", engine=engine, parser=parser)\n\n if r_idx_type == \"dt\" or c_idx_type == \"dt\":\n expected = df.add(s) if engine == \"numexpr\" else s + df\n else:\n expected = s + df\n tm.assert_frame_equal(res, expected)\n\n # only test dt with dt, otherwise weird joins result\n args = product([\"i\", \"u\", \"s\"], [\"i\", \"u\", \"s\"], (\"index\", \"columns\"))\n with warnings.catch_warnings(record=True):\n # avoid warning about comparing strings and ints\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n for r_idx_type, c_idx_type, index_name in args:\n testit(r_idx_type, c_idx_type, index_name)\n\n # dt with dt\n args = product([\"dt\"], [\"dt\"], (\"index\", \"columns\"))\n with warnings.catch_warnings(record=True):\n # avoid warning about comparing strings and ints\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n for r_idx_type, c_idx_type, index_name in args:\n testit(r_idx_type, c_idx_type, index_name)\n\n def test_series_frame_commutativity(self, engine, parser):\n args = product(\n self.lhs_index_types, self.index_types, (\"+\", \"*\"), (\"index\", \"columns\")\n )\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\", RuntimeWarning)\n for r_idx_type, c_idx_type, op, index_name in args:\n df = tm.makeCustomDataframe(\n 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type\n )\n index = getattr(df, index_name)\n s = Series(np.random.randn(5), index[:5])\n\n lhs = f\"s {op} df\"\n rhs = f\"df {op} s\"\n if should_warn(df.index, s.index):\n with tm.assert_produces_warning(RuntimeWarning):\n a = pd.eval(lhs, engine=engine, parser=parser)\n with tm.assert_produces_warning(RuntimeWarning):\n b = pd.eval(rhs, engine=engine, parser=parser)\n else:\n a = pd.eval(lhs, engine=engine, parser=parser)\n b = pd.eval(rhs, engine=engine, parser=parser)\n\n if r_idx_type != \"dt\" and c_idx_type != \"dt\":\n if engine == \"numexpr\":\n tm.assert_frame_equal(a, 
b)\n\n @pytest.mark.slow\n def test_complex_series_frame_alignment(self, engine, parser):\n import random\n\n args = product(\n self.lhs_index_types, self.index_types, self.index_types, self.index_types\n )\n n = 3\n m1 = 5\n m2 = 2 * m1\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\", RuntimeWarning)\n for r1, r2, c1, c2 in args:\n index_name = random.choice([\"index\", \"columns\"])\n obj_name = random.choice([\"df\", \"df2\"])\n\n df = tm.makeCustomDataframe(\n m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1\n )\n df2 = tm.makeCustomDataframe(\n m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2\n )\n index = getattr(locals().get(obj_name), index_name)\n s = Series(np.random.randn(n), index[:n])\n\n if r2 == \"dt\" or c2 == \"dt\":\n if engine == \"numexpr\":\n expected2 = df2.add(s)\n else:\n expected2 = df2 + s\n else:\n expected2 = df2 + s\n\n if r1 == \"dt\" or c1 == \"dt\":\n if engine == \"numexpr\":\n expected = expected2.add(df)\n else:\n expected = expected2 + df\n else:\n expected = expected2 + df\n\n if should_warn(df2.index, s.index, df.index):\n with tm.assert_produces_warning(RuntimeWarning):\n res = pd.eval(\"df2 + s + df\", engine=engine, parser=parser)\n else:\n res = pd.eval(\"df2 + s + df\", engine=engine, parser=parser)\n assert res.shape == expected.shape\n tm.assert_frame_equal(res, expected)\n\n def test_performance_warning_for_poor_alignment(self, engine, parser):\n df = DataFrame(randn(1000, 10))\n s = Series(randn(10000))\n if engine == \"numexpr\":\n seen = PerformanceWarning\n else:\n seen = False\n\n with tm.assert_produces_warning(seen):\n pd.eval(\"df + s\", engine=engine, parser=parser)\n\n s = Series(randn(1000))\n with tm.assert_produces_warning(False):\n pd.eval(\"df + s\", engine=engine, parser=parser)\n\n df = DataFrame(randn(10, 10000))\n s = Series(randn(10000))\n with tm.assert_produces_warning(False):\n pd.eval(\"df + s\", engine=engine, parser=parser)\n\n df = DataFrame(randn(10, 10))\n s = Series(randn(10000))\n\n is_python_engine = engine == \"python\"\n\n if not is_python_engine:\n wrn = PerformanceWarning\n else:\n wrn = False\n\n with tm.assert_produces_warning(wrn) as w:\n pd.eval(\"df + s\", engine=engine, parser=parser)\n\n if not is_python_engine:\n assert len(w) == 1\n msg = str(w[0].message)\n loged = np.log10(s.size - df.shape[1])\n expected = (\n f\"Alignment difference on axis 1 is larger \"\n f\"than an order of magnitude on term 'df', \"\n f\"by more than {loged:.4g}; performance may suffer\"\n )\n assert msg == expected\n\n\n# ------------------------------------\n# Slightly more complex ops\n\n\[email protected]_if_no_ne\nclass TestOperationsNumExprPandas:\n @classmethod\n def setup_class(cls):\n cls.engine = \"numexpr\"\n cls.parser = \"pandas\"\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n\n @classmethod\n def teardown_class(cls):\n del cls.engine, cls.parser\n\n def eval(self, *args, **kwargs):\n kwargs[\"engine\"] = self.engine\n kwargs[\"parser\"] = self.parser\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n return pd.eval(*args, **kwargs)\n\n def test_simple_arith_ops(self):\n ops = self.arith_ops\n\n for op in filter(lambda x: x != \"//\", ops):\n ex = f\"1 {op} 1\"\n ex2 = f\"x {op} 1\"\n ex3 = f\"1 {op} (x + 1)\"\n\n if op in (\"in\", \"not in\"):\n msg = \"argument of type 'int' is not iterable\"\n with pytest.raises(TypeError, match=msg):\n pd.eval(ex, engine=self.engine, parser=self.parser)\n else:\n expec = _eval_single_bin(1, op, 1, self.engine)\n x = 
self.eval(ex, engine=self.engine, parser=self.parser)\n assert x == expec\n\n expec = _eval_single_bin(x, op, 1, self.engine)\n y = self.eval(\n ex2, local_dict={\"x\": x}, engine=self.engine, parser=self.parser\n )\n assert y == expec\n\n expec = _eval_single_bin(1, op, x + 1, self.engine)\n y = self.eval(\n ex3, local_dict={\"x\": x}, engine=self.engine, parser=self.parser\n )\n assert y == expec\n\n def test_simple_bool_ops(self):\n for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), (True, False)):\n ex = f\"{lhs} {op} {rhs}\"\n res = self.eval(ex)\n exp = eval(ex)\n assert res == exp\n\n def test_bool_ops_with_constants(self):\n for op, lhs, rhs in product(\n expr._bool_ops_syms, (\"True\", \"False\"), (\"True\", \"False\")\n ):\n ex = f\"{lhs} {op} {rhs}\"\n res = self.eval(ex)\n exp = eval(ex)\n assert res == exp\n\n def test_4d_ndarray_fails(self):\n x = randn(3, 4, 5, 6)\n y = Series(randn(10))\n msg = \"N-dimensional objects, where N > 2, are not supported with eval\"\n with pytest.raises(NotImplementedError, match=msg):\n self.eval(\"x + y\", local_dict={\"x\": x, \"y\": y})\n\n def test_constant(self):\n x = self.eval(\"1\")\n assert x == 1\n\n def test_single_variable(self):\n df = DataFrame(randn(10, 2))\n df2 = self.eval(\"df\", local_dict={\"df\": df})\n tm.assert_frame_equal(df, df2)\n\n def test_truediv(self):\n s = np.array([1])\n ex = \"s / 1\"\n d = {\"s\": s} # noqa\n\n # FutureWarning: The `truediv` parameter in pd.eval is deprecated and will be\n # removed in a future version.\n with tm.assert_produces_warning(FutureWarning):\n res = self.eval(ex, truediv=False)\n tm.assert_numpy_array_equal(res, np.array([1.0]))\n\n with tm.assert_produces_warning(FutureWarning):\n res = self.eval(ex, truediv=True)\n tm.assert_numpy_array_equal(res, np.array([1.0]))\n\n with tm.assert_produces_warning(FutureWarning):\n res = self.eval(\"1 / 2\", truediv=True)\n expec = 0.5\n assert res == expec\n\n with tm.assert_produces_warning(FutureWarning):\n res = self.eval(\"1 / 2\", truediv=False)\n expec = 0.5\n assert res == expec\n\n with tm.assert_produces_warning(FutureWarning):\n res = self.eval(\"s / 2\", truediv=False)\n expec = 0.5\n assert res == expec\n\n with tm.assert_produces_warning(FutureWarning):\n res = self.eval(\"s / 2\", truediv=True)\n expec = 0.5\n assert res == expec\n\n def test_failing_subscript_with_name_error(self):\n df = DataFrame(np.random.randn(5, 3)) # noqa\n with pytest.raises(NameError, match=\"name 'x' is not defined\"):\n self.eval(\"df[x > 2] > 2\")\n\n def test_lhs_expression_subscript(self):\n df = DataFrame(np.random.randn(5, 3))\n result = self.eval(\"(df + 1)[df > 2]\", local_dict={\"df\": df})\n expected = (df + 1)[df > 2]\n tm.assert_frame_equal(result, expected)\n\n def test_attr_expression(self):\n df = DataFrame(np.random.randn(5, 3), columns=list(\"abc\"))\n expr1 = \"df.a < df.b\"\n expec1 = df.a < df.b\n expr2 = \"df.a + df.b + df.c\"\n expec2 = df.a + df.b + df.c\n expr3 = \"df.a + df.b + df.c[df.b < 0]\"\n expec3 = df.a + df.b + df.c[df.b < 0]\n exprs = expr1, expr2, expr3\n expecs = expec1, expec2, expec3\n for e, expec in zip(exprs, expecs):\n tm.assert_series_equal(expec, self.eval(e, local_dict={\"df\": df}))\n\n def test_assignment_fails(self):\n df = DataFrame(np.random.randn(5, 3), columns=list(\"abc\"))\n df2 = DataFrame(np.random.randn(5, 3))\n expr1 = \"df = df2\"\n msg = \"cannot assign without a target object\"\n with pytest.raises(ValueError, match=msg):\n self.eval(expr1, local_dict={\"df\": df, \"df2\": 
df2})\n\n def test_assignment_column(self):\n df = DataFrame(np.random.randn(5, 2), columns=list(\"ab\"))\n orig_df = df.copy()\n\n # multiple assignees\n with pytest.raises(SyntaxError, match=\"invalid syntax\"):\n df.eval(\"d c = a + b\")\n\n # invalid assignees\n msg = \"left hand side of an assignment must be a single name\"\n with pytest.raises(SyntaxError, match=msg):\n df.eval(\"d,c = a + b\")\n if compat.PY38:\n msg = \"cannot assign to function call\"\n else:\n msg = \"can't assign to function call\"\n with pytest.raises(SyntaxError, match=msg):\n df.eval('Timestamp(\"20131001\") = a + b')\n\n # single assignment - existing variable\n expected = orig_df.copy()\n expected[\"a\"] = expected[\"a\"] + expected[\"b\"]\n df = orig_df.copy()\n df.eval(\"a = a + b\", inplace=True)\n tm.assert_frame_equal(df, expected)\n\n # single assignment - new variable\n expected = orig_df.copy()\n expected[\"c\"] = expected[\"a\"] + expected[\"b\"]\n df = orig_df.copy()\n df.eval(\"c = a + b\", inplace=True)\n tm.assert_frame_equal(df, expected)\n\n # with a local name overlap\n def f():\n df = orig_df.copy()\n a = 1 # noqa\n df.eval(\"a = 1 + b\", inplace=True)\n return df\n\n df = f()\n expected = orig_df.copy()\n expected[\"a\"] = 1 + expected[\"b\"]\n tm.assert_frame_equal(df, expected)\n\n df = orig_df.copy()\n\n def f():\n a = 1 # noqa\n old_a = df.a.copy()\n df.eval(\"a = a + b\", inplace=True)\n result = old_a + df.b\n tm.assert_series_equal(result, df.a, check_names=False)\n assert result.name is None\n\n f()\n\n # multiple assignment\n df = orig_df.copy()\n df.eval(\"c = a + b\", inplace=True)\n msg = \"can only assign a single expression\"\n with pytest.raises(SyntaxError, match=msg):\n df.eval(\"c = a = b\")\n\n # explicit targets\n df = orig_df.copy()\n self.eval(\"c = df.a + df.b\", local_dict={\"df\": df}, target=df, inplace=True)\n expected = orig_df.copy()\n expected[\"c\"] = expected[\"a\"] + expected[\"b\"]\n tm.assert_frame_equal(df, expected)\n\n def test_column_in(self):\n # GH 11235\n df = DataFrame({\"a\": [11], \"b\": [-32]})\n result = df.eval(\"a in [11, -32]\")\n expected = Series([True])\n tm.assert_series_equal(result, expected)\n\n def assignment_not_inplace(self):\n # see gh-9297\n df = DataFrame(np.random.randn(5, 2), columns=list(\"ab\"))\n\n actual = df.eval(\"c = a + b\", inplace=False)\n assert actual is not None\n\n expected = df.copy()\n expected[\"c\"] = expected[\"a\"] + expected[\"b\"]\n tm.assert_frame_equal(df, expected)\n\n def test_multi_line_expression(self):\n # GH 11149\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n expected = df.copy()\n\n expected[\"c\"] = expected[\"a\"] + expected[\"b\"]\n expected[\"d\"] = expected[\"c\"] + expected[\"b\"]\n ans = df.eval(\n \"\"\"\n c = a + b\n d = c + b\"\"\",\n inplace=True,\n )\n tm.assert_frame_equal(expected, df)\n assert ans is None\n\n expected[\"a\"] = expected[\"a\"] - 1\n expected[\"e\"] = expected[\"a\"] + 2\n ans = df.eval(\n \"\"\"\n a = a - 1\n e = a + 2\"\"\",\n inplace=True,\n )\n tm.assert_frame_equal(expected, df)\n assert ans is None\n\n # multi-line not valid if not all assignments\n msg = \"Multi-line expressions are only valid if all expressions contain\"\n with pytest.raises(ValueError, match=msg):\n df.eval(\n \"\"\"\n a = b + 2\n b - 2\"\"\",\n inplace=False,\n )\n\n def test_multi_line_expression_not_inplace(self):\n # GH 11149\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n expected = df.copy()\n\n expected[\"c\"] = expected[\"a\"] + expected[\"b\"]\n 
expected[\"d\"] = expected[\"c\"] + expected[\"b\"]\n df = df.eval(\n \"\"\"\n c = a + b\n d = c + b\"\"\",\n inplace=False,\n )\n tm.assert_frame_equal(expected, df)\n\n expected[\"a\"] = expected[\"a\"] - 1\n expected[\"e\"] = expected[\"a\"] + 2\n df = df.eval(\n \"\"\"\n a = a - 1\n e = a + 2\"\"\",\n inplace=False,\n )\n tm.assert_frame_equal(expected, df)\n\n def test_multi_line_expression_local_variable(self):\n # GH 15342\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n expected = df.copy()\n\n local_var = 7\n expected[\"c\"] = expected[\"a\"] * local_var\n expected[\"d\"] = expected[\"c\"] + local_var\n ans = df.eval(\n \"\"\"\n c = a * @local_var\n d = c + @local_var\n \"\"\",\n inplace=True,\n )\n tm.assert_frame_equal(expected, df)\n assert ans is None\n\n def test_multi_line_expression_callable_local_variable(self):\n # 26426\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n\n def local_func(a, b):\n return b\n\n expected = df.copy()\n expected[\"c\"] = expected[\"a\"] * local_func(1, 7)\n expected[\"d\"] = expected[\"c\"] + local_func(1, 7)\n ans = df.eval(\n \"\"\"\n c = a * @local_func(1, 7)\n d = c + @local_func(1, 7)\n \"\"\",\n inplace=True,\n )\n tm.assert_frame_equal(expected, df)\n assert ans is None\n\n def test_multi_line_expression_callable_local_variable_with_kwargs(self):\n # 26426\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n\n def local_func(a, b):\n return b\n\n expected = df.copy()\n expected[\"c\"] = expected[\"a\"] * local_func(b=7, a=1)\n expected[\"d\"] = expected[\"c\"] + local_func(b=7, a=1)\n ans = df.eval(\n \"\"\"\n c = a * @local_func(b=7, a=1)\n d = c + @local_func(b=7, a=1)\n \"\"\",\n inplace=True,\n )\n tm.assert_frame_equal(expected, df)\n assert ans is None\n\n def test_assignment_in_query(self):\n # GH 8664\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n df_orig = df.copy()\n msg = \"cannot assign without a target object\"\n with pytest.raises(ValueError, match=msg):\n df.query(\"a = 1\")\n tm.assert_frame_equal(df, df_orig)\n\n def test_query_inplace(self):\n # see gh-11149\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n expected = df.copy()\n expected = expected[expected[\"a\"] == 2]\n df.query(\"a == 2\", inplace=True)\n tm.assert_frame_equal(expected, df)\n\n df = {}\n expected = {\"a\": 3}\n\n self.eval(\"a = 1 + 2\", target=df, inplace=True)\n tm.assert_dict_equal(df, expected)\n\n @pytest.mark.parametrize(\"invalid_target\", [1, \"cat\", [1, 2], np.array([]), (1, 3)])\n @pytest.mark.filterwarnings(\"ignore::FutureWarning\")\n def test_cannot_item_assign(self, invalid_target):\n msg = \"Cannot assign expression output to target\"\n expression = \"a = 1 + 2\"\n\n with pytest.raises(ValueError, match=msg):\n self.eval(expression, target=invalid_target, inplace=True)\n\n if hasattr(invalid_target, \"copy\"):\n with pytest.raises(ValueError, match=msg):\n self.eval(expression, target=invalid_target, inplace=False)\n\n @pytest.mark.parametrize(\"invalid_target\", [1, \"cat\", (1, 3)])\n def test_cannot_copy_item(self, invalid_target):\n msg = \"Cannot return a copy of the target\"\n expression = \"a = 1 + 2\"\n\n with pytest.raises(ValueError, match=msg):\n self.eval(expression, target=invalid_target, inplace=False)\n\n @pytest.mark.parametrize(\"target\", [1, \"cat\", [1, 2], np.array([]), (1, 3), {1: 2}])\n def test_inplace_no_assignment(self, target):\n expression = \"1 + 2\"\n\n assert self.eval(expression, target=target, inplace=False) == 3\n\n msg = \"Cannot operate inplace if 
there is no assignment\"\n with pytest.raises(ValueError, match=msg):\n self.eval(expression, target=target, inplace=True)\n\n def test_basic_period_index_boolean_expression(self):\n df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type=\"p\", r_idx_type=\"i\")\n\n e = df < 2\n r = self.eval(\"df < 2\", local_dict={\"df\": df})\n x = df < 2\n\n tm.assert_frame_equal(r, e)\n tm.assert_frame_equal(x, e)\n\n def test_basic_period_index_subscript_expression(self):\n df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type=\"p\", r_idx_type=\"i\")\n r = self.eval(\"df[df < 2 + 3]\", local_dict={\"df\": df})\n e = df[df < 2 + 3]\n tm.assert_frame_equal(r, e)\n\n def test_nested_period_index_subscript_expression(self):\n df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type=\"p\", r_idx_type=\"i\")\n r = self.eval(\"df[df[df < 2] < 2] + df * 2\", local_dict={\"df\": df})\n e = df[df[df < 2] < 2] + df * 2\n tm.assert_frame_equal(r, e)\n\n def test_date_boolean(self):\n df = DataFrame(randn(5, 3))\n df[\"dates1\"] = date_range(\"1/1/2012\", periods=5)\n res = self.eval(\n \"df.dates1 < 20130101\",\n local_dict={\"df\": df},\n engine=self.engine,\n parser=self.parser,\n )\n expec = df.dates1 < \"20130101\"\n tm.assert_series_equal(res, expec, check_names=False)\n\n def test_simple_in_ops(self):\n if self.parser != \"python\":\n res = pd.eval(\"1 in [1, 2]\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\"2 in (1, 2)\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\"3 in (1, 2)\", engine=self.engine, parser=self.parser)\n assert not res\n\n res = pd.eval(\"3 not in (1, 2)\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\"[3] not in (1, 2)\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\"[3] in ([3], 2)\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\"[[3]] in [[[3]], 2]\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\"(3,) in [(3,), 2]\", engine=self.engine, parser=self.parser)\n assert res\n\n res = pd.eval(\n \"(3,) not in [(3,), 2]\", engine=self.engine, parser=self.parser\n )\n assert not res\n\n res = pd.eval(\n \"[(3,)] in [[(3,)], 2]\", engine=self.engine, parser=self.parser\n )\n assert res\n else:\n msg = \"'In' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\"1 in [1, 2]\", engine=self.engine, parser=self.parser)\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\"2 in (1, 2)\", engine=self.engine, parser=self.parser)\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\"3 in (1, 2)\", engine=self.engine, parser=self.parser)\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\n \"[(3,)] in (1, 2, [(3,)])\", engine=self.engine, parser=self.parser\n )\n msg = \"'NotIn' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\"3 not in (1, 2)\", engine=self.engine, parser=self.parser)\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\n \"[3] not in (1, 2, [[3]])\", engine=self.engine, parser=self.parser\n )\n\n\[email protected]_if_no_ne\nclass TestOperationsNumExprPython(TestOperationsNumExprPandas):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"numexpr\"\n cls.parser = \"python\"\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n cls.arith_ops = filter(lambda x: x not in (\"in\", \"not in\"), cls.arith_ops)\n\n def test_check_many_exprs(self):\n a 
= 1 # noqa\n expr = \" * \".join(\"a\" * 33)\n expected = 1\n res = pd.eval(expr, engine=self.engine, parser=self.parser)\n assert res == expected\n\n def test_fails_and(self):\n df = DataFrame(np.random.randn(5, 3))\n msg = \"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\n \"df > 2 and df > 3\",\n local_dict={\"df\": df},\n parser=self.parser,\n engine=self.engine,\n )\n\n def test_fails_or(self):\n df = DataFrame(np.random.randn(5, 3))\n msg = \"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\n \"df > 2 or df > 3\",\n local_dict={\"df\": df},\n parser=self.parser,\n engine=self.engine,\n )\n\n def test_fails_not(self):\n df = DataFrame(np.random.randn(5, 3))\n msg = \"'Not' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(\n \"not df > 2\",\n local_dict={\"df\": df},\n parser=self.parser,\n engine=self.engine,\n )\n\n def test_fails_ampersand(self):\n df = DataFrame(np.random.randn(5, 3)) # noqa\n ex = \"(df + 2)[df > 1] > 0 & (df > 0)\"\n msg = \"cannot evaluate scalar only bool ops\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex, parser=self.parser, engine=self.engine)\n\n def test_fails_pipe(self):\n df = DataFrame(np.random.randn(5, 3)) # noqa\n ex = \"(df + 2)[df > 1] > 0 | (df > 0)\"\n msg = \"cannot evaluate scalar only bool ops\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex, parser=self.parser, engine=self.engine)\n\n def test_bool_ops_with_constants(self):\n for op, lhs, rhs in product(\n expr._bool_ops_syms, (\"True\", \"False\"), (\"True\", \"False\")\n ):\n ex = f\"{lhs} {op} {rhs}\"\n if op in (\"and\", \"or\"):\n msg = \"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n self.eval(ex)\n else:\n res = self.eval(ex)\n exp = eval(ex)\n assert res == exp\n\n def test_simple_bool_ops(self):\n for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), (True, False)):\n ex = f\"lhs {op} rhs\"\n if op in (\"and\", \"or\"):\n msg = \"'BoolOp' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex, engine=self.engine, parser=self.parser)\n else:\n res = pd.eval(ex, engine=self.engine, parser=self.parser)\n exp = eval(ex)\n assert res == exp\n\n\nclass TestOperationsPythonPython(TestOperationsNumExprPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = cls.parser = \"python\"\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n cls.arith_ops = filter(lambda x: x not in (\"in\", \"not in\"), cls.arith_ops)\n\n\nclass TestOperationsPythonPandas(TestOperationsNumExprPandas):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"python\"\n cls.parser = \"pandas\"\n cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms\n\n\[email protected]_if_no_ne\nclass TestMathPythonPython:\n @classmethod\n def setup_class(cls):\n cls.engine = \"python\"\n cls.parser = \"pandas\"\n cls.unary_fns = _unary_math_ops\n cls.binary_fns = _binary_math_ops\n\n @classmethod\n def teardown_class(cls):\n del cls.engine, cls.parser\n\n def eval(self, *args, **kwargs):\n kwargs[\"engine\"] = self.engine\n kwargs[\"parser\"] = self.parser\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n return pd.eval(*args, **kwargs)\n\n def test_unary_functions(self, unary_fns_for_ne):\n df = DataFrame({\"a\": np.random.randn(10)})\n a = df.a\n\n for fn in unary_fns_for_ne:\n 
expr = f\"{fn}(a)\"\n got = self.eval(expr)\n with np.errstate(all=\"ignore\"):\n expect = getattr(np, fn)(a)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def test_floor_and_ceil_functions_raise_error(self, ne_lt_2_6_9, unary_fns_for_ne):\n for fn in (\"floor\", \"ceil\"):\n msg = f'\"{fn}\" is not a supported function'\n with pytest.raises(ValueError, match=msg):\n expr = f\"{fn}(100)\"\n self.eval(expr)\n\n def test_binary_functions(self):\n df = DataFrame({\"a\": np.random.randn(10), \"b\": np.random.randn(10)})\n a = df.a\n b = df.b\n for fn in self.binary_fns:\n expr = f\"{fn}(a, b)\"\n got = self.eval(expr)\n with np.errstate(all=\"ignore\"):\n expect = getattr(np, fn)(a, b)\n tm.assert_almost_equal(got, expect, check_names=False)\n\n def test_df_use_case(self):\n df = DataFrame({\"a\": np.random.randn(10), \"b\": np.random.randn(10)})\n df.eval(\n \"e = arctan2(sin(a), b)\",\n engine=self.engine,\n parser=self.parser,\n inplace=True,\n )\n got = df.e\n expect = np.arctan2(np.sin(df.a), df.b)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def test_df_arithmetic_subexpression(self):\n df = DataFrame({\"a\": np.random.randn(10), \"b\": np.random.randn(10)})\n df.eval(\"e = sin(a + b)\", engine=self.engine, parser=self.parser, inplace=True)\n got = df.e\n expect = np.sin(df.a + df.b)\n tm.assert_series_equal(got, expect, check_names=False)\n\n def check_result_type(self, dtype, expect_dtype):\n df = DataFrame({\"a\": np.random.randn(10).astype(dtype)})\n assert df.a.dtype == dtype\n df.eval(\"b = sin(a)\", engine=self.engine, parser=self.parser, inplace=True)\n got = df.b\n expect = np.sin(df.a)\n assert expect.dtype == got.dtype\n assert expect_dtype == got.dtype\n tm.assert_series_equal(got, expect, check_names=False)\n\n def test_result_types(self):\n self.check_result_type(np.int32, np.float64)\n self.check_result_type(np.int64, np.float64)\n self.check_result_type(np.float32, np.float32)\n self.check_result_type(np.float64, np.float64)\n\n @td.skip_if_windows\n def test_result_complex128(self):\n # xref https://github.com/pandas-dev/pandas/issues/12293\n # this fails on Windows, apparently a floating point precision issue\n\n # Did not test complex64 because DataFrame is converting it to\n # complex128. 
Due to https://github.com/pandas-dev/pandas/issues/10952\n self.check_result_type(np.complex128, np.complex128)\n\n def test_undefined_func(self):\n df = DataFrame({\"a\": np.random.randn(10)})\n msg = '\"mysin\" is not a supported function'\n\n with pytest.raises(ValueError, match=msg):\n df.eval(\"mysin(a)\", engine=self.engine, parser=self.parser)\n\n def test_keyword_arg(self):\n df = DataFrame({\"a\": np.random.randn(10)})\n msg = 'Function \"sin\" does not support keyword arguments'\n\n with pytest.raises(TypeError, match=msg):\n df.eval(\"sin(x=a)\", engine=self.engine, parser=self.parser)\n\n\nclass TestMathPythonPandas(TestMathPythonPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"python\"\n cls.parser = \"pandas\"\n\n\nclass TestMathNumExprPandas(TestMathPythonPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"numexpr\"\n cls.parser = \"pandas\"\n\n\nclass TestMathNumExprPython(TestMathPythonPython):\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.engine = \"numexpr\"\n cls.parser = \"python\"\n\n\n_var_s = randn(10)\n\n\nclass TestScope:\n def test_global_scope(self, engine, parser):\n e = \"_var_s * 2\"\n tm.assert_numpy_array_equal(\n _var_s * 2, pd.eval(e, engine=engine, parser=parser)\n )\n\n def test_no_new_locals(self, engine, parser):\n x = 1 # noqa\n lcls = locals().copy()\n pd.eval(\"x + 1\", local_dict=lcls, engine=engine, parser=parser)\n lcls2 = locals().copy()\n lcls2.pop(\"lcls\")\n assert lcls == lcls2\n\n def test_no_new_globals(self, engine, parser):\n x = 1 # noqa\n gbls = globals().copy()\n pd.eval(\"x + 1\", engine=engine, parser=parser)\n gbls2 = globals().copy()\n assert gbls == gbls2\n\n\[email protected]_if_no_ne\ndef test_invalid_engine():\n msg = \"Invalid engine 'asdf' passed\"\n with pytest.raises(KeyError, match=msg):\n pd.eval(\"x + y\", local_dict={\"x\": 1, \"y\": 2}, engine=\"asdf\")\n\n\[email protected]_if_no_ne\ndef test_invalid_parser():\n msg = \"Invalid parser 'asdf' passed\"\n with pytest.raises(KeyError, match=msg):\n pd.eval(\"x + y\", local_dict={\"x\": 1, \"y\": 2}, parser=\"asdf\")\n\n\n_parsers: Dict[str, Type[BaseExprVisitor]] = {\n \"python\": PythonExprVisitor,\n \"pytables\": pytables.PyTablesExprVisitor,\n \"pandas\": PandasExprVisitor,\n}\n\n\[email protected](\"engine\", _engines)\[email protected](\"parser\", _parsers)\ndef test_disallowed_nodes(engine, parser):\n VisitorClass = _parsers[parser]\n uns_ops = VisitorClass.unsupported_nodes\n inst = VisitorClass(\"x + 1\", engine, parser)\n\n for ops in uns_ops:\n msg = \"nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n getattr(inst, ops)()\n\n\ndef test_syntax_error_exprs(engine, parser):\n e = \"s +\"\n with pytest.raises(SyntaxError, match=\"invalid syntax\"):\n pd.eval(e, engine=engine, parser=parser)\n\n\ndef test_name_error_exprs(engine, parser):\n e = \"s + t\"\n msg = \"name 's' is not defined\"\n with pytest.raises(NameError, match=msg):\n pd.eval(e, engine=engine, parser=parser)\n\n\ndef test_invalid_local_variable_reference(engine, parser):\n a, b = 1, 2 # noqa\n exprs = \"a + @b\", \"@a + b\", \"@a + @b\"\n\n for _expr in exprs:\n if parser != \"pandas\":\n with pytest.raises(SyntaxError, match=\"The '@' prefix is only\"):\n pd.eval(_expr, engine=engine, parser=parser)\n else:\n with pytest.raises(SyntaxError, match=\"The '@' prefix is not\"):\n pd.eval(_expr, engine=engine, parser=parser)\n\n\ndef test_numexpr_builtin_raises(engine, 
parser):\n sin, dotted_line = 1, 2\n if engine == \"numexpr\":\n msg = \"Variables in expression .+\"\n with pytest.raises(NumExprClobberingError, match=msg):\n pd.eval(\"sin + dotted_line\", engine=engine, parser=parser)\n else:\n res = pd.eval(\"sin + dotted_line\", engine=engine, parser=parser)\n assert res == sin + dotted_line\n\n\ndef test_bad_resolver_raises(engine, parser):\n cannot_resolve = 42, 3.0\n with pytest.raises(TypeError, match=\"Resolver of type .+\"):\n pd.eval(\"1 + 2\", resolvers=cannot_resolve, engine=engine, parser=parser)\n\n\ndef test_empty_string_raises(engine, parser):\n # GH 13139\n with pytest.raises(ValueError, match=\"expr cannot be an empty string\"):\n pd.eval(\"\", engine=engine, parser=parser)\n\n\ndef test_more_than_one_expression_raises(engine, parser):\n with pytest.raises(SyntaxError, match=(\"only a single expression is allowed\")):\n pd.eval(\"1 + 1; 2 + 2\", engine=engine, parser=parser)\n\n\[email protected](\"cmp\", (\"and\", \"or\"))\[email protected](\"lhs\", (int, float))\[email protected](\"rhs\", (int, float))\ndef test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):\n gen = {int: lambda: np.random.randint(10), float: np.random.randn}\n\n mid = gen[lhs]() # noqa\n lhs = gen[lhs]() # noqa\n rhs = gen[rhs]() # noqa\n\n ex1 = f\"lhs {cmp} mid {cmp} rhs\"\n ex2 = f\"lhs {cmp} mid and mid {cmp} rhs\"\n ex3 = f\"(lhs {cmp} mid) & (mid {cmp} rhs)\"\n for ex in (ex1, ex2, ex3):\n msg = \"cannot evaluate scalar only bool ops|'BoolOp' nodes are not\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex, engine=engine, parser=parser)\n\n\[email protected](\n \"other\",\n [\n \"'x'\",\n pytest.param(\n \"...\", marks=pytest.mark.xfail(not compat.PY38, reason=\"GH-28116\")\n ),\n ],\n)\ndef test_equals_various(other):\n df = DataFrame({\"A\": [\"a\", \"b\", \"c\"]})\n result = df.eval(f\"A == {other}\")\n expected = Series([False, False, False], name=\"A\")\n if _USE_NUMEXPR:\n # https://github.com/pandas-dev/pandas/issues/10239\n # lose name with numexpr engine. 
Remove when that's fixed.\n expected.name = None\n tm.assert_series_equal(result, expected)\n\n\ndef test_inf(engine, parser):\n s = \"inf + 1\"\n expected = np.inf\n result = pd.eval(s, engine=engine, parser=parser)\n assert result == expected\n\n\ndef test_truediv_deprecated(engine, parser):\n # GH#29182\n match = \"The `truediv` parameter in pd.eval is deprecated\"\n\n with tm.assert_produces_warning(FutureWarning) as m:\n pd.eval(\"1+1\", engine=engine, parser=parser, truediv=True)\n\n assert len(m) == 1\n assert match in str(m[0].message)\n\n with tm.assert_produces_warning(FutureWarning) as m:\n pd.eval(\"1+1\", engine=engine, parser=parser, truediv=False)\n\n assert len(m) == 1\n assert match in str(m[0].message)\n\n\ndef test_negate_lt_eq_le(engine, parser):\n df = pd.DataFrame([[0, 10], [1, 20]], columns=[\"cat\", \"count\"])\n expected = df[~(df.cat > 0)]\n\n result = df.query(\"~(cat > 0)\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n if parser == \"python\":\n msg = \"'Not' nodes are not implemented\"\n with pytest.raises(NotImplementedError, match=msg):\n df.query(\"not (cat > 0)\", engine=engine, parser=parser)\n else:\n result = df.query(\"not (cat > 0)\", engine=engine, parser=parser)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestValidate:\n def test_validate_bool_args(self):\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n\n for value in invalid_values:\n msg = 'For argument \"inplace\" expected type bool, received type'\n with pytest.raises(ValueError, match=msg):\n pd.eval(\"2+2\", inplace=value)\n",
"\"\"\"\nTests the TextReader class in parsers.pyx, which\nis integral to the C engine in parsers.py\n\"\"\"\nfrom io import BytesIO, StringIO\nimport os\n\nimport numpy as np\nimport pytest\n\nimport pandas._libs.parsers as parser\nfrom pandas._libs.parsers import TextReader\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nfrom pandas.io.parsers import TextFileReader, read_csv\n\n\nclass TestTextReader:\n @pytest.fixture(autouse=True)\n def setup_method(self, datapath):\n self.dirpath = datapath(\"io\", \"parser\", \"data\")\n csv1_dirpath = datapath(\"io\", \"data\", \"csv\")\n self.csv1 = os.path.join(csv1_dirpath, \"test1.csv\")\n self.csv2 = os.path.join(self.dirpath, \"test2.csv\")\n self.xls1 = os.path.join(self.dirpath, \"test.xls\")\n\n def test_file_handle(self):\n with open(self.csv1, \"rb\") as f:\n reader = TextReader(f)\n reader.read()\n\n def test_string_filename(self):\n reader = TextReader(self.csv1, header=None)\n reader.read()\n\n def test_file_handle_mmap(self):\n with open(self.csv1, \"rb\") as f:\n reader = TextReader(f, memory_map=True, header=None)\n reader.read()\n\n def test_StringIO(self):\n with open(self.csv1, \"rb\") as f:\n text = f.read()\n src = BytesIO(text)\n reader = TextReader(src, header=None)\n reader.read()\n\n def test_string_factorize(self):\n # should this be optional?\n data = \"a\\nb\\na\\nb\\na\"\n reader = TextReader(StringIO(data), header=None)\n result = reader.read()\n assert len(set(map(id, result[0]))) == 2\n\n def test_skipinitialspace(self):\n data = \"a, b\\na, b\\na, b\\na, b\"\n\n reader = TextReader(StringIO(data), skipinitialspace=True, header=None)\n result = reader.read()\n\n tm.assert_numpy_array_equal(\n result[0], np.array([\"a\", \"a\", \"a\", \"a\"], dtype=np.object_)\n )\n tm.assert_numpy_array_equal(\n result[1], np.array([\"b\", \"b\", \"b\", \"b\"], dtype=np.object_)\n )\n\n def test_parse_booleans(self):\n data = \"True\\nFalse\\nTrue\\nTrue\"\n\n reader = TextReader(StringIO(data), header=None)\n result = reader.read()\n\n assert result[0].dtype == np.bool_\n\n def test_delimit_whitespace(self):\n data = 'a b\\na\\t\\t \"b\"\\n\"a\"\\t \\t b'\n\n reader = TextReader(StringIO(data), delim_whitespace=True, header=None)\n result = reader.read()\n\n tm.assert_numpy_array_equal(\n result[0], np.array([\"a\", \"a\", \"a\"], dtype=np.object_)\n )\n tm.assert_numpy_array_equal(\n result[1], np.array([\"b\", \"b\", \"b\"], dtype=np.object_)\n )\n\n def test_embedded_newline(self):\n data = 'a\\n\"hello\\nthere\"\\nthis'\n\n reader = TextReader(StringIO(data), header=None)\n result = reader.read()\n\n expected = np.array([\"a\", \"hello\\nthere\", \"this\"], dtype=np.object_)\n tm.assert_numpy_array_equal(result[0], expected)\n\n def test_euro_decimal(self):\n data = \"12345,67\\n345,678\"\n\n reader = TextReader(StringIO(data), delimiter=\":\", decimal=\",\", header=None)\n result = reader.read()\n\n expected = np.array([12345.67, 345.678])\n tm.assert_almost_equal(result[0], expected)\n\n def test_integer_thousands(self):\n data = \"123,456\\n12,500\"\n\n reader = TextReader(StringIO(data), delimiter=\":\", thousands=\",\", header=None)\n result = reader.read()\n\n expected = np.array([123456, 12500], dtype=np.int64)\n tm.assert_almost_equal(result[0], expected)\n\n def test_integer_thousands_alt(self):\n data = \"123.456\\n12.500\"\n\n reader = TextFileReader(\n StringIO(data), delimiter=\":\", thousands=\".\", header=None\n )\n result = reader.read()\n\n expected = DataFrame([123456, 12500])\n 
tm.assert_frame_equal(result, expected)\n\n def test_skip_bad_lines(self, capsys):\n # too many lines, see #2430 for why\n data = \"a:b:c\\nd:e:f\\ng:h:i\\nj:k:l:m\\nl:m:n\\no:p:q:r\"\n\n reader = TextReader(StringIO(data), delimiter=\":\", header=None)\n msg = r\"Error tokenizing data\\. C error: Expected 3 fields in line 4, saw 4\"\n with pytest.raises(parser.ParserError, match=msg):\n reader.read()\n\n reader = TextReader(\n StringIO(data),\n delimiter=\":\",\n header=None,\n error_bad_lines=False,\n warn_bad_lines=False,\n )\n result = reader.read()\n expected = {\n 0: np.array([\"a\", \"d\", \"g\", \"l\"], dtype=object),\n 1: np.array([\"b\", \"e\", \"h\", \"m\"], dtype=object),\n 2: np.array([\"c\", \"f\", \"i\", \"n\"], dtype=object),\n }\n assert_array_dicts_equal(result, expected)\n\n reader = TextReader(\n StringIO(data),\n delimiter=\":\",\n header=None,\n error_bad_lines=False,\n warn_bad_lines=True,\n )\n reader.read()\n captured = capsys.readouterr()\n\n assert \"Skipping line 4\" in captured.err\n assert \"Skipping line 6\" in captured.err\n\n def test_header_not_enough_lines(self):\n data = \"skip this\\nskip this\\na,b,c\\n1,2,3\\n4,5,6\"\n\n reader = TextReader(StringIO(data), delimiter=\",\", header=2)\n header = reader.header\n expected = [[\"a\", \"b\", \"c\"]]\n assert header == expected\n\n recs = reader.read()\n expected = {\n 0: np.array([1, 4], dtype=np.int64),\n 1: np.array([2, 5], dtype=np.int64),\n 2: np.array([3, 6], dtype=np.int64),\n }\n assert_array_dicts_equal(recs, expected)\n\n def test_escapechar(self):\n data = '\\\\\"hello world\"\\n\\\\\"hello world\"\\n\\\\\"hello world\"'\n\n reader = TextReader(StringIO(data), delimiter=\",\", header=None, escapechar=\"\\\\\")\n result = reader.read()\n expected = {0: np.array(['\"hello world\"'] * 3, dtype=object)}\n assert_array_dicts_equal(result, expected)\n\n def test_eof_has_eol(self):\n # handling of new line at EOF\n pass\n\n def test_na_substitution(self):\n pass\n\n def test_numpy_string_dtype(self):\n data = \"\"\"\\\na,1\naa,2\naaa,3\naaaa,4\naaaaa,5\"\"\"\n\n def _make_reader(**kwds):\n return TextReader(StringIO(data), delimiter=\",\", header=None, **kwds)\n\n reader = _make_reader(dtype=\"S5,i4\")\n result = reader.read()\n\n assert result[0].dtype == \"S5\"\n\n ex_values = np.array([\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\"], dtype=\"S5\")\n assert (result[0] == ex_values).all()\n assert result[1].dtype == \"i4\"\n\n reader = _make_reader(dtype=\"S4\")\n result = reader.read()\n assert result[0].dtype == \"S4\"\n ex_values = np.array([\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaa\"], dtype=\"S4\")\n assert (result[0] == ex_values).all()\n assert result[1].dtype == \"S4\"\n\n def test_pass_dtype(self):\n data = \"\"\"\\\none,two\n1,a\n2,b\n3,c\n4,d\"\"\"\n\n def _make_reader(**kwds):\n return TextReader(StringIO(data), delimiter=\",\", **kwds)\n\n reader = _make_reader(dtype={\"one\": \"u1\", 1: \"S1\"})\n result = reader.read()\n assert result[0].dtype == \"u1\"\n assert result[1].dtype == \"S1\"\n\n reader = _make_reader(dtype={\"one\": np.uint8, 1: object})\n result = reader.read()\n assert result[0].dtype == \"u1\"\n assert result[1].dtype == \"O\"\n\n reader = _make_reader(dtype={\"one\": np.dtype(\"u1\"), 1: np.dtype(\"O\")})\n result = reader.read()\n assert result[0].dtype == \"u1\"\n assert result[1].dtype == \"O\"\n\n def test_usecols(self):\n data = \"\"\"\\\na,b,c\n1,2,3\n4,5,6\n7,8,9\n10,11,12\"\"\"\n\n def _make_reader(**kwds):\n return TextReader(StringIO(data), delimiter=\",\", 
**kwds)\n\n reader = _make_reader(usecols=(1, 2))\n result = reader.read()\n\n exp = _make_reader().read()\n assert len(result) == 2\n assert (result[1] == exp[1]).all()\n assert (result[2] == exp[2]).all()\n\n def test_cr_delimited(self):\n def _test(text, **kwargs):\n nice_text = text.replace(\"\\r\", \"\\r\\n\")\n result = TextReader(StringIO(text), **kwargs).read()\n expected = TextReader(StringIO(nice_text), **kwargs).read()\n assert_array_dicts_equal(result, expected)\n\n data = \"a,b,c\\r1,2,3\\r4,5,6\\r7,8,9\\r10,11,12\"\n _test(data, delimiter=\",\")\n\n data = \"a b c\\r1 2 3\\r4 5 6\\r7 8 9\\r10 11 12\"\n _test(data, delim_whitespace=True)\n\n data = \"a,b,c\\r1,2,3\\r4,5,6\\r,88,9\\r10,11,12\"\n _test(data, delimiter=\",\")\n\n sample = (\n \"A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\\r\"\n \"AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\\r\"\n \",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\"\n )\n _test(sample, delimiter=\",\")\n\n data = \"A B C\\r 2 3\\r4 5 6\"\n _test(data, delim_whitespace=True)\n\n data = \"A B C\\r2 3\\r4 5 6\"\n _test(data, delim_whitespace=True)\n\n def test_empty_field_eof(self):\n data = \"a,b,c\\n1,2,3\\n4,,\"\n\n result = TextReader(StringIO(data), delimiter=\",\").read()\n\n expected = {\n 0: np.array([1, 4], dtype=np.int64),\n 1: np.array([\"2\", \"\"], dtype=object),\n 2: np.array([\"3\", \"\"], dtype=object),\n }\n assert_array_dicts_equal(result, expected)\n\n # GH5664\n a = DataFrame([[\"b\"], [np.nan]], columns=[\"a\"], index=[\"a\", \"c\"])\n b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list(\"abcd\"), index=[1, 1])\n c = DataFrame(\n [\n [1, 2, 3, 4],\n [6, np.nan, np.nan, np.nan],\n [8, 9, 10, 11],\n [13, 14, np.nan, np.nan],\n ],\n columns=list(\"abcd\"),\n index=[0, 5, 7, 12],\n )\n\n for _ in range(100):\n df = read_csv(StringIO(\"a,b\\nc\\n\"), skiprows=0, names=[\"a\"], engine=\"c\")\n tm.assert_frame_equal(df, a)\n\n df = read_csv(\n StringIO(\"1,1,1,1,0\\n\" * 2 + \"\\n\" * 2), names=list(\"abcd\"), engine=\"c\"\n )\n tm.assert_frame_equal(df, b)\n\n df = read_csv(\n StringIO(\"0,1,2,3,4\\n5,6\\n7,8,9,10,11\\n12,13,14\"),\n names=list(\"abcd\"),\n engine=\"c\",\n )\n tm.assert_frame_equal(df, c)\n\n def test_empty_csv_input(self):\n # GH14867\n df = read_csv(StringIO(), chunksize=20, header=None, names=[\"a\", \"b\", \"c\"])\n assert isinstance(df, TextFileReader)\n\n\ndef assert_array_dicts_equal(left, right):\n for k, v in left.items():\n tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))\n",
"from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY37\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n qcut,\n)\nimport pandas._testing as tm\n\n\ndef cartesian_product_for_groupers(result, args, names):\n \"\"\" Reindex to a cartesian production for the groupers,\n preserving the nature (Categorical) of each grouper \"\"\"\n\n def f(a):\n if isinstance(a, (CategoricalIndex, Categorical)):\n categories = a.categories\n a = Categorical.from_codes(\n np.arange(len(categories)), categories=categories, ordered=a.ordered\n )\n return a\n\n index = MultiIndex.from_product(map(f, args), names=names)\n return result.reindex(index).sort_index()\n\n\ndef test_apply_use_categorical_name(df):\n cats = qcut(df.C, 4)\n\n def get_stats(group):\n return {\n \"min\": group.min(),\n \"max\": group.max(),\n \"count\": group.count(),\n \"mean\": group.mean(),\n }\n\n result = df.groupby(cats, observed=False).D.apply(get_stats)\n assert result.index.names[0] == \"C\"\n\n\ndef test_basic():\n\n cats = Categorical(\n [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"],\n ordered=True,\n )\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n exp_index = CategoricalIndex(list(\"abcd\"), name=\"b\", ordered=True)\n expected = DataFrame({\"a\": [1, 2, 4, np.nan]}, index=exp_index)\n result = data.groupby(\"b\", observed=False).mean()\n tm.assert_frame_equal(result, expected)\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n\n # single grouper\n gb = df.groupby(\"A\", observed=False)\n exp_idx = CategoricalIndex([\"a\", \"b\", \"z\"], name=\"A\", ordered=True)\n expected = DataFrame({\"values\": Series([3, 7, 0], index=exp_idx)})\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 8623\n x = DataFrame(\n [[1, \"John P. Doe\"], [2, \"Jane Dove\"], [1, \"John P. 
Doe\"]],\n columns=[\"person_id\", \"person_name\"],\n )\n x[\"person_name\"] = Categorical(x.person_name)\n\n g = x.groupby([\"person_id\"], observed=False)\n result = g.transform(lambda x: x)\n tm.assert_frame_equal(result, x[[\"person_name\"]])\n\n result = x.drop_duplicates(\"person_name\")\n expected = x.iloc[[0, 1]]\n tm.assert_frame_equal(result, expected)\n\n def f(x):\n return x.drop_duplicates(\"person_name\").iloc[0]\n\n result = g.apply(f)\n expected = x.iloc[[0, 1]].copy()\n expected.index = Index([1, 2], name=\"person_id\")\n expected[\"person_name\"] = expected[\"person_name\"].astype(\"object\")\n tm.assert_frame_equal(result, expected)\n\n # GH 9921\n # Monotonic\n df = DataFrame({\"a\": [5, 15, 25]})\n c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df[\"a\"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[\"a\"]\n )\n tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[[\"a\"]])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[[\"a\"]]\n )\n\n # Filter\n tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df[\"a\"])\n tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)\n\n # Non-monotonic\n df = DataFrame({\"a\": [5, 15, 25, -5]})\n c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])\n\n result = df.a.groupby(c, observed=False).transform(sum)\n tm.assert_series_equal(result, df[\"a\"])\n\n tm.assert_series_equal(\n df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[\"a\"]\n )\n tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[[\"a\"]])\n tm.assert_frame_equal(\n df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[[\"a\"]]\n )\n\n # GH 9603\n df = DataFrame({\"a\": [1, 0, 0, 0]})\n c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list(\"abcd\")))\n result = df.groupby(c, observed=False).apply(len)\n\n exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)\n expected = Series([1, 0, 0, 0], index=exp_index)\n expected.index.name = \"a\"\n tm.assert_series_equal(result, expected)\n\n # more basic\n levels = [\"foo\", \"bar\", \"baz\", \"qux\"]\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)\n expected = expected.reindex(exp_idx)\n\n tm.assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = np.asarray(cats).take(idx)\n ord_data = data.take(idx)\n\n exp_cats = Categorical(\n ord_labels, ordered=True, categories=[\"foo\", \"bar\", \"baz\", \"qux\"]\n )\n expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()\n tm.assert_frame_equal(desc_result, expected)\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)\n exp = Index([\"count\", \"mean\", \"std\", \"min\", \"25%\", \"50%\", \"75%\", \"max\"] * 4)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)\n\n\ndef 
test_level_get_group(observed):\n # GH15155\n df = DataFrame(\n data=np.arange(2, 22, 2),\n index=MultiIndex(\n levels=[CategoricalIndex([\"a\", \"b\"]), range(10)],\n codes=[[0] * 5 + [1] * 5, range(10)],\n names=[\"Index1\", \"Index2\"],\n ),\n )\n g = df.groupby(level=[\"Index1\"], observed=observed)\n\n # expected should equal test.loc[[\"a\"]]\n # GH15166\n expected = DataFrame(\n data=np.arange(2, 12, 2),\n index=MultiIndex(\n levels=[CategoricalIndex([\"a\", \"b\"]), range(5)],\n codes=[[0] * 5, range(5)],\n names=[\"Index1\", \"Index2\"],\n ),\n )\n result = g.get_group(\"a\")\n\n tm.assert_frame_equal(result, expected)\n\n\n# GH#21636 flaky on py37; may be related to older numpy, see discussion\n# https://github.com/MacPython/pandas-wheels/pull/64\[email protected](PY37, reason=\"Flaky, GH-27902\", strict=False)\[email protected](\"ordered\", [True, False])\ndef test_apply(ordered):\n # GH 10138\n\n dense = Categorical(list(\"abc\"), ordered=ordered)\n\n # 'b' is in the categories but not in the list\n missing = Categorical(list(\"aaa\"), categories=[\"a\", \"b\"], ordered=ordered)\n values = np.arange(len(dense))\n df = DataFrame({\"missing\": missing, \"dense\": dense, \"values\": values})\n grouped = df.groupby([\"missing\", \"dense\"], observed=True)\n\n # missing category 'b' should still exist in the output index\n idx = MultiIndex.from_arrays([missing, dense], names=[\"missing\", \"dense\"])\n expected = DataFrame([0, 1, 2.0], index=idx, columns=[\"values\"])\n\n # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])\n # is coming back as Series([0., 1., 0.], index=[\"missing\", \"dense\", \"values\"])\n # when we expect Series(0., index=[\"values\"])\n result = grouped.apply(lambda x: np.mean(x))\n tm.assert_frame_equal(result, expected)\n\n # we coerce back to ints\n expected = expected.astype(\"int\")\n result = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result = grouped.agg(np.mean)\n tm.assert_frame_equal(result, expected)\n\n # but for transform we should still get back the original index\n idx = MultiIndex.from_arrays([missing, dense], names=[\"missing\", \"dense\"])\n expected = Series(1, index=idx)\n result = grouped.apply(lambda x: 1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_observed(observed):\n # multiple groupers, don't re-expand the output space\n # of the grouper\n # gh-14942 (implement)\n # gh-10132 (back-compat)\n # gh-8138 (back-compat)\n # gh-8869\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"], categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n df[\"C\"] = [\"foo\", \"bar\"] * 2\n\n # multiple groupers with a non-cat\n gb = df.groupby([\"A\", \"B\", \"C\"], observed=observed)\n exp_index = MultiIndex.from_arrays(\n [cat1, cat2, [\"foo\", \"bar\"] * 2], names=[\"A\", \"B\", \"C\"]\n )\n expected = DataFrame({\"values\": Series([1, 2, 3, 4], index=exp_index)}).sort_index()\n result = gb.sum()\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [cat1, cat2, [\"foo\", \"bar\"]], list(\"ABC\")\n )\n\n tm.assert_frame_equal(result, expected)\n\n gb = df.groupby([\"A\", \"B\"], observed=observed)\n exp_index = MultiIndex.from_arrays([cat1, cat2], names=[\"A\", \"B\"])\n expected = DataFrame({\"values\": [1, 2, 3, 4]}, index=exp_index)\n result = gb.sum()\n if not observed:\n expected = 
cartesian_product_for_groupers(expected, [cat1, cat2], list(\"AB\"))\n\n tm.assert_frame_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/issues/8138\n d = {\n \"cat\": Categorical(\n [\"a\", \"b\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"], ordered=True\n ),\n \"ints\": [1, 1, 2, 2],\n \"val\": [10, 20, 30, 40],\n }\n df = DataFrame(d)\n\n # Grouping on a single column\n groups_single_key = df.groupby(\"cat\", observed=observed)\n result = groups_single_key.mean()\n\n exp_index = CategoricalIndex(\n list(\"ab\"), name=\"cat\", categories=list(\"abc\"), ordered=True\n )\n expected = DataFrame({\"ints\": [1.5, 1.5], \"val\": [20.0, 30]}, index=exp_index)\n if not observed:\n index = CategoricalIndex(\n list(\"abc\"), name=\"cat\", categories=list(\"abc\"), ordered=True\n )\n expected = expected.reindex(index)\n\n tm.assert_frame_equal(result, expected)\n\n # Grouping on two columns\n groups_double_key = df.groupby([\"cat\", \"ints\"], observed=observed)\n result = groups_double_key.agg(\"mean\")\n expected = DataFrame(\n {\n \"val\": [10, 30, 20, 40],\n \"cat\": Categorical(\n [\"a\", \"a\", \"b\", \"b\"], categories=[\"a\", \"b\", \"c\"], ordered=True\n ),\n \"ints\": [1, 2, 1, 2],\n }\n ).set_index([\"cat\", \"ints\"])\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [df.cat.values, [1, 2]], [\"cat\", \"ints\"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n # GH 10132\n for key in [(\"a\", 1), (\"b\", 2), (\"b\", 1), (\"a\", 2)]:\n c, i = key\n result = groups_double_key.get_group(key)\n expected = df[(df.cat == c) & (df.ints == i)]\n tm.assert_frame_equal(result, expected)\n\n # gh-8869\n # with as_index\n d = {\n \"foo\": [10, 8, 4, 8, 4, 1, 1],\n \"bar\": [10, 20, 30, 40, 50, 60, 70],\n \"baz\": [\"d\", \"c\", \"e\", \"a\", \"a\", \"d\", \"c\"],\n }\n df = DataFrame(d)\n cat = pd.cut(df[\"foo\"], np.linspace(0, 10, 3))\n df[\"range\"] = cat\n groups = df.groupby([\"range\", \"baz\"], as_index=False, observed=observed)\n result = groups.agg(\"mean\")\n\n groups2 = df.groupby([\"range\", \"baz\"], as_index=True, observed=observed)\n expected = groups2.agg(\"mean\").reset_index()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_codes_remap(observed):\n d = {\"C1\": [3, 3, 4, 5], \"C2\": [1, 2, 3, 4], \"C3\": [10, 100, 200, 34]}\n df = DataFrame(d)\n values = pd.cut(df[\"C1\"], [1, 2, 3, 6])\n values.name = \"cat\"\n groups_double_key = df.groupby([values, \"C2\"], observed=observed)\n\n idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=[\"cat\", \"C2\"])\n expected = DataFrame({\"C1\": [3, 3, 4, 5], \"C3\": [10, 100, 200, 34]}, index=idx)\n if not observed:\n expected = cartesian_product_for_groupers(\n expected, [values.values, [1, 2, 3, 4]], [\"cat\", \"C2\"]\n )\n\n result = groups_double_key.agg(\"mean\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_observed_perf():\n # we create a cartesian product, so this is\n # non-performant if we don't use observed values\n # gh-14942\n df = DataFrame(\n {\n \"cat\": np.random.randint(0, 255, size=30000),\n \"int_id\": np.random.randint(0, 255, size=30000),\n \"other_id\": np.random.randint(0, 10000, size=30000),\n \"foo\": 0,\n }\n )\n df[\"cat\"] = df.cat.astype(str).astype(\"category\")\n\n grouped = df.groupby([\"cat\", \"int_id\", \"other_id\"], observed=True)\n result = grouped.count()\n assert result.index.levels[0].nunique() == df.cat.nunique()\n assert result.index.levels[1].nunique() == df.int_id.nunique()\n assert 
result.index.levels[2].nunique() == df.other_id.nunique()\n\n\ndef test_observed_groups(observed):\n # gh-20583\n # test that we have the appropriate groups\n\n cat = Categorical([\"a\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\"])\n df = DataFrame({\"cat\": cat, \"vals\": [1, 2, 3]})\n g = df.groupby(\"cat\", observed=observed)\n\n result = g.groups\n if observed:\n expected = {\"a\": Index([0, 2], dtype=\"int64\"), \"c\": Index([1], dtype=\"int64\")}\n else:\n expected = {\n \"a\": Index([0, 2], dtype=\"int64\"),\n \"b\": Index([], dtype=\"int64\"),\n \"c\": Index([1], dtype=\"int64\"),\n }\n\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_groups_with_nan(observed):\n # GH 24740\n df = DataFrame(\n {\n \"cat\": Categorical([\"a\", np.nan, \"a\"], categories=[\"a\", \"b\", \"d\"]),\n \"vals\": [1, 2, 3],\n }\n )\n g = df.groupby(\"cat\", observed=observed)\n result = g.groups\n if observed:\n expected = {\"a\": Index([0, 2], dtype=\"int64\")}\n else:\n expected = {\n \"a\": Index([0, 2], dtype=\"int64\"),\n \"b\": Index([], dtype=\"int64\"),\n \"d\": Index([], dtype=\"int64\"),\n }\n tm.assert_dict_equal(result, expected)\n\n\ndef test_observed_nth():\n # GH 26385\n cat = pd.Categorical([\"a\", np.nan, np.nan], categories=[\"a\", \"b\", \"c\"])\n ser = pd.Series([1, 2, 3])\n df = pd.DataFrame({\"cat\": cat, \"ser\": ser})\n\n result = df.groupby(\"cat\", observed=False)[\"ser\"].nth(0)\n\n index = pd.Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"])\n expected = pd.Series([1, np.nan, np.nan], index=index, name=\"ser\")\n expected.index.name = \"cat\"\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_dataframe_categorical_with_nan(observed):\n # GH 21151\n s1 = Categorical([np.nan, \"a\", np.nan, \"a\"], categories=[\"a\", \"b\", \"c\"])\n s2 = Series([1, 2, 3, 4])\n df = DataFrame({\"s1\": s1, \"s2\": s2})\n result = df.groupby(\"s1\", observed=observed).first().reset_index()\n if observed:\n expected = DataFrame(\n {\"s1\": Categorical([\"a\"], categories=[\"a\", \"b\", \"c\"]), \"s2\": [2]}\n )\n else:\n expected = DataFrame(\n {\n \"s1\": Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"]),\n \"s2\": [2, np.nan, np.nan],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"ordered\", [True, False])\[email protected](\"observed\", [True, False])\[email protected](\"sort\", [True, False])\ndef test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):\n # GH 25871: Fix groupby sorting on ordered Categoricals\n # GH 25167: Groupby with observed=True doesn't sort\n\n # Build a dataframe with cat having one unobserved category ('missing'),\n # and a Series with identical values\n label = Categorical(\n [\"d\", \"a\", \"b\", \"a\", \"d\", \"b\"],\n categories=[\"a\", \"b\", \"missing\", \"d\"],\n ordered=ordered,\n )\n val = Series([\"d\", \"a\", \"b\", \"a\", \"d\", \"b\"])\n df = DataFrame({\"label\": label, \"val\": val})\n\n # aggregate on the Categorical\n result = df.groupby(\"label\", observed=observed, sort=sort)[\"val\"].aggregate(\"first\")\n\n # If ordering works, we expect index labels equal to aggregation results,\n # except for 'observed=False': label 'missing' has aggregation None\n label = Series(result.index.array, dtype=\"object\")\n aggr = Series(result.array)\n if not observed:\n aggr[aggr.isna()] = \"missing\"\n if not all(label == aggr):\n msg = (\n f\"Labels and aggregation results not consistently sorted\\n\"\n + \"for (ordered={ordered}, observed={observed}, 
sort={sort})\\n\"\n + \"Result:\\n{result}\"\n )\n assert False, msg\n\n\ndef test_datetime():\n # GH9049: ensure backward compatibility\n levels = pd.date_range(\"2014-01-01\", periods=4)\n codes = np.random.randint(0, 4, size=100)\n\n cats = Categorical.from_codes(codes, levels, ordered=True)\n\n data = DataFrame(np.random.randn(100, 4))\n result = data.groupby(cats, observed=False).mean()\n\n expected = data.groupby(np.asarray(cats), observed=False).mean()\n expected = expected.reindex(levels)\n expected.index = CategoricalIndex(\n expected.index, categories=expected.index, ordered=True\n )\n\n tm.assert_frame_equal(result, expected)\n\n grouped = data.groupby(cats, observed=False)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = cats.take(idx)\n ord_data = data.take(idx)\n expected = ord_data.groupby(ord_labels, observed=False).describe()\n tm.assert_frame_equal(desc_result, expected)\n tm.assert_index_equal(desc_result.index, expected.index)\n tm.assert_index_equal(\n desc_result.index.get_level_values(0), expected.index.get_level_values(0)\n )\n\n # GH 10460\n expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)\n exp = CategoricalIndex(expc)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)\n exp = Index([\"count\", \"mean\", \"std\", \"min\", \"25%\", \"50%\", \"75%\", \"max\"] * 4)\n tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)\n\n\ndef test_categorical_index():\n\n s = np.random.RandomState(12345)\n levels = [\"foo\", \"bar\", \"baz\", \"qux\"]\n codes = s.randint(0, 4, size=20)\n cats = Categorical.from_codes(codes, levels, ordered=True)\n df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list(\"abcd\"))\n df[\"cats\"] = cats\n\n # with a cat index\n result = df.set_index(\"cats\").groupby(level=0, observed=False).sum()\n expected = df[list(\"abcd\")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name=\"cats\"\n )\n tm.assert_frame_equal(result, expected)\n\n # with a cat column, should produce a cat index\n result = df.groupby(\"cats\", observed=False).sum()\n expected = df[list(\"abcd\")].groupby(cats.codes, observed=False).sum()\n expected.index = CategoricalIndex(\n Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name=\"cats\"\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_describe_categorical_columns():\n # GH 11558\n cats = CategoricalIndex(\n [\"qux\", \"foo\", \"baz\", \"bar\"],\n categories=[\"foo\", \"bar\", \"baz\", \"qux\"],\n ordered=True,\n )\n df = DataFrame(np.random.randn(20, 4), columns=cats)\n result = df.groupby([1, 2, 3, 4] * 5).describe()\n\n tm.assert_index_equal(result.stack().columns, cats)\n tm.assert_categorical_equal(result.stack().columns.values, cats.values)\n\n\ndef test_unstack_categorical():\n # GH11558 (example is taken from the original issue)\n df = DataFrame(\n {\"a\": range(10), \"medium\": [\"A\", \"B\"] * 5, \"artist\": list(\"XYXXY\") * 2}\n )\n df[\"medium\"] = df[\"medium\"].astype(\"category\")\n\n gcat = df.groupby([\"artist\", \"medium\"], observed=False)[\"a\"].count().unstack()\n result = gcat.describe()\n\n exp_columns = CategoricalIndex([\"A\", \"B\"], ordered=False, name=\"medium\")\n tm.assert_index_equal(result.columns, exp_columns)\n tm.assert_categorical_equal(result.columns.values, exp_columns.values)\n\n result = gcat[\"A\"] + gcat[\"B\"]\n expected = Series([6, 4], 
index=Index([\"X\", \"Y\"], name=\"artist\"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_bins_unequal_len():\n # GH3011\n series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n bins = pd.cut(series.dropna().values, 4)\n\n # len(bins) != len(series) here\n with pytest.raises(ValueError):\n series.groupby(bins).mean()\n\n\ndef test_as_index():\n # GH13204\n df = DataFrame(\n {\n \"cat\": Categorical([1, 2, 2], [1, 2, 3]),\n \"A\": [10, 11, 11],\n \"B\": [101, 102, 103],\n }\n )\n result = df.groupby([\"cat\", \"A\"], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 11],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # function grouper\n f = lambda r: df.loc[r, \"A\"]\n result = df.groupby([\"cat\", f], as_index=False, observed=True).sum()\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 22],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # another not in-axis grouper (conflicting names in index)\n s = Series([\"a\", \"b\", \"b\"], name=\"cat\")\n result = df.groupby([\"cat\", s], as_index=False, observed=True).sum()\n tm.assert_frame_equal(result, expected)\n\n # is original index dropped?\n group_columns = [\"cat\", \"A\"]\n expected = DataFrame(\n {\n \"cat\": Categorical([1, 2], categories=df.cat.cat.categories),\n \"A\": [10, 11],\n \"B\": [101, 205],\n },\n columns=[\"cat\", \"A\", \"B\"],\n )\n\n for name in [None, \"X\", \"B\"]:\n df.index = Index(list(\"abc\"), name=name)\n result = df.groupby(group_columns, as_index=False, observed=True).sum()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_preserve_categories():\n # GH-13179\n categories = list(\"abc\")\n\n # ordered=True\n df = DataFrame({\"A\": Categorical(list(\"ba\"), categories=categories, ordered=True)})\n index = CategoricalIndex(categories, categories, ordered=True, name=\"A\")\n tm.assert_index_equal(\n df.groupby(\"A\", sort=True, observed=False).first().index, index\n )\n tm.assert_index_equal(\n df.groupby(\"A\", sort=False, observed=False).first().index, index\n )\n\n # ordered=False\n df = DataFrame({\"A\": Categorical(list(\"ba\"), categories=categories, ordered=False)})\n sort_index = CategoricalIndex(categories, categories, ordered=False, name=\"A\")\n nosort_index = CategoricalIndex(list(\"bac\"), list(\"bac\"), ordered=False, name=\"A\")\n tm.assert_index_equal(\n df.groupby(\"A\", sort=True, observed=False).first().index, sort_index\n )\n tm.assert_index_equal(\n df.groupby(\"A\", sort=False, observed=False).first().index, nosort_index\n )\n\n\ndef test_preserve_categorical_dtype():\n # GH13743, GH13854\n df = DataFrame(\n {\n \"A\": [1, 2, 1, 1, 2],\n \"B\": [10, 16, 22, 28, 34],\n \"C1\": Categorical(list(\"abaab\"), categories=list(\"bac\"), ordered=False),\n \"C2\": Categorical(list(\"abaab\"), categories=list(\"bac\"), ordered=True),\n }\n )\n # single grouper\n exp_full = DataFrame(\n {\n \"A\": [2.0, 1.0, np.nan],\n \"B\": [25.0, 20.0, np.nan],\n \"C1\": Categorical(list(\"bac\"), categories=list(\"bac\"), ordered=False),\n \"C2\": Categorical(list(\"bac\"), categories=list(\"bac\"), ordered=True),\n }\n )\n for col in [\"C1\", \"C2\"]:\n result1 = df.groupby(by=col, as_index=False, observed=False).mean()\n result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()\n expected = 
exp_full.reindex(columns=result1.columns)\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n\n\[email protected](\n \"func, values\",\n [\n (\"first\", [\"second\", \"first\"]),\n (\"last\", [\"fourth\", \"third\"]),\n (\"min\", [\"fourth\", \"first\"]),\n (\"max\", [\"second\", \"third\"]),\n ],\n)\ndef test_preserve_on_ordered_ops(func, values):\n # gh-18502\n # preserve the categoricals on ops\n c = pd.Categorical([\"first\", \"second\", \"third\", \"fourth\"], ordered=True)\n df = pd.DataFrame({\"payload\": [-1, -2, -1, -2], \"col\": c})\n g = df.groupby(\"payload\")\n result = getattr(g, func)()\n expected = pd.DataFrame(\n {\"payload\": [-2, -1], \"col\": pd.Series(values, dtype=c.dtype)}\n ).set_index(\"payload\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_no_compress():\n data = Series(np.random.randn(9))\n\n codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean()\n\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n tm.assert_series_equal(result, exp)\n\n codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])\n cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)\n\n result = data.groupby(cats, observed=False).mean()\n exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)\n exp.index = CategoricalIndex(\n exp.index, categories=cats.categories, ordered=cats.ordered\n )\n tm.assert_series_equal(result, exp)\n\n cats = Categorical(\n [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\", \"b\", \"c\", \"d\"],\n ordered=True,\n )\n data = DataFrame({\"a\": [1, 1, 1, 2, 2, 2, 3, 4, 5], \"b\": cats})\n\n result = data.groupby(\"b\", observed=False).mean()\n result = result[\"a\"].values\n exp = np.array([1, 2, 4, np.nan])\n tm.assert_numpy_array_equal(result, exp)\n\n\ndef test_groupby_empty_with_category():\n # GH-9614\n # test fix for when group by on None resulted in\n # coercion of dtype categorical -> float\n df = pd.DataFrame(\n {\"A\": [None] * 3, \"B\": pd.Categorical([\"train\", \"train\", \"test\"])}\n )\n result = df.groupby(\"A\").first()[\"B\"]\n expected = pd.Series(\n pd.Categorical([], categories=[\"test\", \"train\"]),\n index=pd.Series([], dtype=\"object\", name=\"A\"),\n name=\"B\",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_sort():\n\n # https://stackoverflow.com/questions/23814368/sorting-pandas-\n # categorical-labels-after-groupby\n # This should result in a properly sorted Series so that the plot\n # has a sorted x axis\n # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')\n\n df = DataFrame({\"value\": np.random.randint(0, 10000, 100)})\n labels = [f\"{i} - {i+499}\" for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=[\"value\"], ascending=True)\n df[\"value_group\"] = pd.cut(\n df.value, range(0, 10500, 500), right=False, labels=cat_labels\n )\n\n res = df.groupby([\"value_group\"], observed=False)[\"value_group\"].count()\n exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]\n exp.index = CategoricalIndex(exp.index, name=exp.index.name)\n tm.assert_series_equal(res, exp)\n\n\ndef test_sort2():\n # dataframe groupby sort was being ignored # GH 8868\n df = DataFrame(\n [\n [\"(7.5, 10]\", 10, 10],\n [\"(7.5, 10]\", 8, 20],\n [\"(2.5, 5]\", 5, 
30],\n [\"(5, 7.5]\", 6, 40],\n [\"(2.5, 5]\", 4, 50],\n [\"(0, 2.5]\", 1, 60],\n [\"(5, 7.5]\", 7, 70],\n ],\n columns=[\"range\", \"foo\", \"bar\"],\n )\n df[\"range\"] = Categorical(df[\"range\"], ordered=True)\n index = CategoricalIndex(\n [\"(0, 2.5]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(7.5, 10]\"], name=\"range\", ordered=True\n )\n expected_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"], index=index\n )\n\n col = \"range\"\n result_sort = df.groupby(col, sort=True, observed=False).first()\n tm.assert_frame_equal(result_sort, expected_sort)\n\n # when categories is ordered, group is ordered by category's order\n expected_sort = result_sort\n result_sort = df.groupby(col, sort=False, observed=False).first()\n tm.assert_frame_equal(result_sort, expected_sort)\n\n df[\"range\"] = Categorical(df[\"range\"], ordered=False)\n index = CategoricalIndex(\n [\"(0, 2.5]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(7.5, 10]\"], name=\"range\"\n )\n expected_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"], index=index\n )\n\n index = CategoricalIndex(\n [\"(7.5, 10]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(0, 2.5]\"],\n categories=[\"(7.5, 10]\", \"(2.5, 5]\", \"(5, 7.5]\", \"(0, 2.5]\"],\n name=\"range\",\n )\n expected_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=[\"foo\", \"bar\"]\n )\n\n col = \"range\"\n\n # this is an unordered categorical, but we allow this ####\n result_sort = df.groupby(col, sort=True, observed=False).first()\n tm.assert_frame_equal(result_sort, expected_sort)\n\n result_nosort = df.groupby(col, sort=False, observed=False).first()\n tm.assert_frame_equal(result_nosort, expected_nosort)\n\n\ndef test_sort_datetimelike():\n # GH10505\n\n # use same data as test_groupby_sort_categorical, which category is\n # corresponding to datetime.month\n df = DataFrame(\n {\n \"dt\": [\n datetime(2011, 7, 1),\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 2, 1),\n datetime(2011, 1, 1),\n datetime(2011, 5, 1),\n ],\n \"foo\": [10, 8, 5, 6, 4, 1, 7],\n \"bar\": [10, 20, 30, 40, 50, 60, 70],\n },\n columns=[\"dt\", \"foo\", \"bar\"],\n )\n\n # ordered=True\n df[\"dt\"] = Categorical(df[\"dt\"], ordered=True)\n index = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"]\n )\n result_sort.index = CategoricalIndex(index, name=\"dt\", ordered=True)\n\n index = [\n datetime(2011, 7, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n result_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], columns=[\"foo\", \"bar\"]\n )\n result_nosort.index = CategoricalIndex(\n index, categories=index, name=\"dt\", ordered=True\n )\n\n col = \"dt\"\n tm.assert_frame_equal(\n result_sort, df.groupby(col, sort=True, observed=False).first()\n )\n\n # when categories is ordered, group is ordered by category's order\n tm.assert_frame_equal(\n result_sort, df.groupby(col, sort=False, observed=False).first()\n )\n\n # ordered = False\n df[\"dt\"] = Categorical(df[\"dt\"], ordered=False)\n index = [\n datetime(2011, 1, 1),\n datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 7, 1),\n ]\n result_sort = DataFrame(\n [[1, 60], [5, 30], [6, 40], [10, 10]], columns=[\"foo\", \"bar\"]\n )\n result_sort.index = CategoricalIndex(index, name=\"dt\")\n\n index = [\n datetime(2011, 7, 1),\n 
datetime(2011, 2, 1),\n datetime(2011, 5, 1),\n datetime(2011, 1, 1),\n ]\n result_nosort = DataFrame(\n [[10, 10], [5, 30], [6, 40], [1, 60]], columns=[\"foo\", \"bar\"]\n )\n result_nosort.index = CategoricalIndex(index, categories=index, name=\"dt\")\n\n col = \"dt\"\n tm.assert_frame_equal(\n result_sort, df.groupby(col, sort=True, observed=False).first()\n )\n tm.assert_frame_equal(\n result_nosort, df.groupby(col, sort=False, observed=False).first()\n )\n\n\ndef test_empty_sum():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {\"A\": Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]), \"B\": [1, 2, 1]}\n )\n expected_idx = CategoricalIndex([\"a\", \"b\", \"c\"], name=\"A\")\n\n # 0 by default\n result = df.groupby(\"A\", observed=False).B.sum()\n expected = Series([3, 1, 0], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.sum(min_count=0)\n expected = Series([3, 1, 0], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=1)\n expected = Series([3, 1, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count>1\n result = df.groupby(\"A\", observed=False).B.sum(min_count=2)\n expected = Series([3, np.nan, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_prod():\n # https://github.com/pandas-dev/pandas/issues/18678\n df = DataFrame(\n {\"A\": Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]), \"B\": [1, 2, 1]}\n )\n\n expected_idx = CategoricalIndex([\"a\", \"b\", \"c\"], name=\"A\")\n\n # 1 by default\n result = df.groupby(\"A\", observed=False).B.prod()\n expected = Series([2, 1, 1], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df.groupby(\"A\", observed=False).B.prod(min_count=0)\n expected = Series([2, 1, 1], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df.groupby(\"A\", observed=False).B.prod(min_count=1)\n expected = Series([2, 1, np.nan], expected_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_multiindex_categorical_datetime():\n # https://github.com/pandas-dev/pandas/issues/21390\n\n df = DataFrame(\n {\n \"key1\": Categorical(list(\"abcbabcba\")),\n \"key2\": Categorical(\n list(pd.date_range(\"2018-06-01 00\", freq=\"1T\", periods=3)) * 3\n ),\n \"values\": np.arange(9),\n }\n )\n result = df.groupby([\"key1\", \"key2\"]).mean()\n\n idx = MultiIndex.from_product(\n [\n Categorical([\"a\", \"b\", \"c\"]),\n Categorical(pd.date_range(\"2018-06-01 00\", freq=\"1T\", periods=3)),\n ],\n names=[\"key1\", \"key2\"],\n )\n expected = DataFrame({\"values\": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"as_index, expected\",\n [\n (\n True,\n Series(\n index=MultiIndex.from_arrays(\n [Series([1, 1, 2], dtype=\"category\"), [1, 2, 2]], names=[\"a\", \"b\"]\n ),\n data=[1, 2, 3],\n name=\"x\",\n ),\n ),\n (\n False,\n DataFrame(\n {\n \"a\": Series([1, 1, 2], dtype=\"category\"),\n \"b\": [1, 2, 2],\n \"x\": [1, 2, 3],\n }\n ),\n ),\n ],\n)\ndef test_groupby_agg_observed_true_single_column(as_index, expected):\n # GH-23970\n df = DataFrame(\n {\"a\": Series([1, 1, 2], dtype=\"category\"), \"b\": [1, 2, 2], \"x\": [1, 2, 3]}\n )\n\n result = 
df.groupby([\"a\", \"b\"], as_index=as_index, observed=True)[\"x\"].sum()\n\n tm.assert_equal(result, expected)\n\n\[email protected](\"fill_value\", [None, np.nan, pd.NaT])\ndef test_shift(fill_value):\n ct = Categorical(\n [\"a\", \"b\", \"c\", \"d\"], categories=[\"a\", \"b\", \"c\", \"d\"], ordered=False\n )\n expected = Categorical(\n [None, \"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\", \"d\"], ordered=False\n )\n res = ct.shift(1, fill_value=fill_value)\n tm.assert_equal(res, expected)\n\n\[email protected]\ndef df_cat(df):\n \"\"\"\n DataFrame with multiple categorical columns and a column of integers.\n Shortened so as not to contain all possible combinations of categories.\n Useful for testing `observed` kwarg functionality on GroupBy objects.\n\n Parameters\n ----------\n df: DataFrame\n Non-categorical, longer DataFrame from another fixture, used to derive\n this one\n\n Returns\n -------\n df_cat: DataFrame\n \"\"\"\n df_cat = df.copy()[:4] # leave out some groups\n df_cat[\"A\"] = df_cat[\"A\"].astype(\"category\")\n df_cat[\"B\"] = df_cat[\"B\"].astype(\"category\")\n df_cat[\"C\"] = Series([1, 2, 3, 4])\n df_cat = df_cat.drop([\"D\"], axis=1)\n return df_cat\n\n\[email protected](\n \"operation, kwargs\", [(\"agg\", dict(dtype=\"category\")), (\"apply\", dict())]\n)\ndef test_seriesgroupby_observed_true(df_cat, operation, kwargs):\n # GH 24880\n index = MultiIndex.from_frame(\n DataFrame(\n {\"A\": [\"foo\", \"foo\", \"bar\", \"bar\"], \"B\": [\"one\", \"two\", \"one\", \"three\"]},\n **kwargs,\n )\n )\n expected = Series(data=[1, 3, 2, 4], index=index, name=\"C\")\n grouped = df_cat.groupby([\"A\", \"B\"], observed=True)[\"C\"]\n result = getattr(grouped, operation)(sum)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"operation\", [\"agg\", \"apply\"])\[email protected](\"observed\", [False, None])\ndef test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):\n # GH 24880\n index, _ = MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n ],\n names=[\"A\", \"B\"],\n ).sortlevel()\n\n expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name=\"C\")\n grouped = df_cat.groupby([\"A\", \"B\"], observed=observed)[\"C\"]\n result = getattr(grouped, operation)(sum)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"observed, index, data\",\n [\n (\n True,\n MultiIndex.from_tuples(\n [\n (\"foo\", \"one\", \"min\"),\n (\"foo\", \"one\", \"max\"),\n (\"foo\", \"two\", \"min\"),\n (\"foo\", \"two\", \"max\"),\n (\"bar\", \"one\", \"min\"),\n (\"bar\", \"one\", \"max\"),\n (\"bar\", \"three\", \"min\"),\n (\"bar\", \"three\", \"max\"),\n ],\n names=[\"A\", \"B\", None],\n ),\n [1, 1, 3, 3, 2, 2, 4, 4],\n ),\n (\n False,\n MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n Index([\"min\", \"max\"]),\n ],\n names=[\"A\", \"B\", None],\n ),\n [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],\n ),\n (\n None,\n MultiIndex.from_product(\n [\n CategoricalIndex([\"bar\", \"foo\"], ordered=False),\n CategoricalIndex([\"one\", \"three\", \"two\"], ordered=False),\n Index([\"min\", \"max\"]),\n ],\n names=[\"A\", \"B\", None],\n ),\n [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],\n ),\n ],\n)\ndef test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):\n # GH 24880\n expected = 
Series(data=data, index=index, name=\"C\")\n result = df_cat.groupby([\"A\", \"B\"], observed=observed)[\"C\"].apply(\n lambda x: {\"min\": x.min(), \"max\": x.max()}\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_categorical_series_dataframe_consistent(df_cat):\n # GH 20416\n expected = df_cat.groupby([\"A\", \"B\"])[\"C\"].mean()\n result = df_cat.groupby([\"A\", \"B\"]).mean()[\"C\"]\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"code\", [([1, 0, 0]), ([0, 0, 0])])\ndef test_groupby_categorical_axis_1(code):\n # GH 13420\n df = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [-1, -2, -3, -4], \"c\": [5, 6, 7, 8]})\n cat = pd.Categorical.from_codes(code, categories=list(\"abc\"))\n result = df.groupby(cat, axis=1).mean()\n expected = df.T.groupby(cat, axis=0).mean().T\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_cat_preserves_structure(observed, ordered_fixture):\n # GH 28787\n df = DataFrame(\n {\"Name\": Categorical([\"Bob\", \"Greg\"], ordered=ordered_fixture), \"Item\": [1, 2]},\n columns=[\"Name\", \"Item\"],\n )\n expected = df.copy()\n\n result = (\n df.groupby(\"Name\", observed=observed)\n .agg(pd.DataFrame.sum, skipna=True)\n .reset_index()\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_get_nonexistent_category():\n # Accessing a Category that is not in the dataframe\n df = pd.DataFrame({\"var\": [\"a\", \"a\", \"b\", \"b\"], \"val\": range(4)})\n with pytest.raises(KeyError, match=\"'vau'\"):\n df.groupby(\"var\").apply(\n lambda rows: pd.DataFrame(\n {\"var\": [rows.iloc[-1][\"var\"]], \"val\": [rows.iloc[-1][\"vau\"]]}\n )\n )\n\n\ndef test_series_groupby_on_2_categoricals_unobserved(\n reduction_func: str, observed: bool\n):\n # GH 17605\n\n if reduction_func == \"ngroup\":\n pytest.skip(\"ngroup is not truly a reduction\")\n\n df = pd.DataFrame(\n {\n \"cat_1\": pd.Categorical(list(\"AABB\"), categories=list(\"ABCD\")),\n \"cat_2\": pd.Categorical(list(\"AB\") * 2, categories=list(\"ABCD\")),\n \"value\": [0.1] * 4,\n }\n )\n args = {\"nth\": [0]}.get(reduction_func, [])\n\n expected_length = 4 if observed else 16\n\n series_groupby = df.groupby([\"cat_1\", \"cat_2\"], observed=observed)[\"value\"]\n agg = getattr(series_groupby, reduction_func)\n result = agg(*args)\n\n assert len(result) == expected_length\n\n\[email protected](\n \"func, zero_or_nan\",\n [\n (\"all\", np.NaN),\n (\"any\", np.NaN),\n (\"count\", 0),\n (\"first\", np.NaN),\n (\"idxmax\", np.NaN),\n (\"idxmin\", np.NaN),\n (\"last\", np.NaN),\n (\"mad\", np.NaN),\n (\"max\", np.NaN),\n (\"mean\", np.NaN),\n (\"median\", np.NaN),\n (\"min\", np.NaN),\n (\"nth\", np.NaN),\n (\"nunique\", 0),\n (\"prod\", np.NaN),\n (\"quantile\", np.NaN),\n (\"sem\", np.NaN),\n (\"size\", 0),\n (\"skew\", np.NaN),\n (\"std\", np.NaN),\n (\"sum\", np.NaN),\n (\"var\", np.NaN),\n ],\n)\ndef test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(func, zero_or_nan):\n # GH 17605\n # Tests whether the unobserved categories in the result contain 0 or NaN\n df = pd.DataFrame(\n {\n \"cat_1\": pd.Categorical(list(\"AABB\"), categories=list(\"ABC\")),\n \"cat_2\": pd.Categorical(list(\"AB\") * 2, categories=list(\"ABC\")),\n \"value\": [0.1] * 4,\n }\n )\n unobserved = [tuple(\"AC\"), tuple(\"BC\"), tuple(\"CA\"), tuple(\"CB\"), tuple(\"CC\")]\n args = {\"nth\": [0]}.get(func, [])\n\n series_groupby = df.groupby([\"cat_1\", \"cat_2\"], observed=False)[\"value\"]\n agg = getattr(series_groupby, func)\n result = agg(*args)\n\n for idx in unobserved:\n val = 
result.loc[idx]\n assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)\n\n # If we expect unobserved values to be zero, we also expect the dtype to be int\n if zero_or_nan == 0:\n assert np.issubdtype(result.dtype, np.integer)\n\n\ndef test_series_groupby_categorical_aggregation_getitem():\n # GH 8870\n d = {\"foo\": [10, 8, 4, 1], \"bar\": [10, 20, 30, 40], \"baz\": [\"d\", \"c\", \"d\", \"c\"]}\n df = pd.DataFrame(d)\n cat = pd.cut(df[\"foo\"], np.linspace(0, 20, 5))\n df[\"range\"] = cat\n groups = df.groupby([\"range\", \"baz\"], as_index=True, sort=True)\n result = groups[\"foo\"].agg(\"mean\")\n expected = groups.agg(\"mean\")[\"foo\"]\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"func, expected_values\",\n [(pd.Series.nunique, [1, 1, 2]), (pd.Series.count, [1, 2, 2])],\n)\ndef test_groupby_agg_categorical_columns(func, expected_values):\n # 31256\n df = pd.DataFrame(\n {\n \"id\": [0, 1, 2, 3, 4],\n \"groups\": [0, 1, 1, 2, 2],\n \"value\": pd.Categorical([0, 0, 0, 0, 1]),\n }\n ).set_index(\"id\")\n result = df.groupby(\"groups\").agg(func)\n\n expected = pd.DataFrame(\n {\"value\": expected_values}, index=pd.Index([0, 1, 2], name=\"groups\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_agg_non_numeric():\n df = pd.DataFrame(\n {\"A\": pd.Categorical([\"a\", \"a\", \"b\"], categories=[\"a\", \"b\", \"c\"])}\n )\n expected = pd.DataFrame({\"A\": [2, 1]}, index=[1, 2])\n\n result = df.groupby([1, 2, 1]).agg(pd.Series.nunique)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby([1, 2, 1]).nunique()\n tm.assert_frame_equal(result, expected)\n",
"\"\"\"numpy.distutils.fcompiler\n\nContains FCompiler, an abstract base class that defines the interface\nfor the numpy.distutils Fortran compiler abstraction model.\n\nTerminology:\n\nTo be consistent, where the term 'executable' is used, it means the single\nfile, like 'gcc', that is executed, and should be a string. In contrast,\n'command' means the entire command line, like ['gcc', '-c', 'file.c'], and\nshould be a list.\n\nBut note that FCompiler.executables is actually a dictionary of commands.\n\n\"\"\"\n__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',\n 'dummy_fortran_file']\n\nimport os\nimport sys\nimport re\n\nfrom numpy.compat import open_latin1\n\nfrom distutils.sysconfig import get_python_lib\nfrom distutils.fancy_getopt import FancyGetopt\nfrom distutils.errors import DistutilsModuleError, \\\n DistutilsExecError, CompileError, LinkError, DistutilsPlatformError\nfrom distutils.util import split_quoted, strtobool\n\nfrom numpy.distutils.ccompiler import CCompiler, gen_lib_options\nfrom numpy.distutils import log\nfrom numpy.distutils.misc_util import is_string, all_strings, is_sequence, \\\n make_temp_file, get_shared_lib_extension\nfrom numpy.distutils.exec_command import find_executable\nfrom numpy.distutils import _shell_utils\n\nfrom .environment import EnvironmentConfig\n\n__metaclass__ = type\n\nclass CompilerNotFound(Exception):\n pass\n\ndef flaglist(s):\n if is_string(s):\n return split_quoted(s)\n else:\n return s\n\ndef str2bool(s):\n if is_string(s):\n return strtobool(s)\n return bool(s)\n\ndef is_sequence_of_strings(seq):\n return is_sequence(seq) and all_strings(seq)\n\nclass FCompiler(CCompiler):\n \"\"\"Abstract base class to define the interface that must be implemented\n by real Fortran compiler classes.\n\n Methods that subclasses may redefine:\n\n update_executables(), find_executables(), get_version()\n get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()\n get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),\n get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),\n get_flags_arch_f90(), get_flags_debug_f90(),\n get_flags_fix(), get_flags_linker_so()\n\n DON'T call these methods (except get_version) after\n constructing a compiler instance or inside any other method.\n All methods, except update_executables() and find_executables(),\n may call the get_version() method.\n\n After constructing a compiler instance, always call customize(dist=None)\n method that finalizes compiler construction and makes the following\n attributes available:\n compiler_f77\n compiler_f90\n compiler_fix\n linker_so\n archiver\n ranlib\n libraries\n library_dirs\n \"\"\"\n\n # These are the environment variables and distutils keys used.\n # Each configuration description is\n # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)\n # The hook names are handled by the self._environment_hook method.\n # - names starting with 'self.' call methods in this class\n # - names starting with 'exe.' 
return the key in the executables dict\n # - names like 'flags.YYY' return self.get_flag_YYY()\n # convert is either None or a function to convert a string to the\n # appropriate type used.\n\n distutils_vars = EnvironmentConfig(\n distutils_section='config_fc',\n noopt = (None, None, 'noopt', str2bool, False),\n noarch = (None, None, 'noarch', str2bool, False),\n debug = (None, None, 'debug', str2bool, False),\n verbose = (None, None, 'verbose', str2bool, False),\n )\n\n command_vars = EnvironmentConfig(\n distutils_section='config_fc',\n compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),\n compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),\n compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),\n version_cmd = ('exe.version_cmd', None, None, None, False),\n linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),\n linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),\n archiver = (None, 'AR', 'ar', None, False),\n ranlib = (None, 'RANLIB', 'ranlib', None, False),\n )\n\n flag_vars = EnvironmentConfig(\n distutils_section='config_fc',\n f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),\n f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),\n free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),\n fix = ('flags.fix', None, None, flaglist, False),\n opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),\n opt_f77 = ('flags.opt_f77', None, None, flaglist, False),\n opt_f90 = ('flags.opt_f90', None, None, flaglist, False),\n arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),\n arch_f77 = ('flags.arch_f77', None, None, flaglist, False),\n arch_f90 = ('flags.arch_f90', None, None, flaglist, False),\n debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),\n debug_f77 = ('flags.debug_f77', None, None, flaglist, False),\n debug_f90 = ('flags.debug_f90', None, None, flaglist, False),\n flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),\n linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),\n linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),\n ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),\n )\n\n language_map = {'.f': 'f77',\n '.for': 'f77',\n '.F': 'f77', # XXX: needs preprocessor\n '.ftn': 'f77',\n '.f77': 'f77',\n '.f90': 'f90',\n '.F90': 'f90', # XXX: needs preprocessor\n '.f95': 'f90',\n }\n language_order = ['f90', 'f77']\n\n\n # These will be set by the subclass\n\n compiler_type = None\n compiler_aliases = ()\n version_pattern = None\n\n possible_executables = []\n executables = {\n 'version_cmd': [\"f77\", \"-v\"],\n 'compiler_f77': [\"f77\"],\n 'compiler_f90': [\"f90\"],\n 'compiler_fix': [\"f90\", \"-fixed\"],\n 'linker_so': [\"f90\", \"-shared\"],\n 'linker_exe': [\"f90\"],\n 'archiver': [\"ar\", \"-cr\"],\n 'ranlib': None,\n }\n\n # If compiler does not support compiling Fortran 90 then it can\n # suggest using another compiler. For example, gnu would suggest\n # gnu95 compiler type when there are F90 sources.\n suggested_f90_compiler = None\n\n compile_switch = \"-c\"\n object_switch = \"-o \" # Ending space matters! It will be stripped\n # but if it is missing then object_switch\n # will be prefixed to object file name by\n # string concatenation.\n library_switch = \"-o \" # Ditto!\n\n # Switch to specify where module files are created and searched\n # for USE statement. Normally it is a string and also here ending\n # space matters. 
See above.\n module_dir_switch = None\n\n # Switch to specify where module files are searched for USE statement.\n module_include_switch = '-I'\n\n pic_flags = [] # Flags to create position-independent code\n\n src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']\n obj_extension = \".o\"\n\n shared_lib_extension = get_shared_lib_extension()\n static_lib_extension = \".a\" # or .lib\n static_lib_format = \"lib%s%s\" # or %s%s\n shared_lib_format = \"%s%s\"\n exe_extension = \"\"\n\n _exe_cache = {}\n\n _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',\n 'compiler_fix', 'linker_so', 'linker_exe', 'archiver',\n 'ranlib']\n\n # This will be set by new_fcompiler when called in\n # command/{build_ext.py, build_clib.py, config.py} files.\n c_compiler = None\n\n # extra_{f77,f90}_compile_args are set by build_ext.build_extension method\n extra_f77_compile_args = []\n extra_f90_compile_args = []\n\n def __init__(self, *args, **kw):\n CCompiler.__init__(self, *args, **kw)\n self.distutils_vars = self.distutils_vars.clone(self._environment_hook)\n self.command_vars = self.command_vars.clone(self._environment_hook)\n self.flag_vars = self.flag_vars.clone(self._environment_hook)\n self.executables = self.executables.copy()\n for e in self._executable_keys:\n if e not in self.executables:\n self.executables[e] = None\n\n # Some methods depend on .customize() being called first, so\n # this keeps track of whether that's happened yet.\n self._is_customised = False\n\n def __copy__(self):\n obj = self.__new__(self.__class__)\n obj.__dict__.update(self.__dict__)\n obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)\n obj.command_vars = obj.command_vars.clone(obj._environment_hook)\n obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)\n obj.executables = obj.executables.copy()\n return obj\n\n def copy(self):\n return self.__copy__()\n\n # Use properties for the attributes used by CCompiler. Setting them\n # as attributes from the self.executables dictionary is error-prone,\n # so we get them from there each time.\n def _command_property(key):\n def fget(self):\n assert self._is_customised\n return self.executables[key]\n return property(fget=fget)\n version_cmd = _command_property('version_cmd')\n compiler_f77 = _command_property('compiler_f77')\n compiler_f90 = _command_property('compiler_f90')\n compiler_fix = _command_property('compiler_fix')\n linker_so = _command_property('linker_so')\n linker_exe = _command_property('linker_exe')\n archiver = _command_property('archiver')\n ranlib = _command_property('ranlib')\n\n # Make our terminology consistent.\n def set_executable(self, key, value):\n self.set_command(key, value)\n\n def set_commands(self, **kw):\n for k, v in kw.items():\n self.set_command(k, v)\n\n def set_command(self, key, value):\n if not key in self._executable_keys:\n raise ValueError(\n \"unknown executable '%s' for class %s\" %\n (key, self.__class__.__name__))\n if is_string(value):\n value = split_quoted(value)\n assert value is None or is_sequence_of_strings(value[1:]), (key, value)\n self.executables[key] = value\n\n ######################################################################\n ## Methods that subclasses may redefine. But don't call these methods!\n ## They are private to FCompiler class and may return unexpected\n ## results if used elsewhere. 
So, you have been warned..\n\n def find_executables(self):\n \"\"\"Go through the self.executables dictionary, and attempt to\n find and assign appropriate executables.\n\n Executable names are looked for in the environment (environment\n variables, the distutils.cfg, and command line), the 0th-element of\n the command list, and the self.possible_executables list.\n\n Also, if the 0th element is \"<F77>\" or \"<F90>\", the Fortran 77\n or the Fortran 90 compiler executable is used, unless overridden\n by an environment setting.\n\n Subclasses should call this if overridden.\n \"\"\"\n assert self._is_customised\n exe_cache = self._exe_cache\n def cached_find_executable(exe):\n if exe in exe_cache:\n return exe_cache[exe]\n fc_exe = find_executable(exe)\n exe_cache[exe] = exe_cache[fc_exe] = fc_exe\n return fc_exe\n def verify_command_form(name, value):\n if value is not None and not is_sequence_of_strings(value):\n raise ValueError(\n \"%s value %r is invalid in class %s\" %\n (name, value, self.__class__.__name__))\n def set_exe(exe_key, f77=None, f90=None):\n cmd = self.executables.get(exe_key, None)\n if not cmd:\n return None\n # Note that we get cmd[0] here if the environment doesn't\n # have anything set\n exe_from_environ = getattr(self.command_vars, exe_key)\n if not exe_from_environ:\n possibles = [f90, f77] + self.possible_executables\n else:\n possibles = [exe_from_environ] + self.possible_executables\n\n seen = set()\n unique_possibles = []\n for e in possibles:\n if e == '<F77>':\n e = f77\n elif e == '<F90>':\n e = f90\n if not e or e in seen:\n continue\n seen.add(e)\n unique_possibles.append(e)\n\n for exe in unique_possibles:\n fc_exe = cached_find_executable(exe)\n if fc_exe:\n cmd[0] = fc_exe\n return fc_exe\n self.set_command(exe_key, None)\n return None\n\n ctype = self.compiler_type\n f90 = set_exe('compiler_f90')\n if not f90:\n f77 = set_exe('compiler_f77')\n if f77:\n log.warn('%s: no Fortran 90 compiler found' % ctype)\n else:\n raise CompilerNotFound('%s: f90 nor f77' % ctype)\n else:\n f77 = set_exe('compiler_f77', f90=f90)\n if not f77:\n log.warn('%s: no Fortran 77 compiler found' % ctype)\n set_exe('compiler_fix', f90=f90)\n\n set_exe('linker_so', f77=f77, f90=f90)\n set_exe('linker_exe', f77=f77, f90=f90)\n set_exe('version_cmd', f77=f77, f90=f90)\n set_exe('archiver')\n set_exe('ranlib')\n\n def update_executables(self):\n \"\"\"Called at the beginning of customisation. 
Subclasses should\n override this if they need to set up the executables dictionary.\n\n Note that self.find_executables() is run afterwards, so the\n self.executables dictionary values can contain <F77> or <F90> as\n the command, which will be replaced by the found F77 or F90\n compiler.\n \"\"\"\n pass\n\n def get_flags(self):\n \"\"\"List of flags common to all compiler types.\"\"\"\n return [] + self.pic_flags\n\n def _get_command_flags(self, key):\n cmd = self.executables.get(key, None)\n if cmd is None:\n return []\n return cmd[1:]\n\n def get_flags_f77(self):\n \"\"\"List of Fortran 77 specific flags.\"\"\"\n return self._get_command_flags('compiler_f77')\n def get_flags_f90(self):\n \"\"\"List of Fortran 90 specific flags.\"\"\"\n return self._get_command_flags('compiler_f90')\n def get_flags_free(self):\n \"\"\"List of Fortran 90 free format specific flags.\"\"\"\n return []\n def get_flags_fix(self):\n \"\"\"List of Fortran 90 fixed format specific flags.\"\"\"\n return self._get_command_flags('compiler_fix')\n def get_flags_linker_so(self):\n \"\"\"List of linker flags to build a shared library.\"\"\"\n return self._get_command_flags('linker_so')\n def get_flags_linker_exe(self):\n \"\"\"List of linker flags to build an executable.\"\"\"\n return self._get_command_flags('linker_exe')\n def get_flags_ar(self):\n \"\"\"List of archiver flags. \"\"\"\n return self._get_command_flags('archiver')\n def get_flags_opt(self):\n \"\"\"List of architecture independent compiler flags.\"\"\"\n return []\n def get_flags_arch(self):\n \"\"\"List of architecture dependent compiler flags.\"\"\"\n return []\n def get_flags_debug(self):\n \"\"\"List of compiler flags to compile with debugging information.\"\"\"\n return []\n\n get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt\n get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch\n get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug\n\n def get_libraries(self):\n \"\"\"List of compiler libraries.\"\"\"\n return self.libraries[:]\n def get_library_dirs(self):\n \"\"\"List of compiler library directories.\"\"\"\n return self.library_dirs[:]\n\n def get_version(self, force=False, ok_status=[0]):\n assert self._is_customised\n version = CCompiler.get_version(self, force=force, ok_status=ok_status)\n if version is None:\n raise CompilerNotFound()\n return version\n\n\n ############################################################\n\n ## Public methods:\n\n def customize(self, dist = None):\n \"\"\"Customize Fortran compiler.\n\n This method gets Fortran compiler specific information from\n (i) class definition, (ii) environment, (iii) distutils config\n files, and (iv) command line (later overrides earlier).\n\n This method should be always called after constructing a\n compiler instance. 
But not in __init__ because Distribution\n instance is needed for (iii) and (iv).\n \"\"\"\n log.info('customize %s' % (self.__class__.__name__))\n\n self._is_customised = True\n\n self.distutils_vars.use_distribution(dist)\n self.command_vars.use_distribution(dist)\n self.flag_vars.use_distribution(dist)\n\n self.update_executables()\n\n # find_executables takes care of setting the compiler commands,\n # version_cmd, linker_so, linker_exe, ar, and ranlib\n self.find_executables()\n\n noopt = self.distutils_vars.get('noopt', False)\n noarch = self.distutils_vars.get('noarch', noopt)\n debug = self.distutils_vars.get('debug', False)\n\n f77 = self.command_vars.compiler_f77\n f90 = self.command_vars.compiler_f90\n\n f77flags = []\n f90flags = []\n freeflags = []\n fixflags = []\n\n if f77:\n f77 = _shell_utils.NativeParser.split(f77)\n f77flags = self.flag_vars.f77\n if f90:\n f90 = _shell_utils.NativeParser.split(f90)\n f90flags = self.flag_vars.f90\n freeflags = self.flag_vars.free\n # XXX Assuming that free format is default for f90 compiler.\n fix = self.command_vars.compiler_fix\n # NOTE: this and similar examples are probably just\n # excluding --coverage flag when F90 = gfortran --coverage\n # instead of putting that flag somewhere more appropriate\n # this and similar examples where a Fortran compiler\n # environment variable has been customized by CI or a user\n # should perhaps eventually be more thoroughly tested and more\n # robustly handled\n if fix:\n fix = _shell_utils.NativeParser.split(fix)\n fixflags = self.flag_vars.fix + f90flags\n\n oflags, aflags, dflags = [], [], []\n # examine get_flags_<tag>_<compiler> for extra flags\n # only add them if the method is different from get_flags_<tag>\n def get_flags(tag, flags):\n # note that self.flag_vars.<tag> calls self.get_flags_<tag>()\n flags.extend(getattr(self.flag_vars, tag))\n this_get = getattr(self, 'get_flags_' + tag)\n for name, c, flagvar in [('f77', f77, f77flags),\n ('f90', f90, f90flags),\n ('f90', fix, fixflags)]:\n t = '%s_%s' % (tag, name)\n if c and this_get is not getattr(self, 'get_flags_' + t):\n flagvar.extend(getattr(self.flag_vars, t))\n if not noopt:\n get_flags('opt', oflags)\n if not noarch:\n get_flags('arch', aflags)\n if debug:\n get_flags('debug', dflags)\n\n fflags = self.flag_vars.flags + dflags + oflags + aflags\n\n if f77:\n self.set_commands(compiler_f77=f77+f77flags+fflags)\n if f90:\n self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)\n if fix:\n self.set_commands(compiler_fix=fix+fixflags+fflags)\n\n\n #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS\n linker_so = self.linker_so\n if linker_so:\n linker_so_flags = self.flag_vars.linker_so\n if sys.platform.startswith('aix'):\n python_lib = get_python_lib(standard_lib=1)\n ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')\n python_exp = os.path.join(python_lib, 'config', 'python.exp')\n linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]\n self.set_commands(linker_so=linker_so+linker_so_flags)\n\n linker_exe = self.linker_exe\n if linker_exe:\n linker_exe_flags = self.flag_vars.linker_exe\n self.set_commands(linker_exe=linker_exe+linker_exe_flags)\n\n ar = self.command_vars.archiver\n if ar:\n arflags = self.flag_vars.ar\n self.set_commands(archiver=[ar]+arflags)\n\n self.set_library_dirs(self.get_library_dirs())\n self.set_libraries(self.get_libraries())\n\n def dump_properties(self):\n \"\"\"Print out the attributes of a compiler instance.\"\"\"\n props = []\n for key in list(self.executables.keys()) + \\\n 
['version', 'libraries', 'library_dirs',\n 'object_switch', 'compile_switch']:\n if hasattr(self, key):\n v = getattr(self, key)\n props.append((key, None, '= '+repr(v)))\n props.sort()\n\n pretty_printer = FancyGetopt(props)\n for l in pretty_printer.generate_help(\"%s instance properties:\" \\\n % (self.__class__.__name__)):\n if l[:4]==' --':\n l = ' ' + l[4:]\n print(l)\n\n ###################\n\n def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n \"\"\"Compile 'src' to product 'obj'.\"\"\"\n src_flags = {}\n if is_f_file(src) and not has_f90_header(src):\n flavor = ':f77'\n compiler = self.compiler_f77\n src_flags = get_f77flags(src)\n extra_compile_args = self.extra_f77_compile_args or []\n elif is_free_format(src):\n flavor = ':f90'\n compiler = self.compiler_f90\n if compiler is None:\n raise DistutilsExecError('f90 not supported by %s needed for %s'\\\n % (self.__class__.__name__, src))\n extra_compile_args = self.extra_f90_compile_args or []\n else:\n flavor = ':fix'\n compiler = self.compiler_fix\n if compiler is None:\n raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\\\n % (self.__class__.__name__, src))\n extra_compile_args = self.extra_f90_compile_args or []\n if self.object_switch[-1]==' ':\n o_args = [self.object_switch.strip(), obj]\n else:\n o_args = [self.object_switch.strip()+obj]\n\n assert self.compile_switch.strip()\n s_args = [self.compile_switch, src]\n\n if extra_compile_args:\n log.info('extra %s options: %r' \\\n % (flavor[1:], ' '.join(extra_compile_args)))\n\n extra_flags = src_flags.get(self.compiler_type, [])\n if extra_flags:\n log.info('using compile options from source: %r' \\\n % ' '.join(extra_flags))\n\n command = compiler + cc_args + extra_flags + s_args + o_args \\\n + extra_postargs + extra_compile_args\n\n display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,\n src)\n try:\n self.spawn(command, display=display)\n except DistutilsExecError as e:\n msg = str(e)\n raise CompileError(msg)\n\n def module_options(self, module_dirs, module_build_dir):\n options = []\n if self.module_dir_switch is not None:\n if self.module_dir_switch[-1]==' ':\n options.extend([self.module_dir_switch.strip(), module_build_dir])\n else:\n options.append(self.module_dir_switch.strip()+module_build_dir)\n else:\n print('XXX: module_build_dir=%r option ignored' % (module_build_dir))\n print('XXX: Fix module_dir_switch for ', self.__class__.__name__)\n if self.module_include_switch is not None:\n for d in [module_build_dir]+module_dirs:\n options.append('%s%s' % (self.module_include_switch, d))\n else:\n print('XXX: module_dirs=%r option ignored' % (module_dirs))\n print('XXX: Fix module_include_switch for ', self.__class__.__name__)\n return options\n\n def library_option(self, lib):\n return \"-l\" + lib\n def library_dir_option(self, dir):\n return \"-L\" + dir\n\n def link(self, target_desc, objects,\n output_filename, output_dir=None, libraries=None,\n library_dirs=None, runtime_library_dirs=None,\n export_symbols=None, debug=0, extra_preargs=None,\n extra_postargs=None, build_temp=None, target_lang=None):\n objects, output_dir = self._fix_object_args(objects, output_dir)\n libraries, library_dirs, runtime_library_dirs = \\\n self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)\n\n lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,\n libraries)\n if is_string(output_dir):\n output_filename = os.path.join(output_dir, output_filename)\n elif output_dir is not None:\n raise 
TypeError(\"'output_dir' must be a string or None\")\n\n if self._need_link(objects, output_filename):\n if self.library_switch[-1]==' ':\n o_args = [self.library_switch.strip(), output_filename]\n else:\n o_args = [self.library_switch.strip()+output_filename]\n\n if is_string(self.objects):\n ld_args = objects + [self.objects]\n else:\n ld_args = objects + self.objects\n ld_args = ld_args + lib_opts + o_args\n if debug:\n ld_args[:0] = ['-g']\n if extra_preargs:\n ld_args[:0] = extra_preargs\n if extra_postargs:\n ld_args.extend(extra_postargs)\n self.mkpath(os.path.dirname(output_filename))\n if target_desc == CCompiler.EXECUTABLE:\n linker = self.linker_exe[:]\n else:\n linker = self.linker_so[:]\n command = linker + ld_args\n try:\n self.spawn(command)\n except DistutilsExecError as e:\n msg = str(e)\n raise LinkError(msg)\n else:\n log.debug(\"skipping %s (up-to-date)\", output_filename)\n\n def _environment_hook(self, name, hook_name):\n if hook_name is None:\n return None\n if is_string(hook_name):\n if hook_name.startswith('self.'):\n hook_name = hook_name[5:]\n hook = getattr(self, hook_name)\n return hook()\n elif hook_name.startswith('exe.'):\n hook_name = hook_name[4:]\n var = self.executables[hook_name]\n if var:\n return var[0]\n else:\n return None\n elif hook_name.startswith('flags.'):\n hook_name = hook_name[6:]\n hook = getattr(self, 'get_flags_' + hook_name)\n return hook()\n else:\n return hook_name()\n\n def can_ccompiler_link(self, ccompiler):\n \"\"\"\n Check if the given C compiler can link objects produced by\n this compiler.\n \"\"\"\n return True\n\n def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):\n \"\"\"\n Convert a set of object files that are not compatible with the default\n linker, to a file that is compatible.\n\n Parameters\n ----------\n objects : list\n List of object files to include.\n output_dir : str\n Output directory to place generated object files.\n extra_dll_dir : str\n Output directory to place extra DLL files that need to be\n included on Windows.\n\n Returns\n -------\n converted_objects : list of str\n List of converted object files.\n Note that the number of output files is not necessarily\n the same as inputs.\n\n \"\"\"\n raise NotImplementedError()\n\n ## class FCompiler\n\n_default_compilers = (\n # sys.platform mappings\n ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',\n 'intelvem', 'intelem', 'flang')),\n ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),\n ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq',\n 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')),\n ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')),\n ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),\n ('irix.*', ('mips', 'gnu', 'gnu95',)),\n ('aix.*', ('ibm', 'gnu', 'gnu95',)),\n # os.name mappings\n ('posix', ('gnu', 'gnu95',)),\n ('nt', ('gnu', 'gnu95',)),\n ('mac', ('gnu95', 'gnu', 'pg')),\n )\n\nfcompiler_class = None\nfcompiler_aliases = None\n\ndef load_all_fcompiler_classes():\n \"\"\"Cache all the FCompiler classes found in modules in the\n numpy.distutils.fcompiler package.\n \"\"\"\n from glob import glob\n global fcompiler_class, fcompiler_aliases\n if fcompiler_class is not None:\n return\n pys = os.path.join(os.path.dirname(__file__), '*.py')\n fcompiler_class = {}\n fcompiler_aliases = {}\n for fname in glob(pys):\n module_name, ext = os.path.splitext(os.path.basename(fname))\n module_name = 'numpy.distutils.fcompiler.' 
+ module_name\n __import__ (module_name)\n module = sys.modules[module_name]\n if hasattr(module, 'compilers'):\n for cname in module.compilers:\n klass = getattr(module, cname)\n desc = (klass.compiler_type, klass, klass.description)\n fcompiler_class[klass.compiler_type] = desc\n for alias in klass.compiler_aliases:\n if alias in fcompiler_aliases:\n raise ValueError(\"alias %r defined for both %s and %s\"\n % (alias, klass.__name__,\n fcompiler_aliases[alias][1].__name__))\n fcompiler_aliases[alias] = desc\n\ndef _find_existing_fcompiler(compiler_types,\n osname=None, platform=None,\n requiref90=False,\n c_compiler=None):\n from numpy.distutils.core import get_distribution\n dist = get_distribution(always=True)\n for compiler_type in compiler_types:\n v = None\n try:\n c = new_fcompiler(plat=platform, compiler=compiler_type,\n c_compiler=c_compiler)\n c.customize(dist)\n v = c.get_version()\n if requiref90 and c.compiler_f90 is None:\n v = None\n new_compiler = c.suggested_f90_compiler\n if new_compiler:\n log.warn('Trying %r compiler as suggested by %r '\n 'compiler for f90 support.' % (compiler_type,\n new_compiler))\n c = new_fcompiler(plat=platform, compiler=new_compiler,\n c_compiler=c_compiler)\n c.customize(dist)\n v = c.get_version()\n if v is not None:\n compiler_type = new_compiler\n if requiref90 and c.compiler_f90 is None:\n raise ValueError('%s does not support compiling f90 codes, '\n 'skipping.' % (c.__class__.__name__))\n except DistutilsModuleError:\n log.debug(\"_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError\", compiler_type)\n except CompilerNotFound:\n log.debug(\"_find_existing_fcompiler: compiler_type='%s' not found\", compiler_type)\n if v is not None:\n return compiler_type\n return None\n\ndef available_fcompilers_for_platform(osname=None, platform=None):\n if osname is None:\n osname = os.name\n if platform is None:\n platform = sys.platform\n matching_compiler_types = []\n for pattern, compiler_type in _default_compilers:\n if re.match(pattern, platform) or re.match(pattern, osname):\n for ct in compiler_type:\n if ct not in matching_compiler_types:\n matching_compiler_types.append(ct)\n if not matching_compiler_types:\n matching_compiler_types.append('gnu')\n return matching_compiler_types\n\ndef get_default_fcompiler(osname=None, platform=None, requiref90=False,\n c_compiler=None):\n \"\"\"Determine the default Fortran compiler to use for the given\n platform.\"\"\"\n matching_compiler_types = available_fcompilers_for_platform(osname,\n platform)\n log.info(\"get_default_fcompiler: matching types: '%s'\",\n matching_compiler_types)\n compiler_type = _find_existing_fcompiler(matching_compiler_types,\n osname=osname,\n platform=platform,\n requiref90=requiref90,\n c_compiler=c_compiler)\n return compiler_type\n\n# Flag to avoid rechecking for Fortran compiler every time\nfailed_fcompilers = set()\n\ndef new_fcompiler(plat=None,\n compiler=None,\n verbose=0,\n dry_run=0,\n force=0,\n requiref90=False,\n c_compiler = None):\n \"\"\"Generate an instance of some FCompiler subclass for the supplied\n platform/compiler combination.\n \"\"\"\n global failed_fcompilers\n fcompiler_key = (plat, compiler)\n if fcompiler_key in failed_fcompilers:\n return None\n\n load_all_fcompiler_classes()\n if plat is None:\n plat = os.name\n if compiler is None:\n compiler = get_default_fcompiler(plat, requiref90=requiref90,\n c_compiler=c_compiler)\n if compiler in fcompiler_class:\n module_name, klass, long_description = fcompiler_class[compiler]\n elif 
compiler in fcompiler_aliases:\n module_name, klass, long_description = fcompiler_aliases[compiler]\n else:\n msg = \"don't know how to compile Fortran code on platform '%s'\" % plat\n if compiler is not None:\n msg = msg + \" with '%s' compiler.\" % compiler\n msg = msg + \" Supported compilers are: %s)\" \\\n % (','.join(fcompiler_class.keys()))\n log.warn(msg)\n failed_fcompilers.add(fcompiler_key)\n return None\n\n compiler = klass(verbose=verbose, dry_run=dry_run, force=force)\n compiler.c_compiler = c_compiler\n return compiler\n\ndef show_fcompilers(dist=None):\n \"\"\"Print list of available compilers (used by the \"--help-fcompiler\"\n option to \"config_fc\").\n \"\"\"\n if dist is None:\n from distutils.dist import Distribution\n from numpy.distutils.command.config_compiler import config_fc\n dist = Distribution()\n dist.script_name = os.path.basename(sys.argv[0])\n dist.script_args = ['config_fc'] + sys.argv[1:]\n try:\n dist.script_args.remove('--help-fcompiler')\n except ValueError:\n pass\n dist.cmdclass['config_fc'] = config_fc\n dist.parse_config_files()\n dist.parse_command_line()\n compilers = []\n compilers_na = []\n compilers_ni = []\n if not fcompiler_class:\n load_all_fcompiler_classes()\n platform_compilers = available_fcompilers_for_platform()\n for compiler in platform_compilers:\n v = None\n log.set_verbosity(-2)\n try:\n c = new_fcompiler(compiler=compiler, verbose=dist.verbose)\n c.customize(dist)\n v = c.get_version()\n except (DistutilsModuleError, CompilerNotFound) as e:\n log.debug(\"show_fcompilers: %s not found\" % (compiler,))\n log.debug(repr(e))\n\n if v is None:\n compilers_na.append((\"fcompiler=\"+compiler, None,\n fcompiler_class[compiler][2]))\n else:\n c.dump_properties()\n compilers.append((\"fcompiler=\"+compiler, None,\n fcompiler_class[compiler][2] + ' (%s)' % v))\n\n compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))\n compilers_ni = [(\"fcompiler=\"+fc, None, fcompiler_class[fc][2])\n for fc in compilers_ni]\n\n compilers.sort()\n compilers_na.sort()\n compilers_ni.sort()\n pretty_printer = FancyGetopt(compilers)\n pretty_printer.print_help(\"Fortran compilers found:\")\n pretty_printer = FancyGetopt(compilers_na)\n pretty_printer.print_help(\"Compilers available for this \"\n \"platform, but not found:\")\n if compilers_ni:\n pretty_printer = FancyGetopt(compilers_ni)\n pretty_printer.print_help(\"Compilers not available on this platform:\")\n print(\"For compiler details, run 'config_fc --verbose' setup command.\")\n\n\ndef dummy_fortran_file():\n fo, name = make_temp_file(suffix='.f')\n fo.write(\" subroutine dummy()\\n end\\n\")\n fo.close()\n return name[:-2]\n\n\nis_f_file = re.compile(r'.*[.](for|ftn|f77|f)\\Z', re.I).match\n_has_f_header = re.compile(r'-[*]-\\s*fortran\\s*-[*]-', re.I).search\n_has_f90_header = re.compile(r'-[*]-\\s*f90\\s*-[*]-', re.I).search\n_has_fix_header = re.compile(r'-[*]-\\s*fix\\s*-[*]-', re.I).search\n_free_f90_start = re.compile(r'[^c*!]\\s*[^\\s\\d\\t]', re.I).match\n\ndef is_free_format(file):\n \"\"\"Check if file is in free format Fortran.\"\"\"\n # f90 allows both fixed and free format, assuming fixed unless\n # signs of free format are detected.\n result = 0\n f = open_latin1(file, 'r')\n line = f.readline()\n n = 10000 # the number of non-comment lines to scan for hints\n if _has_f_header(line):\n n = 0\n elif _has_f90_header(line):\n n = 0\n result = 1\n while n>0 and line:\n line = line.rstrip()\n if line and line[0]!='!':\n n -= 1\n if (line[0]!='\\t' and 
_free_f90_start(line[:5])) or line[-1:]=='&':\n result = 1\n break\n line = f.readline()\n f.close()\n return result\n\ndef has_f90_header(src):\n f = open_latin1(src, 'r')\n line = f.readline()\n f.close()\n return _has_f90_header(line) or _has_fix_header(line)\n\n_f77flags_re = re.compile(r'(c|)f77flags\\s*\\(\\s*(?P<fcname>\\w+)\\s*\\)\\s*=\\s*(?P<fflags>.*)', re.I)\ndef get_f77flags(src):\n \"\"\"\n Search the first 20 lines of fortran 77 code for line pattern\n `CF77FLAGS(<fcompiler type>)=<f77 flags>`\n Return a dictionary {<fcompiler type>:<f77 flags>}.\n \"\"\"\n flags = {}\n f = open_latin1(src, 'r')\n i = 0\n for line in f:\n i += 1\n if i>20: break\n m = _f77flags_re.match(line)\n if not m: continue\n fcname = m.group('fcname').strip()\n fflags = m.group('fflags').strip()\n flags[fcname] = split_quoted(fflags)\n f.close()\n return flags\n\n# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags\n\nif __name__ == '__main__':\n show_fcompilers()\n",
"# coding: utf-8\n\n\"\"\" Test cases for DataFrame.plot \"\"\"\n\nfrom datetime import date, datetime\nimport itertools\nimport string\nimport warnings\n\nimport numpy as np\nfrom numpy.random import rand, randn\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.api import is_list_like\n\nimport pandas as pd\nfrom pandas import DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range\nimport pandas._testing as tm\nfrom pandas.core.arrays import integer_array\nfrom pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n\nfrom pandas.io.formats.printing import pprint_thing\nimport pandas.plotting as plotting\n\n\[email protected]_if_no_mpl\nclass TestDataFramePlots(TestPlotBase):\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n import matplotlib as mpl\n\n mpl.rcdefaults()\n\n self.tdf = tm.makeTimeDataFrame()\n self.hexbin_df = DataFrame(\n {\n \"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(size=20),\n }\n )\n\n def _assert_ytickslabels_visibility(self, axes, expected):\n for ax, exp in zip(axes, expected):\n self._check_visible(ax.get_yticklabels(), visible=exp)\n\n def _assert_xtickslabels_visibility(self, axes, expected):\n for ax, exp in zip(axes, expected):\n self._check_visible(ax.get_xticklabels(), visible=exp)\n\n @pytest.mark.slow\n def test_plot(self):\n from pandas.plotting._matplotlib.compat import _mpl_ge_3_1_0\n\n df = self.tdf\n _check_plot_works(df.plot, grid=False)\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, subplots=True)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, subplots=True, use_index=False)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n if _mpl_ge_3_1_0():\n msg = \"'Line2D' object has no property 'blarg'\"\n else:\n msg = \"Unknown property blarg\"\n with pytest.raises(AttributeError, match=msg):\n df.plot.line(blarg=True)\n\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n _check_plot_works(df.plot, use_index=True)\n _check_plot_works(df.plot, sort_columns=False)\n _check_plot_works(df.plot, yticks=[1, 5, 10])\n _check_plot_works(df.plot, xticks=[1, 5, 10])\n _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))\n\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.plot, subplots=True, title=\"blah\")\n\n # We have to redo it here because _check_plot_works does two plots,\n # once without an ax kwarg and once with an ax kwarg and the new sharex\n # behaviour does not remove the visibility of the latter axis (as ax is\n # present). 
see: https://github.com/pandas-dev/pandas/issues/9737\n\n axes = df.plot(subplots=True, title=\"blah\")\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n # axes[0].figure.savefig(\"test.png\")\n for ax in axes[:2]:\n self._check_visible(ax.xaxis) # xaxis must be visible for grid\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n self._check_visible([ax.xaxis.get_label()], visible=False)\n for ax in [axes[2]]:\n self._check_visible(ax.xaxis)\n self._check_visible(ax.get_xticklabels())\n self._check_visible([ax.xaxis.get_label()])\n self._check_ticks_props(ax, xrot=0)\n\n _check_plot_works(df.plot, title=\"blah\")\n\n tuples = zip(string.ascii_letters[:10], range(10))\n df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))\n _check_plot_works(df.plot, use_index=True)\n\n # unicode\n index = MultiIndex.from_tuples(\n [\n (\"\\u03b1\", 0),\n (\"\\u03b1\", 1),\n (\"\\u03b2\", 2),\n (\"\\u03b2\", 3),\n (\"\\u03b3\", 4),\n (\"\\u03b3\", 5),\n (\"\\u03b4\", 6),\n (\"\\u03b4\", 7),\n ],\n names=[\"i0\", \"i1\"],\n )\n columns = MultiIndex.from_tuples(\n [(\"bar\", \"\\u0394\"), (\"bar\", \"\\u0395\")], names=[\"c0\", \"c1\"]\n )\n df = DataFrame(np.random.randint(0, 10, (8, 2)), columns=columns, index=index)\n _check_plot_works(df.plot, title=\"\\u03A3\")\n\n # GH 6951\n # Test with single column\n df = DataFrame({\"x\": np.random.rand(10)})\n axes = _check_plot_works(df.plot.bar, subplots=True)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n # When ax is supplied and required number of axes is 1,\n # passed ax should be used:\n fig, ax = self.plt.subplots()\n axes = df.plot.bar(subplots=True, ax=ax)\n assert len(axes) == 1\n result = ax.axes\n assert result is axes[0]\n\n def test_integer_array_plot(self):\n # GH 25587\n arr = integer_array([1, 2, 3, 4], dtype=\"UInt32\")\n\n s = Series(arr)\n _check_plot_works(s.plot.line)\n _check_plot_works(s.plot.bar)\n _check_plot_works(s.plot.hist)\n _check_plot_works(s.plot.pie)\n\n df = DataFrame({\"x\": arr, \"y\": arr})\n _check_plot_works(df.plot.line)\n _check_plot_works(df.plot.bar)\n _check_plot_works(df.plot.hist)\n _check_plot_works(df.plot.pie, y=\"y\")\n _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\")\n _check_plot_works(df.plot.hexbin, x=\"x\", y=\"y\")\n\n def test_mpl2_color_cycle_str(self):\n # GH 15516\n colors = [\"C\" + str(x) for x in range(10)]\n df = DataFrame(randn(10, 3), columns=[\"a\", \"b\", \"c\"])\n for c in colors:\n _check_plot_works(df.plot, color=c)\n\n def test_color_single_series_list(self):\n # GH 3486\n df = DataFrame({\"A\": [1, 2, 3]})\n _check_plot_works(df.plot, color=[\"red\"])\n\n def test_rgb_tuple_color(self):\n # GH 16695\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n _check_plot_works(df.plot, x=\"x\", y=\"y\", color=(1, 0, 0))\n _check_plot_works(df.plot, x=\"x\", y=\"y\", color=(1, 0, 0, 0.5))\n\n def test_color_empty_string(self):\n df = DataFrame(randn(10, 2))\n with pytest.raises(ValueError):\n df.plot(color=\"\")\n\n def test_color_and_style_arguments(self):\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n # passing both 'color' and 'style' arguments should be allowed\n # if there is no color symbol in the style strings:\n ax = df.plot(color=[\"red\", \"black\"], style=[\"-\", \"--\"])\n # check that the linestyles are correctly set:\n linestyle 
= [line.get_linestyle() for line in ax.lines]\n assert linestyle == [\"-\", \"--\"]\n # check that the colors are correctly set:\n color = [line.get_color() for line in ax.lines]\n assert color == [\"red\", \"black\"]\n # passing both 'color' and 'style' arguments should not be allowed\n # if there is a color symbol in the style strings:\n with pytest.raises(ValueError):\n df.plot(color=[\"red\", \"black\"], style=[\"k-\", \"r--\"])\n\n def test_nonnumeric_exclude(self):\n df = DataFrame({\"A\": [\"x\", \"y\", \"z\"], \"B\": [1, 2, 3]})\n ax = df.plot()\n assert len(ax.get_lines()) == 1 # B was plotted\n\n @pytest.mark.slow\n def test_implicit_label(self):\n df = DataFrame(randn(10, 3), columns=[\"a\", \"b\", \"c\"])\n ax = df.plot(x=\"a\", y=\"b\")\n self._check_text_labels(ax.xaxis.get_label(), \"a\")\n\n @pytest.mark.slow\n def test_donot_overwrite_index_name(self):\n # GH 8494\n df = DataFrame(randn(2, 2), columns=[\"a\", \"b\"])\n df.index.name = \"NAME\"\n df.plot(y=\"b\", label=\"LABEL\")\n assert df.index.name == \"NAME\"\n\n @pytest.mark.slow\n def test_plot_xy(self):\n # columns.inferred_type == 'string'\n df = self.tdf\n self._check_data(df.plot(x=0, y=1), df.set_index(\"A\")[\"B\"].plot())\n self._check_data(df.plot(x=0), df.set_index(\"A\").plot())\n self._check_data(df.plot(y=0), df.B.plot())\n self._check_data(df.plot(x=\"A\", y=\"B\"), df.set_index(\"A\").B.plot())\n self._check_data(df.plot(x=\"A\"), df.set_index(\"A\").plot())\n self._check_data(df.plot(y=\"B\"), df.B.plot())\n\n # columns.inferred_type == 'integer'\n df.columns = np.arange(1, len(df.columns) + 1)\n self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())\n self._check_data(df.plot(x=1), df.set_index(1).plot())\n self._check_data(df.plot(y=1), df[1].plot())\n\n # figsize and title\n ax = df.plot(x=1, y=2, title=\"Test\", figsize=(16, 8))\n self._check_text_labels(ax.title, \"Test\")\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))\n\n # columns.inferred_type == 'mixed'\n # TODO add MultiIndex test\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"input_log, expected_log\", [(True, \"log\"), (\"sym\", \"symlog\")]\n )\n def test_logscales(self, input_log, expected_log):\n df = DataFrame({\"a\": np.arange(100)}, index=np.arange(100))\n\n ax = df.plot(logy=input_log)\n self._check_ax_scales(ax, yaxis=expected_log)\n assert ax.get_yscale() == expected_log\n\n ax = df.plot(logx=input_log)\n self._check_ax_scales(ax, xaxis=expected_log)\n assert ax.get_xscale() == expected_log\n\n ax = df.plot(loglog=input_log)\n self._check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)\n assert ax.get_xscale() == expected_log\n assert ax.get_yscale() == expected_log\n\n @pytest.mark.parametrize(\"input_param\", [\"logx\", \"logy\", \"loglog\"])\n def test_invalid_logscale(self, input_param):\n # GH: 24867\n df = DataFrame({\"a\": np.arange(100)}, index=np.arange(100))\n\n msg = \"Boolean, None and 'sym' are valid options, 'sm' is given.\"\n with pytest.raises(ValueError, match=msg):\n df.plot(**{input_param: \"sm\"})\n\n @pytest.mark.slow\n def test_xcompat(self):\n import pandas as pd\n\n df = self.tdf\n ax = df.plot(x_compat=True)\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n\n tm.close()\n pd.plotting.plot_params[\"xaxis.compat\"] = True\n ax = df.plot()\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n\n tm.close()\n pd.plotting.plot_params[\"x_compat\"] = False\n\n ax = df.plot()\n lines = 
ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)\n\n tm.close()\n # useful if you're plotting a bunch together\n with pd.plotting.plot_params.use(\"x_compat\", True):\n ax = df.plot()\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n\n tm.close()\n ax = df.plot()\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)\n\n def test_period_compat(self):\n # GH 9012\n # period-array conversions\n df = DataFrame(\n np.random.rand(21, 2),\n index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),\n columns=[\"a\", \"b\"],\n )\n\n df.plot()\n self.plt.axhline(y=0)\n tm.close()\n\n def test_unsorted_index(self):\n df = DataFrame(\n {\"y\": np.arange(100)}, index=np.arange(99, -1, -1), dtype=np.int64\n )\n ax = df.plot()\n lines = ax.get_lines()[0]\n rs = lines.get_xydata()\n rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name=\"y\")\n tm.assert_series_equal(rs, df.y, check_index_type=False)\n tm.close()\n\n df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)\n ax = df.plot()\n lines = ax.get_lines()[0]\n rs = lines.get_xydata()\n rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name=\"y\")\n tm.assert_series_equal(rs, df.y)\n\n def test_unsorted_index_lims(self):\n df = DataFrame({\"y\": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0])\n ax = df.plot()\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= np.nanmin(lines[0].get_data()[0])\n assert xmax >= np.nanmax(lines[0].get_data()[0])\n\n df = DataFrame(\n {\"y\": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]},\n index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],\n )\n ax = df.plot()\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= np.nanmin(lines[0].get_data()[0])\n assert xmax >= np.nanmax(lines[0].get_data()[0])\n\n df = DataFrame({\"y\": [0.0, 1.0, 2.0, 3.0], \"z\": [91.0, 90.0, 93.0, 92.0]})\n ax = df.plot(x=\"z\", y=\"y\")\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= np.nanmin(lines[0].get_data()[0])\n assert xmax >= np.nanmax(lines[0].get_data()[0])\n\n @pytest.mark.slow\n def test_subplots(self):\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n for kind in [\"bar\", \"barh\", \"line\", \"area\"]:\n axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n assert axes.shape == (3,)\n\n for ax, column in zip(axes, df.columns):\n self._check_legend_labels(ax, labels=[pprint_thing(column)])\n\n for ax in axes[:-2]:\n self._check_visible(ax.xaxis) # xaxis must be visible for grid\n self._check_visible(ax.get_xticklabels(), visible=False)\n if not (kind == \"bar\" and self.mpl_ge_3_1_0):\n # change https://github.com/pandas-dev/pandas/issues/26714\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n self._check_visible(ax.xaxis.get_label(), visible=False)\n self._check_visible(ax.get_yticklabels())\n\n self._check_visible(axes[-1].xaxis)\n self._check_visible(axes[-1].get_xticklabels())\n self._check_visible(axes[-1].get_xticklabels(minor=True))\n self._check_visible(axes[-1].xaxis.get_label())\n self._check_visible(axes[-1].get_yticklabels())\n\n axes = df.plot(kind=kind, subplots=True, sharex=False)\n for ax in axes:\n self._check_visible(ax.xaxis)\n self._check_visible(ax.get_xticklabels())\n 
self._check_visible(ax.get_xticklabels(minor=True))\n self._check_visible(ax.xaxis.get_label())\n self._check_visible(ax.get_yticklabels())\n\n axes = df.plot(kind=kind, subplots=True, legend=False)\n for ax in axes:\n assert ax.get_legend() is None\n\n def test_groupby_boxplot_sharey(self):\n # https://github.com/pandas-dev/pandas/issues/20968\n # sharey can now be switched; check whether the right\n # pair of axes is turned on or off\n\n df = DataFrame(\n {\n \"a\": [-1.43, -0.15, -3.70, -1.43, -0.14],\n \"b\": [0.56, 0.84, 0.29, 0.56, 0.85],\n \"c\": [0, 1, 2, 3, 1],\n },\n index=[0, 1, 2, 3, 4],\n )\n\n # behavior without keyword\n axes = df.groupby(\"c\").boxplot()\n expected = [True, False, True, False]\n self._assert_ytickslabels_visibility(axes, expected)\n\n # set sharey=True should be identical\n axes = df.groupby(\"c\").boxplot(sharey=True)\n expected = [True, False, True, False]\n self._assert_ytickslabels_visibility(axes, expected)\n\n # sharey=False, all yticklabels should be visible\n axes = df.groupby(\"c\").boxplot(sharey=False)\n expected = [True, True, True, True]\n self._assert_ytickslabels_visibility(axes, expected)\n\n def test_groupby_boxplot_sharex(self):\n # https://github.com/pandas-dev/pandas/issues/20968\n # sharex can now be switched; check whether the right\n # pair of axes is turned on or off\n\n df = DataFrame(\n {\n \"a\": [-1.43, -0.15, -3.70, -1.43, -0.14],\n \"b\": [0.56, 0.84, 0.29, 0.56, 0.85],\n \"c\": [0, 1, 2, 3, 1],\n },\n index=[0, 1, 2, 3, 4],\n )\n\n # behavior without keyword\n axes = df.groupby(\"c\").boxplot()\n expected = [True, True, True, True]\n self._assert_xtickslabels_visibility(axes, expected)\n\n # set sharex=False should be identical\n axes = df.groupby(\"c\").boxplot(sharex=False)\n expected = [True, True, True, True]\n self._assert_xtickslabels_visibility(axes, expected)\n\n # sharex=True, xticklabels should be visible\n # only for bottom plots\n axes = df.groupby(\"c\").boxplot(sharex=True)\n expected = [False, False, True, True]\n self._assert_xtickslabels_visibility(axes, expected)\n\n @pytest.mark.slow\n def test_subplots_timeseries(self):\n idx = date_range(start=\"2014-07-01\", freq=\"M\", periods=10)\n df = DataFrame(np.random.rand(10, 3), index=idx)\n\n for kind in [\"line\", \"area\"]:\n axes = df.plot(kind=kind, subplots=True, sharex=True)\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n\n for ax in axes[:-2]:\n # GH 7801\n self._check_visible(ax.xaxis) # xaxis must be visible for grid\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n self._check_visible(ax.xaxis.get_label(), visible=False)\n self._check_visible(ax.get_yticklabels())\n\n self._check_visible(axes[-1].xaxis)\n self._check_visible(axes[-1].get_xticklabels())\n self._check_visible(axes[-1].get_xticklabels(minor=True))\n self._check_visible(axes[-1].xaxis.get_label())\n self._check_visible(axes[-1].get_yticklabels())\n self._check_ticks_props(axes, xrot=0)\n\n axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)\n for ax in axes:\n self._check_visible(ax.xaxis)\n self._check_visible(ax.get_xticklabels())\n self._check_visible(ax.get_xticklabels(minor=True))\n self._check_visible(ax.xaxis.get_label())\n self._check_visible(ax.get_yticklabels())\n self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)\n\n def test_subplots_timeseries_y_axis(self):\n # GH16953\n data = {\n \"numeric\": np.array([1, 2, 5]),\n \"timedelta\": [\n 
pd.Timedelta(-10, unit=\"s\"),\n pd.Timedelta(10, unit=\"m\"),\n pd.Timedelta(10, unit=\"h\"),\n ],\n \"datetime_no_tz\": [\n pd.to_datetime(\"2017-08-01 00:00:00\"),\n pd.to_datetime(\"2017-08-01 02:00:00\"),\n pd.to_datetime(\"2017-08-02 00:00:00\"),\n ],\n \"datetime_all_tz\": [\n pd.to_datetime(\"2017-08-01 00:00:00\", utc=True),\n pd.to_datetime(\"2017-08-01 02:00:00\", utc=True),\n pd.to_datetime(\"2017-08-02 00:00:00\", utc=True),\n ],\n \"text\": [\"This\", \"should\", \"fail\"],\n }\n testdata = DataFrame(data)\n\n ax_numeric = testdata.plot(y=\"numeric\")\n assert (\n ax_numeric.get_lines()[0].get_data()[1] == testdata[\"numeric\"].values\n ).all()\n ax_timedelta = testdata.plot(y=\"timedelta\")\n assert (\n ax_timedelta.get_lines()[0].get_data()[1] == testdata[\"timedelta\"].values\n ).all()\n ax_datetime_no_tz = testdata.plot(y=\"datetime_no_tz\")\n assert (\n ax_datetime_no_tz.get_lines()[0].get_data()[1]\n == testdata[\"datetime_no_tz\"].values\n ).all()\n ax_datetime_all_tz = testdata.plot(y=\"datetime_all_tz\")\n assert (\n ax_datetime_all_tz.get_lines()[0].get_data()[1]\n == testdata[\"datetime_all_tz\"].values\n ).all()\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n testdata.plot(y=\"text\")\n\n @pytest.mark.xfail(reason=\"not support for period, categorical, datetime_mixed_tz\")\n def test_subplots_timeseries_y_axis_not_supported(self):\n \"\"\"\n This test will fail for:\n period:\n since period isn't yet implemented in ``select_dtypes``\n and because it will need a custom value converter +\n tick formatter (as was done for x-axis plots)\n\n categorical:\n because it will need a custom value converter +\n tick formatter (also doesn't work for x-axis, as of now)\n\n datetime_mixed_tz:\n because of the way how pandas handles ``Series`` of\n ``datetime`` objects with different timezone,\n generally converting ``datetime`` objects in a tz-aware\n form could help with this problem\n \"\"\"\n data = {\n \"numeric\": np.array([1, 2, 5]),\n \"period\": [\n pd.Period(\"2017-08-01 00:00:00\", freq=\"H\"),\n pd.Period(\"2017-08-01 02:00\", freq=\"H\"),\n pd.Period(\"2017-08-02 00:00:00\", freq=\"H\"),\n ],\n \"categorical\": pd.Categorical(\n [\"c\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"], ordered=False\n ),\n \"datetime_mixed_tz\": [\n pd.to_datetime(\"2017-08-01 00:00:00\", utc=True),\n pd.to_datetime(\"2017-08-01 02:00:00\"),\n pd.to_datetime(\"2017-08-02 00:00:00\"),\n ],\n }\n testdata = pd.DataFrame(data)\n ax_period = testdata.plot(x=\"numeric\", y=\"period\")\n assert (\n ax_period.get_lines()[0].get_data()[1] == testdata[\"period\"].values\n ).all()\n ax_categorical = testdata.plot(x=\"numeric\", y=\"categorical\")\n assert (\n ax_categorical.get_lines()[0].get_data()[1]\n == testdata[\"categorical\"].values\n ).all()\n ax_datetime_mixed_tz = testdata.plot(x=\"numeric\", y=\"datetime_mixed_tz\")\n assert (\n ax_datetime_mixed_tz.get_lines()[0].get_data()[1]\n == testdata[\"datetime_mixed_tz\"].values\n ).all()\n\n @pytest.mark.slow\n def test_subplots_layout(self):\n # GH 6667\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n axes = df.plot(subplots=True, layout=(2, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert axes.shape == (2, 2)\n\n axes = df.plot(subplots=True, layout=(-1, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert axes.shape == (2, 2)\n\n axes = df.plot(subplots=True, layout=(2, -1))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 
2))\n assert axes.shape == (2, 2)\n\n axes = df.plot(subplots=True, layout=(1, 4))\n self._check_axes_shape(axes, axes_num=3, layout=(1, 4))\n assert axes.shape == (1, 4)\n\n axes = df.plot(subplots=True, layout=(-1, 4))\n self._check_axes_shape(axes, axes_num=3, layout=(1, 4))\n assert axes.shape == (1, 4)\n\n axes = df.plot(subplots=True, layout=(4, -1))\n self._check_axes_shape(axes, axes_num=3, layout=(4, 1))\n assert axes.shape == (4, 1)\n\n with pytest.raises(ValueError):\n df.plot(subplots=True, layout=(1, 1))\n with pytest.raises(ValueError):\n df.plot(subplots=True, layout=(-1, -1))\n\n # single column\n df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))\n axes = df.plot(subplots=True)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n assert axes.shape == (1,)\n\n axes = df.plot(subplots=True, layout=(3, 3))\n self._check_axes_shape(axes, axes_num=1, layout=(3, 3))\n assert axes.shape == (3, 3)\n\n @pytest.mark.slow\n def test_subplots_warnings(self):\n # GH 9464\n with tm.assert_produces_warning(None):\n df = DataFrame(np.random.randn(100, 4))\n df.plot(subplots=True, layout=(3, 2))\n\n df = DataFrame(\n np.random.randn(100, 4), index=date_range(\"1/1/2000\", periods=100)\n )\n df.plot(subplots=True, layout=(3, 2))\n\n @pytest.mark.slow\n def test_subplots_multiple_axes(self):\n # GH 5353, 6970, GH 7069\n fig, axes = self.plt.subplots(2, 3)\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n assert returned.shape == (3,)\n assert returned[0].figure is fig\n # draw on second row\n returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n assert returned.shape == (3,)\n assert returned[0].figure is fig\n self._check_axes_shape(axes, axes_num=6, layout=(2, 3))\n tm.close()\n\n with pytest.raises(ValueError):\n fig, axes = self.plt.subplots(2, 3)\n # pass a different number of axes than required\n df.plot(subplots=True, ax=axes)\n\n # pass 2-dim axes and invalid layout\n # an invalid layout should not affect the input and return value\n # (show warning is tested in\n # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)\n fig, axes = self.plt.subplots(2, 2)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))\n\n returned = df.plot(\n subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False\n )\n self._check_axes_shape(returned, axes_num=4, layout=(2, 2))\n assert returned.shape == (4,)\n\n returned = df.plot(\n subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False\n )\n self._check_axes_shape(returned, axes_num=4, layout=(2, 2))\n assert returned.shape == (4,)\n\n returned = df.plot(\n subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False\n )\n self._check_axes_shape(returned, axes_num=4, layout=(2, 2))\n assert returned.shape == (4,)\n\n # single column\n fig, axes = self.plt.subplots(1, 1)\n df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))\n\n axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n assert axes.shape == (1,)\n\n def test_subplots_ts_share_axes(self):\n # GH 3964\n fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)\n 
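# with sharex=True and sharey=True on the 3x3 grid, only the bottom row should show x tick labels and only the first column y tick labels (asserted below)\n 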
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)\n df = DataFrame(\n np.random.randn(10, 9),\n index=date_range(start=\"2014-07-01\", freq=\"M\", periods=10),\n )\n for i, ax in enumerate(axes.ravel()):\n df[i].plot(ax=ax, fontsize=5)\n\n # Rows other than bottom should not be visible\n for ax in axes[0:-1].ravel():\n self._check_visible(ax.get_xticklabels(), visible=False)\n\n # Bottom row should be visible\n for ax in axes[-1].ravel():\n self._check_visible(ax.get_xticklabels(), visible=True)\n\n # First column should be visible\n for ax in axes[[0, 1, 2], [0]].ravel():\n self._check_visible(ax.get_yticklabels(), visible=True)\n\n # Other columns should not be visible\n for ax in axes[[0, 1, 2], [1]].ravel():\n self._check_visible(ax.get_yticklabels(), visible=False)\n for ax in axes[[0, 1, 2], [2]].ravel():\n self._check_visible(ax.get_yticklabels(), visible=False)\n\n def test_subplots_sharex_axes_existing_axes(self):\n # GH 9158\n d = {\"A\": [1.0, 2.0, 3.0, 4.0], \"B\": [4.0, 3.0, 2.0, 1.0], \"C\": [5, 1, 3, 4]}\n df = DataFrame(d, index=date_range(\"2014 10 11\", \"2014 10 14\"))\n\n axes = df[[\"A\", \"B\"]].plot(subplots=True)\n df[\"C\"].plot(ax=axes[0], secondary_y=True)\n\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n for ax in axes.ravel():\n self._check_visible(ax.get_yticklabels(), visible=True)\n\n @pytest.mark.slow\n def test_subplots_dup_columns(self):\n # GH 10962\n df = DataFrame(np.random.rand(5, 5), columns=list(\"aaaaa\"))\n axes = df.plot(subplots=True)\n for ax in axes:\n self._check_legend_labels(ax, labels=[\"a\"])\n assert len(ax.lines) == 1\n tm.close()\n\n axes = df.plot(subplots=True, secondary_y=\"a\")\n for ax in axes:\n # (right) is only attached when subplots=False\n self._check_legend_labels(ax, labels=[\"a\"])\n assert len(ax.lines) == 1\n tm.close()\n\n ax = df.plot(secondary_y=\"a\")\n self._check_legend_labels(ax, labels=[\"a (right)\"] * 5)\n assert len(ax.lines) == 0\n assert len(ax.right_ax.lines) == 5\n\n def test_negative_log(self):\n df = -DataFrame(\n rand(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"x\", \"y\", \"z\", \"four\"],\n )\n\n with pytest.raises(ValueError):\n df.plot.area(logy=True)\n with pytest.raises(ValueError):\n df.plot.area(loglog=True)\n\n def _compare_stacked_y_cood(self, normal_lines, stacked_lines):\n base = np.zeros(len(normal_lines[0].get_data()[1]))\n for nl, sl in zip(normal_lines, stacked_lines):\n base += nl.get_data()[1] # get y coordinates\n sy = sl.get_data()[1]\n tm.assert_numpy_array_equal(base, sy)\n\n def test_line_area_stacked(self):\n with tm.RNGContext(42):\n df = DataFrame(rand(6, 4), columns=[\"w\", \"x\", \"y\", \"z\"])\n neg_df = -df\n # each column has either positive or negative value\n sep_df = DataFrame(\n {\"w\": rand(6), \"x\": rand(6), \"y\": -rand(6), \"z\": -rand(6)}\n )\n # each column has positive-negative mixed value\n mixed_df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"w\", \"x\", \"y\", \"z\"],\n )\n\n for kind in [\"line\", \"area\"]:\n ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)\n ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)\n self._compare_stacked_y_cood(ax1.lines, ax2.lines)\n\n ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)\n ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)\n self._compare_stacked_y_cood(ax1.lines, ax2.lines)\n\n ax1 = _check_plot_works(sep_df.plot, 
kind=kind, stacked=False)\n ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)\n self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])\n self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])\n\n _check_plot_works(mixed_df.plot, stacked=False)\n with pytest.raises(ValueError):\n mixed_df.plot(stacked=True)\n\n # Use an index with strictly positive values, preventing\n # matplotlib from warning about ignoring xlim\n df2 = df.set_index(df.index + 1)\n _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)\n\n def test_line_area_nan_df(self):\n values1 = [1, 2, np.nan, 3]\n values2 = [3, np.nan, 2, 1]\n df = DataFrame({\"a\": values1, \"b\": values2})\n tdf = DataFrame({\"a\": values1, \"b\": values2}, index=tm.makeDateIndex(k=4))\n\n for d in [df, tdf]:\n ax = _check_plot_works(d.plot)\n masked1 = ax.lines[0].get_ydata()\n masked2 = ax.lines[1].get_ydata()\n # remove nan for comparison purpose\n\n exp = np.array([1, 2, 3], dtype=np.float64)\n tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)\n\n exp = np.array([3, 2, 1], dtype=np.float64)\n tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)\n tm.assert_numpy_array_equal(\n masked1.mask, np.array([False, False, True, False])\n )\n tm.assert_numpy_array_equal(\n masked2.mask, np.array([False, True, False, False])\n )\n\n expected1 = np.array([1, 2, 0, 3], dtype=np.float64)\n expected2 = np.array([3, 0, 2, 1], dtype=np.float64)\n\n ax = _check_plot_works(d.plot, stacked=True)\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)\n tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)\n\n ax = _check_plot_works(d.plot.area)\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)\n tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)\n\n ax = _check_plot_works(d.plot.area, stacked=False)\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)\n tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)\n\n def test_line_lim(self):\n df = DataFrame(rand(6, 3), columns=[\"x\", \"y\", \"z\"])\n ax = df.plot()\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n\n ax = df.plot(secondary_y=True)\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n\n axes = df.plot(secondary_y=True, subplots=True)\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n for ax in axes:\n assert hasattr(ax, \"left_ax\")\n assert not hasattr(ax, \"right_ax\")\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n\n def test_area_lim(self):\n df = DataFrame(rand(6, 4), columns=[\"x\", \"y\", \"z\", \"four\"])\n\n neg_df = -df\n for stacked in [True, False]:\n ax = _check_plot_works(df.plot.area, stacked=stacked)\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n assert ymin == 0\n\n ax = _check_plot_works(neg_df.plot.area, stacked=stacked)\n ymin, ymax = ax.get_ylim()\n assert ymax == 0\n\n @pytest.mark.slow\n def test_bar_colors(self):\n import matplotlib.pyplot as plt\n\n default_colors = self._unpack_cycler(plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n ax = df.plot.bar()\n self._check_colors(ax.patches[::5], facecolors=default_colors[:5])\n 
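# bars are drawn column by column, so every 5th patch is the first bar of a column and should carry one default-cycle color per column\n 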
tm.close()\n\n custom_colors = \"rgcby\"\n ax = df.plot.bar(color=custom_colors)\n self._check_colors(ax.patches[::5], facecolors=custom_colors)\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n ax = df.plot.bar(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::5], facecolors=rgba_colors)\n tm.close()\n\n # Test colormap functionality\n ax = df.plot.bar(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::5], facecolors=rgba_colors)\n tm.close()\n\n ax = df.loc[:, [0]].plot.bar(color=\"DodgerBlue\")\n self._check_colors([ax.patches[0]], facecolors=[\"DodgerBlue\"])\n tm.close()\n\n ax = df.plot(kind=\"bar\", color=\"green\")\n self._check_colors(ax.patches[::5], facecolors=[\"green\"] * 5)\n tm.close()\n\n def test_bar_user_colors(self):\n df = pd.DataFrame(\n {\"A\": range(4), \"B\": range(1, 5), \"color\": [\"red\", \"blue\", \"blue\", \"red\"]}\n )\n # This should *only* work when `y` is specified, else\n # we use one color per column\n ax = df.plot.bar(y=\"A\", color=df[\"color\"])\n result = [p.get_facecolor() for p in ax.patches]\n expected = [\n (1.0, 0.0, 0.0, 1.0),\n (0.0, 0.0, 1.0, 1.0),\n (0.0, 0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0, 1.0),\n ]\n assert result == expected\n\n @pytest.mark.slow\n def test_bar_linewidth(self):\n df = DataFrame(randn(5, 5))\n\n # regular\n ax = df.plot.bar(linewidth=2)\n for r in ax.patches:\n assert r.get_linewidth() == 2\n\n # stacked\n ax = df.plot.bar(stacked=True, linewidth=2)\n for r in ax.patches:\n assert r.get_linewidth() == 2\n\n # subplots\n axes = df.plot.bar(linewidth=2, subplots=True)\n self._check_axes_shape(axes, axes_num=5, layout=(5, 1))\n for ax in axes:\n for r in ax.patches:\n assert r.get_linewidth() == 2\n\n @pytest.mark.slow\n def test_bar_barwidth(self):\n df = DataFrame(randn(5, 5))\n\n width = 0.9\n\n # regular\n ax = df.plot.bar(width=width)\n for r in ax.patches:\n assert r.get_width() == width / len(df.columns)\n\n # stacked\n ax = df.plot.bar(stacked=True, width=width)\n for r in ax.patches:\n assert r.get_width() == width\n\n # horizontal regular\n ax = df.plot.barh(width=width)\n for r in ax.patches:\n assert r.get_height() == width / len(df.columns)\n\n # horizontal stacked\n ax = df.plot.barh(stacked=True, width=width)\n for r in ax.patches:\n assert r.get_height() == width\n\n # subplots\n axes = df.plot.bar(width=width, subplots=True)\n for ax in axes:\n for r in ax.patches:\n assert r.get_width() == width\n\n # horizontal subplots\n axes = df.plot.barh(width=width, subplots=True)\n for ax in axes:\n for r in ax.patches:\n assert r.get_height() == width\n\n @pytest.mark.slow\n def test_bar_barwidth_position(self):\n df = DataFrame(randn(5, 5))\n self._check_bar_alignment(\n df, kind=\"bar\", stacked=False, width=0.9, position=0.2\n )\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=0.9, position=0.2)\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=False, width=0.9, position=0.2\n )\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=True, width=0.9, position=0.2\n )\n self._check_bar_alignment(\n df, kind=\"bar\", subplots=True, width=0.9, position=0.2\n )\n self._check_bar_alignment(\n df, kind=\"barh\", subplots=True, width=0.9, position=0.2\n )\n\n @pytest.mark.slow\n def test_bar_barwidth_position_int(self):\n # GH 12979\n df = DataFrame(randn(5, 5))\n\n for w in [1, 1.0]:\n ax = df.plot.bar(stacked=True, width=w)\n ticks = 
ax.xaxis.get_ticklocs()\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))\n assert ax.get_xlim() == (-0.75, 4.75)\n # check left-edge of bars\n assert ax.patches[0].get_x() == -0.5\n assert ax.patches[-1].get_x() == 3.5\n\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=1)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False, width=1)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True, width=1)\n self._check_bar_alignment(df, kind=\"bar\", subplots=True, width=1)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True, width=1)\n\n @pytest.mark.slow\n def test_bar_bottom_left(self):\n df = DataFrame(rand(5, 5))\n ax = df.plot.bar(stacked=False, bottom=1)\n result = [p.get_y() for p in ax.patches]\n assert result == [1] * 25\n\n ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])\n result = [p.get_y() for p in ax.patches[:5]]\n assert result == [-1, -2, -3, -4, -5]\n\n ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))\n result = [p.get_x() for p in ax.patches]\n assert result == [1] * 25\n\n ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])\n result = [p.get_x() for p in ax.patches[:5]]\n assert result == [1, 2, 3, 4, 5]\n\n axes = df.plot.bar(subplots=True, bottom=-1)\n for ax in axes:\n result = [p.get_y() for p in ax.patches]\n assert result == [-1] * 5\n\n axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))\n for ax in axes:\n result = [p.get_x() for p in ax.patches]\n assert result == [1] * 5\n\n @pytest.mark.slow\n def test_bar_nan(self):\n df = DataFrame({\"A\": [10, np.nan, 20], \"B\": [5, 10, 20], \"C\": [1, 2, 3]})\n ax = df.plot.bar()\n expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]\n result = [p.get_height() for p in ax.patches]\n assert result == expected\n\n ax = df.plot.bar(stacked=True)\n result = [p.get_height() for p in ax.patches]\n assert result == expected\n\n result = [p.get_y() for p in ax.patches]\n expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]\n assert result == expected\n\n @pytest.mark.slow\n def test_bar_categorical(self):\n # GH 13019\n df1 = pd.DataFrame(\n np.random.randn(6, 5),\n index=pd.Index(list(\"ABCDEF\")),\n columns=pd.Index(list(\"abcde\")),\n )\n # categorical index must behave the same\n df2 = pd.DataFrame(\n np.random.randn(6, 5),\n index=pd.CategoricalIndex(list(\"ABCDEF\")),\n columns=pd.CategoricalIndex(list(\"abcde\")),\n )\n\n for df in [df1, df2]:\n ax = df.plot.bar()\n ticks = ax.xaxis.get_ticklocs()\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))\n assert ax.get_xlim() == (-0.5, 5.5)\n # check left-edge of bars\n assert ax.patches[0].get_x() == -0.25\n assert ax.patches[-1].get_x() == 5.15\n\n ax = df.plot.bar(stacked=True)\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))\n assert ax.get_xlim() == (-0.5, 5.5)\n assert ax.patches[0].get_x() == -0.25\n assert ax.patches[-1].get_x() == 4.75\n\n @pytest.mark.slow\n def test_plot_scatter(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"x\", \"y\", \"z\", \"four\"],\n )\n\n _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\")\n _check_plot_works(df.plot.scatter, x=1, y=2)\n\n with pytest.raises(TypeError):\n df.plot.scatter(x=\"x\")\n with pytest.raises(TypeError):\n df.plot.scatter(y=\"y\")\n\n # GH 6951\n axes = df.plot(x=\"x\", y=\"y\", kind=\"scatter\", subplots=True)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n def test_raise_error_on_datetime_time_data(self):\n # GH 8113, datetime.time 
type is not supported by matplotlib in scatter\n df = pd.DataFrame(np.random.randn(10), columns=[\"a\"])\n df[\"dtime\"] = pd.date_range(start=\"2014-01-01\", freq=\"h\", periods=10).time\n msg = \"must be a string or a number, not 'datetime.time'\"\n\n with pytest.raises(TypeError, match=msg):\n df.plot(kind=\"scatter\", x=\"dtime\", y=\"a\")\n\n def test_scatterplot_datetime_data(self):\n # GH 30391\n dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq=\"W\")\n vals = np.random.normal(0, 1, len(dates))\n df = pd.DataFrame({\"dates\": dates, \"vals\": vals})\n\n _check_plot_works(df.plot.scatter, x=\"dates\", y=\"vals\")\n _check_plot_works(df.plot.scatter, x=0, y=1)\n\n def test_scatterplot_object_data(self):\n # GH 18755\n df = pd.DataFrame(dict(a=[\"A\", \"B\", \"C\"], b=[2, 3, 4]))\n\n _check_plot_works(df.plot.scatter, x=\"a\", y=\"b\")\n _check_plot_works(df.plot.scatter, x=0, y=1)\n\n df = pd.DataFrame(dict(a=[\"A\", \"B\", \"C\"], b=[\"a\", \"b\", \"c\"]))\n\n _check_plot_works(df.plot.scatter, x=\"a\", y=\"b\")\n _check_plot_works(df.plot.scatter, x=0, y=1)\n\n @pytest.mark.slow\n def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):\n # addressing issue #10611, to ensure colorbar does not\n # interfere with x-axis label and ticklabels with\n # ipython inline backend.\n random_array = np.random.random((1000, 3))\n df = pd.DataFrame(random_array, columns=[\"A label\", \"B label\", \"C label\"])\n\n ax1 = df.plot.scatter(x=\"A label\", y=\"B label\")\n ax2 = df.plot.scatter(x=\"A label\", y=\"B label\", c=\"C label\")\n\n vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]\n vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]\n assert vis1 == vis2\n\n vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]\n vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]\n assert vis1 == vis2\n\n assert (\n ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()\n )\n\n @pytest.mark.slow\n def test_if_hexbin_xaxis_label_is_visible(self):\n # addressing issue #10678, to ensure colorbar does not\n # interfere with x-axis label and ticklabels with\n # ipython inline backend.\n random_array = np.random.random((1000, 3))\n df = pd.DataFrame(random_array, columns=[\"A label\", \"B label\", \"C label\"])\n\n ax = df.plot.hexbin(\"A label\", \"B label\", gridsize=12)\n assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())\n assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())\n assert ax.xaxis.get_label().get_visible()\n\n @pytest.mark.slow\n def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):\n import matplotlib.pyplot as plt\n\n random_array = np.random.random((1000, 3))\n df = pd.DataFrame(random_array, columns=[\"A label\", \"B label\", \"C label\"])\n\n fig, axes = plt.subplots(1, 2)\n df.plot.scatter(\"A label\", \"B label\", c=\"C label\", ax=axes[0])\n df.plot.scatter(\"A label\", \"B label\", c=\"C label\", ax=axes[1])\n plt.tight_layout()\n\n points = np.array([ax.get_position().get_points() for ax in fig.axes])\n axes_x_coords = points[:, :, 0]\n parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]\n colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]\n assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()\n\n @pytest.mark.parametrize(\"x, y\", [(\"x\", \"y\"), (\"y\", \"x\"), (\"y\", \"y\")])\n @pytest.mark.slow\n def test_plot_scatter_with_categorical_data(self, x, y):\n # after fixing GH 18755, 
should be able to plot categorical data\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4], \"y\": pd.Categorical([\"a\", \"b\", \"a\", \"c\"])}\n )\n\n _check_plot_works(df.plot.scatter, x=x, y=y)\n\n @pytest.mark.slow\n def test_plot_scatter_with_c(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"x\", \"y\", \"z\", \"four\"],\n )\n\n axes = [df.plot.scatter(x=\"x\", y=\"y\", c=\"z\"), df.plot.scatter(x=0, y=1, c=2)]\n for ax in axes:\n # default to Greys\n assert ax.collections[0].cmap.name == \"Greys\"\n\n # n.b. there appears to be no public method\n # to get the colorbar label\n assert ax.collections[0].colorbar._label == \"z\"\n\n cm = \"cubehelix\"\n ax = df.plot.scatter(x=\"x\", y=\"y\", c=\"z\", colormap=cm)\n assert ax.collections[0].cmap.name == cm\n\n # verify turning off colorbar works\n ax = df.plot.scatter(x=\"x\", y=\"y\", c=\"z\", colorbar=False)\n assert ax.collections[0].colorbar is None\n\n # verify that we can still plot a solid color\n ax = df.plot.scatter(x=0, y=1, c=\"red\")\n assert ax.collections[0].colorbar is None\n self._check_colors(ax.collections, facecolors=[\"r\"])\n\n # Ensure that we can pass an np.array straight through to matplotlib,\n # this functionality was accidentally removed previously.\n # See https://github.com/pandas-dev/pandas/issues/8852 for bug report\n #\n # Exercise colormap path and non-colormap path as they are independent\n #\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\n red_rgba = [1.0, 0.0, 0.0, 1.0]\n green_rgba = [0.0, 1.0, 0.0, 1.0]\n rgba_array = np.array([red_rgba, green_rgba])\n ax = df.plot.scatter(x=\"A\", y=\"B\", c=rgba_array)\n # expect the face colors of the points in the non-colormap path to be\n # identical to the values we supplied, normally we'd be on shaky ground\n # comparing floats for equality but here we expect them to be\n # identical.\n tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array)\n # we don't test the colors of the faces in this next plot because they\n # are dependent on the spring colormap, which may change its colors\n # later.\n float_array = np.array([0.0, 1.0])\n df.plot.scatter(x=\"A\", y=\"B\", c=float_array, cmap=\"spring\")\n\n def test_scatter_colors(self):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [1, 2, 3], \"c\": [1, 2, 3]})\n with pytest.raises(TypeError):\n df.plot.scatter(x=\"a\", y=\"b\", c=\"c\", color=\"green\")\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", c=\"c\")\n tm.assert_numpy_array_equal(\n ax.collections[0].get_facecolor()[0],\n np.array(self.colorconverter.to_rgba(default_colors[0])),\n )\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", color=\"white\")\n tm.assert_numpy_array_equal(\n ax.collections[0].get_facecolor()[0],\n np.array([1, 1, 1, 1], dtype=np.float64),\n )\n\n @pytest.mark.slow\n def test_plot_bar(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"one\", \"two\", \"three\", \"four\"],\n )\n\n _check_plot_works(df.plot.bar)\n _check_plot_works(df.plot.bar, legend=False)\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.plot.bar, subplots=True)\n _check_plot_works(df.plot.bar, stacked=True)\n\n df = DataFrame(\n randn(10, 15), index=list(string.ascii_letters[:10]), columns=range(15)\n )\n _check_plot_works(df.plot.bar)\n\n df = DataFrame({\"a\": [0, 1], \"b\": [1, 0]})\n ax = _check_plot_works(df.plot.bar)\n self._check_ticks_props(ax, xrot=90)\n\n ax = df.plot.bar(rot=35, fontsize=10)\n self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)\n\n ax = _check_plot_works(df.plot.barh)\n self._check_ticks_props(ax, yrot=0)\n\n ax = df.plot.barh(rot=55, fontsize=11)\n self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)\n\n def _check_bar_alignment(\n self,\n df,\n kind=\"bar\",\n stacked=False,\n subplots=False,\n align=\"center\",\n width=0.5,\n position=0.5,\n ):\n\n axes = df.plot(\n kind=kind,\n stacked=stacked,\n subplots=subplots,\n align=align,\n width=width,\n position=position,\n grid=True,\n )\n\n axes = self._flatten_visible(axes)\n\n for ax in axes:\n if kind == \"bar\":\n axis = ax.xaxis\n ax_min, ax_max = ax.get_xlim()\n min_edge = min(p.get_x() for p in ax.patches)\n max_edge = max(p.get_x() + p.get_width() for p in ax.patches)\n elif kind == \"barh\":\n axis = ax.yaxis\n ax_min, ax_max = ax.get_ylim()\n min_edge = min(p.get_y() for p in ax.patches)\n max_edge = max(p.get_y() + p.get_height() for p in ax.patches)\n else:\n raise ValueError\n\n # GH 7498\n # compare margins between lim and bar edges\n tm.assert_almost_equal(ax_min, min_edge - 0.25)\n tm.assert_almost_equal(ax_max, max_edge + 0.25)\n\n p = ax.patches[0]\n if kind == \"bar\" and (stacked is True or subplots is True):\n edge = p.get_x()\n center = edge + p.get_width() * position\n elif kind == \"bar\" and stacked is False:\n center = p.get_x() + p.get_width() * len(df.columns) * position\n edge = p.get_x()\n elif kind == \"barh\" and (stacked is True or subplots is True):\n center = p.get_y() + p.get_height() * position\n edge = p.get_y()\n elif kind == \"barh\" and stacked is False:\n center = p.get_y() + p.get_height() * len(df.columns) * position\n edge = p.get_y()\n else:\n raise ValueError\n\n # Check the ticks locates on integer\n assert (axis.get_ticklocs() == np.arange(len(df))).all()\n\n if align == \"center\":\n # Check whether the bar locates on center\n tm.assert_almost_equal(axis.get_ticklocs()[0], center)\n elif align == \"edge\":\n # Check whether the bar's edge starts from the tick\n tm.assert_almost_equal(axis.get_ticklocs()[0], edge)\n else:\n raise ValueError\n\n return axes\n\n @pytest.mark.slow\n def test_bar_stacked_center(self):\n # GH2157\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n self._check_bar_alignment(df, kind=\"bar\", stacked=True)\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=0.9)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True, width=0.9)\n\n @pytest.mark.slow\n def test_bar_center(self):\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n self._check_bar_alignment(df, kind=\"bar\", stacked=False)\n self._check_bar_alignment(df, kind=\"bar\", stacked=False, width=0.9)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False, width=0.9)\n\n @pytest.mark.slow\n def test_bar_subplots_center(self):\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n 
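# with subplots=True each column is drawn on its own axes; the bars should still be centered on the integer tick locations verified by _check_bar_alignment\n 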
self._check_bar_alignment(df, kind=\"bar\", subplots=True)\n self._check_bar_alignment(df, kind=\"bar\", subplots=True, width=0.9)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True, width=0.9)\n\n @pytest.mark.slow\n def test_bar_align_single_column(self):\n df = DataFrame(randn(5))\n self._check_bar_alignment(df, kind=\"bar\", stacked=False)\n self._check_bar_alignment(df, kind=\"bar\", stacked=True)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True)\n self._check_bar_alignment(df, kind=\"bar\", subplots=True)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True)\n\n @pytest.mark.slow\n def test_bar_edge(self):\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, align=\"edge\")\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=0.9, align=\"edge\")\n self._check_bar_alignment(df, kind=\"barh\", stacked=True, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=True, width=0.9, align=\"edge\"\n )\n\n self._check_bar_alignment(df, kind=\"bar\", stacked=False, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"bar\", stacked=False, width=0.9, align=\"edge\"\n )\n self._check_bar_alignment(df, kind=\"barh\", stacked=False, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=False, width=0.9, align=\"edge\"\n )\n\n self._check_bar_alignment(df, kind=\"bar\", subplots=True, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"bar\", subplots=True, width=0.9, align=\"edge\"\n )\n self._check_bar_alignment(df, kind=\"barh\", subplots=True, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"barh\", subplots=True, width=0.9, align=\"edge\"\n )\n\n @pytest.mark.slow\n def test_bar_log_no_subplots(self):\n # GH3254, GH3298 matplotlib/matplotlib#1882, #1892\n # regressions in 1.2.1\n expected = np.array([0.1, 1.0, 10.0, 100])\n\n # no subplots\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(1, 6))}, index=range(5))\n ax = df.plot.bar(grid=True, log=True)\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)\n\n @pytest.mark.slow\n def test_bar_log_subplots(self):\n expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])\n\n ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(\n log=True, subplots=True\n )\n\n tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)\n tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)\n\n @pytest.mark.slow\n def test_boxplot(self):\n df = self.hist_df\n series = df[\"height\"]\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n\n ax = _check_plot_works(df.plot.box)\n self._check_text_labels(ax.get_xticklabels(), labels)\n tm.assert_numpy_array_equal(\n ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)\n )\n assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)\n\n axes = series.plot.box(rot=40)\n self._check_ticks_props(axes, xrot=40, yrot=0)\n tm.close()\n\n ax = _check_plot_works(series.plot.box)\n\n positions = np.array([1, 6, 7])\n ax = df.plot.box(positions=positions)\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n self._check_text_labels(ax.get_xticklabels(), labels)\n tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)\n assert len(ax.lines) == self.bp_n_objects * 
len(numeric_cols)\n\n @pytest.mark.slow\n def test_boxplot_vertical(self):\n df = self.hist_df\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n\n # if horizontal, yticklabels are rotated\n ax = df.plot.box(rot=50, fontsize=8, vert=False)\n self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)\n self._check_text_labels(ax.get_yticklabels(), labels)\n assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)\n\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot.box, subplots=True, vert=False, logx=True)\n self._check_axes_shape(axes, axes_num=3, layout=(1, 3))\n self._check_ax_scales(axes, xaxis=\"log\")\n for ax, label in zip(axes, labels):\n self._check_text_labels(ax.get_yticklabels(), [label])\n assert len(ax.lines) == self.bp_n_objects\n\n positions = np.array([3, 2, 8])\n ax = df.plot.box(positions=positions, vert=False)\n self._check_text_labels(ax.get_yticklabels(), labels)\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)\n assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)\n\n @pytest.mark.slow\n def test_boxplot_return_type(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"one\", \"two\", \"three\", \"four\"],\n )\n with pytest.raises(ValueError):\n df.plot.box(return_type=\"NOTATYPE\")\n\n result = df.plot.box(return_type=\"dict\")\n self._check_box_return_type(result, \"dict\")\n\n result = df.plot.box(return_type=\"axes\")\n self._check_box_return_type(result, \"axes\")\n\n result = df.plot.box() # default axes\n self._check_box_return_type(result, \"axes\")\n\n result = df.plot.box(return_type=\"both\")\n self._check_box_return_type(result, \"both\")\n\n @pytest.mark.slow\n def test_boxplot_subplots_return_type(self):\n df = self.hist_df\n\n # normal style: return_type=None\n result = df.plot.box(subplots=True)\n assert isinstance(result, Series)\n self._check_box_return_type(\n result, None, expected_keys=[\"height\", \"weight\", \"category\"]\n )\n\n for t in [\"dict\", \"axes\", \"both\"]:\n returned = df.plot.box(return_type=t, subplots=True)\n self._check_box_return_type(\n returned,\n t,\n expected_keys=[\"height\", \"weight\", \"category\"],\n check_ax_title=False,\n )\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_df(self):\n df = DataFrame(randn(100, 4))\n ax = _check_plot_works(df.plot, kind=\"kde\")\n expected = [pprint_thing(c) for c in df.columns]\n self._check_legend_labels(ax, labels=expected)\n self._check_ticks_props(ax, xrot=0)\n\n ax = df.plot(kind=\"kde\", rot=20, fontsize=5)\n self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, kind=\"kde\", subplots=True)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.plot(kind=\"kde\", logy=True, subplots=True)\n self._check_ax_scales(axes, yaxis=\"log\")\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_missing_vals(self):\n df = DataFrame(np.random.uniform(size=(100, 4)))\n df.loc[0, 0] = np.nan\n _check_plot_works(df.plot, kind=\"kde\")\n\n @pytest.mark.slow\n def test_hist_df(self):\n from matplotlib.patches import Rectangle\n\n df = DataFrame(randn(100, 4))\n series = df[0]\n\n ax = _check_plot_works(df.plot.hist)\n expected = [pprint_thing(c) for c in df.columns]\n self._check_legend_labels(ax, labels=expected)\n\n with 
tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot.hist, subplots=True, logy=True)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n self._check_ax_scales(axes, yaxis=\"log\")\n\n axes = series.plot.hist(rot=40)\n self._check_ticks_props(axes, xrot=40, yrot=0)\n tm.close()\n\n ax = series.plot.hist(cumulative=True, bins=4, density=True)\n # height of last bin (index 5) must be 1.0\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n tm.assert_almost_equal(rects[-1].get_height(), 1.0)\n tm.close()\n\n ax = series.plot.hist(cumulative=True, bins=4)\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n\n tm.assert_almost_equal(rects[-2].get_height(), 100.0)\n tm.close()\n\n # if horizontal, yticklabels are rotated\n axes = df.plot.hist(rot=50, fontsize=8, orientation=\"horizontal\")\n self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)\n\n def _check_box_coord(\n self,\n patches,\n expected_y=None,\n expected_h=None,\n expected_x=None,\n expected_w=None,\n ):\n result_y = np.array([p.get_y() for p in patches])\n result_height = np.array([p.get_height() for p in patches])\n result_x = np.array([p.get_x() for p in patches])\n result_width = np.array([p.get_width() for p in patches])\n # dtype is depending on above values, no need to check\n\n if expected_y is not None:\n tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False)\n if expected_h is not None:\n tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False)\n if expected_x is not None:\n tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False)\n if expected_w is not None:\n tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)\n\n @pytest.mark.slow\n def test_hist_df_coord(self):\n normal_df = DataFrame(\n {\n \"A\": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])),\n \"B\": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])),\n \"C\": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])),\n },\n columns=[\"A\", \"B\", \"C\"],\n )\n\n nan_df = DataFrame(\n {\n \"A\": np.repeat(\n np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])\n ),\n \"B\": np.repeat(\n np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8])\n ),\n \"C\": np.repeat(\n np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10])\n ),\n },\n columns=[\"A\", \"B\", \"C\"],\n )\n\n for df in [normal_df, nan_df]:\n ax = df.plot.hist(bins=5)\n self._check_box_coord(\n ax.patches[:5],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([6, 7, 8, 9, 10]),\n )\n\n ax = df.plot.hist(bins=5, stacked=True)\n self._check_box_coord(\n ax.patches[:5],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_y=np.array([10, 9, 8, 7, 6]),\n expected_h=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_y=np.array([18, 17, 16, 15, 14]),\n expected_h=np.array([6, 7, 8, 9, 10]),\n )\n\n axes = df.plot.hist(bins=5, stacked=True, subplots=True)\n self._check_box_coord(\n axes[0].patches,\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n 
axes[1].patches,\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n axes[2].patches,\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([6, 7, 8, 9, 10]),\n )\n\n # horizontal\n ax = df.plot.hist(bins=5, orientation=\"horizontal\")\n self._check_box_coord(\n ax.patches[:5],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([6, 7, 8, 9, 10]),\n )\n\n ax = df.plot.hist(bins=5, stacked=True, orientation=\"horizontal\")\n self._check_box_coord(\n ax.patches[:5],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_x=np.array([10, 9, 8, 7, 6]),\n expected_w=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_x=np.array([18, 17, 16, 15, 14]),\n expected_w=np.array([6, 7, 8, 9, 10]),\n )\n\n axes = df.plot.hist(\n bins=5, stacked=True, subplots=True, orientation=\"horizontal\"\n )\n self._check_box_coord(\n axes[0].patches,\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n axes[1].patches,\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n axes[2].patches,\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([6, 7, 8, 9, 10]),\n )\n\n @pytest.mark.slow\n def test_plot_int_columns(self):\n df = DataFrame(randn(100, 4)).cumsum()\n _check_plot_works(df.plot, legend=True)\n\n @pytest.mark.slow\n def test_df_legend_labels(self):\n kinds = [\"line\", \"bar\", \"barh\", \"kde\", \"area\", \"hist\"]\n df = DataFrame(rand(3, 3), columns=[\"a\", \"b\", \"c\"])\n df2 = DataFrame(rand(3, 3), columns=[\"d\", \"e\", \"f\"])\n df3 = DataFrame(rand(3, 3), columns=[\"g\", \"h\", \"i\"])\n df4 = DataFrame(rand(3, 3), columns=[\"j\", \"k\", \"l\"])\n\n for kind in kinds:\n\n ax = df.plot(kind=kind, legend=True)\n self._check_legend_labels(ax, labels=df.columns)\n\n ax = df2.plot(kind=kind, legend=False, ax=ax)\n self._check_legend_labels(ax, labels=df.columns)\n\n ax = df3.plot(kind=kind, legend=True, ax=ax)\n self._check_legend_labels(ax, labels=df.columns.union(df3.columns))\n\n ax = df4.plot(kind=kind, legend=\"reverse\", ax=ax)\n expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))\n self._check_legend_labels(ax, labels=expected)\n\n # Secondary Y\n ax = df.plot(legend=True, secondary_y=\"b\")\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\"])\n ax = df2.plot(legend=False, ax=ax)\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\"])\n ax = df3.plot(kind=\"bar\", legend=True, secondary_y=\"h\", ax=ax)\n self._check_legend_labels(\n ax, labels=[\"a\", \"b (right)\", \"c\", \"g\", \"h (right)\", \"i\"]\n )\n\n # Time Series\n ind = date_range(\"1/1/2014\", periods=3)\n df = DataFrame(randn(3, 3), columns=[\"a\", \"b\", \"c\"], index=ind)\n df2 = DataFrame(randn(3, 3), columns=[\"d\", \"e\", \"f\"], index=ind)\n df3 = DataFrame(randn(3, 3), columns=[\"g\", \"h\", \"i\"], index=ind)\n ax = df.plot(legend=True, secondary_y=\"b\")\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\"])\n ax = df2.plot(legend=False, ax=ax)\n self._check_legend_labels(ax, 
labels=[\"a\", \"b (right)\", \"c\"])\n ax = df3.plot(legend=True, ax=ax)\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\", \"g\", \"h\", \"i\"])\n\n # scatter\n ax = df.plot.scatter(x=\"a\", y=\"b\", label=\"data1\")\n self._check_legend_labels(ax, labels=[\"data1\"])\n ax = df2.plot.scatter(x=\"d\", y=\"e\", legend=False, label=\"data2\", ax=ax)\n self._check_legend_labels(ax, labels=[\"data1\"])\n ax = df3.plot.scatter(x=\"g\", y=\"h\", label=\"data3\", ax=ax)\n self._check_legend_labels(ax, labels=[\"data1\", \"data3\"])\n\n # ensure label args pass through and\n # index name does not mutate\n # column names don't mutate\n df5 = df.set_index(\"a\")\n ax = df5.plot(y=\"b\")\n self._check_legend_labels(ax, labels=[\"b\"])\n ax = df5.plot(y=\"b\", label=\"LABEL_b\")\n self._check_legend_labels(ax, labels=[\"LABEL_b\"])\n self._check_text_labels(ax.xaxis.get_label(), \"a\")\n ax = df5.plot(y=\"c\", label=\"LABEL_c\", ax=ax)\n self._check_legend_labels(ax, labels=[\"LABEL_b\", \"LABEL_c\"])\n assert df5.columns.tolist() == [\"b\", \"c\"]\n\n def test_missing_marker_multi_plots_on_same_ax(self):\n # GH 18222\n df = pd.DataFrame(\n data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=[\"x\", \"r\", \"g\", \"b\"]\n )\n fig, ax = self.plt.subplots(nrows=1, ncols=3)\n # Left plot\n df.plot(x=\"x\", y=\"r\", linewidth=0, marker=\"o\", color=\"r\", ax=ax[0])\n df.plot(x=\"x\", y=\"g\", linewidth=1, marker=\"x\", color=\"g\", ax=ax[0])\n df.plot(x=\"x\", y=\"b\", linewidth=1, marker=\"o\", color=\"b\", ax=ax[0])\n self._check_legend_labels(ax[0], labels=[\"r\", \"g\", \"b\"])\n self._check_legend_marker(ax[0], expected_markers=[\"o\", \"x\", \"o\"])\n # Center plot\n df.plot(x=\"x\", y=\"b\", linewidth=1, marker=\"o\", color=\"b\", ax=ax[1])\n df.plot(x=\"x\", y=\"r\", linewidth=0, marker=\"o\", color=\"r\", ax=ax[1])\n df.plot(x=\"x\", y=\"g\", linewidth=1, marker=\"x\", color=\"g\", ax=ax[1])\n self._check_legend_labels(ax[1], labels=[\"b\", \"r\", \"g\"])\n self._check_legend_marker(ax[1], expected_markers=[\"o\", \"o\", \"x\"])\n # Right plot\n df.plot(x=\"x\", y=\"g\", linewidth=1, marker=\"x\", color=\"g\", ax=ax[2])\n df.plot(x=\"x\", y=\"b\", linewidth=1, marker=\"o\", color=\"b\", ax=ax[2])\n df.plot(x=\"x\", y=\"r\", linewidth=0, marker=\"o\", color=\"r\", ax=ax[2])\n self._check_legend_labels(ax[2], labels=[\"g\", \"b\", \"r\"])\n self._check_legend_marker(ax[2], expected_markers=[\"x\", \"o\", \"o\"])\n\n def test_legend_name(self):\n multi = DataFrame(\n randn(4, 4),\n columns=[np.array([\"a\", \"a\", \"b\", \"b\"]), np.array([\"x\", \"y\", \"x\", \"y\"])],\n )\n multi.columns.names = [\"group\", \"individual\"]\n\n ax = multi.plot()\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"group,individual\")\n\n df = DataFrame(randn(5, 5))\n ax = df.plot(legend=True, ax=ax)\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"group,individual\")\n\n df.columns.name = \"new\"\n ax = df.plot(legend=False, ax=ax)\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"group,individual\")\n\n ax = df.plot(legend=True, ax=ax)\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"new\")\n\n @pytest.mark.slow\n def test_no_legend(self):\n kinds = [\"line\", \"bar\", \"barh\", \"kde\", \"area\", \"hist\"]\n df = DataFrame(rand(3, 3), columns=[\"a\", \"b\", \"c\"])\n\n for kind in kinds:\n\n ax = df.plot(kind=kind, legend=False)\n self._check_legend_labels(ax, visible=False)\n\n @pytest.mark.slow\n def 
test_style_by_column(self):\n import matplotlib.pyplot as plt\n\n fig = plt.gcf()\n\n df = DataFrame(randn(100, 3))\n for markers in [\n {0: \"^\", 1: \"+\", 2: \"o\"},\n {0: \"^\", 1: \"+\"},\n [\"^\", \"+\", \"o\"],\n [\"^\", \"+\"],\n ]:\n fig.clf()\n fig.add_subplot(111)\n ax = df.plot(style=markers)\n for i, l in enumerate(ax.get_lines()[: len(markers)]):\n assert l.get_marker() == markers[i]\n\n @pytest.mark.slow\n def test_line_label_none(self):\n s = Series([1, 2])\n ax = s.plot()\n assert ax.get_legend() is None\n\n ax = s.plot(legend=True)\n assert ax.get_legend().get_texts()[0].get_text() == \"None\"\n\n @pytest.mark.slow\n def test_line_colors(self):\n from matplotlib import cm\n\n custom_colors = \"rgcby\"\n df = DataFrame(randn(5, 5))\n\n ax = df.plot(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n\n tm.close()\n\n ax2 = df.plot(color=custom_colors)\n lines2 = ax2.get_lines()\n\n for l1, l2 in zip(ax.get_lines(), lines2):\n assert l1.get_color() == l2.get_color()\n\n tm.close()\n\n ax = df.plot(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n ax = df.plot(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n ax = df.loc[:, [0]].plot(color=\"DodgerBlue\")\n self._check_colors(ax.lines, linecolors=[\"DodgerBlue\"])\n\n ax = df.plot(color=\"red\")\n self._check_colors(ax.get_lines(), linecolors=[\"red\"] * 5)\n tm.close()\n\n # GH 10299\n custom_colors = [\"#FF0000\", \"#0000FF\", \"#FFFF00\", \"#000000\", \"#FFFFFF\"]\n ax = df.plot(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n tm.close()\n\n with pytest.raises(ValueError):\n # Color contains shorthand hex value results in ValueError\n custom_colors = [\"#F00\", \"#00F\", \"#FF0\", \"#000\", \"#FFF\"]\n # Forced show plot\n _check_plot_works(df.plot, color=custom_colors)\n\n @pytest.mark.slow\n def test_dont_modify_colors(self):\n colors = [\"r\", \"g\", \"b\"]\n pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)\n assert len(colors) == 3\n\n @pytest.mark.slow\n def test_line_colors_and_styles_subplots(self):\n # GH 9894\n from matplotlib import cm\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n\n axes = df.plot(subplots=True)\n for ax, c in zip(axes, list(default_colors)):\n c = [c]\n self._check_colors(ax.get_lines(), linecolors=c)\n tm.close()\n\n # single color char\n axes = df.plot(subplots=True, color=\"k\")\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"k\"])\n tm.close()\n\n # single color str\n axes = df.plot(subplots=True, color=\"green\")\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"green\"])\n tm.close()\n\n custom_colors = \"rgcby\"\n axes = df.plot(color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n axes = df.plot(color=list(custom_colors), subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # GH 10299\n custom_colors = [\"#FF0000\", \"#0000FF\", \"#FFFF00\", \"#000000\", \"#FFFFFF\"]\n axes = df.plot(color=custom_colors, subplots=True)\n for ax, c in 
zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n with pytest.raises(ValueError):\n # Color contains shorthand hex value results in ValueError\n custom_colors = [\"#F00\", \"#00F\", \"#FF0\", \"#000\", \"#FFF\"]\n # Forced show plot\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.plot, color=custom_colors, subplots=True)\n\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n for cmap in [\"jet\", cm.jet]:\n axes = df.plot(colormap=cmap, subplots=True)\n for ax, c in zip(axes, rgba_colors):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n axes = df.loc[:, [0]].plot(color=\"DodgerBlue\", subplots=True)\n self._check_colors(axes[0].lines, linecolors=[\"DodgerBlue\"])\n\n # single character style\n axes = df.plot(style=\"r\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"r\"])\n tm.close()\n\n # list of styles\n styles = list(\"rgcby\")\n axes = df.plot(style=styles, subplots=True)\n for ax, c in zip(axes, styles):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n @pytest.mark.slow\n def test_area_colors(self):\n from matplotlib import cm\n from matplotlib.collections import PolyCollection\n\n custom_colors = \"rgcby\"\n df = DataFrame(rand(5, 5))\n\n ax = df.plot.area(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n self._check_colors(poly, facecolors=custom_colors)\n\n handles, labels = ax.get_legend_handles_labels()\n self._check_colors(handles, facecolors=custom_colors)\n\n for h in handles:\n assert h.get_alpha() is None\n tm.close()\n\n ax = df.plot.area(colormap=\"jet\")\n jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=jet_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n self._check_colors(poly, facecolors=jet_colors)\n\n handles, labels = ax.get_legend_handles_labels()\n self._check_colors(handles, facecolors=jet_colors)\n for h in handles:\n assert h.get_alpha() is None\n tm.close()\n\n # When stacked=False, alpha is set to 0.5\n ax = df.plot.area(colormap=cm.jet, stacked=False)\n self._check_colors(ax.get_lines(), linecolors=jet_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]\n self._check_colors(poly, facecolors=jet_with_alpha)\n\n handles, labels = ax.get_legend_handles_labels()\n linecolors = jet_with_alpha\n self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)\n for h in handles:\n assert h.get_alpha() == 0.5\n\n @pytest.mark.slow\n def test_hist_colors(self):\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n ax = df.plot.hist()\n self._check_colors(ax.patches[::10], facecolors=default_colors[:5])\n tm.close()\n\n custom_colors = \"rgcby\"\n ax = df.plot.hist(color=custom_colors)\n self._check_colors(ax.patches[::10], facecolors=custom_colors)\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n ax = df.plot.hist(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::10], facecolors=rgba_colors)\n 
tm.close()\n\n # Test colormap functionality\n ax = df.plot.hist(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::10], facecolors=rgba_colors)\n tm.close()\n\n ax = df.loc[:, [0]].plot.hist(color=\"DodgerBlue\")\n self._check_colors([ax.patches[0]], facecolors=[\"DodgerBlue\"])\n\n ax = df.plot(kind=\"hist\", color=\"green\")\n self._check_colors(ax.patches[::10], facecolors=[\"green\"] * 5)\n tm.close()\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_colors(self):\n from matplotlib import cm\n\n custom_colors = \"rgcby\"\n df = DataFrame(rand(5, 5))\n\n ax = df.plot.kde(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n tm.close()\n\n ax = df.plot.kde(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n ax = df.plot.kde(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_colors_and_styles_subplots(self):\n from matplotlib import cm\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n\n axes = df.plot(kind=\"kde\", subplots=True)\n for ax, c in zip(axes, list(default_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # single color char\n axes = df.plot(kind=\"kde\", color=\"k\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"k\"])\n tm.close()\n\n # single color str\n axes = df.plot(kind=\"kde\", color=\"red\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"red\"])\n tm.close()\n\n custom_colors = \"rgcby\"\n axes = df.plot(kind=\"kde\", color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n for cmap in [\"jet\", cm.jet]:\n axes = df.plot(kind=\"kde\", colormap=cmap, subplots=True)\n for ax, c in zip(axes, rgba_colors):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n axes = df.loc[:, [0]].plot(kind=\"kde\", color=\"DodgerBlue\", subplots=True)\n self._check_colors(axes[0].lines, linecolors=[\"DodgerBlue\"])\n\n # single character style\n axes = df.plot(kind=\"kde\", style=\"r\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"r\"])\n tm.close()\n\n # list of styles\n styles = list(\"rgcby\")\n axes = df.plot(kind=\"kde\", style=styles, subplots=True)\n for ax, c in zip(axes, styles):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n @pytest.mark.slow\n def test_boxplot_colors(self):\n def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c=\"k\", fliers_c=None):\n # TODO: outside this func?\n if fliers_c is None:\n fliers_c = \"k\"\n self._check_colors(bp[\"boxes\"], linecolors=[box_c] * len(bp[\"boxes\"]))\n self._check_colors(\n bp[\"whiskers\"], linecolors=[whiskers_c] * len(bp[\"whiskers\"])\n )\n self._check_colors(\n bp[\"medians\"], linecolors=[medians_c] * len(bp[\"medians\"])\n )\n self._check_colors(bp[\"fliers\"], linecolors=[fliers_c] * len(bp[\"fliers\"]))\n self._check_colors(bp[\"caps\"], linecolors=[caps_c] * len(bp[\"caps\"]))\n\n 
default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n bp = df.plot.box(return_type=\"dict\")\n _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])\n tm.close()\n\n dict_colors = dict(\n boxes=\"#572923\", whiskers=\"#982042\", medians=\"#804823\", caps=\"#123456\"\n )\n bp = df.plot.box(color=dict_colors, sym=\"r+\", return_type=\"dict\")\n _check_colors(\n bp,\n dict_colors[\"boxes\"],\n dict_colors[\"whiskers\"],\n dict_colors[\"medians\"],\n dict_colors[\"caps\"],\n \"r\",\n )\n tm.close()\n\n # partial colors\n dict_colors = dict(whiskers=\"c\", medians=\"m\")\n bp = df.plot.box(color=dict_colors, return_type=\"dict\")\n _check_colors(bp, default_colors[0], \"c\", \"m\")\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n bp = df.plot.box(colormap=\"jet\", return_type=\"dict\")\n jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]\n _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])\n tm.close()\n\n # Test colormap functionality\n bp = df.plot.box(colormap=cm.jet, return_type=\"dict\")\n _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])\n tm.close()\n\n # string color is applied to all artists except fliers\n bp = df.plot.box(color=\"DodgerBlue\", return_type=\"dict\")\n _check_colors(bp, \"DodgerBlue\", \"DodgerBlue\", \"DodgerBlue\", \"DodgerBlue\")\n\n # tuple is also applied to all artists except fliers\n bp = df.plot.box(color=(0, 1, 0), sym=\"#123456\", return_type=\"dict\")\n _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), \"#123456\")\n\n with pytest.raises(ValueError):\n # Color contains invalid key results in ValueError\n df.plot.box(color=dict(boxes=\"red\", xxxx=\"blue\"))\n\n def test_default_color_cycle(self):\n import matplotlib.pyplot as plt\n import cycler\n\n colors = list(\"rgbk\")\n plt.rcParams[\"axes.prop_cycle\"] = cycler.cycler(\"color\", colors)\n\n df = DataFrame(randn(5, 3))\n ax = df.plot()\n\n expected = self._unpack_cycler(plt.rcParams)[:3]\n self._check_colors(ax.get_lines(), linecolors=expected)\n\n def test_unordered_ts(self):\n df = DataFrame(\n np.array([3.0, 2.0, 1.0]),\n index=[date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)],\n columns=[\"test\"],\n )\n ax = df.plot()\n xticks = ax.lines[0].get_xdata()\n assert xticks[0] < xticks[1]\n ydata = ax.lines[0].get_ydata()\n tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))\n\n @td.skip_if_no_scipy\n def test_kind_both_ways(self):\n df = DataFrame({\"x\": [1, 2, 3]})\n for kind in plotting.PlotAccessor._common_kinds:\n\n df.plot(kind=kind)\n getattr(df.plot, kind)()\n for kind in [\"scatter\", \"hexbin\"]:\n df.plot(\"x\", \"x\", kind=kind)\n getattr(df.plot, kind)(\"x\", \"x\")\n\n def test_all_invalid_plot_data(self):\n df = DataFrame(list(\"abcd\"))\n for kind in plotting.PlotAccessor._common_kinds:\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n df.plot(kind=kind)\n\n @pytest.mark.slow\n def test_partially_invalid_plot_data(self):\n with tm.RNGContext(42):\n df = DataFrame(randn(10, 2), dtype=object)\n df[np.random.rand(df.shape[0]) > 0.5] = \"a\"\n for kind in plotting.PlotAccessor._common_kinds:\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n df.plot(kind=kind)\n\n with tm.RNGContext(42):\n # area plot doesn't support positive/negative mixed data\n kinds = [\"area\"]\n df = DataFrame(rand(10, 2), dtype=object)\n df[np.random.rand(df.shape[0]) > 0.5] = \"a\"\n for kind in 
kinds:\n with pytest.raises(TypeError):\n df.plot(kind=kind)\n\n def test_invalid_kind(self):\n df = DataFrame(randn(10, 2))\n with pytest.raises(ValueError):\n df.plot(kind=\"aasdf\")\n\n @pytest.mark.parametrize(\n \"x,y,lbl\",\n [\n ([\"B\", \"C\"], \"A\", \"a\"),\n ([\"A\"], [\"B\", \"C\"], [\"b\", \"c\"]),\n (\"A\", [\"B\", \"C\"], \"badlabel\"),\n ],\n )\n def test_invalid_xy_args(self, x, y, lbl):\n # GH 18671, 19699 allows y to be list-like but not x\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n with pytest.raises(ValueError):\n df.plot(x=x, y=y, label=lbl)\n\n @pytest.mark.parametrize(\"x,y\", [(\"A\", \"B\"), ([\"A\"], \"B\")])\n def test_invalid_xy_args_dup_cols(self, x, y):\n # GH 18671, 19699 allows y to be list-like but not x\n df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list(\"AAB\"))\n with pytest.raises(ValueError):\n df.plot(x=x, y=y)\n\n @pytest.mark.parametrize(\n \"x,y,lbl,colors\",\n [\n (\"A\", [\"B\"], [\"b\"], [\"red\"]),\n (\"A\", [\"B\", \"C\"], [\"b\", \"c\"], [\"red\", \"blue\"]),\n (0, [1, 2], [\"bokeh\", \"cython\"], [\"green\", \"yellow\"]),\n ],\n )\n def test_y_listlike(self, x, y, lbl, colors):\n # GH 19699: tests list-like y and verifies lbls & colors\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n _check_plot_works(df.plot, x=\"A\", y=y, label=lbl)\n\n ax = df.plot(x=x, y=y, label=lbl, color=colors)\n assert len(ax.lines) == len(y)\n self._check_colors(ax.get_lines(), linecolors=colors)\n\n @pytest.mark.parametrize(\"x,y,colnames\", [(0, 1, [\"A\", \"B\"]), (1, 0, [0, 1])])\n def test_xy_args_integer(self, x, y, colnames):\n # GH 20056: tests integer args for xy and checks col names\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\n df.columns = colnames\n _check_plot_works(df.plot, x=x, y=y)\n\n @pytest.mark.slow\n def test_hexbin_basic(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", gridsize=10)\n # TODO: need better way to test. 
This just does existence.\n assert len(ax.collections) == 1\n\n # GH 6951\n axes = df.plot.hexbin(x=\"A\", y=\"B\", subplots=True)\n # hexbin should have 2 axes in the figure, 1 for plotting and another\n # is colorbar\n assert len(axes[0].figure.axes) == 2\n # return value is single axes\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n @pytest.mark.slow\n def test_hexbin_with_c(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", C=\"C\")\n assert len(ax.collections) == 1\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", C=\"C\", reduce_C_function=np.std)\n assert len(ax.collections) == 1\n\n @pytest.mark.slow\n def test_hexbin_cmap(self):\n df = self.hexbin_df\n\n # Default to BuGn\n ax = df.plot.hexbin(x=\"A\", y=\"B\")\n assert ax.collections[0].cmap.name == \"BuGn\"\n\n cm = \"cubehelix\"\n ax = df.plot.hexbin(x=\"A\", y=\"B\", colormap=cm)\n assert ax.collections[0].cmap.name == cm\n\n @pytest.mark.slow\n def test_no_color_bar(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", colorbar=None)\n assert ax.collections[0].colorbar is None\n\n @pytest.mark.slow\n def test_allow_cmap(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", cmap=\"YlGn\")\n assert ax.collections[0].cmap.name == \"YlGn\"\n\n with pytest.raises(TypeError):\n df.plot.hexbin(x=\"A\", y=\"B\", cmap=\"YlGn\", colormap=\"BuGn\")\n\n @pytest.mark.slow\n def test_pie_df(self):\n df = DataFrame(\n np.random.rand(5, 3),\n columns=[\"X\", \"Y\", \"Z\"],\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n with pytest.raises(ValueError):\n df.plot.pie()\n\n ax = _check_plot_works(df.plot.pie, y=\"Y\")\n self._check_text_labels(ax.texts, df.index)\n\n ax = _check_plot_works(df.plot.pie, y=2)\n self._check_text_labels(ax.texts, df.index)\n\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot.pie, subplots=True)\n assert len(axes) == len(df.columns)\n for ax in axes:\n self._check_text_labels(ax.texts, df.index)\n for ax, ylabel in zip(axes, df.columns):\n assert ax.get_ylabel() == ylabel\n\n labels = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n color_args = [\"r\", \"g\", \"b\", \"c\", \"m\"]\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.plot.pie, subplots=True, labels=labels, colors=color_args\n )\n assert len(axes) == len(df.columns)\n\n for ax in axes:\n self._check_text_labels(ax.texts, labels)\n self._check_colors(ax.patches, facecolors=color_args)\n\n def test_pie_df_nan(self):\n df = DataFrame(np.random.rand(4, 4))\n for i in range(4):\n df.iloc[i, i] = np.nan\n fig, axes = self.plt.subplots(ncols=4)\n df.plot.pie(subplots=True, ax=axes, legend=True)\n\n base_expected = [\"0\", \"1\", \"2\", \"3\"]\n for i, ax in enumerate(axes):\n expected = list(base_expected) # force copy\n expected[i] = \"\"\n result = [x.get_text() for x in ax.texts]\n assert result == expected\n # legend labels\n # NaN's not included in legend with subplots\n # see https://github.com/pandas-dev/pandas/issues/8390\n assert [x.get_text() for x in ax.get_legend().get_texts()] == base_expected[\n :i\n ] + base_expected[i + 1 :]\n\n @pytest.mark.slow\n def test_errorbar_plot(self):\n with warnings.catch_warnings():\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n df = DataFrame(d)\n d_err = {\"x\": np.ones(12) * 0.2, \"y\": np.ones(12) * 0.4}\n df_err = DataFrame(d_err)\n\n # check line plots\n ax = _check_plot_works(df.plot, yerr=df_err, logy=True)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n kinds = [\"line\", \"bar\", \"barh\"]\n for kind in kinds:\n ax = _check_plot_works(df.plot, yerr=df_err[\"x\"], kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n ax = _check_plot_works(\n df.plot, yerr=df_err[\"x\"], xerr=df_err[\"x\"], kind=kind\n )\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n axes = _check_plot_works(\n df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind\n )\n self._check_has_errorbars(axes, xerr=1, yerr=1)\n\n ax = _check_plot_works(\n (df + 1).plot, yerr=df_err, xerr=df_err, kind=\"bar\", log=True\n )\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n\n # yerr is raw error values\n ax = _check_plot_works(df[\"y\"].plot, yerr=np.ones(12) * 0.4)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n # yerr is column name\n for yerr in [\"yerr\", \"誤差\"]:\n s_df = df.copy()\n s_df[yerr] = np.ones(12) * 0.2\n ax = _check_plot_works(s_df.plot, yerr=yerr)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(s_df.plot, y=\"y\", x=\"x\", yerr=yerr)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n\n with pytest.raises(ValueError):\n df.plot(yerr=np.random.randn(11))\n\n df_err = DataFrame({\"x\": [\"zzz\"] * 12, \"y\": [\"zzz\"] * 12})\n with pytest.raises((ValueError, TypeError)):\n df.plot(yerr=df_err)\n\n @pytest.mark.xfail(reason=\"Iterator is consumed\", raises=ValueError)\n @pytest.mark.slow\n def test_errorbar_plot_iterator(self):\n with warnings.catch_warnings():\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n df = DataFrame(d)\n\n # yerr is iterator\n ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n @pytest.mark.slow\n def test_errorbar_with_integer_column_names(self):\n # test with integer column names\n df = DataFrame(np.random.randn(10, 2))\n df_err = DataFrame(np.random.randn(10, 2))\n ax = _check_plot_works(df.plot, yerr=df_err)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, y=0, yerr=1)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n\n @pytest.mark.slow\n def test_errorbar_with_partial_columns(self):\n df = DataFrame(np.random.randn(10, 3))\n df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])\n kinds = [\"line\", \"bar\"]\n for kind in kinds:\n ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n ix = date_range(\"1/1/2000\", periods=10, freq=\"M\")\n df.set_index(ix, inplace=True)\n df_err.set_index(ix, inplace=True)\n ax = _check_plot_works(df.plot, yerr=df_err, kind=\"line\")\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n df = DataFrame(d)\n d_err = {\"x\": np.ones(12) * 0.2, \"z\": np.ones(12) * 0.4}\n df_err = DataFrame(d_err)\n for err in [d_err, df_err]:\n ax = _check_plot_works(df.plot, yerr=err)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n\n @pytest.mark.slow\n def test_errorbar_timeseries(self):\n\n with warnings.catch_warnings():\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n d_err = {\"x\": np.ones(12) * 0.2, \"y\": np.ones(12) * 0.4}\n\n # check time-series plots\n ix = date_range(\"1/1/2000\", \"1/1/2001\", freq=\"M\")\n tdf = DataFrame(d, index=ix)\n tdf_err = DataFrame(d_err, index=ix)\n\n kinds = [\"line\", \"bar\", \"barh\"]\n for kind in kinds:\n ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(tdf.plot, y=\"y\", yerr=tdf_err[\"x\"], kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(tdf.plot, y=\"y\", 
yerr=\"x\", kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n # _check_plot_works adds an ax so catch warning. see GH #13188\n axes = _check_plot_works(\n tdf.plot, kind=kind, yerr=tdf_err, subplots=True\n )\n self._check_has_errorbars(axes, xerr=0, yerr=1)\n\n def test_errorbar_asymmetrical(self):\n\n np.random.seed(0)\n err = np.random.rand(3, 2, 5)\n\n # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...\n df = DataFrame(np.arange(15).reshape(3, 5)).T\n\n ax = df.plot(yerr=err, xerr=err / 2)\n\n yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]\n expected_0_0 = err[0, :, 0] * np.array([-1, 1])\n tm.assert_almost_equal(yerr_0_0, expected_0_0)\n\n with pytest.raises(ValueError):\n df.plot(yerr=err.T)\n\n tm.close()\n\n def test_table(self):\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n _check_plot_works(df.plot, table=True)\n _check_plot_works(df.plot, table=df)\n\n ax = df.plot()\n assert len(ax.tables) == 0\n plotting.table(ax, df.T)\n assert len(ax.tables) == 1\n\n def test_errorbar_scatter(self):\n df = DataFrame(np.random.randn(5, 2), index=range(5), columns=[\"x\", \"y\"])\n df_err = DataFrame(\n np.random.randn(5, 2) / 5, index=range(5), columns=[\"x\", \"y\"]\n )\n\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\")\n self._check_has_errorbars(ax, xerr=0, yerr=0)\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\", xerr=df_err)\n self._check_has_errorbars(ax, xerr=1, yerr=0)\n\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\", yerr=df_err)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\", xerr=df_err, yerr=df_err)\n self._check_has_errorbars(ax, xerr=1, yerr=1)\n\n def _check_errorbar_color(containers, expected, has_err=\"has_xerr\"):\n lines = []\n errs = [c.lines for c in ax.containers if getattr(c, has_err, False)][0]\n for el in errs:\n if is_list_like(el):\n lines.extend(el)\n else:\n lines.append(el)\n err_lines = [x for x in lines if x in ax.collections]\n self._check_colors(\n err_lines, linecolors=np.array([expected] * len(err_lines))\n )\n\n # GH 8081\n df = DataFrame(np.random.randn(10, 5), columns=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n ax = df.plot.scatter(x=\"a\", y=\"b\", xerr=\"d\", yerr=\"e\", c=\"red\")\n self._check_has_errorbars(ax, xerr=1, yerr=1)\n _check_errorbar_color(ax.containers, \"red\", has_err=\"has_xerr\")\n _check_errorbar_color(ax.containers, \"red\", has_err=\"has_yerr\")\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", yerr=\"e\", color=\"green\")\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n _check_errorbar_color(ax.containers, \"green\", has_err=\"has_yerr\")\n\n @pytest.mark.slow\n def test_sharex_and_ax(self):\n # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,\n # the axis in fig.get_axis() are sorted differently than pandas\n # expected them, so make sure that only the right ones are removed\n import matplotlib.pyplot as plt\n\n plt.close(\"all\")\n gs, axes = _generate_4_axes_via_gridspec()\n\n df = DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [1, 2, 3, 4, 5, 6],\n \"c\": [1, 2, 3, 4, 5, 6],\n \"d\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n def _check(axes):\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n for ax in [axes[0], axes[2]]:\n self._check_visible(ax.get_xticklabels(), visible=False)\n 
self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n for ax in [axes[1], axes[3]]:\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax, sharex=True)\n gs.tight_layout(plt.gcf())\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharex=True)\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n # without sharex, no labels should be touched!\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax)\n\n gs.tight_layout(plt.gcf())\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n @pytest.mark.slow\n def test_sharey_and_ax(self):\n # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,\n # the axis in fig.get_axis() are sorted differently than pandas\n # expected them, so make sure that only the right ones are removed\n import matplotlib.pyplot as plt\n\n gs, axes = _generate_4_axes_via_gridspec()\n\n df = DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [1, 2, 3, 4, 5, 6],\n \"c\": [1, 2, 3, 4, 5, 6],\n \"d\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n def _check(axes):\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n for ax in [axes[0], axes[1]]:\n self._check_visible(ax.get_yticklabels(), visible=True)\n for ax in [axes[2], axes[3]]:\n self._check_visible(ax.get_yticklabels(), visible=False)\n\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax, sharey=True)\n gs.tight_layout(plt.gcf())\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharey=True)\n\n gs.tight_layout(plt.gcf())\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n # without sharex, no labels should be touched!\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax)\n\n gs.tight_layout(plt.gcf())\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n\n @td.skip_if_no_scipy\n def test_memory_leak(self):\n \"\"\" Check that every plot type gets properly collected. 
\"\"\"\n import weakref\n import gc\n\n results = {}\n for kind in plotting.PlotAccessor._all_kinds:\n\n args = {}\n if kind in [\"hexbin\", \"scatter\", \"pie\"]:\n df = self.hexbin_df\n args = {\"x\": \"A\", \"y\": \"B\"}\n elif kind == \"area\":\n df = self.tdf.abs()\n else:\n df = self.tdf\n\n # Use a weakref so we can see if the object gets collected without\n # also preventing it from being collected\n results[kind] = weakref.proxy(df.plot(kind=kind, **args))\n\n # have matplotlib delete all the figures\n tm.close()\n # force a garbage collection\n gc.collect()\n for key in results:\n # check that every plot was collected\n with pytest.raises(ReferenceError):\n # need to actually access something to get an error\n results[key].lines\n\n @pytest.mark.slow\n def test_df_subplots_patterns_minorticks(self):\n # GH 10657\n import matplotlib.pyplot as plt\n\n df = DataFrame(\n np.random.randn(10, 2),\n index=date_range(\"1/1/2000\", periods=10),\n columns=list(\"AB\"),\n )\n\n # shared subplots\n fig, axes = plt.subplots(2, 1, sharex=True)\n axes = df.plot(subplots=True, ax=axes)\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n # xaxis of 1st ax must be hidden\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)\n tm.close()\n\n fig, axes = plt.subplots(2, 1)\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharex=True)\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n # xaxis of 1st ax must be hidden\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # not shared\n fig, axes = plt.subplots(2, 1)\n axes = df.plot(subplots=True, ax=axes)\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n @pytest.mark.slow\n def test_df_gridspec_patterns(self):\n # GH 10819\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n\n ts = Series(np.random.randn(10), index=date_range(\"1/1/2000\", periods=10))\n\n df = DataFrame(np.random.randn(10, 2), index=ts.index, columns=list(\"AB\"))\n\n def _get_vertical_grid():\n gs = gridspec.GridSpec(3, 1)\n fig = plt.figure()\n ax1 = fig.add_subplot(gs[:2, :])\n ax2 = fig.add_subplot(gs[2, :])\n return ax1, ax2\n\n def _get_horizontal_grid():\n gs = gridspec.GridSpec(1, 3)\n fig = plt.figure()\n ax1 = fig.add_subplot(gs[:, :2])\n ax2 = fig.add_subplot(gs[:, 2])\n return ax1, ax2\n\n for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:\n ax1 = ts.plot(ax=ax1)\n assert len(ax1.lines) == 1\n ax2 = df.plot(ax=ax2)\n assert len(ax2.lines) == 2\n for ax in [ax1, ax2]:\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # subplots=True\n for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:\n 
axes = df.plot(subplots=True, ax=[ax1, ax2])\n assert len(ax1.lines) == 1\n assert len(ax2.lines) == 1\n for ax in axes:\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # vertical / subplots / sharex=True / sharey=True\n ax1, ax2 = _get_vertical_grid()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)\n assert len(axes[0].lines) == 1\n assert len(axes[1].lines) == 1\n for ax in [ax1, ax2]:\n # yaxis are visible because there is only one column\n self._check_visible(ax.get_yticklabels(), visible=True)\n # xaxis of axes0 (top) are hidden\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # horizontal / subplots / sharex=True / sharey=True\n ax1, ax2 = _get_horizontal_grid()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)\n assert len(axes[0].lines) == 1\n assert len(axes[1].lines) == 1\n self._check_visible(axes[0].get_yticklabels(), visible=True)\n # yaxis of axes1 (right) are hidden\n self._check_visible(axes[1].get_yticklabels(), visible=False)\n for ax in [ax1, ax2]:\n # xaxis are visible because there is only one column\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # boxed\n def _get_boxed_grid():\n gs = gridspec.GridSpec(3, 3)\n fig = plt.figure()\n ax1 = fig.add_subplot(gs[:2, :2])\n ax2 = fig.add_subplot(gs[:2, 2])\n ax3 = fig.add_subplot(gs[2, :2])\n ax4 = fig.add_subplot(gs[2, 2])\n return ax1, ax2, ax3, ax4\n\n axes = _get_boxed_grid()\n df = DataFrame(np.random.randn(10, 4), index=ts.index, columns=list(\"ABCD\"))\n axes = df.plot(subplots=True, ax=axes)\n for ax in axes:\n assert len(ax.lines) == 1\n # axis are visible because these are not shared\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # subplots / sharex=True / sharey=True\n axes = _get_boxed_grid()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)\n for ax in axes:\n assert len(ax.lines) == 1\n for ax in [axes[0], axes[2]]: # left column\n self._check_visible(ax.get_yticklabels(), visible=True)\n for ax in [axes[1], axes[3]]: # right column\n self._check_visible(ax.get_yticklabels(), visible=False)\n for ax in [axes[0], axes[1]]: # top row\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n for ax in [axes[2], axes[3]]: # bottom row\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n @pytest.mark.slow\n def test_df_grid_settings(self):\n # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792\n self._check_grid_settings(\n DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]}),\n plotting.PlotAccessor._dataframe_kinds,\n kws={\"x\": \"a\", \"y\": \"b\"},\n )\n\n def test_invalid_colormap(self):\n df = 
DataFrame(randn(3, 2), columns=[\"A\", \"B\"])\n\n with pytest.raises(ValueError):\n df.plot(colormap=\"invalid_colormap\")\n\n def test_plain_axes(self):\n\n # supplied ax itself is a SubplotAxes, but figure contains also\n # a plain Axes object (GH11556)\n fig, ax = self.plt.subplots()\n fig.add_axes([0.2, 0.2, 0.2, 0.2])\n Series(rand(10)).plot(ax=ax)\n\n # supplied ax itself is a plain Axes, but because the cmap keyword\n # a new ax is created for the colorbar -> also multiples axes (GH11520)\n df = DataFrame({\"a\": randn(8), \"b\": randn(8)})\n fig = self.plt.figure()\n ax = fig.add_axes((0, 0, 1, 1))\n df.plot(kind=\"scatter\", ax=ax, x=\"a\", y=\"b\", c=\"a\", cmap=\"hsv\")\n\n # other examples\n fig, ax = self.plt.subplots()\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n Series(rand(10)).plot(ax=ax)\n Series(rand(10)).plot(ax=cax)\n\n fig, ax = self.plt.subplots()\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n iax = inset_axes(ax, width=\"30%\", height=1.0, loc=3)\n Series(rand(10)).plot(ax=ax)\n Series(rand(10)).plot(ax=iax)\n\n def test_passed_bar_colors(self):\n import matplotlib as mpl\n\n color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]\n colormap = mpl.colors.ListedColormap(color_tuples)\n barplot = pd.DataFrame([[1, 2, 3]]).plot(kind=\"bar\", cmap=colormap)\n assert color_tuples == [c.get_facecolor() for c in barplot.patches]\n\n def test_rcParams_bar_colors(self):\n import matplotlib as mpl\n\n color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]\n with mpl.rc_context(rc={\"axes.prop_cycle\": mpl.cycler(\"color\", color_tuples)}):\n barplot = pd.DataFrame([[1, 2, 3]]).plot(kind=\"bar\")\n assert color_tuples == [c.get_facecolor() for c in barplot.patches]\n\n @pytest.mark.parametrize(\"method\", [\"line\", \"barh\", \"bar\"])\n def test_secondary_axis_font_size(self, method):\n # GH: 12565\n df = (\n pd.DataFrame(np.random.randn(15, 2), columns=list(\"AB\"))\n .assign(C=lambda df: df.B.cumsum())\n .assign(D=lambda df: df.C * 1.1)\n )\n\n fontsize = 20\n sy = [\"C\", \"D\"]\n\n kwargs = dict(secondary_y=sy, fontsize=fontsize, mark_right=True)\n ax = getattr(df.plot, method)(**kwargs)\n self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)\n\n @pytest.mark.slow\n def test_x_string_values_ticks(self):\n # Test if string plot index have a fixed xtick position\n # GH: 7612, GH: 22334\n df = pd.DataFrame(\n {\n \"sales\": [3, 2, 3],\n \"visits\": [20, 42, 28],\n \"day\": [\"Monday\", \"Tuesday\", \"Wednesday\"],\n }\n )\n ax = df.plot.area(x=\"day\")\n ax.set_xlim(-1, 3)\n xticklabels = [t.get_text() for t in ax.get_xticklabels()]\n labels_position = dict(zip(xticklabels, ax.get_xticks()))\n # Testing if the label stayed at the right position\n assert labels_position[\"Monday\"] == 0.0\n assert labels_position[\"Tuesday\"] == 1.0\n assert labels_position[\"Wednesday\"] == 2.0\n\n @pytest.mark.slow\n def test_x_multiindex_values_ticks(self):\n # Test if multiindex plot index have a fixed xtick position\n # GH: 15912\n index = pd.MultiIndex.from_product([[2012, 2013], [1, 2]])\n df = pd.DataFrame(np.random.randn(4, 2), columns=[\"A\", \"B\"], index=index)\n ax = df.plot()\n ax.set_xlim(-1, 4)\n xticklabels = [t.get_text() for t in ax.get_xticklabels()]\n labels_position = dict(zip(xticklabels, ax.get_xticks()))\n # Testing if the label stayed at the right position\n assert labels_position[\"(2012, 1)\"] == 0.0\n 
assert labels_position[\"(2012, 2)\"] == 1.0\n assert labels_position[\"(2013, 1)\"] == 2.0\n assert labels_position[\"(2013, 2)\"] == 3.0\n\n @pytest.mark.parametrize(\"kind\", [\"line\", \"area\"])\n def test_xlim_plot_line(self, kind):\n # test if xlim is set correctly in plot.line and plot.area\n # GH 27686\n df = pd.DataFrame([2, 4], index=[1, 2])\n ax = df.plot(kind=kind)\n xlims = ax.get_xlim()\n assert xlims[0] < 1\n assert xlims[1] > 2\n\n def test_xlim_plot_line_correctly_in_mixed_plot_type(self):\n # test if xlim is set correctly when ax contains multiple different kinds\n # of plots, GH 27686\n fig, ax = self.plt.subplots()\n\n indexes = [\"k1\", \"k2\", \"k3\", \"k4\"]\n df = pd.DataFrame(\n {\n \"s1\": [1000, 2000, 1500, 2000],\n \"s2\": [900, 1400, 2000, 3000],\n \"s3\": [1500, 1500, 1600, 1200],\n \"secondary_y\": [1, 3, 4, 3],\n },\n index=indexes,\n )\n df[[\"s1\", \"s2\", \"s3\"]].plot.bar(ax=ax, stacked=False)\n df[[\"secondary_y\"]].plot(ax=ax, secondary_y=True)\n\n xlims = ax.get_xlim()\n assert xlims[0] < 0\n assert xlims[1] > 3\n\n # make sure axis labels are plotted correctly as well\n xticklabels = [t.get_text() for t in ax.get_xticklabels()]\n assert xticklabels == indexes\n\n def test_subplots_sharex_false(self):\n # test when sharex is set to False, two plots should have different\n # labels, GH 25160\n df = pd.DataFrame(np.random.rand(10, 2))\n df.iloc[5:, 1] = np.nan\n df.iloc[:5, 0] = np.nan\n\n figs, axs = self.plt.subplots(2, 1)\n df.plot.line(ax=axs, subplots=True, sharex=False)\n\n expected_ax1 = np.arange(4.5, 10, 0.5)\n expected_ax2 = np.arange(-0.5, 5, 0.5)\n\n tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)\n tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)\n\n def test_plot_no_rows(self):\n # GH 27758\n df = pd.DataFrame(columns=[\"foo\"], dtype=int)\n assert df.empty\n ax = df.plot()\n assert len(ax.get_lines()) == 1\n line = ax.get_lines()[0]\n assert len(line.get_xdata()) == 0\n assert len(line.get_ydata()) == 0\n\n def test_plot_no_numeric_data(self):\n df = pd.DataFrame([\"a\", \"b\", \"c\"])\n with pytest.raises(TypeError):\n df.plot()\n\n def test_missing_markers_legend(self):\n # 14958\n df = pd.DataFrame(np.random.randn(8, 3), columns=[\"A\", \"B\", \"C\"])\n ax = df.plot(y=[\"A\"], marker=\"x\", linestyle=\"solid\")\n df.plot(y=[\"B\"], marker=\"o\", linestyle=\"dotted\", ax=ax)\n df.plot(y=[\"C\"], marker=\"<\", linestyle=\"dotted\", ax=ax)\n\n self._check_legend_labels(ax, labels=[\"A\", \"B\", \"C\"])\n self._check_legend_marker(ax, expected_markers=[\"x\", \"o\", \"<\"])\n\n def test_missing_markers_legend_using_style(self):\n # 14563\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6],\n \"B\": [2, 4, 1, 3, 2, 4],\n \"C\": [3, 3, 2, 6, 4, 2],\n \"X\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n fig, ax = self.plt.subplots()\n for kind in \"ABC\":\n df.plot(\"X\", kind, label=kind, ax=ax, style=\".\")\n\n self._check_legend_labels(ax, labels=[\"A\", \"B\", \"C\"])\n self._check_legend_marker(ax, expected_markers=[\".\", \".\", \".\"])\n\n\ndef _generate_4_axes_via_gridspec():\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import matplotlib.gridspec # noqa\n\n gs = mpl.gridspec.GridSpec(2, 2)\n ax_tl = plt.subplot(gs[0, 0])\n ax_ll = plt.subplot(gs[1, 0])\n ax_tr = plt.subplot(gs[0, 1])\n ax_lr = plt.subplot(gs[1, 1])\n\n return gs, [ax_tl, ax_ll, ax_tr, ax_lr]\n",
"from datetime import datetime\n\nimport pytest\nimport pytz\n\nfrom pandas.errors import NullFrequencyError\n\nimport pandas as pd\nfrom pandas import DatetimeIndex, Series, date_range\nimport pandas._testing as tm\n\n\nclass TestDatetimeIndexShift:\n\n # -------------------------------------------------------------\n # DatetimeIndex.shift is used in integer addition\n\n def test_dti_shift_tzaware(self, tz_naive_fixture):\n # GH#9903\n tz = tz_naive_fixture\n idx = pd.DatetimeIndex([], name=\"xxx\", tz=tz)\n tm.assert_index_equal(idx.shift(0, freq=\"H\"), idx)\n tm.assert_index_equal(idx.shift(3, freq=\"H\"), idx)\n\n idx = pd.DatetimeIndex(\n [\"2011-01-01 10:00\", \"2011-01-01 11:00\", \"2011-01-01 12:00\"],\n name=\"xxx\",\n tz=tz,\n )\n tm.assert_index_equal(idx.shift(0, freq=\"H\"), idx)\n exp = pd.DatetimeIndex(\n [\"2011-01-01 13:00\", \"2011-01-01 14:00\", \"2011-01-01 15:00\"],\n name=\"xxx\",\n tz=tz,\n )\n tm.assert_index_equal(idx.shift(3, freq=\"H\"), exp)\n exp = pd.DatetimeIndex(\n [\"2011-01-01 07:00\", \"2011-01-01 08:00\", \"2011-01-01 09:00\"],\n name=\"xxx\",\n tz=tz,\n )\n tm.assert_index_equal(idx.shift(-3, freq=\"H\"), exp)\n\n def test_dti_shift_freqs(self):\n # test shift for DatetimeIndex and non DatetimeIndex\n # GH#8083\n drange = pd.date_range(\"20130101\", periods=5)\n result = drange.shift(1)\n expected = pd.DatetimeIndex(\n [\"2013-01-02\", \"2013-01-03\", \"2013-01-04\", \"2013-01-05\", \"2013-01-06\"],\n freq=\"D\",\n )\n tm.assert_index_equal(result, expected)\n\n result = drange.shift(-1)\n expected = pd.DatetimeIndex(\n [\"2012-12-31\", \"2013-01-01\", \"2013-01-02\", \"2013-01-03\", \"2013-01-04\"],\n freq=\"D\",\n )\n tm.assert_index_equal(result, expected)\n\n result = drange.shift(3, freq=\"2D\")\n expected = pd.DatetimeIndex(\n [\"2013-01-07\", \"2013-01-08\", \"2013-01-09\", \"2013-01-10\", \"2013-01-11\"],\n freq=\"D\",\n )\n tm.assert_index_equal(result, expected)\n\n def test_dti_shift_int(self):\n rng = date_range(\"1/1/2000\", periods=20)\n\n result = rng + 5 * rng.freq\n expected = rng.shift(5)\n tm.assert_index_equal(result, expected)\n\n result = rng - 5 * rng.freq\n expected = rng.shift(-5)\n tm.assert_index_equal(result, expected)\n\n def test_dti_shift_no_freq(self):\n # GH#19147\n dti = pd.DatetimeIndex([\"2011-01-01 10:00\", \"2011-01-01\"], freq=None)\n with pytest.raises(NullFrequencyError):\n dti.shift(2)\n\n @pytest.mark.parametrize(\"tzstr\", [\"US/Eastern\", \"dateutil/US/Eastern\"])\n def test_dti_shift_localized(self, tzstr):\n dr = date_range(\"2011/1/1\", \"2012/1/1\", freq=\"W-FRI\")\n dr_tz = dr.tz_localize(tzstr)\n\n result = dr_tz.shift(1, \"10T\")\n assert result.tz == dr_tz.tz\n\n def test_dti_shift_across_dst(self):\n # GH 8616\n idx = date_range(\"2013-11-03\", tz=\"America/Chicago\", periods=7, freq=\"H\")\n s = Series(index=idx[:-1], dtype=object)\n result = s.shift(freq=\"H\")\n expected = Series(index=idx[1:], dtype=object)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"shift, result_time\",\n [\n [0, \"2014-11-14 00:00:00\"],\n [-1, \"2014-11-13 23:00:00\"],\n [1, \"2014-11-14 01:00:00\"],\n ],\n )\n def test_dti_shift_near_midnight(self, shift, result_time):\n # GH 8616\n dt = datetime(2014, 11, 14, 0)\n dt_est = pytz.timezone(\"EST\").localize(dt)\n s = Series(data=[1], index=[dt_est])\n result = s.shift(shift, freq=\"H\")\n expected = Series(1, index=DatetimeIndex([result_time], tz=\"EST\"))\n tm.assert_series_equal(result, expected)\n",
"from __future__ import division, absolute_import, print_function\n\nfrom numpy.testing import assert_\nimport numpy.distutils.fcompiler\n\nnag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '\n '6.2(Chiyoda) Build 6200', '6.2'),\n ('nagfor', 'NAG Fortran Compiler Release '\n '6.1(Tozai) Build 6136', '6.1'),\n ('nagfor', 'NAG Fortran Compiler Release '\n '6.0(Hibiya) Build 1021', '6.0'),\n ('nagfor', 'NAG Fortran Compiler Release '\n '5.3.2(971)', '5.3.2'),\n ('nag', 'NAGWare Fortran 95 compiler Release 5.1'\n '(347,355-367,375,380-383,389,394,399,401-402,407,'\n '431,435,437,446,459-460,463,472,494,496,503,508,'\n '511,517,529,555,557,565)', '5.1')]\n\nclass TestNagFCompilerVersions(object):\n def test_version_match(self):\n for comp, vs, version in nag_version_strings:\n fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)\n v = fc.version_match(vs)\n assert_(v == version)\n",
"import pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\nfrom pandas.io.excel import ExcelFile\n\nxlrd = pytest.importorskip(\"xlrd\")\nxlwt = pytest.importorskip(\"xlwt\")\n\n\[email protected](autouse=True)\ndef skip_ods_and_xlsb_files(read_ext):\n if read_ext == \".ods\":\n pytest.skip(\"Not valid for xlrd\")\n if read_ext == \".xlsb\":\n pytest.skip(\"Not valid for xlrd\")\n\n\ndef test_read_xlrd_book(read_ext, frame):\n df = frame\n\n engine = \"xlrd\"\n sheet_name = \"SheetA\"\n\n with tm.ensure_clean(read_ext) as pth:\n df.to_excel(pth, sheet_name)\n book = xlrd.open_workbook(pth)\n\n with ExcelFile(book, engine=engine) as xl:\n result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0)\n tm.assert_frame_equal(df, result)\n\n result = pd.read_excel(book, sheet_name=sheet_name, engine=engine, index_col=0)\n tm.assert_frame_equal(df, result)\n\n\n# TODO: test for openpyxl as well\ndef test_excel_table_sheet_by_index(datapath, read_ext):\n path = datapath(\"io\", \"data\", \"excel\", f\"test1{read_ext}\")\n with pd.ExcelFile(path) as excel:\n with pytest.raises(xlrd.XLRDError):\n pd.read_excel(excel, sheet_name=\"asdf\")\n",
"import numpy as np\n\nimport pandas as pd\nfrom pandas import Categorical, MultiIndex, Series\nimport pandas._testing as tm\n\n\nclass TestSeriesCount:\n def test_count(self, datetime_series):\n assert datetime_series.count() == len(datetime_series)\n\n datetime_series[::2] = np.NaN\n\n assert datetime_series.count() == np.isfinite(datetime_series).sum()\n\n mi = MultiIndex.from_arrays([list(\"aabbcc\"), [1, 2, 2, np.nan, 1, 2]])\n ts = Series(np.arange(len(mi)), index=mi)\n\n left = ts.count(level=1)\n right = Series([2, 3, 1], index=[1, 2, np.nan])\n tm.assert_series_equal(left, right)\n\n ts.iloc[[0, 3, 5]] = np.nan\n tm.assert_series_equal(ts.count(level=1), right - 1)\n\n # GH#29478\n with pd.option_context(\"use_inf_as_na\", True):\n assert pd.Series([pd.Timestamp(\"1990/1/1\")]).count() == 1\n\n def test_count_categorical(self):\n\n ser = Series(\n Categorical(\n [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True\n )\n )\n result = ser.count()\n assert result == 2\n",
"\"\"\" test the scalar Timestamp \"\"\"\n\nimport calendar\nfrom datetime import datetime, timedelta\nimport locale\nimport unicodedata\n\nimport dateutil\nfrom dateutil.tz import tzutc\nimport numpy as np\nimport pytest\nimport pytz\nfrom pytz import timezone, utc\n\nfrom pandas._libs.tslibs import conversion\nfrom pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone\nimport pandas.compat as compat\nfrom pandas.compat.numpy import np_datetime64_compat\nfrom pandas.errors import OutOfBoundsDatetime\nimport pandas.util._test_decorators as td\n\nfrom pandas import NaT, Period, Timedelta, Timestamp\nimport pandas._testing as tm\n\nfrom pandas.tseries import offsets\n\n\nclass TestTimestampProperties:\n def test_properties_business(self):\n ts = Timestamp(\"2017-10-01\", freq=\"B\")\n control = Timestamp(\"2017-10-01\")\n assert ts.dayofweek == 6\n assert not ts.is_month_start # not a weekday\n assert not ts.is_quarter_start # not a weekday\n # Control case: non-business is month/qtr start\n assert control.is_month_start\n assert control.is_quarter_start\n\n ts = Timestamp(\"2017-09-30\", freq=\"B\")\n control = Timestamp(\"2017-09-30\")\n assert ts.dayofweek == 5\n assert not ts.is_month_end # not a weekday\n assert not ts.is_quarter_end # not a weekday\n # Control case: non-business is month/qtr start\n assert control.is_month_end\n assert control.is_quarter_end\n\n def test_fields(self):\n def check(value, equal):\n # that we are int like\n assert isinstance(value, int)\n assert value == equal\n\n # GH 10050\n ts = Timestamp(\"2015-05-10 09:06:03.000100001\")\n check(ts.year, 2015)\n check(ts.month, 5)\n check(ts.day, 10)\n check(ts.hour, 9)\n check(ts.minute, 6)\n check(ts.second, 3)\n msg = \"'Timestamp' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n ts.millisecond\n check(ts.microsecond, 100)\n check(ts.nanosecond, 1)\n check(ts.dayofweek, 6)\n check(ts.quarter, 2)\n check(ts.dayofyear, 130)\n check(ts.week, 19)\n check(ts.daysinmonth, 31)\n check(ts.daysinmonth, 31)\n\n # GH 13303\n ts = Timestamp(\"2014-12-31 23:59:00-05:00\", tz=\"US/Eastern\")\n check(ts.year, 2014)\n check(ts.month, 12)\n check(ts.day, 31)\n check(ts.hour, 23)\n check(ts.minute, 59)\n check(ts.second, 0)\n msg = \"'Timestamp' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n ts.millisecond\n check(ts.microsecond, 0)\n check(ts.nanosecond, 0)\n check(ts.dayofweek, 2)\n check(ts.quarter, 4)\n check(ts.dayofyear, 365)\n check(ts.week, 1)\n check(ts.daysinmonth, 31)\n\n ts = Timestamp(\"2014-01-01 00:00:00+01:00\")\n starts = [\"is_month_start\", \"is_quarter_start\", \"is_year_start\"]\n for start in starts:\n assert getattr(ts, start)\n ts = Timestamp(\"2014-12-31 23:59:59+01:00\")\n ends = [\"is_month_end\", \"is_year_end\", \"is_quarter_end\"]\n for end in ends:\n assert getattr(ts, end)\n\n # GH 12806\n @pytest.mark.parametrize(\n \"data\",\n [Timestamp(\"2017-08-28 23:00:00\"), Timestamp(\"2017-08-28 23:00:00\", tz=\"EST\")],\n )\n @pytest.mark.parametrize(\n \"time_locale\", [None] if tm.get_locales() is None else [None] + tm.get_locales()\n )\n def test_names(self, data, time_locale):\n # GH 17354\n # Test .day_name(), .month_name\n if time_locale is None:\n expected_day = \"Monday\"\n expected_month = \"August\"\n else:\n with tm.set_locale(time_locale, locale.LC_TIME):\n expected_day = calendar.day_name[0].capitalize()\n expected_month = calendar.month_name[8].capitalize()\n\n result_day = 
data.day_name(time_locale)\n result_month = data.month_name(time_locale)\n\n # Work around https://github.com/pandas-dev/pandas/issues/22342\n # different normalizations\n expected_day = unicodedata.normalize(\"NFD\", expected_day)\n expected_month = unicodedata.normalize(\"NFD\", expected_month)\n\n result_day = unicodedata.normalize(\"NFD\", result_day)\n result_month = unicodedata.normalize(\"NFD\", result_month)\n\n assert result_day == expected_day\n assert result_month == expected_month\n\n # Test NaT\n nan_ts = Timestamp(NaT)\n assert np.isnan(nan_ts.day_name(time_locale))\n assert np.isnan(nan_ts.month_name(time_locale))\n\n def test_is_leap_year(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH 13727\n dt = Timestamp(\"2000-01-01 00:00:00\", tz=tz)\n assert dt.is_leap_year\n assert isinstance(dt.is_leap_year, bool)\n\n dt = Timestamp(\"1999-01-01 00:00:00\", tz=tz)\n assert not dt.is_leap_year\n\n dt = Timestamp(\"2004-01-01 00:00:00\", tz=tz)\n assert dt.is_leap_year\n\n dt = Timestamp(\"2100-01-01 00:00:00\", tz=tz)\n assert not dt.is_leap_year\n\n def test_woy_boundary(self):\n # make sure weeks at year boundaries are correct\n d = datetime(2013, 12, 31)\n result = Timestamp(d).week\n expected = 1 # ISO standard\n assert result == expected\n\n d = datetime(2008, 12, 28)\n result = Timestamp(d).week\n expected = 52 # ISO standard\n assert result == expected\n\n d = datetime(2009, 12, 31)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n d = datetime(2010, 1, 1)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n d = datetime(2010, 1, 3)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n result = np.array(\n [\n Timestamp(datetime(*args)).week\n for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]\n ]\n )\n assert (result == [52, 52, 53, 53]).all()\n\n def test_resolution(self):\n # GH#21336, GH#21365\n dt = Timestamp(\"2100-01-01 00:00:00\")\n assert dt.resolution == Timedelta(nanoseconds=1)\n\n # Check that the attribute is available on the class, mirroring\n # the stdlib datetime behavior\n assert Timestamp.resolution == Timedelta(nanoseconds=1)\n\n\nclass TestTimestampConstructors:\n def test_constructor(self):\n base_str = \"2014-07-01 09:00\"\n base_dt = datetime(2014, 7, 1, 9)\n base_expected = 1_404_205_200_000_000_000\n\n # confirm base representation is correct\n assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected\n\n tests = [\n (base_str, base_dt, base_expected),\n (\n \"2014-07-01 10:00\",\n datetime(2014, 7, 1, 10),\n base_expected + 3600 * 1_000_000_000,\n ),\n (\n \"2014-07-01 09:00:00.000008000\",\n datetime(2014, 7, 1, 9, 0, 0, 8),\n base_expected + 8000,\n ),\n (\n \"2014-07-01 09:00:00.000000005\",\n Timestamp(\"2014-07-01 09:00:00.000000005\"),\n base_expected + 5,\n ),\n ]\n\n timezones = [\n (None, 0),\n (\"UTC\", 0),\n (pytz.utc, 0),\n (\"Asia/Tokyo\", 9),\n (\"US/Eastern\", -4),\n (\"dateutil/US/Pacific\", -7),\n (pytz.FixedOffset(-180), -3),\n (dateutil.tz.tzoffset(None, 18000), 5),\n ]\n\n for date_str, date, expected in tests:\n for result in [Timestamp(date_str), Timestamp(date)]:\n # only with timestring\n assert result.value == expected\n assert conversion.pydt_to_i8(result) == expected\n\n # re-creation shouldn't affect to internal value\n result = Timestamp(result)\n assert result.value == expected\n assert conversion.pydt_to_i8(result) == expected\n\n # with timezone\n for tz, 
offset in timezones:\n for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:\n expected_tz = expected - offset * 3600 * 1_000_000_000\n assert result.value == expected_tz\n assert conversion.pydt_to_i8(result) == expected_tz\n\n # should preserve tz\n result = Timestamp(result)\n assert result.value == expected_tz\n assert conversion.pydt_to_i8(result) == expected_tz\n\n # should convert to UTC\n if tz is not None:\n result = Timestamp(result).tz_convert(\"UTC\")\n else:\n result = Timestamp(result, tz=\"UTC\")\n expected_utc = expected - offset * 3600 * 1_000_000_000\n assert result.value == expected_utc\n assert conversion.pydt_to_i8(result) == expected_utc\n\n def test_constructor_with_stringoffset(self):\n # GH 7833\n base_str = \"2014-07-01 11:00:00+02:00\"\n base_dt = datetime(2014, 7, 1, 9)\n base_expected = 1_404_205_200_000_000_000\n\n # confirm base representation is correct\n assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected\n\n tests = [\n (base_str, base_expected),\n (\"2014-07-01 12:00:00+02:00\", base_expected + 3600 * 1_000_000_000),\n (\"2014-07-01 11:00:00.000008000+02:00\", base_expected + 8000),\n (\"2014-07-01 11:00:00.000000005+02:00\", base_expected + 5),\n ]\n\n timezones = [\n (None, 0),\n (\"UTC\", 0),\n (pytz.utc, 0),\n (\"Asia/Tokyo\", 9),\n (\"US/Eastern\", -4),\n (\"dateutil/US/Pacific\", -7),\n (pytz.FixedOffset(-180), -3),\n (dateutil.tz.tzoffset(None, 18000), 5),\n ]\n\n for date_str, expected in tests:\n for result in [Timestamp(date_str)]:\n # only with timestring\n assert result.value == expected\n assert conversion.pydt_to_i8(result) == expected\n\n # re-creation shouldn't affect to internal value\n result = Timestamp(result)\n assert result.value == expected\n assert conversion.pydt_to_i8(result) == expected\n\n # with timezone\n for tz, offset in timezones:\n result = Timestamp(date_str, tz=tz)\n expected_tz = expected\n assert result.value == expected_tz\n assert conversion.pydt_to_i8(result) == expected_tz\n\n # should preserve tz\n result = Timestamp(result)\n assert result.value == expected_tz\n assert conversion.pydt_to_i8(result) == expected_tz\n\n # should convert to UTC\n result = Timestamp(result).tz_convert(\"UTC\")\n expected_utc = expected\n assert result.value == expected_utc\n assert conversion.pydt_to_i8(result) == expected_utc\n\n # This should be 2013-11-01 05:00 in UTC\n # converted to Chicago tz\n result = Timestamp(\"2013-11-01 00:00:00-0500\", tz=\"America/Chicago\")\n assert result.value == Timestamp(\"2013-11-01 05:00\").value\n expected = \"Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')\" # noqa\n assert repr(result) == expected\n assert result == eval(repr(result))\n\n # This should be 2013-11-01 05:00 in UTC\n # converted to Tokyo tz (+09:00)\n result = Timestamp(\"2013-11-01 00:00:00-0500\", tz=\"Asia/Tokyo\")\n assert result.value == Timestamp(\"2013-11-01 05:00\").value\n expected = \"Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')\"\n assert repr(result) == expected\n assert result == eval(repr(result))\n\n # GH11708\n # This should be 2015-11-18 10:00 in UTC\n # converted to Asia/Katmandu\n result = Timestamp(\"2015-11-18 15:45:00+05:45\", tz=\"Asia/Katmandu\")\n assert result.value == Timestamp(\"2015-11-18 10:00\").value\n expected = \"Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')\"\n assert repr(result) == expected\n assert result == eval(repr(result))\n\n # This should be 2015-11-18 10:00 in UTC\n # converted to Asia/Kolkata\n result = 
Timestamp(\"2015-11-18 15:30:00+05:30\", tz=\"Asia/Kolkata\")\n assert result.value == Timestamp(\"2015-11-18 10:00\").value\n expected = \"Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')\"\n assert repr(result) == expected\n assert result == eval(repr(result))\n\n def test_constructor_invalid(self):\n with pytest.raises(TypeError, match=\"Cannot convert input\"):\n Timestamp(slice(2))\n with pytest.raises(ValueError, match=\"Cannot convert Period\"):\n Timestamp(Period(\"1000-01-01\"))\n\n def test_constructor_invalid_tz(self):\n # GH#17690\n with pytest.raises(TypeError, match=\"must be a datetime.tzinfo\"):\n Timestamp(\"2017-10-22\", tzinfo=\"US/Eastern\")\n\n with pytest.raises(ValueError, match=\"at most one of\"):\n Timestamp(\"2017-10-22\", tzinfo=utc, tz=\"UTC\")\n\n with pytest.raises(ValueError, match=\"Invalid frequency:\"):\n # GH#5168\n # case where user tries to pass tz as an arg, not kwarg, gets\n # interpreted as a `freq`\n Timestamp(\"2012-01-01\", \"US/Pacific\")\n\n def test_constructor_strptime(self):\n # GH25016\n # Test support for Timestamp.strptime\n fmt = \"%Y%m%d-%H%M%S-%f%z\"\n ts = \"20190129-235348-000001+0000\"\n with pytest.raises(NotImplementedError):\n Timestamp.strptime(ts, fmt)\n\n def test_constructor_tz_or_tzinfo(self):\n # GH#17943, GH#17690, GH#5168\n stamps = [\n Timestamp(year=2017, month=10, day=22, tz=\"UTC\"),\n Timestamp(year=2017, month=10, day=22, tzinfo=utc),\n Timestamp(year=2017, month=10, day=22, tz=utc),\n Timestamp(datetime(2017, 10, 22), tzinfo=utc),\n Timestamp(datetime(2017, 10, 22), tz=\"UTC\"),\n Timestamp(datetime(2017, 10, 22), tz=utc),\n ]\n assert all(ts == stamps[0] for ts in stamps)\n\n def test_constructor_positional(self):\n # see gh-10758\n with pytest.raises(TypeError):\n Timestamp(2000, 1)\n with pytest.raises(ValueError):\n Timestamp(2000, 0, 1)\n with pytest.raises(ValueError):\n Timestamp(2000, 13, 1)\n with pytest.raises(ValueError):\n Timestamp(2000, 1, 0)\n with pytest.raises(ValueError):\n Timestamp(2000, 1, 32)\n\n # see gh-11630\n assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp(\"20151112\"))\n assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr(\n Timestamp(\"2015-11-12 01:02:03.999999\")\n )\n\n def test_constructor_keyword(self):\n # GH 10758\n with pytest.raises(TypeError):\n Timestamp(year=2000, month=1)\n with pytest.raises(ValueError):\n Timestamp(year=2000, month=0, day=1)\n with pytest.raises(ValueError):\n Timestamp(year=2000, month=13, day=1)\n with pytest.raises(ValueError):\n Timestamp(year=2000, month=1, day=0)\n with pytest.raises(ValueError):\n Timestamp(year=2000, month=1, day=32)\n\n assert repr(Timestamp(year=2015, month=11, day=12)) == repr(\n Timestamp(\"20151112\")\n )\n\n assert repr(\n Timestamp(\n year=2015,\n month=11,\n day=12,\n hour=1,\n minute=2,\n second=3,\n microsecond=999999,\n )\n ) == repr(Timestamp(\"2015-11-12 01:02:03.999999\"))\n\n def test_constructor_fromordinal(self):\n base = datetime(2000, 1, 1)\n\n ts = Timestamp.fromordinal(base.toordinal(), freq=\"D\")\n assert base == ts\n assert ts.freq == \"D\"\n assert base.toordinal() == ts.toordinal()\n\n ts = Timestamp.fromordinal(base.toordinal(), tz=\"US/Eastern\")\n assert Timestamp(\"2000-01-01\", tz=\"US/Eastern\") == ts\n assert base.toordinal() == ts.toordinal()\n\n # GH#3042\n dt = datetime(2011, 4, 16, 0, 0)\n ts = Timestamp.fromordinal(dt.toordinal())\n assert ts.to_pydatetime() == dt\n\n # with a tzinfo\n stamp = Timestamp(\"2011-4-16\", tz=\"US/Eastern\")\n dt_tz = 
stamp.to_pydatetime()\n ts = Timestamp.fromordinal(dt_tz.toordinal(), tz=\"US/Eastern\")\n assert ts.to_pydatetime() == dt_tz\n\n @pytest.mark.parametrize(\n \"result\",\n [\n Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),\n Timestamp(\n year=2000,\n month=1,\n day=2,\n hour=3,\n minute=4,\n second=5,\n microsecond=6,\n nanosecond=1,\n ),\n Timestamp(\n year=2000,\n month=1,\n day=2,\n hour=3,\n minute=4,\n second=5,\n microsecond=6,\n nanosecond=1,\n tz=\"UTC\",\n ),\n Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),\n Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),\n ],\n )\n def test_constructor_nanosecond(self, result):\n # GH 18898\n expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)\n expected = expected + Timedelta(nanoseconds=1)\n assert result == expected\n\n @pytest.mark.parametrize(\"z\", [\"Z0\", \"Z00\"])\n def test_constructor_invalid_Z0_isostring(self, z):\n # GH 8910\n with pytest.raises(ValueError):\n Timestamp(\"2014-11-02 01:00{}\".format(z))\n\n @pytest.mark.parametrize(\n \"arg\",\n [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"microsecond\",\n \"nanosecond\",\n ],\n )\n def test_invalid_date_kwarg_with_string_input(self, arg):\n kwarg = {arg: 1}\n with pytest.raises(ValueError):\n Timestamp(\"2010-10-10 12:59:59.999999999\", **kwarg)\n\n def test_out_of_bounds_integer_value(self):\n # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError\n with pytest.raises(OutOfBoundsDatetime):\n Timestamp(Timestamp.max.value * 2)\n with pytest.raises(OutOfBoundsDatetime):\n Timestamp(Timestamp.min.value * 2)\n\n def test_out_of_bounds_value(self):\n one_us = np.timedelta64(1).astype(\"timedelta64[us]\")\n\n # By definition we can't go out of bounds in [ns], so we\n # convert the datetime64s to [us] so we can go out of bounds\n min_ts_us = np.datetime64(Timestamp.min).astype(\"M8[us]\")\n max_ts_us = np.datetime64(Timestamp.max).astype(\"M8[us]\")\n\n # No error for the min/max datetimes\n Timestamp(min_ts_us)\n Timestamp(max_ts_us)\n\n # One us less than the minimum is an error\n with pytest.raises(ValueError):\n Timestamp(min_ts_us - one_us)\n\n # One us more than the maximum is an error\n with pytest.raises(ValueError):\n Timestamp(max_ts_us + one_us)\n\n def test_out_of_bounds_string(self):\n with pytest.raises(ValueError):\n Timestamp(\"1676-01-01\")\n with pytest.raises(ValueError):\n Timestamp(\"2263-01-01\")\n\n def test_barely_out_of_bounds(self):\n # GH#19529\n # GH#19382 close enough to bounds that dropping nanos would result\n # in an in-bounds datetime\n with pytest.raises(OutOfBoundsDatetime):\n Timestamp(\"2262-04-11 23:47:16.854775808\")\n\n def test_bounds_with_different_units(self):\n out_of_bounds_dates = (\"1677-09-21\", \"2262-04-12\")\n\n time_units = (\"D\", \"h\", \"m\", \"s\", \"ms\", \"us\")\n\n for date_string in out_of_bounds_dates:\n for unit in time_units:\n dt64 = np.datetime64(date_string, unit)\n with pytest.raises(ValueError):\n Timestamp(dt64)\n\n in_bounds_dates = (\"1677-09-23\", \"2262-04-11\")\n\n for date_string in in_bounds_dates:\n for unit in time_units:\n dt64 = np.datetime64(date_string, unit)\n Timestamp(dt64)\n\n def test_min_valid(self):\n # Ensure that Timestamp.min is a valid Timestamp\n Timestamp(Timestamp.min)\n\n def test_max_valid(self):\n # Ensure that Timestamp.max is a valid Timestamp\n Timestamp(Timestamp.max)\n\n def test_now(self):\n # GH#9000\n ts_from_string = Timestamp(\"now\")\n ts_from_method = Timestamp.now()\n ts_datetime = datetime.now()\n\n 
ts_from_string_tz = Timestamp(\"now\", tz=\"US/Eastern\")\n ts_from_method_tz = Timestamp.now(tz=\"US/Eastern\")\n\n # Check that the delta between the times is less than 1s (arbitrarily\n # small)\n delta = Timedelta(seconds=1)\n assert abs(ts_from_method - ts_from_string) < delta\n assert abs(ts_datetime - ts_from_method) < delta\n assert abs(ts_from_method_tz - ts_from_string_tz) < delta\n assert (\n abs(\n ts_from_string_tz.tz_localize(None)\n - ts_from_method_tz.tz_localize(None)\n )\n < delta\n )\n\n def test_today(self):\n ts_from_string = Timestamp(\"today\")\n ts_from_method = Timestamp.today()\n ts_datetime = datetime.today()\n\n ts_from_string_tz = Timestamp(\"today\", tz=\"US/Eastern\")\n ts_from_method_tz = Timestamp.today(tz=\"US/Eastern\")\n\n # Check that the delta between the times is less than 1s (arbitrarily\n # small)\n delta = Timedelta(seconds=1)\n assert abs(ts_from_method - ts_from_string) < delta\n assert abs(ts_datetime - ts_from_method) < delta\n assert abs(ts_from_method_tz - ts_from_string_tz) < delta\n assert (\n abs(\n ts_from_string_tz.tz_localize(None)\n - ts_from_method_tz.tz_localize(None)\n )\n < delta\n )\n\n @pytest.mark.parametrize(\"tz\", [None, pytz.timezone(\"US/Pacific\")])\n def test_disallow_setting_tz(self, tz):\n # GH 3746\n ts = Timestamp(\"2010\")\n with pytest.raises(AttributeError):\n ts.tz = tz\n\n @pytest.mark.parametrize(\"offset\", [\"+0300\", \"+0200\"])\n def test_construct_timestamp_near_dst(self, offset):\n # GH 20854\n expected = Timestamp(\n \"2016-10-30 03:00:00{}\".format(offset), tz=\"Europe/Helsinki\"\n )\n result = Timestamp(expected).tz_convert(\"Europe/Helsinki\")\n assert result == expected\n\n @pytest.mark.parametrize(\n \"arg\", [\"2013/01/01 00:00:00+09:00\", \"2013-01-01 00:00:00+09:00\"]\n )\n def test_construct_with_different_string_format(self, arg):\n # GH 12064\n result = Timestamp(arg)\n expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))\n assert result == expected\n\n def test_construct_timestamp_preserve_original_frequency(self):\n # GH 22311\n result = Timestamp(Timestamp(\"2010-08-08\", freq=\"D\")).freq\n expected = offsets.Day()\n assert result == expected\n\n def test_constructor_invalid_frequency(self):\n # GH 22311\n with pytest.raises(ValueError, match=\"Invalid frequency:\"):\n Timestamp(\"2012-01-01\", freq=[])\n\n @pytest.mark.parametrize(\"box\", [datetime, Timestamp])\n def test_raise_tz_and_tzinfo_in_datetime_input(self, box):\n # GH 23579\n kwargs = {\"year\": 2018, \"month\": 1, \"day\": 1, \"tzinfo\": utc}\n with pytest.raises(ValueError, match=\"Cannot pass a datetime or Timestamp\"):\n Timestamp(box(**kwargs), tz=\"US/Pacific\")\n with pytest.raises(ValueError, match=\"Cannot pass a datetime or Timestamp\"):\n Timestamp(box(**kwargs), tzinfo=pytz.timezone(\"US/Pacific\"))\n\n def test_dont_convert_dateutil_utc_to_pytz_utc(self):\n result = Timestamp(datetime(2018, 1, 1), tz=tzutc())\n expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc())\n assert result == expected\n\n def test_constructor_subclassed_datetime(self):\n # GH 25851\n # ensure that subclassed datetime works for\n # Timestamp creation\n class SubDatetime(datetime):\n pass\n\n data = SubDatetime(2000, 1, 1)\n result = Timestamp(data)\n expected = Timestamp(2000, 1, 1)\n assert result == expected\n\n @pytest.mark.skipif(\n not compat.PY38,\n reason=\"datetime.fromisocalendar was added in Python version 3.8\",\n )\n def test_constructor_fromisocalendar(self):\n # GH 30395\n expected_timestamp = 
Timestamp(\"2000-01-03 00:00:00\")\n expected_stdlib = datetime.fromisocalendar(2000, 1, 1)\n result = Timestamp.fromisocalendar(2000, 1, 1)\n assert result == expected_timestamp\n assert result == expected_stdlib\n assert isinstance(result, Timestamp)\n\n\nclass TestTimestamp:\n def test_tz(self):\n tstr = \"2014-02-01 09:00\"\n ts = Timestamp(tstr)\n local = ts.tz_localize(\"Asia/Tokyo\")\n assert local.hour == 9\n assert local == Timestamp(tstr, tz=\"Asia/Tokyo\")\n conv = local.tz_convert(\"US/Eastern\")\n assert conv == Timestamp(\"2014-01-31 19:00\", tz=\"US/Eastern\")\n assert conv.hour == 19\n\n # preserves nanosecond\n ts = Timestamp(tstr) + offsets.Nano(5)\n local = ts.tz_localize(\"Asia/Tokyo\")\n assert local.hour == 9\n assert local.nanosecond == 5\n conv = local.tz_convert(\"US/Eastern\")\n assert conv.nanosecond == 5\n assert conv.hour == 19\n\n def test_utc_z_designator(self):\n assert get_timezone(Timestamp(\"2014-11-02 01:00Z\").tzinfo) is utc\n\n def test_asm8(self):\n np.random.seed(7_960_929)\n ns = [Timestamp.min.value, Timestamp.max.value, 1000]\n\n for n in ns:\n assert (\n Timestamp(n).asm8.view(\"i8\") == np.datetime64(n, \"ns\").view(\"i8\") == n\n )\n\n assert Timestamp(\"nat\").asm8.view(\"i8\") == np.datetime64(\"nat\", \"ns\").view(\"i8\")\n\n def test_class_ops_pytz(self):\n def compare(x, y):\n assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0\n\n compare(Timestamp.now(), datetime.now())\n compare(Timestamp.now(\"UTC\"), datetime.now(timezone(\"UTC\")))\n compare(Timestamp.utcnow(), datetime.utcnow())\n compare(Timestamp.today(), datetime.today())\n current_time = calendar.timegm(datetime.now().utctimetuple())\n compare(\n Timestamp.utcfromtimestamp(current_time),\n datetime.utcfromtimestamp(current_time),\n )\n compare(\n Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)\n )\n\n date_component = datetime.utcnow()\n time_component = (date_component + timedelta(minutes=10)).time()\n compare(\n Timestamp.combine(date_component, time_component),\n datetime.combine(date_component, time_component),\n )\n\n def test_class_ops_dateutil(self):\n def compare(x, y):\n assert (\n int(\n np.round(Timestamp(x).value / 1e9)\n - np.round(Timestamp(y).value / 1e9)\n )\n == 0\n )\n\n compare(Timestamp.now(), datetime.now())\n compare(Timestamp.now(\"UTC\"), datetime.now(tzutc()))\n compare(Timestamp.utcnow(), datetime.utcnow())\n compare(Timestamp.today(), datetime.today())\n current_time = calendar.timegm(datetime.now().utctimetuple())\n compare(\n Timestamp.utcfromtimestamp(current_time),\n datetime.utcfromtimestamp(current_time),\n )\n compare(\n Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)\n )\n\n date_component = datetime.utcnow()\n time_component = (date_component + timedelta(minutes=10)).time()\n compare(\n Timestamp.combine(date_component, time_component),\n datetime.combine(date_component, time_component),\n )\n\n def test_basics_nanos(self):\n val = np.int64(946_684_800_000_000_000).view(\"M8[ns]\")\n stamp = Timestamp(val.view(\"i8\") + 500)\n assert stamp.year == 2000\n assert stamp.month == 1\n assert stamp.microsecond == 0\n assert stamp.nanosecond == 500\n\n # GH 14415\n val = np.iinfo(np.int64).min + 80_000_000_000_000\n stamp = Timestamp(val)\n assert stamp.year == 1677\n assert stamp.month == 9\n assert stamp.day == 21\n assert stamp.microsecond == 145224\n assert stamp.nanosecond == 192\n\n @pytest.mark.parametrize(\n \"value, check_kwargs\",\n [\n [946688461000000000, {}],\n 
[946688461000000000 / 1000, dict(unit=\"us\")],\n [946688461000000000 / 1_000_000, dict(unit=\"ms\")],\n [946688461000000000 / 1_000_000_000, dict(unit=\"s\")],\n [10957, dict(unit=\"D\", h=0)],\n [\n (946688461000000000 + 500000) / 1000000000,\n dict(unit=\"s\", us=499, ns=964),\n ],\n [(946688461000000000 + 500000000) / 1000000000, dict(unit=\"s\", us=500000)],\n [(946688461000000000 + 500000) / 1000000, dict(unit=\"ms\", us=500)],\n [(946688461000000000 + 500000) / 1000, dict(unit=\"us\", us=500)],\n [(946688461000000000 + 500000000) / 1000000, dict(unit=\"ms\", us=500000)],\n [946688461000000000 / 1000.0 + 5, dict(unit=\"us\", us=5)],\n [946688461000000000 / 1000.0 + 5000, dict(unit=\"us\", us=5000)],\n [946688461000000000 / 1000000.0 + 0.5, dict(unit=\"ms\", us=500)],\n [946688461000000000 / 1000000.0 + 0.005, dict(unit=\"ms\", us=5, ns=5)],\n [946688461000000000 / 1000000000.0 + 0.5, dict(unit=\"s\", us=500000)],\n [10957 + 0.5, dict(unit=\"D\", h=12)],\n ],\n )\n def test_unit(self, value, check_kwargs):\n def check(value, unit=None, h=1, s=1, us=0, ns=0):\n stamp = Timestamp(value, unit=unit)\n assert stamp.year == 2000\n assert stamp.month == 1\n assert stamp.day == 1\n assert stamp.hour == h\n if unit != \"D\":\n assert stamp.minute == 1\n assert stamp.second == s\n assert stamp.microsecond == us\n else:\n assert stamp.minute == 0\n assert stamp.second == 0\n assert stamp.microsecond == 0\n assert stamp.nanosecond == ns\n\n check(value, **check_kwargs)\n\n def test_roundtrip(self):\n\n # test value to string and back conversions\n # further test accessors\n base = Timestamp(\"20140101 00:00:00\")\n\n result = Timestamp(base.value + Timedelta(\"5ms\").value)\n assert result == Timestamp(f\"{base}.005000\")\n assert result.microsecond == 5000\n\n result = Timestamp(base.value + Timedelta(\"5us\").value)\n assert result == Timestamp(f\"{base}.000005\")\n assert result.microsecond == 5\n\n result = Timestamp(base.value + Timedelta(\"5ns\").value)\n assert result == Timestamp(f\"{base}.000000005\")\n assert result.nanosecond == 5\n assert result.microsecond == 0\n\n result = Timestamp(base.value + Timedelta(\"6ms 5us\").value)\n assert result == Timestamp(f\"{base}.006005\")\n assert result.microsecond == 5 + 6 * 1000\n\n result = Timestamp(base.value + Timedelta(\"200ms 5us\").value)\n assert result == Timestamp(f\"{base}.200005\")\n assert result.microsecond == 5 + 200 * 1000\n\n def test_hash_equivalent(self):\n d = {datetime(2011, 1, 1): 5}\n stamp = Timestamp(datetime(2011, 1, 1))\n assert d[stamp] == 5\n\n def test_tz_conversion_freq(self, tz_naive_fixture):\n # GH25241\n t1 = Timestamp(\"2019-01-01 10:00\", freq=\"H\")\n assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq\n t2 = Timestamp(\"2019-01-02 12:00\", tz=\"UTC\", freq=\"T\")\n assert t2.tz_convert(tz=\"UTC\").freq == t2.freq\n\n\nclass TestTimestampNsOperations:\n def test_nanosecond_string_parsing(self):\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\")\n # GH 7878\n expected_repr = \"2013-05-01 07:15:45.123456789\"\n expected_value = 1_367_392_545_123_456_789\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789+09:00\", tz=\"Asia/Tokyo\")\n assert ts.value == expected_value - 9 * 3600 * 1_000_000_000\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\", tz=\"UTC\")\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\", 
tz=\"US/Eastern\")\n assert ts.value == expected_value + 4 * 3600 * 1_000_000_000\n assert expected_repr in repr(ts)\n\n # GH 10041\n ts = Timestamp(\"20130501T071545.123456789\")\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n def test_nanosecond_timestamp(self):\n # GH 7610\n expected = 1_293_840_000_000_000_005\n t = Timestamp(\"2011-01-01\") + offsets.Nano(5)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n t = Timestamp(t)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n t = Timestamp(np_datetime64_compat(\"2011-01-01 00:00:00.000000005Z\"))\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n expected = 1_293_840_000_000_000_010\n t = t + offsets.Nano(5)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n t = Timestamp(t)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n t = Timestamp(np_datetime64_compat(\"2011-01-01 00:00:00.000000010Z\"))\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n\nclass TestTimestampToJulianDate:\n def test_compare_1700(self):\n r = Timestamp(\"1700-06-23\").to_julian_date()\n assert r == 2_342_145.5\n\n def test_compare_2000(self):\n r = Timestamp(\"2000-04-12\").to_julian_date()\n assert r == 2_451_646.5\n\n def test_compare_2100(self):\n r = Timestamp(\"2100-08-12\").to_julian_date()\n assert r == 2_488_292.5\n\n def test_compare_hour01(self):\n r = Timestamp(\"2000-08-12T01:00:00\").to_julian_date()\n assert r == 2_451_768.5416666666666666\n\n def test_compare_hour13(self):\n r = Timestamp(\"2000-08-12T13:00:00\").to_julian_date()\n assert r == 2_451_769.0416666666666666\n\n\nclass TestTimestampConversion:\n def test_conversion(self):\n # GH#9255\n ts = Timestamp(\"2000-01-01\")\n\n result = ts.to_pydatetime()\n expected = datetime(2000, 1, 1)\n assert result == expected\n assert type(result) == type(expected)\n\n result = ts.to_datetime64()\n expected = np.datetime64(ts.value, \"ns\")\n assert result == expected\n assert type(result) == type(expected)\n assert result.dtype == expected.dtype\n\n def test_to_pydatetime_nonzero_nano(self):\n ts = Timestamp(\"2011-01-01 9:00:00.123456789\")\n\n # Warn the user of data loss (nanoseconds).\n with tm.assert_produces_warning(UserWarning, check_stacklevel=False):\n expected = datetime(2011, 1, 1, 9, 0, 0, 123456)\n result = ts.to_pydatetime()\n assert result == expected\n\n def test_timestamp_to_datetime(self):\n stamp = Timestamp(\"20090415\", tz=\"US/Eastern\", freq=\"D\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_timestamp_to_datetime_dateutil(self):\n stamp = Timestamp(\"20090415\", tz=\"dateutil/US/Eastern\", freq=\"D\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_timestamp_to_datetime_explicit_pytz(self):\n stamp = Timestamp(\"20090415\", tz=pytz.timezone(\"US/Eastern\"), freq=\"D\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n @td.skip_if_windows_python_3\n def test_timestamp_to_datetime_explicit_dateutil(self):\n stamp = Timestamp(\"20090415\", 
tz=gettz(\"US/Eastern\"), freq=\"D\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_to_datetime_bijective(self):\n # Ensure that converting to datetime and back only loses precision\n # by going from nanoseconds to microseconds.\n exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning\n with tm.assert_produces_warning(exp_warning, check_stacklevel=False):\n assert (\n Timestamp(Timestamp.max.to_pydatetime()).value / 1000\n == Timestamp.max.value / 1000\n )\n\n exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning\n with tm.assert_produces_warning(exp_warning, check_stacklevel=False):\n assert (\n Timestamp(Timestamp.min.to_pydatetime()).value / 1000\n == Timestamp.min.value / 1000\n )\n\n def test_to_period_tz_warning(self):\n # GH#21333 make sure a warning is issued when timezone\n # info is lost\n ts = Timestamp(\"2009-04-15 16:17:18\", tz=\"US/Eastern\")\n with tm.assert_produces_warning(UserWarning):\n # warning that timezone info will be lost\n ts.to_period(\"D\")\n\n def test_to_numpy_alias(self):\n # GH 24653: alias .to_numpy() for scalars\n ts = Timestamp(datetime.now())\n assert ts.to_datetime64() == ts.to_numpy()\n\n\nclass SubDatetime(datetime):\n pass\n\n\[email protected](\n \"lh,rh\",\n [\n (SubDatetime(2000, 1, 1), Timedelta(hours=1)),\n (Timedelta(hours=1), SubDatetime(2000, 1, 1)),\n ],\n)\ndef test_dt_subclass_add_timedelta(lh, rh):\n # GH#25851\n # ensure that subclassed datetime works for\n # Timedelta operations\n result = lh + rh\n expected = SubDatetime(2000, 1, 1, 1)\n assert result == expected\n",
"\"\"\"\n========================\nRandom Number Generation\n========================\n\nUse ``default_rng()`` to create a `Generator` and call its methods.\n\n=============== =========================================================\nGenerator\n--------------- ---------------------------------------------------------\nGenerator Class implementing all of the random number distributions\ndefault_rng Default constructor for ``Generator``\n=============== =========================================================\n\n============================================= ===\nBitGenerator Streams that work with Generator\n--------------------------------------------- ---\nMT19937\nPCG64\nPhilox\nSFC64\n============================================= ===\n\n============================================= ===\nGetting entropy to initialize a BitGenerator\n--------------------------------------------- ---\nSeedSequence\n============================================= ===\n\n\nLegacy\n------\n\nFor backwards compatibility with previous versions of numpy before 1.17, the\nvarious aliases to the global `RandomState` methods are left alone and do not\nuse the new `Generator` API.\n\n==================== =========================================================\nUtility functions\n-------------------- ---------------------------------------------------------\nrandom Uniformly distributed floats over ``[0, 1)``\nbytes Uniformly distributed random bytes.\npermutation Randomly permute a sequence / generate a random sequence.\nshuffle Randomly permute a sequence in place.\nchoice Random sample from 1-D array.\n==================== =========================================================\n\n==================== =========================================================\nCompatibility\nfunctions - removed\nin the new API\n-------------------- ---------------------------------------------------------\nrand Uniformly distributed values.\nrandn Normally distributed values.\nranf Uniformly distributed floating point numbers.\nrandom_integers Uniformly distributed integers in a given range.\n (deprecated, use ``integers(..., closed=True)`` instead)\nrandom_sample Alias for `random_sample`\nrandint Uniformly distributed integers in a given range\nseed Seed the legacy random number generator.\n==================== =========================================================\n\n==================== =========================================================\nUnivariate\ndistributions\n-------------------- ---------------------------------------------------------\nbeta Beta distribution over ``[0, 1]``.\nbinomial Binomial distribution.\nchisquare :math:`\\\\chi^2` distribution.\nexponential Exponential distribution.\nf F (Fisher-Snedecor) distribution.\ngamma Gamma distribution.\ngeometric Geometric distribution.\ngumbel Gumbel distribution.\nhypergeometric Hypergeometric distribution.\nlaplace Laplace distribution.\nlogistic Logistic distribution.\nlognormal Log-normal distribution.\nlogseries Logarithmic series distribution.\nnegative_binomial Negative binomial distribution.\nnoncentral_chisquare Non-central chi-square distribution.\nnoncentral_f Non-central F distribution.\nnormal Normal / Gaussian distribution.\npareto Pareto distribution.\npoisson Poisson distribution.\npower Power distribution.\nrayleigh Rayleigh distribution.\ntriangular Triangular distribution.\nuniform Uniform distribution.\nvonmises Von Mises circular distribution.\nwald Wald (inverse Gaussian) distribution.\nweibull Weibull distribution.\nzipf Zipf's 
distribution over ranked data.\n==================== =========================================================\n\n==================== ==========================================================\nMultivariate\ndistributions\n-------------------- ----------------------------------------------------------\ndirichlet Multivariate generalization of Beta distribution.\nmultinomial Multivariate generalization of the binomial distribution.\nmultivariate_normal Multivariate generalization of the normal distribution.\n==================== ==========================================================\n\n==================== =========================================================\nStandard\ndistributions\n-------------------- ---------------------------------------------------------\nstandard_cauchy Standard Cauchy-Lorentz distribution.\nstandard_exponential Standard exponential distribution.\nstandard_gamma Standard Gamma distribution.\nstandard_normal Standard normal distribution.\nstandard_t Standard Student's t-distribution.\n==================== =========================================================\n\n==================== =========================================================\nInternal functions\n-------------------- ---------------------------------------------------------\nget_state Get tuple representing internal state of generator.\nset_state Set state of generator.\n==================== =========================================================\n\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__all__ = [\n 'beta',\n 'binomial',\n 'bytes',\n 'chisquare',\n 'choice',\n 'dirichlet',\n 'exponential',\n 'f',\n 'gamma',\n 'geometric',\n 'get_state',\n 'gumbel',\n 'hypergeometric',\n 'laplace',\n 'logistic',\n 'lognormal',\n 'logseries',\n 'multinomial',\n 'multivariate_normal',\n 'negative_binomial',\n 'noncentral_chisquare',\n 'noncentral_f',\n 'normal',\n 'pareto',\n 'permutation',\n 'poisson',\n 'power',\n 'rand',\n 'randint',\n 'randn',\n 'random',\n 'random_integers',\n 'random_sample',\n 'ranf',\n 'rayleigh',\n 'sample',\n 'seed',\n 'set_state',\n 'shuffle',\n 'standard_cauchy',\n 'standard_exponential',\n 'standard_gamma',\n 'standard_normal',\n 'standard_t',\n 'triangular',\n 'uniform',\n 'vonmises',\n 'wald',\n 'weibull',\n 'zipf',\n]\n\n# add these for module-freeze analysis (like PyInstaller)\nfrom . import _pickle\nfrom . import _common\nfrom . import _bounded_integers\n\nfrom ._generator import Generator, default_rng\nfrom ._bit_generator import SeedSequence, BitGenerator\nfrom ._mt19937 import MT19937\nfrom ._pcg64 import PCG64\nfrom ._philox import Philox\nfrom ._sfc64 import SFC64\nfrom .mtrand import *\n\n__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',\n 'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator']\n\n\ndef __RandomState_ctor():\n \"\"\"Return a RandomState instance.\n\n This function exists solely to assist (un)pickling.\n\n Note that the state of the RandomState returned here is irrelevant, as this\n function's entire purpose is to return a newly allocated RandomState whose\n state pickle can set. Consequently the RandomState returned by this function\n is a freshly allocated copy with a seed=0.\n\n See https://github.com/numpy/numpy/issues/4763 for a detailed discussion\n\n \"\"\"\n return RandomState(seed=0)\n\n\nfrom numpy._pytesttester import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n",
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import timedelta_range, to_timedelta\nimport pandas._testing as tm\n\nfrom pandas.tseries.offsets import Day, Second\n\n\nclass TestTimedeltas:\n def test_timedelta_range(self):\n\n expected = to_timedelta(np.arange(5), unit=\"D\")\n result = timedelta_range(\"0 days\", periods=5, freq=\"D\")\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(11), unit=\"D\")\n result = timedelta_range(\"0 days\", \"10 days\", freq=\"D\")\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(5), unit=\"D\") + Second(2) + Day()\n result = timedelta_range(\"1 days, 00:00:02\", \"5 days, 00:00:02\", freq=\"D\")\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta([1, 3, 5, 7, 9], unit=\"D\") + Second(2)\n result = timedelta_range(\"1 days, 00:00:02\", periods=5, freq=\"2D\")\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(50), unit=\"T\") * 30\n result = timedelta_range(\"0 days\", freq=\"30T\", periods=50)\n tm.assert_index_equal(result, expected)\n\n # GH 11776\n arr = np.arange(10).reshape(2, 5)\n df = pd.DataFrame(np.arange(10).reshape(2, 5))\n for arg in (arr, df):\n with pytest.raises(TypeError, match=\"1-d array\"):\n to_timedelta(arg)\n for errors in [\"ignore\", \"raise\", \"coerce\"]:\n with pytest.raises(TypeError, match=\"1-d array\"):\n to_timedelta(arg, errors=errors)\n\n # issue10583\n df = pd.DataFrame(np.random.normal(size=(10, 4)))\n df.index = pd.timedelta_range(start=\"0s\", periods=10, freq=\"s\")\n expected = df.loc[pd.Timedelta(\"0s\") :, :]\n result = df.loc[\"0s\":, :]\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize(\n \"periods, freq\", [(3, \"2D\"), (5, \"D\"), (6, \"19H12T\"), (7, \"16H\"), (9, \"12H\")]\n )\n def test_linspace_behavior(self, periods, freq):\n # GH 20976\n result = timedelta_range(start=\"0 days\", end=\"4 days\", periods=periods)\n expected = timedelta_range(start=\"0 days\", end=\"4 days\", freq=freq)\n tm.assert_index_equal(result, expected)\n\n def test_errors(self):\n # not enough params\n msg = (\n \"Of the four parameters: start, end, periods, and freq, \"\n \"exactly three must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n timedelta_range(start=\"0 days\")\n\n with pytest.raises(ValueError, match=msg):\n timedelta_range(end=\"5 days\")\n\n with pytest.raises(ValueError, match=msg):\n timedelta_range(periods=2)\n\n with pytest.raises(ValueError, match=msg):\n timedelta_range()\n\n # too many params\n with pytest.raises(ValueError, match=msg):\n timedelta_range(start=\"0 days\", end=\"5 days\", periods=10, freq=\"H\")\n",
"\"\"\"\nPrinting tools.\n\"\"\"\n\nimport sys\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n)\n\nfrom pandas._config import get_option\n\nfrom pandas.core.dtypes.inference import is_sequence\n\nEscapeChars = Union[Mapping[str, str], Iterable[str]]\n_KT = TypeVar(\"_KT\")\n_VT = TypeVar(\"_VT\")\n\n\ndef adjoin(space: int, *lists: List[str], **kwargs) -> str:\n \"\"\"\n Glues together two sets of strings using the amount of space requested.\n The idea is to prettify.\n\n ----------\n space : int\n number of spaces for padding\n lists : str\n list of str which being joined\n strlen : callable\n function used to calculate the length of each str. Needed for unicode\n handling.\n justfunc : callable\n function used to justify str. Needed for unicode handling.\n \"\"\"\n strlen = kwargs.pop(\"strlen\", len)\n justfunc = kwargs.pop(\"justfunc\", justify)\n\n out_lines = []\n newLists = []\n lengths = [max(map(strlen, x)) + space for x in lists[:-1]]\n # not the last one\n lengths.append(max(map(len, lists[-1])))\n maxLen = max(map(len, lists))\n for i, lst in enumerate(lists):\n nl = justfunc(lst, lengths[i], mode=\"left\")\n nl.extend([\" \" * lengths[i]] * (maxLen - len(lst)))\n newLists.append(nl)\n toJoin = zip(*newLists)\n for lines in toJoin:\n out_lines.append(\"\".join(lines))\n return \"\\n\".join(out_lines)\n\n\ndef justify(texts: Iterable[str], max_len: int, mode: str = \"right\") -> List[str]:\n \"\"\"\n Perform ljust, center, rjust against string or list-like\n \"\"\"\n if mode == \"left\":\n return [x.ljust(max_len) for x in texts]\n elif mode == \"center\":\n return [x.center(max_len) for x in texts]\n else:\n return [x.rjust(max_len) for x in texts]\n\n\n# Unicode consolidation\n# ---------------------\n#\n# pprinting utility functions for generating Unicode text or\n# bytes(3.x)/str(2.x) representations of objects.\n# Try to use these as much as possible rather than rolling your own.\n#\n# When to use\n# -----------\n#\n# 1) If you're writing code internal to pandas (no I/O directly involved),\n# use pprint_thing().\n#\n# It will always return unicode text which can handled by other\n# parts of the package without breakage.\n#\n# 2) if you need to write something out to file, use\n# pprint_thing_encoded(encoding).\n#\n# If no encoding is specified, it defaults to utf-8. Since encoding pure\n# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're\n# working with straight ascii.\n\n\ndef _pprint_seq(\n seq: Sequence, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds\n) -> str:\n \"\"\"\n internal. pprinter for iterables. 
you should probably use pprint_thing()\n rather than calling this directly.\n\n bounds length of printed sequence, depending on options\n \"\"\"\n if isinstance(seq, set):\n fmt = \"{{{body}}}\"\n else:\n fmt = \"[{body}]\" if hasattr(seq, \"__setitem__\") else \"({body})\"\n\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option(\"max_seq_items\") or len(seq)\n\n s = iter(seq)\n # handle sets, no slicing\n r = [\n pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)\n for i in range(min(nitems, len(seq)))\n ]\n body = \", \".join(r)\n\n if nitems < len(seq):\n body += \", ...\"\n elif isinstance(seq, tuple) and len(seq) == 1:\n body += \",\"\n\n return fmt.format(body=body)\n\n\ndef _pprint_dict(\n seq: Mapping, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds\n) -> str:\n \"\"\"\n internal. pprinter for iterables. you should probably use pprint_thing()\n rather than calling this directly.\n \"\"\"\n fmt = \"{{{things}}}\"\n pairs = []\n\n pfmt = \"{key}: {val}\"\n\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option(\"max_seq_items\") or len(seq)\n\n for k, v in list(seq.items())[:nitems]:\n pairs.append(\n pfmt.format(\n key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),\n val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),\n )\n )\n\n if nitems < len(seq):\n return fmt.format(things=\", \".join(pairs) + \", ...\")\n else:\n return fmt.format(things=\", \".join(pairs))\n\n\ndef pprint_thing(\n thing: Any,\n _nest_lvl: int = 0,\n escape_chars: Optional[EscapeChars] = None,\n default_escapes: bool = False,\n quote_strings: bool = False,\n max_seq_items: Optional[int] = None,\n) -> str:\n \"\"\"\n This function is the sanctioned way of converting objects\n to a string representation and properly handles nested sequences.\n\n Parameters\n ----------\n thing : anything to be formatted\n _nest_lvl : internal use only. pprint_thing() is mutually-recursive\n with pprint_sequence, this argument is used to keep track of the\n current nesting level, and limit it.\n escape_chars : list or dict, optional\n Characters to escape. 
If a dict is passed the values are the\n replacements\n default_escapes : bool, default False\n Whether the input escape characters replaces or adds to the defaults\n max_seq_items : int or None, default None\n Pass through to other pretty printers to limit sequence printing\n\n Returns\n -------\n str\n \"\"\"\n\n def as_escaped_string(\n thing: Any, escape_chars: Optional[EscapeChars] = escape_chars\n ) -> str:\n translate = {\"\\t\": r\"\\t\", \"\\n\": r\"\\n\", \"\\r\": r\"\\r\"}\n if isinstance(escape_chars, dict):\n if default_escapes:\n translate.update(escape_chars)\n else:\n translate = escape_chars\n escape_chars = list(escape_chars.keys())\n else:\n escape_chars = escape_chars or tuple()\n\n result = str(thing)\n for c in escape_chars:\n result = result.replace(c, translate[c])\n return result\n\n if hasattr(thing, \"__next__\"):\n return str(thing)\n elif isinstance(thing, dict) and _nest_lvl < get_option(\n \"display.pprint_nest_depth\"\n ):\n result = _pprint_dict(\n thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items\n )\n elif is_sequence(thing) and _nest_lvl < get_option(\"display.pprint_nest_depth\"):\n result = _pprint_seq(\n thing,\n _nest_lvl,\n escape_chars=escape_chars,\n quote_strings=quote_strings,\n max_seq_items=max_seq_items,\n )\n elif isinstance(thing, str) and quote_strings:\n result = f\"'{as_escaped_string(thing)}'\"\n else:\n result = as_escaped_string(thing)\n\n return result\n\n\ndef pprint_thing_encoded(\n object, encoding: str = \"utf-8\", errors: str = \"replace\"\n) -> bytes:\n value = pprint_thing(object) # get unicode representation of object\n return value.encode(encoding, errors)\n\n\ndef _enable_data_resource_formatter(enable: bool) -> None:\n if \"IPython\" not in sys.modules:\n # definitely not in IPython\n return\n from IPython import get_ipython\n\n ip = get_ipython()\n if ip is None:\n # still not in IPython\n return\n\n formatters = ip.display_formatter.formatters\n mimetype = \"application/vnd.dataresource+json\"\n\n if enable:\n if mimetype not in formatters:\n # define tableschema formatter\n from IPython.core.formatters import BaseFormatter\n\n class TableSchemaFormatter(BaseFormatter):\n print_method = \"_repr_data_resource_\"\n _return_type = (dict,)\n\n # register it:\n formatters[mimetype] = TableSchemaFormatter()\n # enable it if it's been disabled:\n formatters[mimetype].enabled = True\n else:\n # unregister tableschema mime-type\n if mimetype in formatters:\n formatters[mimetype].enabled = False\n\n\ndef default_pprint(thing: Any, max_seq_items: Optional[int] = None) -> str:\n return pprint_thing(\n thing,\n escape_chars=(\"\\t\", \"\\r\", \"\\n\"),\n quote_strings=True,\n max_seq_items=max_seq_items,\n )\n\n\ndef format_object_summary(\n obj,\n formatter: Callable,\n is_justify: bool = True,\n name: Optional[str] = None,\n indent_for_name: bool = True,\n line_break_each_value: bool = False,\n) -> str:\n \"\"\"\n Return the formatted obj as a unicode string\n\n Parameters\n ----------\n obj : object\n must be iterable and support __getitem__\n formatter : callable\n string formatter for an element\n is_justify : boolean\n should justify the display\n name : name, optional\n defaults to the class name of the obj\n indent_for_name : bool, default True\n Whether subsequent lines should be be indented to\n align with the name.\n line_break_each_value : bool, default False\n If True, inserts a line break for each value of ``obj``.\n If False, only break lines when the a line of values gets wider\n than the display 
width.\n\n .. versionadded:: 0.25.0\n\n Returns\n -------\n summary string\n \"\"\"\n from pandas.io.formats.console import get_console_size\n from pandas.io.formats.format import _get_adjustment\n\n display_width, _ = get_console_size()\n if display_width is None:\n display_width = get_option(\"display.width\") or 80\n if name is None:\n name = type(obj).__name__\n\n if indent_for_name:\n name_len = len(name)\n space1 = f'\\n{(\" \" * (name_len + 1))}'\n space2 = f'\\n{(\" \" * (name_len + 2))}'\n else:\n space1 = \"\\n\"\n space2 = \"\\n \" # space for the opening '['\n\n n = len(obj)\n if line_break_each_value:\n # If we want to vertically align on each value of obj, we need to\n # separate values by a line break and indent the values\n sep = \",\\n \" + \" \" * len(name)\n else:\n sep = \",\"\n max_seq_items = get_option(\"display.max_seq_items\") or n\n\n # are we a truncated display\n is_truncated = n > max_seq_items\n\n # adj can optionally handle unicode eastern asian width\n adj = _get_adjustment()\n\n def _extend_line(\n s: str, line: str, value: str, display_width: int, next_line_prefix: str\n ) -> Tuple[str, str]:\n\n if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:\n s += line.rstrip()\n line = next_line_prefix\n line += value\n return s, line\n\n def best_len(values: List[str]) -> int:\n if values:\n return max(adj.len(x) for x in values)\n else:\n return 0\n\n close = \", \"\n\n if n == 0:\n summary = f\"[]{close}\"\n elif n == 1 and not line_break_each_value:\n first = formatter(obj[0])\n summary = f\"[{first}]{close}\"\n elif n == 2 and not line_break_each_value:\n first = formatter(obj[0])\n last = formatter(obj[-1])\n summary = f\"[{first}, {last}]{close}\"\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in obj[:n]]\n tail = [formatter(x) for x in obj[-n:]]\n else:\n head = []\n tail = [formatter(x) for x in obj]\n\n # adjust all values to max length if needed\n if is_justify:\n if line_break_each_value:\n # Justify each string in the values of head and tail, so the\n # strings will right align when head and tail are stacked\n # vertically.\n head, tail = _justify(head, tail)\n elif is_truncated or not (\n len(\", \".join(head)) < display_width\n and len(\", \".join(tail)) < display_width\n ):\n # Each string in head and tail should align with each other\n max_length = max(best_len(head), best_len(tail))\n head = [x.rjust(max_length) for x in head]\n tail = [x.rjust(max_length) for x in tail]\n # If we are not truncated and we are only a single\n # line, then don't justify\n\n if line_break_each_value:\n # Now head and tail are of type List[Tuple[str]]. Below we\n # convert them into List[str], so there will be one string per\n # value. 
Also truncate items horizontally if wider than\n # max_space\n max_space = display_width - len(space2)\n value = tail[0]\n for max_items in reversed(range(1, len(value) + 1)):\n pprinted_seq = _pprint_seq(value, max_seq_items=max_items)\n if len(pprinted_seq) < max_space:\n break\n head = [_pprint_seq(x, max_seq_items=max_items) for x in head]\n tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]\n\n summary = \"\"\n line = space2\n\n for max_items in range(len(head)):\n word = head[max_items] + sep + \" \"\n summary, line = _extend_line(summary, line, word, display_width, space2)\n\n if is_truncated:\n # remove trailing space of last line\n summary += line.rstrip() + space2 + \"...\"\n line = space2\n\n for max_items in range(len(tail) - 1):\n word = tail[max_items] + sep + \" \"\n summary, line = _extend_line(summary, line, word, display_width, space2)\n\n # last value: no sep added + 1 space of width used for trailing ','\n summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)\n summary += line\n\n # right now close is either '' or ', '\n # Now we want to include the ']', but not the maybe space.\n close = \"]\" + close.rstrip(\" \")\n summary += close\n\n if len(summary) > (display_width) or line_break_each_value:\n summary += space1\n else: # one row\n summary += \" \"\n\n # remove initial space\n summary = \"[\" + summary[len(space2) :]\n\n return summary\n\n\ndef _justify(\n head: List[Sequence[str]], tail: List[Sequence[str]]\n) -> Tuple[List[Tuple[str, ...]], List[Tuple[str, ...]]]:\n \"\"\"\n Justify items in head and tail, so they are right-aligned when stacked.\n\n Parameters\n ----------\n head : list-like of list-likes of strings\n tail : list-like of list-likes of strings\n\n Returns\n -------\n tuple of list of tuples of strings\n Same as head and tail, but items are right aligned when stacked\n vertically.\n\n Examples\n --------\n >>> _justify([['a', 'b']], [['abc', 'abcd']])\n ([(' a', ' b')], [('abc', 'abcd')])\n \"\"\"\n combined = head + tail\n\n # For each position for the sequences in ``combined``,\n # find the length of the largest string.\n max_length = [0] * len(combined[0])\n for inner_seq in combined:\n length = [len(item) for item in inner_seq]\n max_length = [max(x, y) for x, y in zip(max_length, length)]\n\n # justify each item in each list-like in head and tail using max_length\n head = [\n tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head\n ]\n tail = [\n tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail\n ]\n # https://github.com/python/mypy/issues/4975\n # error: Incompatible return value type (got \"Tuple[List[Sequence[str]],\n # List[Sequence[str]]]\", expected \"Tuple[List[Tuple[str, ...]],\n # List[Tuple[str, ...]]]\")\n return head, tail # type: ignore\n\n\ndef format_object_attrs(\n obj: Sequence, include_dtype: bool = True\n) -> List[Tuple[str, Union[str, int]]]:\n \"\"\"\n Return a list of tuples of the (attr, formatted_value)\n for common attrs, including dtype, name, length\n\n Parameters\n ----------\n obj : object\n must be iterable\n include_dtype : bool\n If False, dtype won't be in the returned list\n\n Returns\n -------\n list of 2-tuple\n\n \"\"\"\n attrs: List[Tuple[str, Union[str, int]]] = []\n if hasattr(obj, \"dtype\") and include_dtype:\n # error: \"Sequence[Any]\" has no attribute \"dtype\"\n attrs.append((\"dtype\", f\"'{obj.dtype}'\")) # type: ignore\n if getattr(obj, \"name\", None) is not None:\n # error: \"Sequence[Any]\" has no 
attribute \"name\"\n attrs.append((\"name\", default_pprint(obj.name))) # type: ignore\n # error: \"Sequence[Any]\" has no attribute \"names\"\n elif getattr(obj, \"names\", None) is not None and any(obj.names): # type: ignore\n # error: \"Sequence[Any]\" has no attribute \"names\"\n attrs.append((\"names\", default_pprint(obj.names))) # type: ignore\n max_seq_items = get_option(\"display.max_seq_items\") or len(obj)\n if len(obj) > max_seq_items:\n attrs.append((\"length\", len(obj)))\n return attrs\n\n\nclass PrettyDict(Dict[_KT, _VT]):\n \"\"\"Dict extension to support abbreviated __repr__\"\"\"\n\n def __repr__(self) -> str:\n return pprint_thing(self)\n"
] | [
[
"pandas.to_datetime",
"numpy.linspace",
"numpy.asarray",
"numpy.around",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.dtype",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.nanops.nanmin",
"pandas.core.algorithms.unique",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.Categorical.from_codes",
"numpy.putmask",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.algorithms.quantile",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas._libs.Timestamp",
"numpy.modf",
"numpy.iterable",
"pandas.core.dtypes.common.is_bool_dtype",
"numpy.isfinite",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.IntervalIndex.from_breaks",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.core.dtypes.missing.isna",
"pandas.to_timedelta",
"pandas._libs.lib.infer_dtype",
"pandas.core.nanops.nanmax",
"numpy.isinf",
"pandas._libs.Timedelta"
],
[
"pandas.Categorical.from_codes",
"numpy.array",
"pandas.api.types.CategoricalDtype",
"pandas.Series",
"pandas.Categorical",
"pandas.Period",
"pandas.Interval",
"pandas._testing.assert_series_equal",
"pandas.Timestamp"
],
[
"pandas.MultiIndex",
"numpy.arange",
"pandas.Index",
"pandas.option_context",
"pandas.DataFrame",
"pandas.set_option",
"pandas._testing.reset_display_options"
],
[
"pandas.core.dtypes.common.is_list_like",
"pandas.core.common.random_state",
"numpy.linspace",
"matplotlib.cm.get_cmap"
],
[
"pandas.Series",
"pandas.Timestamp",
"numpy.isnan",
"numpy.asarray",
"pandas.core.dtypes.common.is_integer",
"pandas.Timedelta",
"pandas.Index",
"pandas.to_timedelta",
"numpy.timedelta64",
"pandas.core.indexes.datetimes.Timestamp",
"pandas._testing.assert_series_equal",
"numpy.array"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.arange",
"pandas._testing.assert_series_equal",
"pandas.Interval",
"pandas.IntervalIndex.from_tuples"
],
[
"numpy.true_divide",
"numpy.can_cast",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.issubdtype",
"numpy.dtype",
"numpy.mean",
"numpy.any",
"numpy.searchsorted",
"numpy.ravel_multi_index",
"numpy.histogram",
"numpy.subtract",
"numpy.logical_and.reduce",
"numpy.std",
"numpy.diff",
"numpy.zeros",
"numpy.power",
"numpy.ndim",
"numpy.atleast_2d",
"numpy.argsort",
"numpy.log2",
"numpy.absolute",
"numpy.isfinite",
"numpy.percentile",
"numpy.ones",
"numpy.sort",
"numpy.result_type",
"numpy.bincount",
"numpy.empty"
],
[
"pandas.core.indexes.numeric.Int64Index.join",
"numpy.asarray",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.tslibs.Resolution.from_attrname",
"numpy.where",
"pandas.core.indexes.extension.inherit_names",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.arrays.period.PeriodArray",
"pandas.core.ops.get_op_result_name",
"pandas.core.arrays.period.raise_on_incompatible",
"pandas.core.dtypes.common.is_float",
"pandas.errors.InvalidIndexError",
"pandas.util._decorators.Appender",
"pandas.core.arrays.period.period_array",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.indexes.base.ensure_index",
"pandas.core.arrays.period.validate_dtype_freq",
"pandas.core.indexes.numeric.Int64Index._simple_new",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.indexes.datetimes.Index.get_loc",
"pandas._libs.tslibs.Period",
"numpy.array",
"pandas.core.common.count_not_none",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.arrays.period.PeriodArray._generate_range",
"pandas._libs.tslibs.parsing.parse_time_string",
"pandas.core.dtypes.common.is_integer",
"numpy.ones",
"pandas.core.indexes.datetimes.Index",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.datetimes.Index.get_indexer",
"pandas.util._decorators.doc"
],
[
"pandas.Series",
"pandas.period_range",
"numpy.asarray",
"pandas.isna",
"pandas.Period",
"pandas._testing.assert_series_equal"
],
[
"pandas._testing.makeTimeSeries",
"pandas._testing.makeObjectSeries",
"pandas._testing.makeStringSeries"
],
[
"pandas.util.hash_pandas_object",
"pandas.Series",
"numpy.asarray",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas._testing.makeMixedDataFrame",
"numpy.concatenate",
"pandas._testing.makePeriodIndex",
"pandas._testing.assert_numpy_array_equal",
"pandas.util.hash_array",
"pandas.core.util.hashing.hash_tuples",
"pandas._testing.makeTimeSeries",
"pandas.Index",
"pandas._testing.assert_series_equal",
"pandas.MultiIndex",
"pandas._testing.makeTimedeltaIndex",
"pandas._testing.rands_array",
"pandas._testing.makeMissingDataframe",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas._testing.makeTimeDataFrame",
"pandas.Timestamp"
],
[
"pandas.io.json._table_schema.build_table_schema",
"pandas.io.json._normalize.convert_to_line_delimits",
"pandas.to_datetime",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.util._decorators.deprecate_kwarg",
"pandas.io.parsers._validate_integer",
"pandas.io.common.get_handle",
"pandas.errors.AbstractMethodError",
"pandas.io.json._table_schema.parse_table_schema",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.DataFrame",
"pandas.core.dtypes.common.ensure_str",
"numpy.dtype",
"pandas.io.common.get_filepath_or_buffer",
"pandas.core.reshape.concat.concat",
"pandas.io.common.infer_compression",
"pandas.util._decorators.deprecate_nonkeyword_arguments",
"pandas.isna"
],
[
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.misc_util.green_text",
"numpy.distutils.misc_util.is_string"
],
[
"pandas.to_datetime",
"pandas.PeriodIndex",
"pandas.Series",
"pandas.offsets.BQuarterEnd",
"pandas.offsets.Week",
"pandas._libs.tslibs.conversion.ensure_datetime64ns",
"pandas.offsets.QuarterBegin",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.offsets.BDay",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas.Categorical",
"pandas.core.arrays.period_array",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas.CategoricalIndex",
"pandas.TimedeltaIndex",
"numpy.datetime64",
"pandas.Timestamp"
],
[
"numpy.binary_repr",
"numpy.broadcast",
"numpy.all",
"numpy.searchsorted",
"numpy.ma.masked_where",
"numpy.complex64",
"numpy.full",
"numpy.outer",
"numpy.zeros",
"numpy.testing.assert_raises_regex",
"numpy.sctypes.values",
"numpy.testing.assert_raises",
"numpy.array",
"numpy.sum",
"numpy.void",
"numpy.indices",
"numpy.argwhere",
"numpy.testing.assert_array_equal",
"numpy.add",
"numpy.isinf",
"numpy.complex128",
"numpy.resize",
"numpy.asarray",
"numpy.ndarray",
"numpy.promote_types",
"numpy.seterr",
"numpy.geterrobj",
"numpy.iinfo",
"numpy.var",
"numpy.divide",
"numpy.allclose",
"numpy.uint32",
"numpy.reshape",
"numpy.less",
"numpy.float16",
"numpy.std",
"numpy.size",
"numpy.float32",
"numpy.timedelta64",
"numpy.int64",
"numpy.base_repr",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.require",
"numpy.errstate",
"numpy.random.RandomState",
"numpy.testing.assert_warns",
"numpy.maximum",
"numpy.ptp",
"numpy.ones",
"numpy.isscalar",
"numpy.empty",
"numpy.rollaxis",
"numpy.logical_xor",
"numpy.can_cast",
"numpy.take",
"numpy.linspace",
"numpy.around",
"numpy.longdouble",
"numpy.ndarray.sum",
"numpy.alltrue",
"numpy.mean",
"numpy.zeros_like",
"numpy.moveaxis",
"numpy.bool_",
"numpy.roll",
"numpy.trace",
"numpy.random.randint",
"numpy.testing.assert_equal",
"numpy.swapaxes",
"numpy.greater",
"numpy.clip",
"numpy.cumproduct",
"numpy.eye",
"numpy.int8",
"numpy.choose",
"numpy.uint16",
"numpy.count_nonzero",
"numpy.repeat",
"numpy.testing.assert_array_almost_equal",
"numpy.logical_not",
"numpy.set_string_function",
"numpy.nonzero",
"numpy.isnan",
"numpy.logical_or",
"numpy.transpose",
"numpy.argsort",
"numpy.correlate",
"numpy.diagonal",
"numpy.convolve",
"numpy.tile",
"numpy.uint64",
"numpy.dot",
"numpy.einsum",
"numpy.squeeze",
"numpy.dtype",
"numpy.random.randn",
"numpy.any",
"numpy.cross",
"numpy.clongdouble",
"numpy.obj2sctype",
"numpy.geterr",
"numpy.arange",
"numpy.uint8",
"numpy.empty_like",
"numpy.finfo",
"numpy.tensordot",
"numpy.ravel",
"numpy.ma.zeros",
"numpy.isclose",
"numpy.seterrobj",
"numpy.signbit",
"numpy.logical_and",
"numpy.abs",
"numpy.isfinite",
"numpy.intp",
"numpy.int32",
"numpy.compress",
"numpy.int16",
"numpy.result_type",
"numpy.float64",
"numpy.prod"
],
[
"pandas._testing.assert_frame_equal",
"numpy.linspace"
],
[
"pandas.CategoricalIndex",
"pandas.concat",
"pandas._testing.assert_produces_warning",
"numpy.random.random",
"pandas.Series",
"pandas.interval_range",
"pandas.DataFrame",
"numpy.round",
"numpy.random.rand",
"pandas.date_range",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_index_equal"
],
[
"numpy.matrix",
"numpy.dot",
"numpy.linspace",
"numpy.kron",
"numpy.trapz",
"numpy.partition",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.eye",
"numpy.testing.assert_almost_equal",
"numpy.apply_along_axis",
"numpy.testing.assert_raises_regex",
"numpy.multiply",
"numpy.isnan",
"numpy.asmatrix",
"numpy.testing.assert_raises",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.array",
"numpy.polynomial.polyutils.mapdomain",
"numpy.nditer",
"numpy.inner",
"numpy.sort",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.bmat",
"numpy.isscalar",
"numpy.average"
],
[
"pandas.util._decorators.Appender",
"numpy.arange",
"pandas._libs.window.indexers.calculate_variable_window_bounds",
"numpy.full",
"numpy.concatenate",
"pandas.tseries.offsets.Nano",
"numpy.append",
"numpy.zeros",
"numpy.empty"
],
[
"pandas._testing.assert_produces_warning",
"pandas.Series",
"pandas.api.extensions.register_series_accessor"
],
[
"pandas._testing.assert_almost_equal",
"pandas.Series",
"pandas._testing.assert_dict_equal",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"numpy.random.randint",
"pandas._testing.assert_numpy_array_equal",
"pandas._testing.makeCustomDataframe",
"numpy.sin",
"pandas._testing.assert_series_equal",
"pandas._testing.randbool",
"pandas._testing.assert_produces_warning",
"pandas.core.dtypes.common.is_list_like",
"numpy.isnan",
"numpy.log10",
"numpy.random.rand",
"pandas.date_range",
"numpy.errstate",
"numpy.array",
"pandas.core.dtypes.common.is_bool",
"pandas.eval",
"pandas.core.dtypes.common.is_scalar",
"numpy.float64"
],
[
"pandas._testing.assert_almost_equal",
"pandas._testing.assert_numpy_array_equal",
"numpy.asarray",
"pandas._libs.parsers.TextReader",
"pandas.DataFrame",
"numpy.dtype",
"pandas._testing.assert_frame_equal",
"numpy.array"
],
[
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"numpy.issubdtype",
"pandas._testing.assert_dict_equal",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.max",
"numpy.random.randn",
"numpy.mean",
"pandas.isna",
"pandas._testing.assert_frame_equal",
"numpy.random.randint",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.Index",
"pandas.cut",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas.Categorical.from_codes",
"pandas.Categorical",
"pandas.date_range",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"pandas.CategoricalIndex",
"pandas._testing.assert_equal",
"pandas.MultiIndex.from_arrays",
"pandas._testing.assert_categorical_equal",
"pandas.qcut"
],
[
"numpy.distutils._shell_utils.NativeParser.split",
"numpy.distutils.misc_util.make_temp_file",
"numpy.distutils.misc_util.is_string",
"numpy.distutils.log.set_verbosity",
"numpy.distutils.misc_util.get_shared_lib_extension",
"numpy.distutils.ccompiler.CCompiler.get_version",
"numpy.distutils.log.debug",
"numpy.distutils.ccompiler.CCompiler.__init__",
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.log.warn",
"numpy.distutils.core.get_distribution",
"numpy.distutils.exec_command.find_executable",
"numpy.distutils.log.info",
"numpy.compat.open_latin1",
"numpy.distutils.ccompiler.gen_lib_options",
"numpy.distutils.misc_util.all_strings"
],
[
"pandas._testing.assert_almost_equal",
"pandas.to_datetime",
"pandas.Series",
"numpy.linspace",
"pandas.plotting.table",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.tests.plotting.common.TestPlotBase.setup_method",
"matplotlib.rcdefaults",
"numpy.random.randn",
"pandas.core.dtypes.api.is_list_like",
"pandas.plotting._matplotlib.compat._mpl_ge_3_1_0",
"pandas._testing.RNGContext",
"pandas._testing.makeDateIndex",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.core.arrays.integer_array",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"matplotlib.cycler",
"pandas._testing.assert_series_equal",
"matplotlib.pyplot.figure",
"numpy.isclose",
"pandas._testing.assert_produces_warning",
"pandas.plotting.plot_params.use",
"pandas.Categorical",
"pandas.Timedelta",
"numpy.delete",
"matplotlib.colors.ListedColormap",
"pandas._testing.close",
"numpy.random.rand",
"pandas.date_range",
"pandas.MultiIndex.from_product",
"numpy.array",
"pandas.tests.plotting.common._check_plot_works",
"matplotlib.cm.jet",
"numpy.random.random",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.ones",
"pandas.Period",
"pandas._testing.makeTimeDataFrame",
"numpy.random.uniform",
"pandas.io.formats.printing.pprint_thing"
],
[
"pandas.Series",
"pandas.DatetimeIndex",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal"
],
[
"numpy.testing.assert_"
],
[
"pandas.read_excel",
"pandas.io.excel.ExcelFile",
"pandas._testing.ensure_clean",
"pandas.ExcelFile",
"pandas._testing.assert_frame_equal"
],
[
"pandas.Series",
"numpy.isfinite",
"pandas.Categorical",
"pandas.option_context",
"pandas._testing.assert_series_equal",
"pandas.Timestamp"
],
[
"pandas.tseries.offsets.Day",
"pandas._libs.tslibs.conversion.pydt_to_i8",
"pandas.Timestamp.fromtimestamp",
"pandas.Timestamp.strptime",
"pandas._testing.get_locales",
"numpy.iinfo",
"pandas.Timestamp.today",
"pandas.Timestamp.utcfromtimestamp",
"pandas.Timestamp.fromisocalendar",
"pandas.Timestamp.combine",
"pandas.Timestamp.min.to_pydatetime",
"pandas.Timestamp.utcnow",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas._testing.assert_produces_warning",
"pandas.compat.numpy.np_datetime64_compat",
"pandas.Timedelta",
"numpy.timedelta64",
"numpy.int64",
"numpy.random.seed",
"pandas.Timestamp.max.to_pydatetime",
"numpy.datetime64",
"pandas.tseries.offsets.Nano",
"pandas._testing.set_locale",
"pandas.Timestamp.now",
"pandas.Period",
"pandas.Timestamp"
],
[
"numpy._pytesttester.PytestTester"
],
[
"pandas.timedelta_range",
"pandas.tseries.offsets.Day",
"numpy.arange",
"pandas.Timedelta",
"pandas.tseries.offsets.Second",
"numpy.random.normal",
"pandas.to_timedelta",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_index_equal"
],
[
"pandas._config.get_option",
"pandas.core.dtypes.inference.is_sequence",
"pandas.io.formats.console.get_console_size",
"pandas.io.formats.format._get_adjustment"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.24",
"1.22",
"1.23"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XDynames/pytorch-lightning | [
"a5d1176cf6ef9e637144f980da11bbe63290c994",
"d5254ff9dfb67fba388de224a320f3a562561a80"
] | [
"pytorch_lightning/core/lightning.py",
"pytorch_lightning/callbacks/early_stopping.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport inspect\nimport os\nimport re\nimport tempfile\nfrom abc import ABC, abstractmethod\nfrom argparse import Namespace\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.distributed as torch_distrib\nfrom torch import Tensor\nfrom torch.nn import Module\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.core.grads import GradInformation\nfrom pytorch_lightning.core.hooks import ModelHooks\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES, ModelIO\nfrom pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin\nfrom pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args\nfrom pytorch_lightning.core.step_result import TrainResult, EvalResult\n\ntry:\n import torch_xla.core.xla_model as xm\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\nclass LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, ModelHooks, Module):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.exp_save_path = None\n\n #: The current epoch\n self.current_epoch = 0\n\n #: Total training batches seen across all epochs\n self.global_step = 0\n\n self.loaded_optimizer_states_dict = {}\n\n #: Pointer to the trainer object\n self.trainer = None\n\n #: Pointer to the logger object\n self.logger = None\n\n #: True if using dp\n self.use_dp = False\n\n #: True if using ddp\n self.use_ddp = False\n\n #: True if using ddp2\n self.use_ddp2 = False\n\n # True if on tpu\n self.use_tpu = False\n\n #: True if using amp\n self.use_amp = False\n\n #: The precision used\n self.precision = 32\n\n # optionally can be set by user\n self._example_input_array = None\n self._datamodule = None\n\n @property\n def example_input_array(self) -> Any:\n return self._example_input_array\n\n @example_input_array.setter\n def example_input_array(self, example: Any) -> None:\n self._example_input_array = example\n\n @property\n def datamodule(self) -> Any:\n return self._datamodule\n\n @datamodule.setter\n def datamodule(self, datamodule: Any) -> None:\n self._datamodule = datamodule\n\n @property\n def on_gpu(self):\n \"\"\"\n True if your model is currently running on GPUs.\n Useful to set flags around the LightningModule for different CPU vs GPU behavior.\n \"\"\"\n return self.device.type == 'cuda'\n\n def print(self, *args, **kwargs) -> None:\n r\"\"\"\n Prints only from process 0. 
Use this in any distributed mode to log only once.\n\n Args:\n *args: The thing to print. Will be passed to Python's built-in print function.\n **kwargs: Will be passed to Python's built-in print function.\n\n Example:\n\n .. code-block:: python\n\n def forward(self, x):\n self.print(x, 'in forward')\n\n \"\"\"\n if self.trainer.is_global_zero:\n print(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n r\"\"\"\n Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define\n the operations you want to use for prediction (i.e.: on a server or as a feature extractor).\n\n Normally you'd call ``self()`` from your :meth:`training_step` method.\n This makes it easy to write a complex system for training with the outputs\n you'd want in a prediction setting.\n\n You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful\n when using the module outside Lightning in a production setting.\n\n Args:\n *args: Whatever you decide to pass into the forward method.\n **kwargs: Keyword arguments are also possible.\n\n Return:\n Predicted output\n\n Examples:\n .. code-block:: python\n\n # example if we were using this model as a feature extractor\n def forward(self, x):\n feature_maps = self.convnet(x)\n return feature_maps\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n feature_maps = self(x)\n logits = self.classifier(feature_maps)\n\n # ...\n return loss\n\n # splitting it this way allows model to be used a feature extractor\n model = MyModelAbove()\n\n inputs = server.get_request()\n results = model(inputs)\n server.write_results(results)\n\n # -------------\n # This is in stark contrast to torch.nn.Module where normally you would have this:\n def forward(self, batch):\n x, y = batch\n feature_maps = self.convnet(x)\n logits = self.classifier(feature_maps)\n return logits\n\n \"\"\"\n\n def training_step(self, *args, **kwargs):\n r\"\"\"\n Here you compute and return the training loss and some additional metrics for e.g.\n the progress bar or logger.\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): Integer displaying index of this batch\n optimizer_idx (int): When using multiple optimizers, this argument will also be present.\n hiddens(:class:`~torch.Tensor`): Passed in if\n :paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient\n functions for logging, distributed sync and error checking.\n\n In this step you'd normally do the forward pass and calculate the loss for a batch.\n You can also do fancier things like multiple forward passes or something model specific.\n\n Example::\n\n def training_step(self, batch, batch_idx):\n x, y, z = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, x)\n\n # TrainResult auto-detaches the loss after the optimization steps are complete\n result = pl.TrainResult(minimize=loss)\n\n The return object :class:`~pytorch_lightning.core.step_result.TrainResult` controls where to log,\n when to log (step or epoch) and syncing with multiple GPUs.\n\n .. 
code-block:: python\n\n # log to progress bar and logger\n result.log('train_loss', loss, prog_bar=True, logger=True)\n\n # sync metric value across GPUs in distributed training\n result.log('train_loss_2', loss, sync_dist=True)\n\n # log to progress bar as well\n result.log('train_loss_2', loss, prog_bar=True)\n\n # assign arbitrary values\n result.predictions = predictions\n result.some_value = 'some_value'\n\n If you define multiple optimizers, this step will be called with an additional\n ``optimizer_idx`` parameter.\n\n .. code-block:: python\n\n # Multiple optimizers (e.g.: GANs)\n def training_step(self, batch, batch_idx, optimizer_idx):\n if optimizer_idx == 0:\n # do training_step with encoder\n if optimizer_idx == 1:\n # do training_step with decoder\n\n\n If you add truncated back propagation through time you will also get an additional\n argument with the hidden states of the previous step.\n\n .. code-block:: python\n\n # Truncated back-propagation through time\n def training_step(self, batch, batch_idx, hiddens):\n # hiddens are the hidden states from the previous truncated backprop step\n ...\n out, hiddens = self.lstm(data, hiddens)\n ...\n\n # TrainResult auto-detaches hiddens\n result = pl.TrainResult(minimize=loss, hiddens=hiddens)\n return result\n\n Notes:\n The loss value shown in the progress bar is smoothed (averaged) over the last values,\n so it differs from the actual loss returned in train/validation step.\n \"\"\"\n rank_zero_warn('`training_step` must be implemented to be used with the Lightning Trainer')\n\n def training_step_end(self, *args, **kwargs):\n \"\"\"\n Use this when training with dp or ddp2 because :meth:`training_step`\n will operate on only part of the batch. However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]\n training_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in `training_step` for each batch part.\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient\n functions for logging, distributed sync and error checking.\n\n When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:\n\n .. code-block:: python\n\n def training_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n\n # softmax uses only a portion of the batch in the denomintaor\n loss = self.softmax(out)\n loss = nce_loss(loss)\n return pl.TrainResult(loss)\n\n If you wish to do something with all the parts of the batch, then use this method to do it:\n\n .. 
code-block:: python\n\n def training_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n result = pl.TrainResult()\n result.out = out\n\n def training_step_end(self, training_step_outputs):\n # this out is now the full size of the batch\n all_outs = training_step_outputs.out\n\n # this softmax now uses the full batch\n loss = nce_loss(all_outs)\n result = pl.TrainResult(loss)\n return result\n\n See Also:\n See the :ref:`multi-gpu-training` guide for more details.\n \"\"\"\n\n def training_epoch_end(\n self, outputs: Union[TrainResult, List[TrainResult]]\n ):\n \"\"\"\n Called at the end of the training epoch with the outputs of all training steps.\n Use this in case you need to do something with all the outputs for every training_step.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n train_outs = []\n for train_batch in train_data:\n out = training_step(train_batch)\n train_outs.append(out)\n training_epoch_end(train_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`training_step`, or if there are\n multiple dataloaders, a list containing a list of outputs for each dataloader.\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient\n functions for logging, distributed sync and error checking.\n\n Note:\n If this method is not overridden, this won't be called.\n\n Example::\n\n def training_epoch_end(self, training_step_outputs):\n # do something with all training_step outputs\n return result\n\n With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each training step for that dataloader.\n\n .. code-block:: python\n\n def training_epoch_end(self, outputs):\n epoch_result = pl.TrainResult()\n for train_result in outputs:\n all_losses = train_result.minimize\n # do something with all losses\n return results\n \"\"\"\n\n def validation_step(self, *args, **kwargs) -> EvalResult:\n r\"\"\"\n Operates on a single batch of data from the validation set.\n In this step you'd might generate examples or calculate anything of interest like accuracy.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n val_outs = []\n for val_batch in val_data:\n out = validation_step(train_batch)\n val_outs.append(out)\n validation_epoch_end(val_outs)\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): The index of this batch\n dataloader_idx (int): The index of the dataloader that produced this batch\n (only if multiple val datasets used)\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. code-block:: python\n\n # pseudocode of order\n out = validation_step()\n if defined('validation_step_end'):\n out = validation_step_end(out)\n out = validation_epoch_end(out)\n\n\n .. code-block:: python\n\n # if you have one val dataloader:\n def validation_step(self, batch, batch_idx)\n\n # if you have multiple val dataloaders:\n def validation_step(self, batch, batch_idx, dataloader_idx)\n\n Examples:\n .. 
code-block:: python\n\n # CASE 1: A single validation dataset\n def validation_step(self, batch, batch_idx):\n x, y = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, y)\n\n # log 6 example images\n # or generated text... or whatever\n sample_imgs = x[:6]\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('example_images', grid, 0)\n\n # calculate acc\n labels_hat = torch.argmax(out, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n\n # log the outputs!\n result = pl.EvalResult(checkpoint_on=loss)\n result.log_dict({'val_loss': loss, 'val_acc': val_acc})\n return result\n\n If you pass in multiple val datasets, validation_step will have an additional argument.\n\n .. code-block:: python\n\n # CASE 2: multiple validation datasets\n def validation_step(self, batch, batch_idx, dataloader_idx):\n # dataloader_idx tells you which dataset this is.\n\n Note:\n If you don't need to validate you don't need to implement this method.\n\n Note:\n When the :meth:`validation_step` is called, the model has been put in eval mode\n and PyTorch gradients have been disabled. At the end of validation,\n the model goes back to training mode and gradients are enabled.\n \"\"\"\n\n def validation_step_end(self, *args, **kwargs) -> EvalResult:\n \"\"\"\n Use this when validating with dp or ddp2 because :meth:`validation_step`\n will operate on only part of the batch. However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code.\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]\n validation_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in :meth:`validation_step`\n for each batch part.\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. code-block:: python\n\n # WITHOUT validation_step_end\n # if used in DP or DDP2, this batch is 1/num_gpus large\n def validation_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n loss = self.softmax(out)\n loss = nce_loss(loss)\n result = pl.EvalResult()\n result.log('val_loss', loss)\n return result\n\n # --------------\n # with validation_step_end to do softmax over the full batch\n def validation_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n result = pl.EvalResult()\n result.out = out\n return result\n\n def validation_epoch_end(self, output_results):\n # this out is now the full size of the batch\n all_val_step_outs = output_results.out\n loss = nce_loss(all_val_step_outs)\n\n result = pl.EvalResult(checkpoint_on=loss)\n result.log('val_loss', loss)\n return result\n\n See Also:\n See the :ref:`multi-gpu-training` guide for more details.\n \"\"\"\n\n def validation_end(self, outputs):\n \"\"\"\n Warnings:\n Deprecated in v0.7.0. Use :meth:`validation_epoch_end` instead.\n Will be removed in 1.0.0.\n \"\"\"\n\n def validation_epoch_end(\n self, outputs: Union[EvalResult, List[EvalResult]]\n ) -> EvalResult:\n \"\"\"\n Called at the end of the validation epoch with the outputs of all validation steps.\n\n .. 
code-block:: python\n\n # the pseudocode for these calls\n val_outs = []\n for val_batch in val_data:\n out = validation_step(val_batch)\n val_outs.append(out)\n validation_epoch_end(val_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`validation_step`, or if there\n are multiple dataloaders, a list containing a list of outputs for each dataloader.\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n Note:\n If you didn't define a :meth:`validation_step`, this won't be called.\n\n - The outputs here are strictly for logging or progress bar.\n - If you don't need to display anything, don't return anything.\n\n Examples:\n With a single dataloader:\n\n .. code-block:: python\n\n def validation_epoch_end(self, val_step_outputs):\n # do something with the outputs of all val batches\n all_val_preds = val_step_outputs.predictions\n\n val_step_outputs.some_result = calc_all_results(all_val_preds)\n return val_step_outputs\n\n With multiple dataloaders, `outputs` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each validation step for that dataloader.\n\n .. code-block:: python\n\n def validation_epoch_end(self, outputs):\n for dataloader_output_result in outputs:\n dataloader_outs = dataloader_output_result.dataloader_i_outputs\n\n result = pl.EvalResult()\n result.log('final_metric', final_value)\n return result\n \"\"\"\n\n def test_step(self, *args, **kwargs) -> EvalResult:\n r\"\"\"\n Operates on a single batch of data from the test set.\n In this step you'd normally generate examples or calculate anything of interest\n such as accuracy.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n test_outs = []\n for test_batch in test_data:\n out = test_step(test_batch)\n test_outs.append(out)\n test_epoch_end(test_outs)\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): The index of this batch.\n dataloader_idx (int): The index of the dataloader that produced this batch\n (only if multiple test datasets used).\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. code-block:: python\n\n # if you have one test dataloader:\n def test_step(self, batch, batch_idx)\n\n # if you have multiple test dataloaders:\n def test_step(self, batch, batch_idx, dataloader_idx)\n\n Examples:\n .. code-block:: python\n\n # CASE 1: A single test dataset\n def test_step(self, batch, batch_idx):\n x, y = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, y)\n\n # log 6 example images\n # or generated text... or whatever\n sample_imgs = x[:6]\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('example_images', grid, 0)\n\n # calculate acc\n labels_hat = torch.argmax(out, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n\n # log the outputs!\n result = pl.EvalResult(checkpoint_on=loss)\n result.log_dict({'test_loss': loss, 'test_acc': test_acc})\n return resultt\n\n If you pass in multiple validation datasets, :meth:`test_step` will have an additional\n argument.\n\n .. 
code-block:: python\n\n # CASE 2: multiple test datasets\n def test_step(self, batch, batch_idx, dataloader_idx):\n # dataloader_idx tells you which dataset this is.\n\n Note:\n If you don't need to validate you don't need to implement this method.\n\n Note:\n When the :meth:`test_step` is called, the model has been put in eval mode and\n PyTorch gradients have been disabled. At the end of the test epoch, the model goes back\n to training mode and gradients are enabled.\n \"\"\"\n\n def test_step_end(self, *args, **kwargs) -> EvalResult:\n \"\"\"\n Use this when testing with dp or ddp2 because :meth:`test_step` will operate\n on only part of the batch. However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code.\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]\n test_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in :meth:`test_step` for each batch part.\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n .. code-block:: python\n\n # WITHOUT test_step_end\n # if used in DP or DDP2, this batch is 1/num_gpus large\n def test_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n loss = self.softmax(out)\n loss = nce_loss(loss)\n result = pl.EvalResult()\n result.log('test_loss', loss)\n return result\n\n # --------------\n # with test_step_end to do softmax over the full batch\n def test_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n result = pl.EvalResult()\n result.out = out\n return result\n\n def test_epoch_end(self, output_results):\n # this out is now the full size of the batch\n all_test_step_outs = output_results.out\n loss = nce_loss(all_test_step_outs)\n\n result = pl.EvalResult(checkpoint_on=loss)\n result.log('test_loss', loss)\n return result\n\n See Also:\n See the :ref:`multi-gpu-training` guide for more details.\n \"\"\"\n\n def test_end(self, outputs):\n \"\"\"\n Warnings:\n Deprecated in v0.7.0. Use :meth:`test_epoch_end` instead.\n Will be removed in 1.0.0.\n \"\"\"\n\n def test_epoch_end(\n self, outputs: Union[EvalResult, List[EvalResult]]\n ) -> EvalResult:\n\n \"\"\"\n Called at the end of a test epoch with the output of all test steps.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n test_outs = []\n for test_batch in test_data:\n out = test_step(test_batch)\n test_outs.append(out)\n test_epoch_end(test_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`test_step_end`, or if there\n are multiple dataloaders, a list containing a list of outputs for each dataloader\n\n Return:\n :class:`~pytorch_lightning.core.step_result.TrainResult`\n\n Note:\n If you didn't define a :meth:`test_step`, this won't be called.\n\n - The outputs here are strictly for logging or progress bar.\n - If you don't need to display anything, don't return anything.\n\n Examples:\n With a single dataloader:\n\n .. code-block:: python\n\n def test_epoch_end(self, outputs):\n # do something with the outputs of all test batches\n all_test_preds = test_step_outputs.predictions\n\n test_step_outputs.some_result = calc_all_results(all_test_preds)\n return test_step_outputs\n\n With multiple dataloaders, `outputs` will be a list of lists. 
The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each test step for that dataloader.\n\n .. code-block:: python\n\n def test_epoch_end(self, outputs):\n for dataloader_output_result in outputs:\n dataloader_outs = dataloader_output_result.dataloader_i_outputs\n\n result = pl.EvalResult()\n result.log('final_metric', final_value)\n return results\n \"\"\"\n\n def configure_ddp(self, model: 'LightningModule', device_ids: List[int]) -> DistributedDataParallel:\n r\"\"\"\n Override to init DDP in your own way or with your own wrapper.\n The only requirements are that:\n\n 1. On a validation batch, the call goes to ``model.validation_step``.\n 2. On a training batch, the call goes to ``model.training_step``.\n 3. On a testing batch, the call goes to ``model.test_step``.\n\n Args:\n model: the :class:`LightningModule` currently being optimized.\n device_ids: the list of GPU ids.\n\n Return:\n DDP wrapped model\n\n Examples:\n .. code-block:: python\n\n # default implementation used in Trainer\n def configure_ddp(self, model, device_ids):\n # Lightning DDP simply routes to test_step, val_step, etc...\n model = LightningDistributedDataParallel(\n model,\n device_ids=device_ids,\n find_unused_parameters=True\n )\n return model\n\n \"\"\"\n model = LightningDistributedDataParallel(model, device_ids=device_ids, find_unused_parameters=True)\n return model\n\n def _init_slurm_connection(self) -> None:\n \"\"\"\"\"\"\n \"\"\"\n Sets up environment variables necessary for pytorch distributed communications\n based on slurm environment.\n \"\"\"\n # use slurm job id for the port number\n # guarantees unique ports across jobs from same grid search\n try:\n # use the last 4 numbers in the job id as the id\n default_port = os.environ['SLURM_JOB_ID']\n default_port = default_port[-4:]\n\n # all ports should be in the 10k+ range\n default_port = int(default_port) + 15000\n\n except Exception:\n default_port = 12910\n\n # if user gave a port number, use that one instead\n try:\n default_port = os.environ['MASTER_PORT']\n except Exception:\n os.environ['MASTER_PORT'] = str(default_port)\n\n # figure out the root node addr\n try:\n root_node = os.environ['SLURM_NODELIST'].split(' ')[0]\n except Exception:\n root_node = '127.0.0.1'\n\n root_node = self.trainer.resolve_root_node_address(root_node)\n os.environ['MASTER_ADDR'] = root_node\n\n def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True) -> None:\n \"\"\"\n Override to define your custom way of setting up a distributed environment.\n\n Lightning's implementation uses env:// init by default and sets the first node as root\n for SLURM managed cluster.\n\n Args:\n global_rank: The global process idx.\n world_size: Number of GPUs being use across all nodes. (num_nodes * num_gpus).\n is_slurm_managing_tasks: is cluster managed by SLURM.\n\n \"\"\"\n if is_slurm_managing_tasks:\n self._init_slurm_connection()\n\n if 'MASTER_ADDR' not in os.environ:\n rank_zero_warn(\"MASTER_ADDR environment variable is not defined. Set as localhost\")\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n log.debug(f\"MASTER_ADDR: {os.environ['MASTER_ADDR']}\")\n\n if 'MASTER_PORT' not in os.environ:\n rank_zero_warn(\"MASTER_PORT environment variable is not defined. 
Set as 12910\")\n os.environ['MASTER_PORT'] = '12910'\n log.debug(f\"MASTER_PORT: {os.environ['MASTER_PORT']}\")\n\n if 'WORLD_SIZE' in os.environ and int(os.environ['WORLD_SIZE']) != world_size:\n rank_zero_warn(\n f\"WORLD_SIZE environment variable ({os.environ['WORLD_SIZE']}) \"\n f\"is not equal to the computed world size ({world_size}). Ignored.\"\n )\n\n torch_backend = \"nccl\" if self.trainer.on_gpu else \"gloo\"\n log.info(f\"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank+1}/{world_size}\")\n torch_distrib.init_process_group(torch_backend, rank=global_rank, world_size=world_size)\n\n def configure_sync_batchnorm(self, model: 'LightningModule') -> 'LightningModule':\n \"\"\"\n Add global batchnorm for a model spread across multiple GPUs and nodes.\n\n Override to synchronize batchnorm between specific process groups instead\n of the whole world or use a different sync_bn like `apex`'s version.\n\n Args:\n model: pointer to current :class:`LightningModule`.\n\n Return:\n LightningModule with batchnorm layers synchronized between process groups\n \"\"\"\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group=None)\n\n return model\n\n def configure_apex(\n self, amp: object, model: 'LightningModule', optimizers: List[Optimizer], amp_level: str\n ) -> Tuple['LightningModule', List[Optimizer]]:\n r\"\"\"\n Override to init AMP your own way.\n Must return a model and list of optimizers.\n\n Args:\n amp: pointer to amp library object.\n model: pointer to current :class:`LightningModule`.\n optimizers: list of optimizers passed in :meth:`configure_optimizers`.\n amp_level: AMP mode chosen ('O1', 'O2', etc...)\n\n Return:\n Apex wrapped model and optimizers\n\n Examples:\n .. code-block:: python\n\n # Default implementation used by Trainer.\n def configure_apex(self, amp, model, optimizers, amp_level):\n model, optimizers = amp.initialize(\n model, optimizers, opt_level=amp_level,\n )\n\n return model, optimizers\n \"\"\"\n model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level)\n\n return model, optimizers\n\n def configure_optimizers(\n self,\n ) -> Optional[Union[Optimizer, Sequence[Optimizer], Dict, Sequence[Dict], Tuple[List, List]]]:\n r\"\"\"\n Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n Return:\n Any of these 6 options.\n\n - Single optimizer.\n - List or Tuple - List of optimizers.\n - Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).\n - Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler' key which value is a single LR scheduler or lr_dict.\n - Tuple of dictionaries as described, with an optional 'frequency' key.\n - None - Fit will run without any optimizer.\n\n Note:\n The 'frequency' value is an int corresponding to the number of sequential batches\n optimized with the specific optimizer. It should be given to none or to all of the optimizers.\n There is a difference between passing multiple optimizers in a list,\n and passing multiple optimizers in dictionaries with a frequency of 1:\n In the former case, all optimizers will operate on the given batch in each optimization step.\n In the latter, only one optimizer will operate on the given batch at every step.\n\n The lr_dict is a dictionary which contains scheduler and its associated configuration.\n It has five keys. The default configuration is shown below.\n\n .. 
code-block:: python\n\n {\n 'scheduler': lr_scheduler, # The LR schduler\n 'interval': 'epoch', # The unit of the scheduler's step size\n 'frequency': 1, # The frequency of the scheduler\n 'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler\n 'monitor': 'val_loss' # Metric to monitor\n }\n\n If user only provides LR schedulers, then their configuration will set to default as shown above.\n\n Examples:\n .. code-block:: python\n\n # most cases\n def configure_optimizers(self):\n opt = Adam(self.parameters(), lr=1e-3)\n return opt\n\n # multiple optimizer case (e.g.: GAN)\n def configure_optimizers(self):\n generator_opt = Adam(self.model_gen.parameters(), lr=0.01)\n disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)\n return generator_opt, disriminator_opt\n\n # example with learning rate schedulers\n def configure_optimizers(self):\n generator_opt = Adam(self.model_gen.parameters(), lr=0.01)\n disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)\n discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)\n return [generator_opt, disriminator_opt], [discriminator_sched]\n\n # example with step-based learning rate schedulers\n def configure_optimizers(self):\n gen_opt = Adam(self.model_gen.parameters(), lr=0.01)\n dis_opt = Adam(self.model_disc.parameters(), lr=0.02)\n gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),\n 'interval': 'step'} # called after each training step\n dis_sched = CosineAnnealing(discriminator_opt, T_max=10) # called every epoch\n return [gen_opt, dis_opt], [gen_sched, dis_sched]\n\n # example with optimizer frequencies\n # see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1\n # https://arxiv.org/abs/1704.00028\n def configure_optimizers(self):\n gen_opt = Adam(self.model_gen.parameters(), lr=0.01)\n dis_opt = Adam(self.model_disc.parameters(), lr=0.02)\n n_critic = 5\n return (\n {'optimizer': dis_opt, 'frequency': n_critic},\n {'optimizer': gen_opt, 'frequency': 1}\n )\n\n Note:\n\n Some things to know:\n\n - Lightning calls ``.backward()`` and ``.step()`` on each optimizer\n and learning rate scheduler as needed.\n\n - If you use 16-bit precision (``precision=16``), Lightning will automatically\n handle the optimizers for you.\n\n - If you use multiple optimizers, :meth:`training_step` will have an additional\n ``optimizer_idx`` parameter.\n\n - If you use LBFGS Lightning handles the closure function automatically for you.\n\n - If you use multiple optimizers, gradients will be calculated only\n for the parameters of current optimizer at each training step.\n\n - If you need to control how often those optimizers step or override the\n default ``.step()`` schedule, override the :meth:`optimizer_step` hook.\n\n - If you only want to call a learning rate scheduler every ``x`` step or epoch,\n or want to monitor a custom metric, you can specify these in a lr_dict:\n\n .. 
code-block:: python\n\n {\n 'scheduler': lr_scheduler,\n 'interval': 'step', # or 'epoch'\n 'monitor': 'val_f1',\n 'frequency': x,\n }\n\n \"\"\"\n rank_zero_warn('`configure_optimizers` must be implemented to be used with the Lightning Trainer')\n\n def optimizer_step(\n self,\n epoch: int,\n batch_idx: int,\n optimizer: Optimizer,\n optimizer_idx: int,\n second_order_closure: Optional[Callable] = None,\n on_tpu: bool = False,\n using_native_amp: bool = False,\n using_lbfgs: bool = False,\n ) -> None:\n r\"\"\"\n Override this method to adjust the default way the\n :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.\n By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example\n once per optimizer.\n\n Args:\n epoch: Current epoch\n batch_idx: Index of current batch\n optimizer: A PyTorch optimizer\n optimizer_idx: If you used multiple optimizers this indexes into that list.\n second_order_closure: closure for second order methods\n on_tpu: true if TPU backward is required\n using_native_amp: True if using native amp\n using_lbfgs: True if the matching optimizer is lbfgs\n\n Examples:\n .. code-block:: python\n\n # DEFAULT\n def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,\n second_order_closure, on_tpu, using_native_amp, using_lbfgs):\n optimizer.step()\n\n # Alternating schedule for optimizer steps (i.e.: GANs)\n def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,\n second_order_closure, on_tpu, using_native_amp, using_lbfgs):\n # update generator opt every 2 steps\n if optimizer_idx == 0:\n if batch_idx % 2 == 0 :\n optimizer.step()\n optimizer.zero_grad()\n\n # update discriminator opt every 4 steps\n if optimizer_idx == 1:\n if batch_idx % 4 == 0 :\n optimizer.step()\n optimizer.zero_grad()\n\n # ...\n # add as many optimizers as you want\n\n\n Here's another example showing how to use this for more advanced things such as\n learning rate warm-up:\n\n .. code-block:: python\n\n # learning rate warm-up\n def optimizer_step(self, current_epoch, batch_idx, optimizer,\n optimizer_idx, second_order_closure, on_tpu, using_native_amp, using_lbfgs):\n # warm up lr\n if self.trainer.global_step < 500:\n lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)\n for pg in optimizer.param_groups:\n pg['lr'] = lr_scale * self.learning_rate\n\n # update params\n optimizer.step()\n optimizer.zero_grad()\n\n Note:\n If you also override the :meth:`~pytorch_lightning.core.hooks.ModelHooks.on_before_zero_grad`\n model hook don't forget to add the call to it before ``optimizer.zero_grad()`` yourself.\n\n \"\"\"\n if on_tpu:\n xm.optimizer_step(optimizer)\n elif using_native_amp:\n self.trainer.scaler.step(optimizer)\n elif using_lbfgs:\n optimizer.step(second_order_closure)\n else:\n optimizer.step()\n\n def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):\n optimizer.zero_grad()\n\n def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:\n r\"\"\"\n When using truncated backpropagation through time, each batch must be split along the\n time dimension. Lightning handles this by default, but for custom behavior override\n this function.\n\n Args:\n batch: Current batch\n split_size: The size of the split\n\n Return:\n List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated\n back propagation through time. The default implementation splits root level Tensors and\n Sequences at dim=1 (i.e. time dim). 
It assumes that each time dim is the same length.\n\n Examples:\n .. code-block:: python\n\n def tbptt_split_batch(self, batch, split_size):\n splits = []\n for t in range(0, time_dims[0], split_size):\n batch_split = []\n for i, x in enumerate(batch):\n if isinstance(x, torch.Tensor):\n split_x = x[:, t:t + split_size]\n elif isinstance(x, collections.Sequence):\n split_x = [None] * len(x)\n for batch_idx in range(len(x)):\n split_x[batch_idx] = x[batch_idx][t:t + split_size]\n\n batch_split.append(split_x)\n\n splits.append(batch_split)\n\n return splits\n\n Note:\n Called in the training loop after\n :meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`\n if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.\n Each returned batch split is passed separately to :meth:`training_step`.\n\n \"\"\"\n time_dims = [len(x[0]) for x in batch if isinstance(x, (torch.Tensor, collections.Sequence))]\n assert len(time_dims) >= 1, \"Unable to determine batch time dimension\"\n assert all(x == time_dims[0] for x in time_dims), \"Batch time dimension length is ambiguous\"\n\n splits = []\n for t in range(0, time_dims[0], split_size):\n batch_split = []\n for i, x in enumerate(batch):\n if isinstance(x, torch.Tensor):\n split_x = x[:, t : t + split_size]\n elif isinstance(x, collections.Sequence):\n split_x = [None] * len(x)\n for batch_idx in range(len(x)):\n split_x[batch_idx] = x[batch_idx][t : t + split_size]\n\n batch_split.append(split_x)\n\n splits.append(batch_split)\n\n return splits\n\n def prepare_data(self) -> None:\n \"\"\"\n Use this to download and prepare data.\n\n .. warning:: DO NOT set state to the model (use `setup` instead)\n since this is NOT called on every GPU in DDP/TPU\n\n Example::\n\n def prepare_data(self):\n # good\n download_data()\n tokenize()\n etc()\n\n # bad\n self.split = data_split\n self.some_state = some_other_state()\n\n In DDP prepare_data can be called in two ways (using Trainer(prepare_data_per_node)):\n\n 1. Once per node. This is the default and is only called on LOCAL_RANK=0.\n 2. Once in total. Only called on GLOBAL_RANK=0.\n\n Example::\n\n # DEFAULT\n # called once per node on LOCAL_RANK=0 of that node\n Trainer(prepare_data_per_node=True)\n\n # call on GLOBAL_RANK=0 (great for shared file systems)\n Trainer(prepare_data_per_node=False)\n\n This is called before requesting the dataloaders:\n\n .. code-block:: python\n\n model.prepare_data()\n if ddp/tpu: init()\n model.setup(stage)\n model.train_dataloader()\n model.val_dataloader()\n model.test_dataloader()\n \"\"\"\n\n def train_dataloader(self) -> DataLoader:\n \"\"\"\n Implement a PyTorch DataLoader for training.\n\n Return:\n Single PyTorch :class:`~torch.utils.data.DataLoader`.\n\n The dataloader you return will not be called every epoch unless you set\n :paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.\n\n For data processing use the following pattern:\n\n - download in :meth:`prepare_data`\n - process and split in :meth:`setup`\n\n However, the above are only necessary for distributed processing.\n\n .. warning:: do not assign state in prepare_data\n\n - :meth:`~pytorch_lightning.trainer.Trainer.fit`\n - ...\n - :meth:`prepare_data`\n - :meth:`setup`\n - :meth:`train_dataloader`\n\n Note:\n Lightning adds the correct sampler for distributed and arbitrary hardware.\n There is no need to set it yourself.\n\n Example:\n .. 
code-block:: python\n\n def train_dataloader(self):\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,))])\n dataset = MNIST(root='/path/to/mnist/', train=True, transform=transform,\n download=True)\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=self.batch_size,\n shuffle=True\n )\n return loader\n\n \"\"\"\n rank_zero_warn('`train_dataloader` must be implemented to be used with the Lightning Trainer')\n\n def tng_dataloader(self): # todo: remove in v1.0.0\n \"\"\"\n Warnings:\n Deprecated in v0.5.0. Use :meth:`train_dataloader` instead. Will be removed in 1.0.0.\n \"\"\"\n output = self.train_dataloader()\n rank_zero_warn(\n \"`tng_dataloader` has been renamed to `train_dataloader` since v0.5.0.\"\n \" and this method will be removed in v1.0.0\",\n DeprecationWarning,\n )\n return output\n\n def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n r\"\"\"\n Implement one or multiple PyTorch DataLoaders for testing.\n\n The dataloader you return will not be called every epoch unless you set\n :paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.\n\n For data processing use the following pattern:\n\n - download in :meth:`prepare_data`\n - process and split in :meth:`setup`\n\n However, the above are only necessary for distributed processing.\n\n .. warning:: do not assign state in prepare_data\n\n\n - :meth:`~pytorch_lightning.trainer.Trainer.fit`\n - ...\n - :meth:`prepare_data`\n - :meth:`setup`\n - :meth:`train_dataloader`\n - :meth:`val_dataloader`\n - :meth:`test_dataloader`\n\n Note:\n Lightning adds the correct sampler for distributed and arbitrary hardware.\n There is no need to set it yourself.\n\n Return:\n Single or multiple PyTorch DataLoaders.\n\n Example:\n .. code-block:: python\n\n def test_dataloader(self):\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,))])\n dataset = MNIST(root='/path/to/mnist/', train=False, transform=transform,\n download=True)\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=self.batch_size,\n shuffle=False\n )\n\n return loader\n\n # can also return multiple dataloaders\n def test_dataloader(self):\n return [loader_a, loader_b, ..., loader_n]\n\n Note:\n If you don't need a test dataset and a :meth:`test_step`, you don't need to implement\n this method.\n\n Note:\n In the case where you return multiple test dataloaders, the :meth:`test_step`\n will have an argument ``dataloader_idx`` which matches the order here.\n \"\"\"\n\n def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n r\"\"\"\n Implement one or multiple PyTorch DataLoaders for validation.\n\n The dataloader you return will not be called every epoch unless you set\n :paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.\n\n It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.\n\n - :meth:`~pytorch_lightning.trainer.Trainer.fit`\n - ...\n - :meth:`prepare_data`\n - :meth:`train_dataloader`\n - :meth:`val_dataloader`\n - :meth:`test_dataloader`\n\n Note:\n Lightning adds the correct sampler for distributed and arbitrary hardware\n There is no need to set it yourself.\n\n Return:\n Single or multiple PyTorch DataLoaders.\n\n Examples:\n .. 
code-block:: python\n\n def val_dataloader(self):\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,))])\n dataset = MNIST(root='/path/to/mnist/', train=False,\n transform=transform, download=True)\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=self.batch_size,\n shuffle=False\n )\n\n return loader\n\n # can also return multiple dataloaders\n def val_dataloader(self):\n return [loader_a, loader_b, ..., loader_n]\n\n Note:\n If you don't need a validation dataset and a :meth:`validation_step`, you don't need to\n implement this method.\n\n Note:\n In the case where you return multiple validation dataloaders, the :meth:`validation_step`\n will have an argument ``dataloader_idx`` which matches the order here.\n \"\"\"\n\n def summarize(self, mode: str = ModelSummary.MODE_DEFAULT) -> ModelSummary:\n model_summary = ModelSummary(self, mode=mode)\n log.info('\\n' + str(model_summary))\n return model_summary\n\n def freeze(self) -> None:\n r\"\"\"\n Freeze all params for inference.\n\n Example:\n .. code-block:: python\n\n model = MyLightningModule(...)\n model.freeze()\n\n \"\"\"\n for param in self.parameters():\n param.requires_grad = False\n\n self.eval()\n\n def unfreeze(self) -> None:\n \"\"\"\n Unfreeze all parameters for training.\n\n .. code-block:: python\n\n model = MyLightningModule(...)\n model.unfreeze()\n\n \"\"\"\n for param in self.parameters():\n param.requires_grad = True\n\n self.train()\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n r\"\"\"\n Called by Lightning to restore your model.\n If you saved something with :meth:`on_save_checkpoint` this is your chance to restore this.\n\n Args:\n checkpoint: Loaded checkpoint\n\n\n Example:\n .. code-block:: python\n\n def on_load_checkpoint(self, checkpoint):\n # 99% of the time you don't need to implement this method\n self.something_cool_i_want_to_save = checkpoint['something_cool_i_want_to_save']\n\n Note:\n Lightning auto-restores global step, epoch, and train state including amp scaling.\n There is no need for you to restore anything regarding training.\n \"\"\"\n\n def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n r\"\"\"\n Called by Lightning when saving a checkpoint to give you a chance to store anything\n else you might want to save.\n\n Args:\n checkpoint: Checkpoint to be saved\n\n Example:\n .. code-block:: python\n\n def on_save_checkpoint(self, checkpoint):\n # 99% of use cases you don't need to implement this method\n checkpoint['something_cool_i_want_to_save'] = my_cool_pickable_object\n\n Note:\n Lightning saves all aspects of training (epoch, global step, etc...)\n including amp scaling.\n There is no need for you to store anything about training.\n\n \"\"\"\n\n def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:\n r\"\"\"\n Implement this to override the default items displayed in the progress bar.\n By default it includes the average loss value, split index of BPTT (if used)\n and the version of the experiment when using a logger.\n\n .. code-block::\n\n Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]\n\n Here is an example how to override the defaults:\n\n .. 
code-block:: python\n\n def get_progress_bar_dict(self):\n # don't show the version number\n items = super().get_progress_bar_dict()\n items.pop(\"v_num\", None)\n return items\n\n Return:\n Dictionary with the items to be displayed in the progress bar.\n \"\"\"\n # call .item() only once but store elements without graphs\n running_train_loss = self.trainer.running_loss.mean()\n avg_training_loss = running_train_loss.cpu().item() if running_train_loss is not None else float('NaN')\n tqdm_dict = {'loss': '{:.3f}'.format(avg_training_loss)}\n\n if self.trainer.truncated_bptt_steps is not None:\n tqdm_dict['split_idx'] = self.trainer.split_idx\n\n if self.trainer.logger is not None and self.trainer.logger.version is not None:\n version = self.trainer.logger.version\n # show last 4 places of long version strings\n version = version[-4:] if isinstance(version, str) else version\n tqdm_dict['v_num'] = version\n\n return tqdm_dict\n\n def get_tqdm_dict(self) -> Dict[str, Union[int, str]]:\n \"\"\"\n Additional items to be displayed in the progress bar.\n\n Return:\n Dictionary with the items to be displayed in the progress bar.\n\n Warning:\n Deprecated since v0.7.3.\n Use :meth:`get_progress_bar_dict` instead.\n \"\"\"\n rank_zero_warn(\n \"`get_tqdm_dict` was renamed to `get_progress_bar_dict` in v0.7.3\"\n \" and this method will be removed in v1.0.0\",\n DeprecationWarning,\n )\n return self.get_progress_bar_dict()\n\n @classmethod\n def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:\n \"\"\"\"\"\"\n \"\"\"\n Collect all module arguments in the current constructor and all child constructors.\n The child constructors are all the ``__init__`` methods that reach the current class through\n (chained) ``super().__init__()`` calls.\n\n Args:\n frame: instance frame\n\n Returns:\n self_arguments: arguments dictionary of the first instance\n parents_arguments: arguments dictionary of the parent's instances\n \"\"\"\n if not frame:\n frame = inspect.currentframe()\n\n frame_args = collect_init_args(frame.f_back, [])\n self_arguments = frame_args[-1]\n\n # set module_arguments in child\n self_arguments = self_arguments\n parents_arguments = {}\n\n # add all arguments from parents\n for args in frame_args[:-1]:\n parents_arguments.update(args)\n return self_arguments, parents_arguments\n\n def save_hyperparameters(self, *args, frame=None) -> None:\n \"\"\"Save all model arguments.\n\n Args:\n args: single object of `dict`, `NameSpace` or `OmegaConf`\n or string names or argumenst from class `__init__`\n\n >>> from collections import OrderedDict\n >>> class ManuallyArgsModel(LightningModule):\n ... def __init__(self, arg1, arg2, arg3):\n ... super().__init__()\n ... # manually assign arguments\n ... self.save_hyperparameters('arg1', 'arg3')\n ... def forward(self, *args, **kwargs):\n ... ...\n >>> model = ManuallyArgsModel(1, 'abc', 3.14)\n >>> model.hparams\n \"arg1\": 1\n \"arg3\": 3.14\n\n >>> class AutomaticArgsModel(LightningModule):\n ... def __init__(self, arg1, arg2, arg3):\n ... super().__init__()\n ... # equivalent automatic\n ... self.save_hyperparameters()\n ... def forward(self, *args, **kwargs):\n ... ...\n >>> model = AutomaticArgsModel(1, 'abc', 3.14)\n >>> model.hparams\n \"arg1\": 1\n \"arg2\": abc\n \"arg3\": 3.14\n\n >>> class SingleArgModel(LightningModule):\n ... def __init__(self, params):\n ... super().__init__()\n ... # manually assign single argument\n ... self.save_hyperparameters(params)\n ... def forward(self, *args, **kwargs):\n ... 
...\n >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))\n >>> model.hparams\n \"p1\": 1\n \"p2\": abc\n \"p3\": 3.14\n \"\"\"\n if not frame:\n frame = inspect.currentframe().f_back\n init_args = get_init_args(frame)\n assert init_args, 'failed to inspect the self init'\n if not args:\n hp = init_args\n self._hparams_name = 'kwargs' if hp else None\n else:\n isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]\n if len(isx_non_str) == 1:\n hp = args[isx_non_str[0]]\n cand_names = [k for k, v in init_args.items() if v == hp]\n self._hparams_name = cand_names[0] if cand_names else None\n else:\n hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}\n self._hparams_name = 'kwargs'\n\n # `hparams` are expected here\n if hp:\n self._set_hparams(hp)\n\n def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:\n if isinstance(hp, Namespace):\n hp = vars(hp)\n if isinstance(hp, dict):\n hp = AttributeDict(hp)\n elif isinstance(hp, PRIMITIVE_TYPES):\n raise ValueError(f'Primitives {PRIMITIVE_TYPES} are not allowed.')\n elif not isinstance(hp, ALLOWED_CONFIG_TYPES):\n raise ValueError(f'Unsupported config type of {type(hp)}.')\n\n if isinstance(hp, dict) and isinstance(self.hparams, dict):\n self.hparams.update(hp)\n else:\n self._hparams = hp\n\n def to_onnx(self, file_path: str, input_sample: Optional[Tensor] = None, **kwargs):\n \"\"\"Saves the model in ONNX format\n\n Args:\n file_path: The path of the file the model should be saved to.\n input_sample: A sample of an input tensor for tracing.\n **kwargs: Will be passed to torch.onnx.export function.\n\n Example:\n >>> class SimpleModel(LightningModule):\n ... def __init__(self):\n ... super().__init__()\n ... self.l1 = torch.nn.Linear(in_features=64, out_features=4)\n ...\n ... def forward(self, x):\n ... return torch.relu(self.l1(x.view(x.size(0), -1)))\n\n >>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:\n ... model = SimpleModel()\n ... input_sample = torch.randn((1, 64))\n ... model.to_onnx(tmpfile.name, input_sample, export_params=True)\n ... os.path.isfile(tmpfile.name)\n True\n \"\"\"\n\n if isinstance(input_sample, Tensor):\n input_data = input_sample\n elif self.example_input_array is not None:\n input_data = self.example_input_array\n else:\n if input_sample is not None:\n raise ValueError(f'Received `input_sample` of type {type(input_sample)}. 
Expected type is `Tensor`')\n else:\n raise ValueError('Could not export to ONNX since neither `input_sample` nor'\n ' `model.example_input_array` attribute is set.')\n input_data = input_data.to(self.device)\n if 'example_outputs' not in kwargs:\n self.eval()\n with torch.no_grad():\n kwargs['example_outputs'] = self(input_data)\n\n torch.onnx.export(self, input_data, file_path, **kwargs)\n\n @property\n def hparams(self) -> Union[AttributeDict, str]:\n if not hasattr(self, '_hparams'):\n self._hparams = AttributeDict()\n return self._hparams\n\n @hparams.setter\n def hparams(self, hp: Union[dict, Namespace, Any]):\n hparams_assignment_name = self.__get_hparams_assignment_variable()\n self._hparams_name = hparams_assignment_name\n self._set_hparams(hp)\n\n def __get_hparams_assignment_variable(self):\n \"\"\"\"\"\"\n \"\"\"\n looks at the code of the class to figure out what the user named self.hparams\n this only happens when the user explicitly sets self.hparams\n \"\"\"\n try:\n class_code = inspect.getsource(self.__class__)\n lines = class_code.split('\\n')\n for line in lines:\n line = re.sub(r\"\\s+\", \"\", line, flags=re.UNICODE)\n if '.hparams=' in line:\n return line.split('=')[1]\n except Exception as e:\n return 'hparams'\n\n return None\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nEarly Stopping\n^^^^^^^^^^^^^^\n\nMonitor a validation metric and stop training when it stops improving.\n\n\"\"\"\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks.base import Callback\nfrom pytorch_lightning.utilities import rank_zero_warn\nimport os\n\ntorch_inf = torch.tensor(np.Inf)\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\nclass EarlyStopping(Callback):\n r\"\"\"\n\n Args:\n monitor: quantity to be monitored. Default: ``'val_loss'``.\n .. note:: Has no effect when using `EvalResult` or `TrainResult`\n min_delta: minimum change in the monitored quantity\n to qualify as an improvement, i.e. an absolute\n change of less than `min_delta`, will count as no\n improvement. Default: ``0.0``.\n patience: number of validation epochs with no improvement\n after which training will be stopped. Default: ``3``.\n verbose: verbosity mode. Default: ``False``.\n mode: one of {auto, min, max}. In `min` mode,\n training will stop when the quantity\n monitored has stopped decreasing; in `max`\n mode it will stop when the quantity\n monitored has stopped increasing; in `auto`\n mode, the direction is automatically inferred\n from the name of the monitored quantity. Default: ``'auto'``.\n strict: whether to crash the training if `monitor` is\n not found in the validation metrics. Default: ``True``.\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import EarlyStopping\n >>> early_stopping = EarlyStopping('val_loss')\n >>> trainer = Trainer(early_stop_callback=early_stopping)\n \"\"\"\n mode_dict = {\n 'min': torch.lt,\n 'max': torch.gt,\n }\n\n def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,\n verbose: bool = False, mode: str = 'auto', strict: bool = True):\n super().__init__()\n self.monitor = monitor\n self.patience = patience\n self.verbose = verbose\n self.strict = strict\n self.min_delta = min_delta\n self.wait_count = 0\n self.stopped_epoch = 0\n self.mode = mode\n self.warned_result_obj = False\n\n if mode not in self.mode_dict:\n if self.verbose > 0:\n log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')\n self.mode = 'auto'\n\n if self.mode == 'auto':\n if self.monitor == 'acc':\n self.mode = 'max'\n else:\n self.mode = 'min'\n if self.verbose > 0:\n log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')\n\n self.min_delta *= 1 if self.monitor_op == torch.gt else -1\n self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf\n\n def _validate_condition_metric(self, logs):\n monitor_val = logs.get(self.monitor)\n error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'\n f' which is not available. 
Either add `{self.monitor}` to the return of '\n f' validation_epoch end or modify your EarlyStopping callback to use any of the '\n f'following: `{\"`, `\".join(list(logs.keys()))}`')\n\n if monitor_val is None:\n if self.strict:\n raise RuntimeError(error_msg)\n if self.verbose > 0:\n rank_zero_warn(error_msg, RuntimeWarning)\n\n return False\n\n return True\n\n @property\n def monitor_op(self):\n return self.mode_dict[self.mode]\n\n def state_dict(self):\n return {\n 'wait_count': self.wait_count,\n 'stopped_epoch': self.stopped_epoch,\n 'best_score': self.best_score,\n 'patience': self.patience\n }\n\n def load_state_dict(self, state_dict):\n state_dict = deepcopy(state_dict)\n self.wait_count = state_dict['wait_count']\n self.stopped_epoch = state_dict['stopped_epoch']\n self.best_score = state_dict['best_score']\n self.patience = state_dict['patience']\n\n def on_validation_end(self, trainer, pl_module):\n if trainer.running_sanity_check:\n return\n\n self._run_early_stopping_check(trainer, pl_module)\n\n def on_validation_epoch_end(self, trainer, pl_module):\n if trainer.running_sanity_check:\n return\n\n self.__warn_deprecated_monitor_key()\n\n val_es_key = 'val_early_stop_on'\n if trainer.callback_metrics.get(val_es_key) is not None:\n self.monitor = val_es_key\n\n # disable strict checking when using structured results\n if val_es_key in trainer.callback_metrics:\n self.strict = False\n\n self._validate_condition_metric(trainer.callback_metrics)\n\n def on_train_epoch_end(self, trainer, pl_module):\n # disable early stopping in train loop when there's a val loop\n if self.monitor == 'val_early_stop_on':\n return\n\n # early stopping can also work in the train loop when there is no val loop and when using structured results\n should_check_early_stop = False\n train_es_key = 'early_stop_on'\n if trainer.callback_metrics.get(train_es_key, None) is not None:\n self.monitor = train_es_key\n should_check_early_stop = True\n\n if should_check_early_stop:\n self._run_early_stopping_check(trainer, pl_module)\n\n def __warn_deprecated_monitor_key(self):\n using_result_obj = os.environ.get('PL_USING_RESULT_OBJ', None)\n invalid_key = self.monitor not in ['val_loss', 'early_stop_on', 'val_early_stop_on', 'loss']\n if using_result_obj and not self.warned_result_obj and invalid_key:\n self.warned_result_obj = True\n m = f\"\"\"\n When using EvalResult(early_stop_on=X) or TrainResult(early_stop_on=X) the\n 'monitor' key of EarlyStopping has no effect.\n Remove EarlyStopping(monitor='{self.monitor}) to fix')\n \"\"\"\n rank_zero_warn(m)\n\n def _run_early_stopping_check(self, trainer, pl_module):\n logs = trainer.callback_metrics\n\n if not self._validate_condition_metric(logs):\n return # short circuit if metric not present\n\n current = logs.get(self.monitor)\n\n # when in dev debugging\n trainer.dev_debugger.track_early_stopping_history(current)\n\n if not isinstance(current, torch.Tensor):\n current = torch.tensor(current, device=pl_module.device)\n\n if trainer.use_tpu and XLA_AVAILABLE:\n current = current.cpu()\n\n if self.monitor_op(current - self.min_delta, self.best_score):\n self.best_score = current\n self.wait_count = 0\n else:\n self.wait_count += 1\n should_stop = self.wait_count >= self.patience\n\n if bool(should_stop):\n self.stopped_epoch = trainer.current_epoch\n trainer.should_stop = True\n\n # stop every ddp process if any world process decides to stop\n self._stop_distributed_training(trainer, pl_module)\n\n def _stop_distributed_training(self, trainer, pl_module):\n\n # 
in ddp make sure all processes stop when one is flagged\n if trainer.use_ddp or trainer.use_ddp2:\n stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)\n dist.all_reduce(stop, op=dist.reduce_op.SUM)\n dist.barrier()\n trainer.should_stop = stop == trainer.world_size\n\n if trainer.use_tpu:\n stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)\n stop = xm.mesh_reduce(\"stop_signal\", stop, torch.cat)\n torch_xla.core.xla_model.rendezvous(\"pl.EarlyStoppingCallback.stop_distributed_training_check\")\n trainer.should_stop = int(stop.item()) == trainer.world_size\n\n def on_train_end(self, trainer, pl_module):\n if self.stopped_epoch > 0 and self.verbose > 0:\n rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from \"1\" until v0.6.x,'\n ' but will start from \"0\" in v0.8.0.', DeprecationWarning)\n log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')\n"
] | [
[
"torch.onnx.export",
"torch.no_grad",
"torch.distributed.init_process_group",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm"
],
[
"torch.distributed.all_reduce",
"torch.distributed.barrier",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
uw-loci/demo_wsi_superres | [
"38283031eee4823d332fae1b6b32b5da33fb957f"
] | [
"train_paired.py"
] | [
"import os, argparse, sys, shutil, warnings, glob\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom math import log2, log10\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom torchvision import transforms, utils\nimport torchvision\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport torch.optim.lr_scheduler as lr_scheduler\n\nfrom skimage import exposure, color, io, img_as_float, img_as_ubyte\nfrom skimage.util import view_as_windows, pad, montage\nfrom PIL import Image, ImageFilter\nimport imagej\n\nimport data_loader as data\nimport models\n\nimport pytorch_fid.fid_score as fid_score\n\n\ndef paired_dataloader(args, csv='train'):\n transformed_dataset = data.Paired_Dataset(csv_file=data.paired_csv_path(csv, dataset=args.dataset),\n img_size=args.patch_size,\n transform=data.Compose([data.ToTensor()])\n )\n dataloader = DataLoader(transformed_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n return dataloader\n\ndef train(args, epoch, run, dataloader, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, Tensor=None, device='cuda:0', patch=None):\n l = args.percep_weight\n if args.gan == 0:\n gan = False\n else:\n gan = True\n epoch_loss = 0\n gan_loss = 0\n total_loss = 0\n dis_loss = 0\n generator.train()\n for iteration, batch in enumerate(dataloader):\n real_mid = Variable(batch['input'].type(Tensor).to(device), requires_grad=False)\n real_high = Variable(batch['output'].type(Tensor).to(device), requires_grad=False) \n # Adversarial ground truths\n valid = Variable(Tensor(np.ones((real_mid.size(0), *patch))).to(device), requires_grad=False)\n fake = Variable(Tensor(np.zeros((real_mid.size(0), *patch))).to(device), requires_grad=False) \n #---------------\n # Train Generator\n #--------------- \n optimizer_G.zero_grad() \n # GAN loss\n fake_high = generator(real_mid)\n if gan:\n pred_fake = discriminator(fake_high, real_mid)\n loss_GAN = criterionMSE(pred_fake, valid)\n \n # Identity\n lossL1 = criterionL(fake_high, real_high) \n loss_pixel = lossL1 \n # Total loss\n if gan:\n loss_G = l * loss_GAN + (1-l) * loss_pixel \n loss_G.backward()\n total_loss = total_loss + loss_G.item()\n gan_loss = gan_loss + loss_GAN.item()\n else:\n loss_pixel.backward()\n optimizer_G.step() \n #---------------\n # Train Discriminator\n #--------------- \n if gan and iteration % args.num_critic == 0:\n optimizer_D.zero_grad() \n # Real loss\n pred_real = discriminator(real_high, real_mid)\n loss_real = criterionMSE(pred_real, valid) \n # Fake loss\n pred_fake = discriminator(fake_high.detach(), real_mid)\n loss_fake = criterionMSE(pred_fake, fake)\n # Total loss\n loss_D = 0.5 * (loss_real + loss_fake)\n loss_D.backward()\n optimizer_D.step()\n dis_loss = dis_loss + loss_D.item() \n epoch_loss = epoch_loss + loss_pixel.item() \n if gan:\n sys.stdout.write('\\r[%d/%d][%d/%d] Discriminator_Loss: %.4f Generator_Loss (Identity/Advers/Total): %.4f/%.4f/%.4f' \n % (epoch, args.num_epochs, iteration, len(dataloader), loss_D.item(), \n loss_pixel.item(), loss_GAN.item(), loss_G.item()))\n else:\n sys.stdout.write('\\r[%d/%d][%d/%d] Generator_L1_Loss: %.4f' \n % (epoch, args.num_epochs, iteration, len(dataloader), loss_pixel.item()))\n print(\"\\n ===> Epoch {} Complete: Avg. 
Loss: {:.4f}\".format(epoch, epoch_loss / len(dataloader))) \n g_path = os.path.join('weights', run, 'generator.pth')\n d_path = os.path.join('weights', run, 'discriminator.pth')\n os.makedirs(os.path.join('weights', run), exist_ok=True)\n torch.save(generator.state_dict(), g_path)\n if gan:\n os.makedirs(os.path.join('weights', run), exist_ok=True)\n torch.save(discriminator.state_dict(), d_path)\n\ndef compute_p_snr(path_input, path_ref):\n MSE = nn.MSELoss()\n imgs_input = glob.glob(os.path.join(path_input, '*.tiff'))\n imgs_ref = glob.glob(os.path.join(path_ref, '*.tiff'))\n ave_psnr = 0\n for i in range(len(imgs_input)):\n img_input = torch.from_numpy(img_as_float(io.imread(imgs_input[i]).transpose(2, 1, 0))) \n img_ref = torch.from_numpy(img_as_float(io.imread(imgs_ref[i]).transpose(2, 1, 0)))\n img_input = img_input[None, :]\n img_ref = img_ref[None, :] \n mse = MSE(img_input, img_ref) \n psnr = 10 * log10(1 / mse.item())\n ave_psnr += psnr\n ave_psnr = ave_psnr / len(imgs_input)\n return ave_psnr\n\ndef print_output(generator, dataloader_valid, device='cuda:0'):\n os.makedirs('output/print', exist_ok=True)\n os.makedirs('output/print/lr', exist_ok=True)\n os.makedirs('output/print/hr', exist_ok=True)\n os.makedirs('output/print/sr', exist_ok=True)\n with torch.no_grad(): \n generator.eval()\n print(\"=> Printing sampled patches\")\n for k, batch in enumerate(dataloader_valid): \n input, target = batch['input'].to(device), batch['output'].to(device)\n imgs_input =input.float().to(device)\n prediction = generator(imgs_input)\n target = target.float()\n for i in range(target.shape[0]):\n utils.save_image(imgs_input[i], 'output/print/lr/{}_{}.tiff'.format(k, i))\n utils.save_image(target[i], 'output/print/hr/{}_{}.tiff'.format(k, i))\n utils.save_image(prediction[i], 'output/print/sr/{}_{}.tiff'.format(k, i))\n sys.stdout.write(\"\\r ==> Batch {}/{}\".format(k+1, len(dataloader_valid)))\n print(\"\\n Computing FID score\")\n fid = fid_score.calculate_fid_given_paths(('output/print/sr', 'output/print/hr'), 8, 'cuda:0', 2048)\n print(\"\\n Computing PSNR\")\n psnr = compute_p_snr('output/print/sr', 'output/print/hr')\n print(\"FID score: {}, PSNR: {}\".format(fid, psnr))\n return fid, psnr\n\ndef main():\n parser = argparse.ArgumentParser(description='Train WSISR on compressed TMA dataset')\n parser.add_argument('--batch-size', default=32, type=int, help='Batch size')\n parser.add_argument('--patch-size', default=256, type=int, help='Patch size')\n parser.add_argument('--num-workers', default=4, type=int, help='Number of workers')\n parser.add_argument('--num-epochs', default=900, type=int, help='Number of epochs, more epochs are desired for GAN training')\n parser.add_argument('--g-lr', default=0.0001, type=float, help='Learning rate of the generator')\n parser.add_argument('--d-lr', default=0.00001, type=float, help='Learning rate of the descriminator')\n parser.add_argument('--percep-weight', default=0.01, type=float, help='GAN loss weight')\n parser.add_argument('--run-from', default=None, type=str, help='Load weights from a previous run, use folder name in [weights] folder')\n parser.add_argument('--gan', default=1, type=int, help='Use GAN')\n parser.add_argument('--num-critic', default=1, type=int, help='Iteration interval for training the descriminator') \n parser.add_argument('--test-interval', default=50, type=int, help='Epoch interval for FID score testing')\n parser.add_argument('--print-interval', default=10, type=int, help='Epoch interval for output printing')\n 
parser.add_argument('--dataset', default='TMA', type=str, help='Dataset folder name')\n parser.add_argument('--in-folder', default='low', type=str, help='Low NA image folder name')\n parser.add_argument('--out-folder', default='high', type=str, help='High NA image folder name') \n parser.add_argument('--extension', default='jpg', type=str, help='Training image extension') \n args = parser.parse_args()\n warnings.filterwarnings('ignore')\n device = torch.device('cuda:0')\n tensor = torch.cuda.FloatTensor\n data.generate_paired_csv(dataset=args.dataset, in_folder=args.in_folder, out_folder=args.out_folder, ext=args.extension)\n valid_dataset = paired_dataloader(args, 'valid')\n train_dataset = paired_dataloader(args, 'train')\n test_dataset = paired_dataloader(args, 'test')\n generator = models.Generator()\n generator.to(device);\n discriminator = models.Discriminator()\n discriminator.to(device);\n criterionL = nn.L1Loss().cuda()\n criterionMSE = nn.MSELoss().cuda()\n optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr)\n optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr)\n patch = (1, args.patch_size // 2 ** 4, args.patch_size // 2 ** 4)\n if args.run_from is not None:\n generator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'generator.pth')))\n try:\n discriminator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'discriminator.pth')))\n except:\n print('Discriminator weights not found!')\n pass\n optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr)\n optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr)\n scheduler_G = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_G, args.num_epochs, args.g_lr*0.05)\n scheduler_D = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_D, args.num_epochs, args.d_lr*0.05)\n run = datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")\n for epoch in range(0, args.num_epochs):\n train(args, epoch, run, train_dataset, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, tensor, device, patch)\n scheduler_G.step()\n scheduler_D.step()\n if epoch % args.print_interval == 0:\n print_output(generator, valid_dataset, device)\n print_output(generator, test_dataset, device)\n \nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.device",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thomascherickal/Auto-PyTorch | [
"9e25a3bdef8e836e63979229eef77830cd64bb53",
"9e25a3bdef8e836e63979229eef77830cd64bb53"
] | [
"autoPyTorch/training/mixup.py",
"autoPyTorch/components/networks/feature/shapedmlpnet.py"
] | [
"from autoPyTorch.training.base_training import BaseBatchLossComputationTechnique\nimport numpy as np\nfrom torch.autograd import Variable\nimport ConfigSpace\nimport torch\n\nclass Mixup(BaseBatchLossComputationTechnique):\n def set_up(self, pipeline_config, hyperparameter_config, logger):\n super(Mixup, self).set_up(pipeline_config, hyperparameter_config, logger)\n self.alpha = hyperparameter_config[\"alpha\"]\n\n def prepare_batch_data(self, X_batch, y_batch):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if self.alpha > 0:\n self.lam = np.random.beta(self.alpha, self.alpha)\n else:\n self.lam = 1\n\n batch_size = X_batch.size()[0]\n if X_batch.is_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n self.mixed_x = self.lam * X_batch + (1 - self.lam) * X_batch[index, :]\n self.y_a, self.y_b = y_batch, y_batch[index]\n \n def compute_batch_loss(self, loss_function, y_batch_pred):\n # self.logger.debug(\"Computing batch loss with mixup\")\n\n result = self.lam * loss_function(y_batch_pred, Variable(self.y_a)) + \\\n (1 - self.lam) * loss_function(y_batch_pred, Variable(self.y_b))\n self.lam = None\n self.mixed_x = None\n self.y_a = None\n self.y_b = None\n return result\n\n @staticmethod\n def get_hyperparameter_search_space(**pipeline_config):\n cs = ConfigSpace.ConfigurationSpace()\n cs.add_hyperparameter(ConfigSpace.hyperparameters.UniformFloatHyperparameter(\"alpha\", lower=0, upper=1, default_value=1))\n return cs",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMultilayer Perceptrons in fancy shapes.\n\"\"\"\n\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\nimport torch.nn as nn\n\nfrom autoPyTorch.components.networks.feature.mlpnet import MlpNet\n\n__author__ = \"Max Dippel, Michael Burkart and Matthias Urban\"\n__version__ = \"0.0.1\"\n__license__ = \"BSD\"\n\nclass ShapedMlpNet(MlpNet):\n def __init__(self, *args, **kwargs):\n super(ShapedMlpNet, self).__init__(*args, **kwargs)\n\n def _build_net(self, in_features, out_features):\n layers = list()\n neuron_counts = get_shaped_neuron_counts(self.config['mlp_shape'],\n in_features,\n out_features,\n self.config['max_units'],\n self.config['num_layers'])\n if self.config[\"use_dropout\"]:\n dropout_shape = get_shaped_neuron_counts( self.config['dropout_shape'], 0, 0, 1000, self.config['num_layers'])\n\n previous = in_features\n for i in range(self.config['num_layers']-1):\n if (i >= len(neuron_counts)):\n break\n dropout = dropout_shape[i] / 1000 * self.config[\"max_dropout\"] if self.config[\"use_dropout\"] else 0\n self._add_layer(layers, previous, neuron_counts[i], dropout)\n previous = neuron_counts[i]\n\n layers.append(nn.Linear(previous, out_features))\n return nn.Sequential(*layers)\n\n def _add_layer(self, layers, in_features, out_features, dropout):\n layers.append(nn.Linear(in_features, out_features))\n layers.append(self.activation())\n if self.config[\"use_dropout\"]:\n layers.append(nn.Dropout(dropout))\n\n @staticmethod\n\n def get_config_space(user_updates=None):\n cs = CS.ConfigurationSpace()\n range_num_layers=(1, 15)\n range_max_num_units=(10, 1024)\n possible_activations=('sigmoid', 'tanh', 'relu')\n possible_net_shapes=('funnel', 'long_funnel', 'diamond', 'hexagon', 'brick', 'triangle', 'stairs')\n possible_dropout_shapes=('funnel', 'long_funnel', 'diamond', 'hexagon', 'brick', 'triangle', 'stairs')\n range_max_dropout=(0, 0.8)\n \n layer_shape = CSH.CategoricalHyperparameter('mlp_shape', possible_net_shapes)\n cs.add_hyperparameter(layer_shape)\n\n num_layers = CSH.UniformIntegerHyperparameter('num_layers', lower=range_num_layers[0], upper=range_num_layers[1])\n cs.add_hyperparameter(num_layers)\n max_units = CSH.UniformIntegerHyperparameter(\"max_units\", lower=range_max_num_units[0], upper=range_max_num_units[1], log=True)\n cs.add_hyperparameter(max_units)\n\n use_dropout = cs.add_hyperparameter(CS.CategoricalHyperparameter(\"use_dropout\", [True, False], default_value=True))\n dropout_shape = cs.add_hyperparameter(CSH.CategoricalHyperparameter('dropout_shape', possible_dropout_shapes))\n max_dropout = cs.add_hyperparameter(CSH.UniformFloatHyperparameter(\"max_dropout\", lower=range_max_dropout[0], upper=range_max_dropout[1], default_value=0.2))\n cs.add_condition(CS.EqualsCondition(dropout_shape, use_dropout, True))\n cs.add_condition(CS.EqualsCondition(max_dropout, use_dropout, True))\n\n cs.add_hyperparameter(CSH.CategoricalHyperparameter('activation', possible_activations))\n return(cs)\n \n \ndef get_shaped_neuron_counts(shape, in_feat, out_feat, max_neurons, layer_count):\n counts = []\n\n if (layer_count <= 0):\n return counts\n\n if (layer_count == 1):\n counts.append(out_feat)\n return counts\n\n max_neurons = max(in_feat, max_neurons)\n # https://mikkokotila.github.io/slate/#shapes\n\n if shape == 'brick':\n #\n # | |\n # | |\n # | |\n # | |\n # | |\n # |___ ___|\n #\n for _ in range(layer_count-1):\n counts.append(max_neurons)\n counts.append(out_feat)\n\n if shape == 'triangle':\n 
#\n # / \\\n # / \\\n # / \\\n # / \\\n # / \\\n # /_____ _____\\\n #\n previous = in_feat\n step_size = int((max_neurons - previous) / (layer_count-1))\n step_size = max(0, step_size)\n for _ in range(layer_count-2):\n previous = previous + step_size\n counts.append(previous)\n counts.append(max_neurons)\n counts.append(out_feat)\n\n if shape == 'funnel':\n #\n # \\ /\n # \\ /\n # \\ /\n # \\ /\n # \\ /\n # \\ /\n #\n previous = max_neurons\n counts.append(previous)\n \n step_size = int((previous - out_feat) / (layer_count-1))\n step_size = max(0, step_size)\n for _ in range(layer_count-2):\n previous = previous - step_size\n counts.append(previous)\n\n counts.append(out_feat)\n\n if shape == 'long_funnel':\n #\n # | |\n # | |\n # | |\n # \\ /\n # \\ /\n # \\ /\n #\n brick_layer = int(layer_count / 2)\n funnel_layer = layer_count - brick_layer\n counts.extend(get_shaped_neuron_counts('brick', in_feat, max_neurons, max_neurons, brick_layer))\n counts.extend(get_shaped_neuron_counts('funnel', in_feat, out_feat, max_neurons, funnel_layer))\n \n if (len(counts) != layer_count):\n print(\"\\nWarning: long funnel layer count does not match \" + str(layer_count) + \" != \" + str(len(counts)) + \"\\n\")\n \n if shape == 'diamond':\n #\n # / \\\n # / \\\n # / \\\n # \\ /\n # \\ /\n # \\ /\n #\n triangle_layer = int(layer_count / 2) + 1\n funnel_layer = layer_count - triangle_layer\n counts.extend(get_shaped_neuron_counts('triangle', in_feat, max_neurons, max_neurons, triangle_layer))\n remove_triangle_layer = len(counts) > 1\n if (remove_triangle_layer):\n counts = counts[0:-2] # remove the last two layers since max_neurons == out_features (-> two layers with the same size)\n counts.extend(get_shaped_neuron_counts('funnel', max_neurons, out_feat, max_neurons, funnel_layer + (2 if remove_triangle_layer else 0)))\n\n if (len(counts) != layer_count):\n print(\"\\nWarning: diamond layer count does not match \" + str(layer_count) + \" != \" + str(len(counts)) + \"\\n\")\n\n if shape == 'hexagon':\n #\n # / \\\n # / \\\n # | |\n # | |\n # \\ /\n # \\ /\n #\n triangle_layer = int(layer_count / 3) + 1\n funnel_layer = triangle_layer\n brick_layer = layer_count - triangle_layer - funnel_layer\n counts.extend(get_shaped_neuron_counts('triangle', in_feat, max_neurons, max_neurons, triangle_layer))\n counts.extend(get_shaped_neuron_counts('brick', max_neurons, max_neurons, max_neurons, brick_layer))\n counts.extend(get_shaped_neuron_counts('funnel', max_neurons, out_feat, max_neurons, funnel_layer))\n\n if (len(counts) != layer_count):\n print(\"\\nWarning: hexagon layer count does not match \" + str(layer_count) + \" != \" + str(len(counts)) + \"\\n\")\n\n if shape == 'stairs':\n #\n # | |\n # |_ _|\n # | |\n # |_ _|\n # | |\n # | |\n #\n previous = max_neurons\n counts.append(previous)\n\n if layer_count % 2 == 1:\n counts.append(previous)\n\n step_size = 2 * int((max_neurons - out_feat) / (layer_count-1))\n step_size = max(0, step_size)\n for _ in range(int(layer_count / 2 - 1)):\n previous = previous - step_size\n counts.append(previous)\n counts.append(previous)\n\n counts.append(out_feat)\n \n if (len(counts) != layer_count):\n print(\"\\nWarning: stairs layer count does not match \" + str(layer_count) + \" != \" + str(len(counts)) + \"\\n\")\n\n return counts\n"
] | [
[
"torch.randperm",
"numpy.random.beta",
"torch.autograd.Variable"
],
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yick2232/google-research | [
"4a59cab927579ea9722e43252c695de5da4eb5e2",
"4a59cab927579ea9722e43252c695de5da4eb5e2",
"562c7c6ef959cb3cb382b1b660ccc45e8f5289c4",
"99021ebda945e232abdcc592f2cea1375b3c84f7",
"562c7c6ef959cb3cb382b1b660ccc45e8f5289c4",
"4a59cab927579ea9722e43252c695de5da4eb5e2"
] | [
"unprocessing/network.py",
"r4r/r4r_generate_data.py",
"state_of_sparsity/sparse_transformer/layers/sparse_transformer_layers.py",
"large_margin/margin_loss.py",
"enas_lm/src/fixed_lib.py",
"recursive_optimizer/synthetic_experiment.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unprocessing neural network architecture.\n\nUnprocessing Images for Learned Raw Denoising\nhttp://timothybrooks.com/tech/unprocessing\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef conv(features, num_channels, activation=tf.nn.leaky_relu):\n \"\"\"Applies a 3x3 conv layer.\"\"\"\n return tf.layers.conv2d(features, num_channels, 3, padding='same',\n activation=activation)\n\n\ndef conv_block(features, num_channels):\n \"\"\"Applies 3x conv layers.\"\"\"\n with tf.name_scope(None, 'conv_block'):\n features = conv(features, num_channels)\n features = conv(features, num_channels)\n features = conv(features, num_channels)\n return features\n\n\ndef downsample_2x(features):\n \"\"\"Applies a 2x spatial downsample via max pooling.\"\"\"\n with tf.name_scope(None, 'downsample_2x'):\n return tf.layers.max_pooling2d(features, 2, 2, padding='same')\n\n\ndef upsample_2x(features):\n \"\"\"Applies a 2x spatial upsample via bilinear interpolation.\"\"\"\n with tf.name_scope(None, 'upsample_2x'):\n shape = tf.shape(features)\n shape = [shape[1] * 2, shape[2] * 2]\n features = tf.image.resize_bilinear(features, shape)\n return features\n\n\ndef inference(noisy_img, variance):\n \"\"\"Residual U-Net with skip connections.\n\n Expects four input channels for the Bayer color filter planes (e.g. RGGB).\n This is the format of real raw images before they are processed, and an\n effective time to denoise images in an image processing pipelines.\n\n Args:\n noisy_img: Tensor of shape [B, H, W, 4].\n variance: Tensor of shape [B, H, W, 4].\n\n Returns:\n Denoised image in Tensor of shape [B, H, W, 4].\n \"\"\"\n\n noisy_img = tf.identity(noisy_img, 'noisy_img')\n noisy_img.set_shape([None, None, None, 4])\n variance = tf.identity(variance, 'variance')\n variance.shape.assert_is_compatible_with(noisy_img.shape)\n variance.set_shape([None, None, None, 4])\n\n features = tf.concat([noisy_img, variance], axis=-1)\n skip_connections = []\n\n with tf.name_scope(None, 'encoder'):\n for num_channels in (32, 64, 128, 256):\n features = conv_block(features, num_channels)\n skip_connections.append(features)\n features = downsample_2x(features)\n features = conv_block(features, 512)\n\n with tf.name_scope(None, 'decoder'):\n for num_channels in (256, 128, 64, 32):\n features = upsample_2x(features)\n with tf.name_scope(None, 'skip_connection'):\n features = tf.concat([features, skip_connections.pop()], axis=-1)\n features = conv_block(features, num_channels)\n\n residual = conv(features, 4, None)\n return tf.identity(noisy_img + residual, 'denoised_img')\n",
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script to build R4R data from the original R2R data.\n\nLink to the original R2R:\n https://niessner.github.io/Matterport/\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport json\nimport os\n\nimport graph_utils\n\nimport networkx as nx\nimport numpy as np\n\n\ndef main(args):\n \"\"\"Generate R4R data from the original R2R data.\n\n Args:\n args: argparse containing paths to input and output files.\n \"\"\"\n print('******Generating R4R Data********')\n print(' Distance threshold: {} meters'.format(args.distance_threshold))\n print(' Heading threshold: {} radians'.format(args.heading_threshold))\n\n def _connections_file_path(scan):\n return os.path.join(\n args.connections_dir, '{}_connectivity.json'.format(scan))\n\n inputs = json.load(open(args.input_file_path))\n outputs = list()\n filtered = collections.Counter()\n\n # Group by scan to save memory.\n scans = dict()\n for value in inputs:\n scan = value['scan']\n if scan not in scans:\n scans[scan] = []\n scans[scan].append(value)\n\n for scan, values in scans.items():\n print('Loading graph for scan {}.'.format(scan))\n graph = graph_utils.load(_connections_file_path(scan))\n pos2d = nx.get_node_attributes(graph, 'pos2d')\n\n # Cache format: (node, (distance, path)) ((node obj, (dict, dict)))\n cache = dict(nx.all_pairs_dijkstra(graph, weight='weight3d'))\n shortest_distance = {k: v[0] for k, v in cache.items()}\n shortest_path = {k: v[1] for k, v in cache.items()}\n\n for first in values:\n for second in values:\n first_target = first['path'][-1]\n second_source = second['path'][0]\n\n # Compute the end-start distance (meters).\n distance = shortest_distance[first_target][second_source]\n\n # Compute the absolute end-start heading difference (radians).\n x, y = pos2d[first['path'][-1]] - pos2d[first['path'][-2]]\n heading = abs(second['heading'] - np.arctan2(y, x) % (2 * np.pi))\n\n if (args.distance_threshold is not None\n and distance > args.distance_threshold):\n filtered['distance'] += 1\n elif (args.heading_threshold is not None\n and heading > args.heading_threshold):\n filtered['heading'] += 1\n else:\n value = dict()\n value['path'] = (\n first['path'][:-1]\n + shortest_path[first_target][second_source]\n + second['path'][1:])\n value['distance'] = (\n first['distance']\n + shortest_distance[first_target][second_source]\n + second['distance'])\n value['instructions'] = [\n x + y # pylint: disable=g-complex-comprehension\n for x in first['instructions']\n for y in second['instructions']]\n value['heading'] = first['heading']\n value['path_id'] = len(outputs)\n value['scan'] = scan\n\n # Additional data.\n path_source = first['path'][0]\n path_target = second['path'][-1]\n value['shortest_path_distance'] = cache[path_source][0][path_target]\n value['shortest_path'] = cache[path_source][1][path_target]\n value['first_path_id'] = first['path_id']\n 
value['second_path_id'] = second['path_id']\n\n outputs.append(value)\n\n with open(args.output_file_path, 'w') as f:\n json.dump(outputs, f, indent=2, sort_keys=True, separators=(',', ': '))\n\n # Dataset summary metrics.\n tot_instructions = np.sum([len(x['instructions']) for x in outputs])\n avg_distance = np.mean([x['distance'] for x in outputs])\n avg_path_len = np.mean([len(x['path']) for x in outputs])\n avg_sp_distance = np.mean([x['shortest_path_distance'] for x in outputs])\n avg_sp_path_len = np.mean([len(x['shortest_path']) for x in outputs])\n\n print('******Final Results********')\n print(' Total instructions generated: {}'.format(tot_instructions))\n print(' Average path distance (meters): {}'.format(avg_distance))\n print(' Average shortest path distance: {}'.format(avg_sp_distance))\n print(' Average path length (steps): {}'.format(avg_path_len))\n print(' Average shortest path length: {}'.format(avg_sp_path_len))\n print(' Total paths generated: {}'.format(len(outputs)))\n print(' Total distance filtered paths: {}'.format(filtered['distance']))\n print(' Total heading filtered paths: {}'.format(filtered['heading']))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--connections_dir',\n dest='connections_dir',\n required=True,\n help='Path to the Matterport simulator connection data.')\n parser.add_argument(\n '--input_file_path',\n dest='input_file_path',\n required=True,\n help='Path to read the R2R input data.')\n parser.add_argument(\n '--output_file_path',\n dest='output_file_path',\n required=True,\n help='Path to write the R4R output data.')\n parser.add_argument(\n '--distance_threshold',\n dest='distance_threshold',\n required=False,\n nargs='?',\n const=3.0,\n type=float,\n help='Maximum end-start distance (meters) to join R2R paths.')\n parser.add_argument(\n '--heading_threshold',\n dest='heading_threshold',\n required=False,\n nargs='?',\n const=None,\n type=float,\n help='Maximum end-start heading difference (radians) to join R2R paths.')\n main(parser.parse_args())\n",
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sparse transformer layers.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import expert_utils\nfrom tensor2tensor.utils import mlperf_log\n\nimport tensorflow as tf\nfrom state_of_sparsity.sparse_transformer.layers import sparse_attention\nfrom state_of_sparsity.sparse_transformer.layers import sparse_layers\n\n\ndef transformer_encoder(encoder_input,\n encoder_self_attention_bias,\n hparams,\n name=\"encoder\",\n nonpadding=None,\n save_weights_to=None,\n make_image_summary=True):\n \"\"\"A stack of transformer layers.\n\n Args:\n encoder_input: a Tensor\n encoder_self_attention_bias: bias Tensor for self-attention\n (see common_attention.attention_bias())\n hparams: hyperparameters for model\n name: a string\n nonpadding: optional Tensor with shape [batch_size, encoder_length]\n indicating what positions are not padding. This must either be\n passed in, which we do for \"packed\" datasets, or inferred from\n encoder_self_attention_bias. 
The knowledge about padding is used\n for pad_remover(efficiency) and to mask out padding in convolutional\n layers.\n save_weights_to: an optional dictionary to capture attention weights\n for visualization; the weights tensor will be appended there under\n a string key created from the variable scope (including name).\n make_image_summary: Whether to make an attention image summary.\n\n Returns:\n y: a Tensors\n \"\"\"\n x = encoder_input\n attention_dropout_broadcast_dims = (\n common_layers.comma_separated_string_to_integer_list(\n getattr(hparams, \"attention_dropout_broadcast_dims\", \"\")))\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,\n value=hparams.num_encoder_layers or hparams.num_hidden_layers)\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,\n value=hparams.attention_dropout)\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_ATTENTION_DENSE,\n value={\n \"use_bias\": \"false\",\n \"num_heads\": hparams.num_heads,\n \"hidden_size\": hparams.hidden_size\n })\n\n with tf.variable_scope(name):\n if nonpadding is not None:\n padding = 1.0 - nonpadding\n else:\n padding = common_attention.attention_bias_to_padding(\n encoder_self_attention_bias)\n nonpadding = 1.0 - padding\n pad_remover = None\n if hparams.use_pad_remover and not common_layers.is_xla_compiled():\n pad_remover = expert_utils.PadRemover(padding)\n for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):\n\n initial_sparsity = None\n if hparams.get(\"load_masks_from\"):\n initial_sparsity = hparams.get(\"initial_sparsity\")\n\n with tf.variable_scope(\"layer_%d\" % layer):\n with tf.variable_scope(\"self_attention\"):\n y = sparse_attention.multihead_attention(\n common_layers.layer_preprocess(x, hparams),\n None,\n encoder_self_attention_bias,\n hparams.attention_key_channels or hparams.hidden_size,\n hparams.attention_value_channels or hparams.hidden_size,\n hparams.hidden_size,\n hparams.num_heads,\n hparams.attention_dropout,\n attention_type=hparams.self_attention_type,\n max_relative_position=hparams.max_relative_position,\n heads_share_relative_embedding=(\n hparams.heads_share_relative_embedding),\n add_relative_to_values=hparams.add_relative_to_values,\n save_weights_to=save_weights_to,\n make_image_summary=make_image_summary,\n dropout_broadcast_dims=attention_dropout_broadcast_dims,\n max_length=hparams.get(\"max_length\"),\n vars_3d=hparams.get(\"attention_variables_3d\"),\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity,\n split_heads=hparams.get(\"split_heads\"))\n x = common_layers.layer_postprocess(x, y, hparams)\n with tf.variable_scope(\"ffn\"):\n y = transformer_ffn_layer(\n common_layers.layer_preprocess(x, hparams),\n hparams,\n pad_remover)\n x = common_layers.layer_postprocess(x, y, hparams)\n # if normalization is done in layer_preprocess, then it should also be done\n # on the output, since the output can grow very large, being the sum of\n # a whole stack of unnormalized layer outputs.\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_NORM,\n value={\"hidden_size\": hparams.hidden_size})\n return common_layers.layer_preprocess(x, hparams)\n\n\ndef transformer_ffn_layer(x, hparams, pad_remover=None):\n \"\"\"Feed-forward layer in the transformer.\n\n Args:\n x: a Tensor of shape [batch_size, 
length, hparams.hidden_size]\n hparams: hyperparameters for model\n pad_remover: an expert_utils.PadRemover object tracking the padding\n positions. If provided, when using convolutional settings, the padding\n is removed before applying the convolution, and restored afterward. This\n can give a significant speedup.\n\n Returns:\n a Tensor of shape [batch_size, length, hparams.hidden_size]\n\n Raises:\n ValueError: If losses arg is None, but layer generates extra losses.\n \"\"\"\n ffn_layer = hparams.ffn_layer\n if ffn_layer != \"dense_relu_dense\":\n raise ValueError(\"sparse transformer only supports dense_relu_dense ffn.\")\n\n relu_dropout_broadcast_dims = (\n common_layers.comma_separated_string_to_integer_list(\n getattr(hparams, \"relu_dropout_broadcast_dims\", \"\")))\n # In simple convolution mode, use `pad_remover` to speed up processing.\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE,\n value={\n \"filter_size\": hparams.filter_size,\n \"use_bias\": \"True\",\n \"activation\": mlperf_log.RELU\n })\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE,\n value={\n \"hidden_size\": hparams.hidden_size,\n \"use_bias\": \"True\",\n })\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout)\n if pad_remover:\n original_shape = common_layers.shape_list(x)\n # Collapse `x` across examples, and remove padding positions.\n x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))\n x = tf.expand_dims(pad_remover.remove(x), axis=0)\n\n initial_sparsity = None\n if hparams.get(\"load_masks_from\"):\n initial_sparsity = hparams.get(\"initial_sparsity\")\n\n conv_output = sparse_layers.dense_relu_dense(\n x,\n hparams.filter_size,\n hparams.hidden_size,\n dropout=hparams.relu_dropout,\n dropout_broadcast_dims=relu_dropout_broadcast_dims,\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity)\n if pad_remover:\n # Restore `conv_output` to the original shape of `x`, including padding.\n conv_output = tf.reshape(\n pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape)\n return conv_output\n",
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Large Margin Loss Function.\n\nLet d be the estimated distance to boundary, and gamma and alpha\nare margin loss parameters (gamma > 0 and alpha > 0).\n\nThe original margin loss can be written as:\n\nloss = max(0, min(gamma - d, alpha * gamma))\n\nThe formulation written here can be obtained as:\nmin(gamma - d, alpha * gamma)\n = gamma + min(-d, alpha * gamma - gamma)\n = gamma - max(d, gamma - alpha * gamma)\n = gamma - max(d, gamma * (1-alpha))\n\nOne can see from here that the lower bound to distance to boundary is\ndistance_lower = gamma * (1-alpha).\n\nloss = max(0, gamma - max(d, distance_lower))\nLooking further:\nloss = gamma + max(-gamma, -max(d, distance_lower))\n = gamma - min(gamma, max(d, distance_lower))\n\nOne can see from here that the distance is upper bounded by gamma.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef get_norm_fn(norm_type):\n norm_fn = lambda x: tf.norm(x, ord=norm_type)\n return norm_fn\n\n\ndef maximum_with_relu(a, b):\n return a + tf.nn.relu(b - a)\n\n\ndef _ensure_large_margin_args(name, sentinel, one_hot_labels, logits,\n layers_list, dist_norm, layers_weights):\n \"\"\"Ensures arguments are correct.\"\"\"\n # Make sure that all arguments were passed as named arguments.\n if sentinel is not None:\n raise ValueError(\n \"Only call `%s` with \"\n \"named arguments (one_hot_labels=..., logits=..., ...)\" % name)\n if (one_hot_labels is None or logits is None or not layers_list):\n raise ValueError(\"logits, one_hot_labels and layers_list must be provided.\")\n\n if dist_norm not in {1, 2, np.inf}:\n raise ValueError(\"dist_norm must be 1, 2, or np.inf.\")\n\n if layers_weights is not None and len(layers_weights) != len(layers_list):\n raise ValueError(\n \"layers_weights must have the same length as layers_list.\")\n\n\ndef large_margin( # pylint: disable=invalid-name\n _sentinel=None,\n logits=None,\n one_hot_labels=None,\n layers_list=None,\n gamma=10000,\n alpha_factor=2,\n top_k=1,\n dist_norm=2,\n epsilon=1e-8,\n use_approximation=True,\n worst_case_loss=True,\n layers_weights=None,\n loss_collection=tf.GraphKeys.LOSSES):\n \"\"\"Creates a large margin loss.\n\n Args:\n _sentinel: Used to prevent positional parameters. Internal, do not use.\n logits: Float `[batch_size, num_classes]` logits outputs of the network.\n one_hot_labels: `[batch_size, num_classes]` Target integer labels in `{0,\n 1}`.\n layers_list: List of network Tensors at different layers. The large margin\n is enforced at the layers specified.\n gamma: Desired margin, and distance to boundary above the margin will be\n clipped.\n alpha_factor: Factor to determine the lower bound of margin. 
Both gamma and\n alpha_factor determine points to include in training the margin these\n points lie with distance to boundary of [gamma * (1 - alpha), gamma]\n top_k: Number of top classes to include in the margin loss.\n dist_norm: Distance to boundary defined on norm (options: be 1, 2, np.inf).\n epsilon: Small number to avoid division by 0.\n use_approximation: If true, use approximation of the margin gradient for\n less computationally expensive training.\n worst_case_loss: (Boolean) Use the minimum distance to boundary of the top_k\n if true, otherwise, use the of the losses of the top_k classes. When\n top_k = 1, both True and False choices are equivalent.\n layers_weights: (List of float) Weight for loss from each layer.\n loss_collection: Collection to which the loss will be added.\n\n Returns:\n loss: Scalar `Tensor` of the same type as `logits`.\n Raises:\n ValueError: If the shape of `logits` doesn't match that of\n `one_hot_labels`. Also if `one_hot_labels` or `logits` is None.\n \"\"\"\n\n _ensure_large_margin_args(\"large_margin\", _sentinel, one_hot_labels, logits,\n layers_list, dist_norm, layers_weights)\n logits = tf.convert_to_tensor(logits)\n one_hot_labels = tf.cast(one_hot_labels, logits.dtype)\n logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())\n\n layers_weights = [1.] * len(\n layers_list) if layers_weights is None else layers_weights\n assert top_k > 0\n assert top_k <= logits.get_shape()[1]\n\n dual_norm = {1: np.inf, 2: 2, np.inf: 1}\n norm_fn = get_norm_fn(dual_norm[dist_norm])\n with tf.name_scope(\"large_margin_loss\"):\n class_prob = tf.nn.softmax(logits)\n # Pick the correct class probability.\n correct_class_prob = tf.reduce_sum(\n class_prob * one_hot_labels, axis=1, keepdims=True)\n\n # Class probabilities except the correct.\n other_class_prob = class_prob * (1. - one_hot_labels)\n if top_k > 1:\n # Pick the top k class probabilities other than the correct.\n top_k_class_prob, _ = tf.nn.top_k(other_class_prob, k=top_k)\n else:\n top_k_class_prob = tf.reduce_max(other_class_prob, axis=1, keepdims=True)\n\n # Difference between correct class probailities and top_k probabilities.\n difference_prob = correct_class_prob - top_k_class_prob\n losses_list = []\n for wt, layer in zip(layers_weights, layers_list):\n difference_prob_grad = [\n tf.layers.flatten(tf.gradients(difference_prob[:, i], layer)[0])\n for i in range(top_k)\n ]\n\n difference_prob_gradnorm = tf.concat([\n tf.map_fn(norm_fn, difference_prob_grad[i])[:, tf.newaxis] / wt\n for i in range(top_k)\n ], axis=1)\n\n if use_approximation:\n difference_prob_gradnorm = tf.stop_gradient(difference_prob_gradnorm)\n\n distance_to_boundary = difference_prob / (\n difference_prob_gradnorm + epsilon)\n\n if worst_case_loss:\n # Only consider worst distance to boundary.\n distance_to_boundary = tf.reduce_min(distance_to_boundary, axis=1,\n keepdims=True)\n\n # Distances to consider between distance_upper and distance_lower bounds\n distance_upper = gamma\n distance_lower = gamma * (1 - alpha_factor)\n\n # Enforce lower bound.\n loss_layer = maximum_with_relu(distance_to_boundary, distance_lower)\n\n # Enforce upper bound.\n loss_layer = maximum_with_relu(\n 0, distance_upper - loss_layer) - distance_upper\n\n loss_layer = tf.reduce_sum(loss_layer, axis=1)\n\n losses_list.append(tf.reduce_mean(loss_layer))\n\n loss = tf.reduce_mean(losses_list)\n # Add loss to loss_collection.\n tf.losses.add_loss(loss, loss_collection)\n return loss\n",
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"AWD ENAS fixed model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom enas_lm.src import data_utils\nfrom enas_lm.src import utils\n\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\nflags.DEFINE_string('fixed_arc', None, '')\nflags.DEFINE_float('child_alpha', 0.7, 'activation L2 reg')\nflags.DEFINE_float('child_drop_e', 0.125, 'drop rate words')\nflags.DEFINE_float('child_drop_i', 0.175, 'drop rate embeddings')\nflags.DEFINE_float('child_drop_l', 0.225, 'drop rate between layers')\nflags.DEFINE_float('child_drop_o', 0.75, 'drop rate output')\nflags.DEFINE_float('child_drop_w', 0.00, 'drop rate weight')\nflags.DEFINE_float('child_drop_x', 0.725, 'drop rate at input of RNN cells')\nflags.DEFINE_float('child_init_range', 0.05, '')\nflags.DEFINE_float('child_grad_bound', 0.25, '')\nflags.DEFINE_float('child_weight_decay', 8e-7, '')\nflags.DEFINE_integer('child_num_train_epochs', 3000, '')\nflags.DEFINE_integer('child_hidden_size', 800, '')\n\n\ndef _gen_mask(shape, drop_prob):\n \"\"\"Generate a droppout mask.\"\"\"\n keep_prob = 1. 
- drop_prob\n mask = tf.random_uniform(shape, minval=0., maxval=1., dtype=tf.float32)\n mask = tf.floor(mask + keep_prob) / keep_prob\n return mask\n\n\ndef _rnn_fn(x, prev_s, w_prev, w_skip, input_mask, layer_mask, params):\n \"\"\"Multi-layer LSTM.\n\n Args:\n x: [batch_size, num_steps, hidden_size].\n prev_s: [batch_size, hidden_size].\n w_prev: [2 * hidden_size, 2 * hidden_size].\n w_skip: [None, [hidden_size, 2 * hidden_size] * (num_layers-1)].\n input_mask: [batch_size, hidden_size].\n layer_mask: [batch_size, hidden_size].\n params: hyper-params object.\n\n Returns:\n next_s: [batch_size, hidden_size].\n all_s: [[batch_size, num_steps, hidden_size] * num_layers].\n \"\"\"\n batch_size = x.get_shape()[0].value\n num_steps = tf.shape(x)[1]\n fixed_arc = params.fixed_arc\n num_layers = len(fixed_arc) // 2\n\n all_s = tf.TensorArray(dtype=tf.float32, size=num_steps, infer_shape=False)\n\n def _condition(step, *unused_args):\n return tf.less(step, num_steps)\n\n def _body(step, prev_s, all_s):\n \"\"\"Body fn for `tf.while_loop`.\"\"\"\n inp = x[:, step, :]\n if layer_mask is not None:\n assert input_mask is not None\n ht = tf.matmul(\n tf.concat([inp * input_mask, prev_s * layer_mask], axis=1), w_prev)\n else:\n ht = tf.matmul(tf.concat([inp, prev_s], axis=1), w_prev)\n h, t = tf.split(ht, 2, axis=1)\n h = tf.tanh(h)\n t = tf.sigmoid(t)\n s = prev_s + t * (h - prev_s)\n layers = [s]\n\n def _select_function(h, function_id):\n if function_id == 0:\n return tf.tanh(h)\n elif function_id == 1:\n return tf.nn.relu(h)\n elif function_id == 2:\n return tf.sigmoid(h)\n elif function_id == 3:\n return h\n raise ValueError('Unknown func_idx {0}'.format(function_id))\n\n start_idx = 0\n for layer_id in range(num_layers):\n prev_idx = fixed_arc[start_idx]\n func_idx = fixed_arc[start_idx + 1]\n prev_s = layers[prev_idx]\n if layer_mask is not None:\n ht = tf.matmul(prev_s * layer_mask, w_skip[layer_id])\n else:\n ht = tf.matmul(prev_s, w_skip[layer_id])\n h, t = tf.split(ht, 2, axis=1)\n\n h = _select_function(h, func_idx)\n t = tf.sigmoid(t)\n s = prev_s + t * (h - prev_s)\n s.set_shape([batch_size, params.hidden_size])\n layers.append(s)\n start_idx += 2\n\n next_s = tf.add_n(layers[1:]) / tf.cast(num_layers, dtype=tf.float32)\n all_s = all_s.write(step, next_s)\n return step + 1, next_s, all_s\n\n loop_inps = [tf.constant(0, dtype=tf.int32), prev_s, all_s]\n _, next_s, all_s = tf.while_loop(_condition, _body, loop_inps)\n all_s = tf.transpose(all_s.stack(), [1, 0, 2])\n\n return next_s, all_s\n\n\ndef _set_default_params(params):\n \"\"\"Set default values for the hparams.\"\"\"\n params.add_hparam('alpha', FLAGS.child_alpha) # activation L2 reg\n params.add_hparam('best_valid_ppl_threshold', 10)\n\n params.add_hparam('batch_size', 64)\n params.add_hparam('bptt_steps', 35)\n\n # for dropouts: dropping rate, NOT keeping rate\n params.add_hparam('drop_e', FLAGS.child_drop_e) # word\n params.add_hparam('drop_i', FLAGS.child_drop_i) # embeddings\n params.add_hparam('drop_l', FLAGS.child_drop_l) # between RNN nodes\n params.add_hparam('drop_o', FLAGS.child_drop_o) # output\n params.add_hparam('drop_w', FLAGS.child_drop_w) # weight\n params.add_hparam('drop_x', FLAGS.child_drop_x) # input to RNN layers\n\n assert FLAGS.fixed_arc is not None\n params.add_hparam('fixed_arc', [int(d) for d in FLAGS.fixed_arc.split(' ')])\n\n params.add_hparam('grad_bound', FLAGS.child_grad_bound)\n params.add_hparam('hidden_size', FLAGS.child_hidden_size)\n params.add_hparam('init_range', FLAGS.child_init_range)\n 
params.add_hparam('learning_rate', 20.)\n params.add_hparam('num_train_epochs', FLAGS.child_num_train_epochs)\n params.add_hparam('num_warmup_epochs', 0.0)\n params.add_hparam('vocab_size', 10000)\n\n params.add_hparam('weight_decay', FLAGS.child_weight_decay)\n return params\n\n\nclass LM(object):\n \"\"\"Language model.\"\"\"\n\n def __init__(self, params, x_train, x_valid, x_test, name='language_model'):\n print('-' * 80)\n print('Building LM')\n\n self.params = _set_default_params(params)\n self.name = name\n\n # train data\n (self.x_train, self.y_train,\n self.num_train_batches, self.reset_start_idx,\n self.should_reset, self.base_bptt) = data_utils.input_producer(\n x_train, params.batch_size, params.bptt_steps, random_len=True)\n params.add_hparam(\n 'num_train_steps', self.num_train_batches * params.num_train_epochs)\n\n # valid data\n (self.x_valid, self.y_valid,\n self.num_valid_batches) = data_utils.input_producer(\n x_valid, params.batch_size, params.bptt_steps)\n\n # test data\n (self.x_test, self.y_test,\n self.num_test_batches) = data_utils.input_producer(x_test, 1, 1)\n\n params.add_hparam('num_warmup_steps',\n params.num_warmup_epochs * self.num_train_batches)\n self._build_params()\n self._build_train()\n self._build_valid()\n self._build_test()\n\n def _build_params(self):\n \"\"\"Create model parameters.\"\"\"\n\n print('-' * 80)\n print('Building model params')\n initializer = tf.initializers.random_uniform(minval=-self.params.init_range,\n maxval=self.params.init_range)\n with tf.variable_scope(self.name, initializer=initializer):\n with tf.variable_scope('embedding'):\n w_emb = tf.get_variable(\n 'w', [self.params.vocab_size, self.params.hidden_size],\n initializer=initializer)\n dropped_w_emb = tf.layers.dropout(\n w_emb, self.params.drop_e, [self.params.vocab_size, 1],\n training=True)\n\n hidden_size = self.params.hidden_size\n fixed_arc = self.params.fixed_arc\n num_layers = len(fixed_arc) // 2\n with tf.variable_scope('rnn_cell'):\n w_prev = tf.get_variable('w_prev', [2 * hidden_size, 2 * hidden_size])\n i_mask = tf.ones([hidden_size, 2 * hidden_size], dtype=tf.float32)\n h_mask = _gen_mask([hidden_size, 2 * hidden_size], self.params.drop_w)\n mask = tf.concat([i_mask, h_mask], axis=0)\n dropped_w_prev = w_prev * mask\n\n w_skip, dropped_w_skip = [], []\n for layer_id in range(num_layers):\n mask = _gen_mask([hidden_size, 2 * hidden_size], self.params.drop_w)\n with tf.variable_scope('layer_{}'.format(layer_id)):\n w = tf.get_variable('w', [hidden_size, 2 * hidden_size])\n dropped_w = w * mask\n w_skip.append(w)\n dropped_w_skip.append(dropped_w)\n\n with tf.variable_scope('init_states'):\n with tf.variable_scope('batch'):\n init_shape = [self.params.batch_size, hidden_size]\n batch_prev_s = tf.get_variable(\n 's', init_shape, dtype=tf.float32, trainable=False)\n zeros = np.zeros(init_shape, dtype=np.float32)\n batch_reset = tf.assign(batch_prev_s, zeros)\n with tf.variable_scope('test'):\n init_shape = [1, hidden_size]\n test_prev_s = tf.get_variable(\n 's', init_shape, dtype=tf.float32, trainable=False)\n zeros = np.zeros(init_shape, dtype=np.float32)\n test_reset = tf.assign(test_prev_s, zeros)\n\n num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])\n print('Model has {0} params'.format(num_params))\n\n self.batch_init_states = {\n 's': batch_prev_s,\n 'reset': batch_reset,\n }\n self.train_params = {\n 'w_emb': dropped_w_emb,\n 'w_prev': dropped_w_prev,\n 'w_skip': dropped_w_skip,\n 'w_soft': w_emb,\n }\n self.test_init_states = {\n 's': 
test_prev_s,\n 'reset': test_reset,\n }\n self.eval_params = {\n 'w_emb': w_emb,\n 'w_prev': w_prev,\n 'w_skip': w_skip,\n 'w_soft': w_emb,\n }\n\n def _forward(self, x, y, model_params, init_states, is_training=False):\n \"\"\"Computes the logits.\n\n Args:\n x: [batch_size, num_steps], input batch.\n y: [batch_size, num_steps], output batch.\n model_params: a `dict` of params to use.\n init_states: a `dict` of params to use.\n is_training: if `True`, will apply regularizations.\n\n Returns:\n loss: scalar, cross-entropy loss\n \"\"\"\n w_emb = model_params['w_emb']\n w_prev = model_params['w_prev']\n w_skip = model_params['w_skip']\n w_soft = model_params['w_soft']\n prev_s = init_states['s']\n\n emb = tf.nn.embedding_lookup(w_emb, x)\n batch_size = self.params.batch_size\n hidden_size = self.params.hidden_size\n if is_training:\n emb = tf.layers.dropout(\n emb, self.params.drop_i,\n [self.params.batch_size, 1, hidden_size], training=True)\n\n input_mask = _gen_mask([batch_size, hidden_size], self.params.drop_x)\n layer_mask = _gen_mask([batch_size, hidden_size], self.params.drop_l)\n else:\n input_mask = None\n layer_mask = None\n\n out_s, all_s = _rnn_fn(emb, prev_s, w_prev, w_skip, input_mask, layer_mask,\n self.params)\n top_s = all_s\n if is_training:\n top_s = tf.layers.dropout(top_s, self.params.drop_o,\n [batch_size, 1, hidden_size], training=True)\n\n carry_on = [tf.assign(prev_s, out_s)]\n logits = tf.einsum('bnh,vh->bnv', top_s, w_soft)\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,\n logits=logits)\n loss = tf.reduce_mean(loss)\n\n reg_loss = loss # loss + regularization_terms, for training only\n if is_training:\n # L2 weight reg\n reg_loss += self.params.weight_decay * tf.add_n(\n [tf.reduce_sum(w ** 2) for w in tf.trainable_variables()])\n\n # activation L2 reg\n reg_loss += self.params.alpha * tf.reduce_mean(all_s ** 2)\n\n with tf.control_dependencies(carry_on):\n loss = tf.identity(loss)\n if is_training:\n reg_loss = tf.identity(reg_loss)\n\n return reg_loss, loss\n\n def _build_train(self):\n \"\"\"Build training ops.\"\"\"\n print('-' * 80)\n print('Building train graph')\n reg_loss, loss = self._forward(self.x_train, self.y_train,\n self.train_params, self.batch_init_states,\n is_training=True)\n\n tf_vars = tf.trainable_variables()\n global_step = tf.train.get_or_create_global_step()\n lr_scale = (tf.cast(tf.shape(self.y_train)[-1], dtype=tf.float32) /\n tf.cast(self.params.bptt_steps, dtype=tf.float32))\n learning_rate = utils.get_lr(global_step, self.params) * lr_scale\n grads = tf.gradients(reg_loss, tf_vars)\n clipped_grads, grad_norm = tf.clip_by_global_norm(grads,\n self.params.grad_bound)\n\n (self.update_moving_avg_ops, self.use_moving_avg_vars,\n self.restore_normal_vars) = self._create_average_ops()\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n train_op = optimizer.apply_gradients(zip(clipped_grads, tf_vars),\n global_step=global_step)\n\n self.train_loss = loss\n self.train_op = train_op\n self.grad_norm = grad_norm\n self.learning_rate = learning_rate\n\n def _create_average_ops(self):\n \"\"\"Build moving average ops.\"\"\"\n print('Creating moving average ops')\n\n with tf.variable_scope('moving_avg_flag'):\n self.moving_avg_started = tf.get_variable(\n 'flag', [], tf.int32, initializer=tf.initializers.zeros(),\n trainable=False)\n self.start_moving_avg_op = tf.assign(self.moving_avg_started, 1)\n\n all_vars = tf.trainable_variables()\n average_pairs = []\n var_cnt = 0\n with tf.variable_scope('average'):\n for v 
in all_vars:\n avg_v = tf.get_variable(\n str(var_cnt), shape=v.shape, dtype=v.dtype,\n initializer=tf.zeros_initializer, trainable=False)\n var_cnt += 1\n average_pairs.append([v, avg_v])\n backup_pairs = []\n var_cnt = 0\n with tf.variable_scope('backup'):\n for v in all_vars:\n backup_v = tf.get_variable(str(var_cnt), shape=v.shape, dtype=v.dtype,\n trainable=False)\n var_cnt += 1\n backup_pairs.append([v, backup_v])\n\n with tf.variable_scope('avg_step'):\n avg_step = tf.get_variable('step', [], dtype=tf.float32, trainable=False)\n\n with tf.control_dependencies([tf.assign_add(avg_step, 1.0)]):\n average_op = []\n for v, avg_v in average_pairs:\n mu = 1 / avg_step\n new_avg = mu * v + (1 - mu) * avg_v\n with tf.control_dependencies([new_avg]):\n average_op.append(tf.assign(avg_v, new_avg))\n\n assert len(average_pairs) == len(all_vars)\n assert len(average_pairs) == len(backup_pairs)\n use_average_op = []\n for i in range(len(average_pairs)):\n v, avg_v = average_pairs[i]\n _, backup_v = backup_pairs[i]\n with tf.control_dependencies([tf.assign(backup_v, v)]):\n use_average_op.append(tf.assign(v, avg_v))\n use_average_op = tf.group(* use_average_op)\n\n reverse_average_op = []\n for v, backup_v in backup_pairs:\n reverse_average_op.append(tf.assign(v, backup_v))\n reverse_average_op = tf.group(* reverse_average_op)\n\n return average_op, use_average_op, reverse_average_op\n\n def _build_valid(self):\n print('Building valid graph')\n _, loss = self._forward(self.x_valid, self.y_valid,\n self.eval_params, self.batch_init_states)\n self.valid_loss = loss\n\n def _build_test(self):\n print('Building test graph')\n _, loss = self._forward(self.x_test, self.y_test,\n self.eval_params, self.test_init_states)\n self.test_loss = loss\n\n def eval_valid(self, sess, use_moving_avg=False):\n \"\"\"Eval 1 round on valid set.\"\"\"\n total_loss = 0\n if use_moving_avg:\n sess.run([self.use_moving_avg_vars, self.batch_init_states['reset']])\n for _ in range(self.num_valid_batches):\n total_loss += sess.run(self.valid_loss)\n valid_ppl = np.exp(total_loss / self.num_valid_batches)\n print('valid_ppl={0:<.2f}'.format(valid_ppl))\n if use_moving_avg:\n sess.run(self.restore_normal_vars)\n\n return valid_ppl\n\n def eval_test(self, sess, use_moving_avg=False):\n \"\"\"Eval 1 round on test set.\"\"\"\n total_loss = 0\n if use_moving_avg:\n sess.run([self.use_moving_avg_vars, self.test_init_states['reset']])\n for step in range(self.num_test_batches):\n total_loss += sess.run(self.test_loss)\n if (step + 1) % 1000 == 0:\n test_ppl = np.exp(total_loss / (step + 1))\n log_string = 'step={0:<6d}'.format(step + 1)\n log_string += ' test_ppl={0:<.2f}'.format(test_ppl)\n print(log_string)\n test_ppl = np.exp(total_loss / self.num_test_batches)\n log_string = 'step={0:<6d}'.format(self.num_test_batches)\n log_string += ' test_ppl={0:<.2f}'.format(test_ppl)\n print(log_string)\n if use_moving_avg:\n sess.run(self.restore_normal_vars)\n\n return test_ppl\n\n",
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=line-too-long\n\"\"\"Simple simulated linear regression experiment.\n\nRuns RecursiveOptimizer on a linear regression problem.\nUsage:\nFrom google-research/\npython -m recursive_optimizer.synthetic_experiment --optimizer=recursive --steps=10000 --learning_rate=1.0 --conditioning=min --inner_optimizer=SCINOL --eta=0.0 --tau=0.00001 --betting_domain=0.5 --epsilon=1.0 --epsilon_v=1.0\n\n(set ARGS to --help to see list of arguments with defaults)\n\"\"\"\n# pylint: enable=line-too-long\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport numpy as np\nimport tensorflow as tf\nimport time\n\nfrom recursive_optimizer.recursive_optimizer import RecursiveOptimizer\n\nFLAGS = flags.FLAGS\n\n# Problem definition\nflags.DEFINE_integer('dimension', 100, 'Dataset dimension.')\nflags.DEFINE_integer('dataset_size', 1000, 'Dataset size.')\nflags.DEFINE_float('noise_scale', 0.0, 'scale of noise in data.')\nflags.DEFINE_float('skewness', 750.0, 'Amount of ill-conditioning in data.')\nflags.DEFINE_float('distance', 10.0, 'Norm of optimal weights.')\nflags.DEFINE_enum(\n 'conditioning', 'min', ['min', 'max'],\n 'Whether to set the target to be the minimum or maximum eigenvalue of the'\n 'covariance matrix.')\nflags.DEFINE_string('loss', 'abs', 'Loss function to use.')\n# Optimization\nflags.DEFINE_integer('steps', 200000, 'Number of train steps.')\nflags.DEFINE_enum('optimizer', 'recursive', ['recursive', 'adagrad'],\n 'Which optimizer to test.')\nflags.DEFINE_string(\n 'data_output', '',\n 'If not empty then save Step,Wall Time,Loss details into this CSV file.')\n\n# Remaining flags configure the optimizer.\n# See parameters of RecursiveOptimizer's constructor in recursive_optimizer.py\n# for their description.\n\n# Should be set to 0.01 for Adagrad.\nflags.DEFINE_float('learning_rate', 1.0, 'learning rate')\nflags.DEFINE_float('tau', 0.0, 'tau')\nflags.DEFINE_float('g_max', 0.0, 'g_max')\nflags.DEFINE_string('inner_optimizer', 'SCINOL', 'inner_optimizer')\n# The only non-theoretically sound part of the code takes a minimum between eta\n# and some other quantitiy. 
This is useful in practice in deep learning tasks,\n# but for simulations to test the pure theory, we can turn it off\n# by setting eta to be very large.\nflags.DEFINE_float('eta', 10000000.0, 'eta')\nflags.DEFINE_float('epsilon', 1.0, 'epsilon')\nflags.DEFINE_float('epsilon_v', 1.0, 'epsilon_v')\nflags.DEFINE_bool('add_average', False, 'add_average')\nflags.DEFINE_float('betting_domain', 0.5, 'betting_domain')\n\n\ndef make_features(dataset_size, dimension, optimal_weights, root_covariance,\n noise_scale):\n \"\"\"Creates synthetic linear regression dataset.\n\n Args:\n dataset_size: number of rows in the dataset\n dimension: number of columns\n optimal_weights: target weight vector\n root_covariance: scaling matrix for dataset. The features in the dataset\n are drawn from a normal distribution with covariance root_covariance**2\n The labels are generated by multiplying the features by the optimal weight\n vector.\n\n Returns:\n features, labels\n \"\"\"\n\n un_skewed = np.random.normal(0.0, 1.0,\n [dataset_size, dimension]) / np.sqrt(dimension)\n skewed = np.dot(un_skewed, root_covariance.T)\n evals, _ = np.linalg.eig(root_covariance.dot(root_covariance.T))\n # print('eigenvalues: ', evals)\n\n products = skewed.dot(optimal_weights)\n product_sum = np.sum(products**2)\n\n average_product_sum = product_sum / dataset_size\n # skewed = skewed / np.sqrt(average_product_sum)\n\n products = skewed.dot(optimal_weights)\n product_sum = np.sum(products**2)\n\n coord_sum = np.sum(np.sqrt(np.sum(skewed * skewed, axis=0)))\n\n labels = np.dot(skewed, optimal_weights)\n\n # Uncomment next two lines to normalize so that loss at origin is 1.\n # Note that by default we are normalized so that target weights have norm\n # FLAGS.distance, which may be more appropriate since this is the parameter\n # that shows up in regret bounds.\n\n skewed = skewed/np.linalg.norm(labels)*np.sqrt(dataset_size)\n labels = np.dot(skewed, optimal_weights)\n\n print('label norm: ', np.sum(labels * labels) / dataset_size)\n skewed = tf.constant(skewed, dtype=tf.float32)\n labels = tf.constant(labels, dtype=tf.float32)\n\n # Uncomment to add noise to the labels.\n\n # noise = tf.constant(np.random.normal(0,\n # noise_scale,\n # [dataset_size, 1]),\n # dtype=tf.float32)\n # labels = labels + noise\n\n print('product sum: ', product_sum)\n print('coord sum: ', coord_sum)\n return skewed, labels\n\n\ndef generate_random_root_covariance(skewness, dimension):\n \"\"\"creates random root covariance matrix for use in make_features.\n\n Args:\n skewness: parameter indicating how much the eigenvalues of the matrix should\n decay.\n dimension: dimension of covariance matrix.\n\n Returns:\n covariance matrix, minimum eigenvector\n \"\"\"\n print('skewness: ', skewness)\n print('dimension: ', dimension)\n D = np.diag(\n np.sqrt(np.exp(np.linspace(0.0, np.log(skewness), dimension)) / skewness))\n\n U, _ = np.linalg.qr(np.random.normal(0.0, 1.0, [dimension, dimension]))\n V, _ = np.linalg.qr(np.random.normal(0.0, 1.0, [dimension, dimension]))\n\n root_covariance = U.dot(D).dot(V.T)\n\n evals, evecs = np.linalg.eig(root_covariance.dot(root_covariance.T))\n\n if FLAGS.conditioning == 'min':\n target = (evecs[:, np.argmin(evals)]).flatten()\n else:\n target = (evecs[:, np.argmax(evals)]).flatten()\n return root_covariance, target\n\n\ndef generate_optimal_weights(dimension, distance):\n \"\"\"Makes random optimal weights for use in make_features.\"\"\"\n direction = np.random.normal(0.0, 1.0, [dimension])\n direction = direction / 
np.linalg.norm(direction)\n weights = distance * direction\n weights = np.reshape(weights, [dimension, 1])\n return tf.to_float(tf.constant(weights))\n\n\ndef get_train_step(weights, training_data, optimizer, wealth):\n features = training_data[0]\n label = training_data[1]\n reshaped_weights = tf.reshape(weights, [-1])\n if FLAGS.loss == 'abs':\n loss = tf.abs(tf.reduce_sum(features * reshaped_weights) - label)\n else:\n loss = tf.reduce_sum(features * reshaped_weights) #tf.log(1+tf.exp(-tf.reduce_sum(features * reshaped_weights) * label))\n wealth_update = tf.assign_add(wealth, loss)\n with tf.control_dependencies([wealth_update]):\n train_step = optimizer.minimize(loss, var_list=[weights])\n return tf.group(train_step, wealth_update)\n\n\ndef eval_model(weights, data, dataset_size):\n features = data[0]\n labels = data[1]\n if FLAGS.loss=='abs':\n loss = tf.reduce_sum(\n tf.abs(tf.reshape(tf.matmul(features, weights), [-1]) - labels) /\n dataset_size)\n else:\n loss = tf.reduce_sum(tf.matmul(features, weights)/dataset_size)# tf.reduce_sum(tf.log(1+tf.exp(-tf.reshape(tf.matmul(features, weights), [-1]) * labels)) / dataset_size)\n return loss\n\n\ndef train_model(optimizer, dimension, dataset_size, root_covariance, scale,\n optimal_weights, steps):\n \"\"\"Generates synthetic data and trains the model.\n\n Args:\n optimizer: optimizer for use in training.\n dimension: dimension of features.\n dataset_size: size of synthetic dataset.\n root_covariance: root covariance matrix of data features.\n scale: Noise scale\n optimal_weights: target weight vector.\n steps: number of training steps.\n\n Returns:\n final_difference: scaled norm of difference between learned and optimal\n weights.\n final_loss: final training loss.\n final_weights: learned weights.\n optimal_weights_eval: optimal_weights evaluated (not as a Tensor).\n \"\"\"\n if FLAGS.data_output != '':\n outputfile = open(FLAGS.data_output, 'w')\n outputfile.write('{}, {}, {}\\n'.format('Step', 'Value', 'Wall time'))\n full_batch = make_features(dataset_size, dimension, optimal_weights,\n root_covariance, scale)\n optimal_weights = tf.constant(optimal_weights, dtype=tf.float32)\n training_data = tf.data.Dataset.from_tensor_slices(full_batch)\n weights = tf.Variable(tf.zeros([dimension, 1]))\n wealth = tf.Variable(0.0)\n iterator = training_data.shuffle(\n buffer_size=1000).repeat().make_initializable_iterator()\n example = iterator.get_next()\n train_step = get_train_step(weights, example, optimizer, wealth)\n global_initializer = tf.global_variables_initializer()\n session = tf.Session()\n iter_initializer = iterator.initializer\n\n difference = tf.norm(\n tf.reshape(optimal_weights, [-1]) -\n tf.reshape(weights, [-1])) / tf.norm(optimal_weights)\n eval_loss = eval_model(weights, full_batch, dataset_size)\n eval_zero = eval_model(0 * weights, full_batch, dataset_size)\n session.run(global_initializer)\n session.run(iter_initializer)\n\n for stepnum in range(steps):\n if stepnum % 50000 == 0:\n print('iteration: ', stepnum)\n session.run(train_step)\n if stepnum % 1000 == 0 and FLAGS.data_output != '':\n with tf.control_dependencies([eval_zero]):\n current_eval = session.run(eval_loss)\n outputfile.write('{}, {}, {}\\n'.format(stepnum, current_eval, time.time()))\n final_wealth = session.run(wealth)\n print('final wealth: ', final_wealth)\n final_difference = session.run(difference)\n final_loss = session.run(eval_loss)\n initial_loss = session.run(eval_zero)\n final_weights = session.run(tf.reshape(weights, [-1]))\n 
optimal_weights_eval = session.run(tf.reshape(optimal_weights, [-1]))\n session.close()\n print('initial loss: ', initial_loss)\n return final_difference, final_loss, final_weights, optimal_weights_eval\n\n\ndef train_and_report(dimension, dataset_size, skewness, scale, distance, steps,\n optimizer):\n \"\"\"Train on synthetic data.\n\n Args:\n dimension: dimension of features.\n dataset_size: size of dataset.\n skewness: parameter controlling eigenvalue decay of covariance. 1.0\n indicates no decay, higher indicates more decay.\n scale: parameter controlling overall scale of noise (operator norm of\n covariance matrix)\n distance: norm of target features.\n steps: number of train steps.\n optimizer: optimizer to use in training.\n\n Returns:\n final_difference: scaled norm of difference between learned and optimal\n weights.\n final_loss: final training loss.\n final_weights: learned weights.\n optimal_weights_eval: optimal_weights evaluated (not as a Tensor).\n \"\"\"\n root_covariance, optimal_weights = generate_random_root_covariance(\n skewness, dimension)\n optimal_weights = optimal_weights * distance\n return train_model(optimizer, dimension, dataset_size, root_covariance, scale,\n optimal_weights, steps)\n\n\ndef main(argv):\n del argv # Unused.\n\n dimension = FLAGS.dimension\n dataset_size = FLAGS.dataset_size\n skewness = FLAGS.skewness\n distance = FLAGS.distance\n steps = FLAGS.steps\n scale = FLAGS.noise_scale\n lr = FLAGS.learning_rate\n tau = FLAGS.tau\n g_max = FLAGS.g_max\n inner_optimizer = FLAGS.inner_optimizer\n eta = FLAGS.eta\n epsilon = FLAGS.epsilon\n epsilon_v = FLAGS.epsilon_v\n betting_domain = FLAGS.betting_domain\n add_average = FLAGS.add_average\n recursive_optimizer = RecursiveOptimizer(\n betting_domain=betting_domain,\n eta=eta,\n tau=tau,\n epsilon=epsilon,\n epsilon_v=epsilon_v,\n lr=lr,\n g_max=g_max,\n inner_optimizer=inner_optimizer,\n add_average=add_average)\n adagrad_optimizer = tf.train.AdagradOptimizer(FLAGS.learning_rate)\n if FLAGS.optimizer == 'recursive':\n optimizer = recursive_optimizer\n else:\n optimizer = adagrad_optimizer\n\n results, final_loss, final_weights, optimal_weights = train_and_report(\n dimension, dataset_size, skewness, scale, distance, steps, optimizer)\n print('Using Optimizer: ', FLAGS.optimizer)\n print('final difference: ', results)\n print('final loss: ', final_loss)\n # print('final weights: ', final_weights)\n # print('optimal weights: ', optimal_weights)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.image.resize_bilinear",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.layers.max_pooling2d",
"tensorflow.name_scope"
],
[
"numpy.arctan2",
"numpy.mean"
],
[
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.squeeze"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.relu",
"tensorflow.norm",
"tensorflow.nn.softmax",
"tensorflow.reduce_max",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.stop_gradient",
"tensorflow.nn.top_k",
"tensorflow.reduce_min",
"tensorflow.map_fn",
"tensorflow.name_scope",
"tensorflow.losses.add_loss"
],
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.initializers.zeros",
"tensorflow.control_dependencies",
"tensorflow.layers.dropout",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.tanh",
"tensorflow.group",
"numpy.exp",
"tensorflow.add_n",
"tensorflow.while_loop",
"tensorflow.assign_add",
"tensorflow.floor",
"tensorflow.gradients",
"tensorflow.train.get_or_create_global_step",
"tensorflow.trainable_variables",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.TensorArray",
"tensorflow.less",
"tensorflow.identity",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.initializers.random_uniform",
"tensorflow.split",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.relu",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.assign",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.einsum",
"tensorflow.clip_by_global_norm",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
],
[
"numpy.dot",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"numpy.argmin",
"tensorflow.group",
"tensorflow.assign_add",
"tensorflow.Variable",
"numpy.reshape",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.train.AdagradOptimizer",
"tensorflow.norm",
"tensorflow.matmul",
"numpy.log",
"tensorflow.global_variables_initializer",
"numpy.sum",
"tensorflow.constant",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.reshape",
"numpy.linalg.norm",
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
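The `_gen_mask` helper in the ENAS language-model file above builds an inverted-dropout mask: kept entries are rescaled by `1/keep_prob` so the masked tensor keeps its expected value. A minimal NumPy-only sketch of the same trick (shape, drop rate, and RNG seed are arbitrary choices for illustration, not values from the record):

```python
import numpy as np

def gen_mask(shape, drop_prob, rng):
    """Inverted-dropout mask: entries are 0 or 1/keep_prob, so E[x * mask] == x."""
    keep_prob = 1.0 - drop_prob
    return np.floor(rng.uniform(size=shape) + keep_prob) / keep_prob

rng = np.random.default_rng(0)
x = np.ones((4, 3))
m = gen_mask(x.shape, drop_prob=0.25, rng=rng)
print(m)               # entries are either 0 or 1/0.75
print((x * m).mean())  # ~1.0 on average over many draws
```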
Room-10/Opymize | [
"8632e5b618f41a65b1a37df099a6da254500e14b"
] | [
"opymize/tools/__init__.py"
] | [
"\nimport numpy as np\n\ndef truncate(x, n):\n k = -int(np.floor(np.log10(abs(x))))\n # Example: x = 0.006142 => k = 3 / x = 2341.2 => k = -3\n k += n - 1\n if k > 0:\n x_str = str(abs(x))[:(k+2)]\n else:\n x_str = str(abs(x))[:n]+\"0\"*(-k)\n return np.sign(x)*float(x_str)\n\ndef solve_reduced_monic_cubic(a, b, soln=0):\n \"\"\" Solve x**3 + a*x + b = 0 using explicit formulas.\n\n Only real solutions are computed and in case more than one real\n solution exists, only one of them is returned.\n\n Args:\n a, b : array-like of floats, shape (nvals,)\n soln : int, one of 0,1,2\n Indicate which solution is returned in case of non-uniqueness.\n\n Returns:\n x : ndarray of floats, shape (nvals,)\n \"\"\"\n a, b = np.asarray(a), np.asarray(b)\n assert a.size == b.size\n a, b = a.ravel(), b.ravel()\n x, Q, Q3, R, D, arr1, arr2 = [np.zeros_like(a) for i in range(7)]\n\n # trivial case (a == 0):\n msk = (a == 0)\n x[msk] = np.cbrt(-b[msk])\n\n # nontrivial case (a != 0):\n msk = ~msk\n Q[msk], R[msk] = a[msk]/3, -b[msk]/2\n Q3[msk] = Q[msk]**3\n D[msk] = Q3[msk] + R[msk]**2\n\n # subcase with three real roots:\n msk2 = msk & (D <= 0)\n theta, sqrt_Q = arr1, arr2\n theta[msk2] = np.arccos(R[msk2]/np.sqrt(-Q3[msk2]))\n sqrt_Q[msk2] = np.sqrt(-Q[msk2])\n x[msk2] = 2*sqrt_Q[msk2]*np.cos((theta[msk2] + 2*soln*np.pi)/3.0)\n\n # subcase with unique real root:\n msk2 = msk & (D > 0)\n AD, BD = arr1, arr2\n AD[msk2] = np.cbrt(np.abs(R[msk2]) + np.sqrt(D[msk2]))*np.sign(R[msk2])\n msk3 = msk2 & (AD != 0)\n BD[msk3] = -Q[msk3]/AD[msk3]\n x[msk2] = AD[msk2] + BD[msk2]\n\n return x\n"
] | [
[
"numpy.sqrt",
"numpy.abs",
"numpy.asarray",
"numpy.cos",
"numpy.cbrt",
"numpy.sign",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
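`solve_reduced_monic_cubic` above returns one real root of `x**3 + a*x + b = 0` per input pair. A quick sanity check is to substitute the returned roots back into the polynomial; the import path below is an assumption based on the file location `opymize/tools/__init__.py`, and the coefficients are arbitrary test values:

```python
import numpy as np
# assumed import path, inferred from opymize/tools/__init__.py
from opymize.tools import solve_reduced_monic_cubic

a = np.array([0.0, -1.0, 2.0, -3.0])
b = np.array([-8.0, 0.5, 1.0, 1.0])
x = solve_reduced_monic_cubic(a, b)
residual = x**3 + a * x + b
print(np.max(np.abs(residual)))  # should be close to 0
```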
kaka-lin/ML-Notes | [
"047b88d59346b2ec719b1b3e2fcd605e1ccfaf91",
"047b88d59346b2ec719b1b3e2fcd605e1ccfaf91"
] | [
"Object Detection/iou.py",
"Model Serving/yolov3-ovms/app/yolov3-ovms-app.py"
] | [
"import numpy as np\n\n\ndef calculate_iou(bbox1, bbox2):\n \"\"\"\n calculate iou\n args:\n - bbox1 [array]: 1x4 single bbox\n - bbox2 [array]: 1x4 single bbox\n returns:\n - iou [float]: iou between 2 bboxes\n \"\"\"\n xmin = max(bbox1[0], bbox2[0]) # x_left\n ymin = max(bbox1[1], bbox2[1]) # y_top\n xmax = min(bbox1[2], bbox2[2]) # x_right\n ymax = min(bbox1[3], bbox2[3]) # y_bottom\n\n intersection = max(0, xmax - xmin + 1) * max(0, ymax - ymin + 1)\n bbox1_area = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])\n bbox2_area = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])\n\n union = bbox1_area + bbox2_area - intersection\n return intersection / union\n\n\nif __name__ == \"__main__\":\n bbox1 = np.array([661, 27, 679, 47])\n bbox2 = np.array([662, 27, 682, 47])\n iou = calculate_iou(bbox1, bbox2)\n print(iou)\n\n bbox1 = np.array([0, 0, 100, 100])\n bbox2 = np.array([101, 101, 200, 200])\n iou = calculate_iou(bbox1, bbox2)\n print(iou)\n",
"import os\nimport io\nimport json\nimport datetime\n\nimport cv2\nimport grpc\nimport numpy as np\nfrom flask import Flask, request, jsonify, Response\nfrom tensorflow import make_tensor_proto, make_ndarray\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nfrom common import load_classes, generate_colors, draw_outputs\nfrom yolo_utils import yolo_eval\n\n\ndef preprocess(image):\n image = np.array(image, dtype=np.float32)\n image = cv2.resize(image, (416, 416))\n\n # switch from HWC to CHW\n # and reshape to (1, 3, size, size)\n # for model input requirements\n image = image.transpose(2, 0, 1).reshape(1, 3, 416, 416)\n\n return image\n\n\ndef postprocess(boxes, scores, classes, class_names):\n detectedObjects = []\n\n if len(classes) > 0:\n for i in range(len(classes)):\n idx = int(classes[i])\n temp = boxes[i] # xmin, ymin, xmax, ymax\n\n dobj = {\n \"type\" : \"entity\",\n \"entity\" : {\n \"tag\" : {\n \"value\" : class_names[idx],\n \"confidence\" : str(scores[i].numpy())\n },\n \"box\" : {\n \"l\" : str(temp[0].numpy()), # xmin\n \"t\" : str(temp[1].numpy()), # ymax (from top)\n \"w\" : str((temp[2]-temp[0]).numpy()), # xmax-xmin\n \"h\" : str((temp[3]-temp[1]).numpy()) # ymax-ymin\n }\n }\n }\n\n detectedObjects.append(dobj)\n\n return detectedObjects\n\n\ndef yolo_score(image):\n model_name = \"yolov3\"\n input_layer = \"inputs\"\n output_layers = [\n \"detector/yolo-v3/Conv_14/BiasAdd/YoloRegion\",\n \"detector/yolo-v3/Conv_22/BiasAdd/YoloRegion\",\n \"detector/yolo-v3/Conv_6/BiasAdd/YoloRegion\"\n ]\n class_names = load_classes(\"model_data/coco.names\")\n results = {}\n\n print(\"Start processing:\")\n print(f\"\\tModel name: {model_name}\")\n\n with grpc.insecure_channel('ovms:9000') as channel:\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n\n image = preprocess(image)\n\n request = predict_pb2.PredictRequest()\n request.model_spec.name = model_name\n request.inputs[input_layer].CopyFrom(\n make_tensor_proto(image, shape=(image.shape)))\n start_time = datetime.datetime.now()\n # result includes a dictionary with all model outputs\n result = stub.Predict(request, 10.0)\n end_time = datetime.datetime.now()\n\n yolo_outputs = [[], [], []]\n for output_layer in output_layers:\n output = make_ndarray(result.outputs[output_layer])\n output_numpy = np.array(output)\n anchor_size = output_numpy.shape[2]\n output_numpy = output_numpy.transpose(0, 2, 3, 1).reshape(\n 1, anchor_size, anchor_size, 3, 85)\n yolo_outputs[int((anchor_size / 13) / 2)] = output_numpy\n\n scores, boxes, classes = yolo_eval(\n yolo_outputs,\n classes=80,\n score_threshold=0.5,\n iou_threshold=0.3\n )\n\n results = postprocess(boxes, scores, classes, class_names)\n\n return results\n\n\napp = Flask(__name__)\n\n# / routes to the default function which returns 'Hello World'\[email protected]('/', methods=['GET'])\ndef defaultPage():\n return Response(response='Hello from Yolov3 inferencing based OVMS', status=200)\n\n# /score routes to scoring function\n# This function returns a JSON object with inference duration and detected objects\[email protected]('/score', methods=['POST'])\ndef score():\n try:\n # get request as byte stream\n reqBody = request.get_data(False)\n\n # convert from byte stream\n inMemFile = io.BytesIO(reqBody)\n\n # load a sample image\n inMemFile.seek(0)\n fileBytes = np.asarray(bytearray(inMemFile.read()), dtype=np.uint8)\n\n cvImage = cv2.imdecode(fileBytes, cv2.IMREAD_COLOR)\n\n # Infer Image\n 
detectedObjects = yolo_score(cvImage)\n\n if len(detectedObjects) > 0:\n respBody = {\n \"inferences\" : detectedObjects\n }\n\n respBody = json.dumps(respBody)\n return Response(respBody, status= 200, mimetype ='application/json')\n else:\n return Response(status= 204)\n\n except Exception as err:\n return Response(response='[ERROR] Exception in score : {}'.format(repr(err)), status=500)\n\n\nif __name__ == '__main__':\n # Run the server\n app.run(host='0.0.0.0', port=8888)\n"
] | [
[
"numpy.array"
],
[
"tensorflow.make_ndarray",
"numpy.array",
"tensorflow.make_tensor_proto"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
}
] |
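The `calculate_iou` function in the record above mixes conventions: the intersection uses the pixel-inclusive `+1` offset while the box areas do not. A consistent float-coordinate variant (no `+1` anywhere) is sketched below; it is an illustration, not a drop-in replacement for the record's pixel-index version:

```python
import numpy as np

def iou(box1, box2):
    """IoU for float [xmin, ymin, xmax, ymax] boxes (no +1 pixel offset)."""
    xmin, ymin = max(box1[0], box2[0]), max(box1[1], box2[1])
    xmax, ymax = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter = max(0.0, xmax - xmin) * max(0.0, ymax - ymin)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    return inter / (area1 + area2 - inter)

print(iou(np.array([0, 0, 100, 100]), np.array([50, 50, 150, 150])))  # ~0.1429
```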
noatgnu/colossi | [
"c936bc25b5990f8dfbf4db3ed11ce8a893553668"
] | [
"model_test.py"
] | [
"import unittest\nfrom model import prediction_with_model\nimport pandas as pd\nimport numpy as np\n\nclass PredictionWithModel(unittest.TestCase):\n def test_prediction(self):\n d = pd.read_csv(r\"C:\\Users\\Toan\\Documents\\GitHub\\colossi\\static\\temp\\cc7deed8140745d89f2f42f716f6fd1b\\out_imac_atlas_expression_v7.1.tsv\", \" \")\n\n result = np.array([d['Freq'].to_list() + [0, 1800]])\n print(prediction_with_model(result))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mhrmm/geosolver | [
"13ae2972c58d5ba4c4878576f9fba8569cc99629",
"13ae2972c58d5ba4c4878576f9fba8569cc99629"
] | [
"geosolver/diagram/select_primitives.py",
"geosolver/utils/analysis.py"
] | [
"import logging\n\nimport numpy as np\n\nfrom geosolver.diagram.states import PrimitiveParse\nfrom geosolver.diagram.computational_geometry import line_length, circumference, \\\n distance_between_circle_and_point, midpoint, line_unit_vector, line_normal_vector, \\\n distance_between_points_squared, distance_between_line_and_point\nfrom geosolver.ontology.instantiator_definitions import instantiators\nimport geosolver.parameters as params\n\n\n__author__ = 'minjoon'\n\n\ndef select_primitives(primitive_parse):\n assert isinstance(primitive_parse, PrimitiveParse)\n if len(primitive_parse.primitives) == 0:\n logging.error(\"No primitive detected.\")\n return primitive_parse\n pixels_dict = _get_pixels_dict(primitive_parse,\n params.LINE_EPS, params.CIRCLE_EPS)\n selected_primitives = {}\n remaining_primitives = primitive_parse.primitives.copy()\n reward = 0\n while len(remaining_primitives) > 0:\n key = _get_next_primitive_key(selected_primitives, remaining_primitives, pixels_dict)\n updated_selected_primitives = selected_primitives.copy()\n updated_selected_primitives[key] = remaining_primitives[key]\n new_reward = _evaluate_reward(updated_selected_primitives, pixels_dict)\n if new_reward - reward > params.PRIMITIVE_SELECTION_MIN_GAIN:\n selected_primitives = updated_selected_primitives\n del remaining_primitives[key]\n reward = new_reward\n else:\n break\n\n new_primitive_parse = _get_primitive_parse(primitive_parse.image_segment_parse, selected_primitives)\n return new_primitive_parse\n\ndef _get_primitive_parse(segment_parse, primitives):\n lines = dict(pair for pair in primitives.iteritems()\n if isinstance(pair[1], instantiators['line']))\n circles = dict(pair for pair in primitives.iteritems()\n if isinstance(pair[1], instantiators['circle']))\n return PrimitiveParse(segment_parse, lines, circles)\n\n\ndef _get_next_primitive_key(selected_primitives, remaining_primitives, pixels_dict):\n return max(remaining_primitives.items(),\n key=lambda p: _evaluate_reward(dict(selected_primitives.items()+[p]), pixels_dict))[0]\n\n\ndef _get_pixels_dict(primitive_parse, line_eps, circle_eps):\n primitives = primitive_parse.primitives\n pixels = primitive_parse.image_segment_parse.diagram_image_segment.pixels\n pixels_dict = {'all': pixels}\n for key, primitive in primitives.iteritems():\n if isinstance(primitive, instantiators['line']):\n eps = line_eps\n curr_pixels = _get_pixels_near_line(pixels, primitive, eps)\n pixels_dict[key] = curr_pixels\n\n \"\"\"\n image = cv2.cvtColor(primitive_parse.image_segment_parse.diagram_image_segment.segmented_image, cv2.COLOR_GRAY2BGR)\n draw_line(image, primitive)\n display_image(image)\n for pixel in curr_pixels:\n draw_point(image, pixel)\n display_image(image)\n \"\"\"\n\n a_pixels = _get_pixels_near_point(pixels, primitive.a, eps)\n b_pixels = _get_pixels_near_point(pixels, primitive.b, eps)\n pixels_dict[primitive.a] = a_pixels\n pixels_dict[primitive.b] = b_pixels\n\n elif isinstance(primitive, instantiators['circle']):\n eps = circle_eps\n curr_pixels = set(pixel for pixel in pixels if distance_between_circle_and_point(primitive, pixel) < eps)\n pixels_dict[key] = curr_pixels\n return pixels_dict\n\n\ndef _get_pixels_near_point(pixels, point, eps):\n return set(pixel for pixel in pixels if distance_between_points_squared(pixel, point) <= eps**2)\n\n\ndef _evaluate_reward(partial_primitives, pixels_dict):\n x = [_coverage(partial_primitives, pixels_dict),\n _pixel_num(partial_primitives, pixels_dict),\n _length_sum(partial_primitives),\n 
_coherence(partial_primitives),\n _end_pixel_num(partial_primitives, pixels_dict),\n ]\n w = [1, -0.1, -0.7, 00, 0.1]\n return np.dot(x, w)\n\n\ndef _coverage(partial_primitives, pixels_dict):\n if len(partial_primitives) == 0:\n return 0\n coverage = len(set.union(*[pixels_dict[key] for key in partial_primitives]))\n return coverage\n\n\ndef _pixel_num(partial_primitives, pixels_dict):\n if len(partial_primitives) == 0:\n return 0\n num = sum(len(pixels_dict[key]) for key in partial_primitives)\n return num\n\ndef _end_pixel_num(partial_primitives, pixels_dict):\n lines = _get_lines(partial_primitives)\n if len(lines) == 0:\n return 0\n coverage = set.union(*[pixels_dict[primitive.a] for primitive in lines])\n coverage2 = set.union(*[pixels_dict[primitive.b] for primitive in lines])\n return len(set.union(coverage, coverage2))\n\n\n\ndef _get_pixels_near_line(pixels, line, eps):\n \"\"\"\n This can be replaced with shorter, more inefficient code.\n Written to boost up the speed.\n :param pixels:\n :param line:\n :param eps:\n :return:\n \"\"\"\n #return set(pixel for pixel in pixels if distance_between_line_and_point(line, pixel) <= eps)\n\n p = midpoint(line.a, line.b)\n u = line_unit_vector(line)\n n = line_normal_vector(line)\n half_length = line_length(line)/2.0\n eps_squared = eps**2\n\n near_pixels = set()\n for point in pixels:\n vector = point.x - p.x, point.y - p.y\n perpendicular_distance = abs(np.dot(vector, n))\n if perpendicular_distance > eps:\n continue\n parallel_distance = abs(np.dot(vector, u))\n if parallel_distance <= half_length:\n near_pixels.add(point)\n else:\n if distance_between_points_squared(point, line.a) <= eps_squared or \\\n distance_between_points_squared(point, line.b) <= eps_squared:\n near_pixels.add(point)\n return near_pixels\n\n\ndef _length_sum(partial_primitives):\n \"\"\"\n Computes the sum of squareroot of sum of lengths.\n This way, longer lines / bigger circles are preferred.\n\n :param partial_primitives:\n :return:\n \"\"\"\n if len(partial_primitives) == 0:\n return 0\n total = 0\n for primitive in partial_primitives.values():\n if isinstance(primitive, instantiators['circle']):\n total += circumference(primitive)\n elif isinstance(primitive, instantiators['line']):\n pass\n else:\n raise Exception()\n return total\n\n\ndef _coherence(partial_primitives):\n scores = []\n for idx, primitive in partial_primitives.iteritems():\n if isinstance(primitive, instantiators['line']):\n score = _line_coherence(partial_primitives, idx)\n elif isinstance(primitive, instantiators['circle']):\n score = _circle_coherence(partial_primitives, idx)\n scores.append(score)\n return np.mean(scores)\n\n\ndef _line_coherence(partial_primitives, curr_idx):\n if len(partial_primitives) == 0:\n return 0\n line = partial_primitives[curr_idx]\n distances0 = [_distance_from_point(line.a, primitive) for primitive in partial_primitives.values()]\n distances1 = [_distance_from_point(line.b, primitive) for primitive in partial_primitives.values()]\n return _distance_score(np.mean([min(distances0), min(distances1)]))\n\n\ndef _circle_coherence(partial_primitives, curr_idx):\n if len(partial_primitives) == 0:\n return 0\n circle = partial_primitives[curr_idx]\n distances = [_distance_from_point(circle.center, primitive) for primitive in partial_primitives.values()]\n return _distance_score(min(distances))\n\n\ndef _distance_from_point(point, primitive):\n if isinstance(primitive, instantiators['line']):\n return distance_between_line_and_point(primitive, point)\n elif 
isinstance(primitive, instantiators['circle']):\n return distance_between_circle_and_point(primitive, point)\n\n\ndef _distance_score(distance):\n eps = 10\n if distance < eps:\n return float(eps-distance)/eps\n else:\n return 0\n\n\ndef _get_lines(partial_primitives):\n return [p for p in partial_primitives.values() if isinstance(p, instantiators['line'])]",
"import matplotlib.pyplot as plt\n\n__author__ = 'minjoon'\n\ndef draw_pr(triple_dict):\n \"\"\"\n {threshold: [ref, ret, mat]}\n\n :param triple_dict:\n :return:\n \"\"\"\n ts = sorted(triple_dict.keys())\n ps = [float(triple_dict[th][2])/max(1,triple_dict[th][1]) for th in ts]\n rs = [float(triple_dict[th][2])/max(1,triple_dict[th][0]) for th in ts]\n plt.plot(ts, ps)\n plt.plot(ts, rs)\n plt.show()\n"
] | [
[
"numpy.dot",
"numpy.mean"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
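`_get_pixels_near_line` in the geosolver record above decides whether a pixel is close to a segment by projecting the pixel's offset from the segment midpoint onto the segment's unit and normal vectors, falling back to an endpoint check when the parallel distance exceeds half the segment length. A self-contained sketch of that test, using plain tuples instead of the project's point/line types (which are assumptions here):

```python
import numpy as np

def near_segment(point, a, b, eps):
    """True if `point` lies within `eps` of the segment a-b."""
    a, b, p = map(np.asarray, (a, b, point))
    mid = (a + b) / 2.0
    u = (b - a) / np.linalg.norm(b - a)           # unit vector along the segment
    n = np.array([-u[1], u[0]])                   # normal vector
    v = p - mid
    if abs(v @ n) > eps:                          # too far sideways
        return False
    if abs(v @ u) <= np.linalg.norm(b - a) / 2.0: # within the segment's extent
        return True
    # otherwise the point may still be within eps of an endpoint
    return min(np.linalg.norm(p - a), np.linalg.norm(p - b)) <= eps

print(near_segment((5, 1), (0, 0), (10, 0), eps=2))   # True
print(near_segment((12, 0), (0, 0), (10, 0), eps=2))  # True (near endpoint b)
print(near_segment((5, 5), (0, 0), (10, 0), eps=2))   # False
```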
focusunsink/study_python | [
"322326642db54df8725793d70a95d21ac40b6507",
"322326642db54df8725793d70a95d21ac40b6507"
] | [
"np/reference/ch9code/animation.py",
"np/2_numpy_basic/2.6_change_shape.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nN = 10\nx = np.random.rand(N)\ny = np.random.rand(N)\nz = np.random.rand(N)\ncircles, triangles, dots = ax.plot(x, 'ro', y, 'g^', z, 'b.')\nax.set_ylim(0, 1)\nplt.axis('off')\n\ndef update(data):\n circles.set_ydata(data[0])\n triangles.set_ydata(data[1])\n return circles, triangles\n\ndef generate():\n while True: yield np.random.rand(2, N)\n\nanim = animation.FuncAnimation(fig, update, generate, interval=150)\nplt.show()\n",
"\"\"\"\nravel vs flatten\nravel make a new obj, but a will be changed as long as a.ravel changed.\nflatten also make a new obj, but will not influence ori array whatever change in a.flatten.\nBut Why ravel could change ori array?\n\"\"\"\nimport numpy as np\na = np.arange(24).reshape(4,6)\nb = a.copy()\nc = a.ravel()\nd = b.flatten()\nprint(\"a:\",a)\nprint(\"b:\",b)\nprint(\"c:\",c)\nprint(\"d:\",d)\nc[1] = 99\nd[1] = 99\nprint(\"a:\",a)\nprint(\"b:\",b)\nprint(\"c:\",c)\nprint(\"d:\",d)"
] | [
[
"matplotlib.animation.FuncAnimation",
"numpy.random.rand",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
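The second file in the record above asks why `ravel` can change the original array while `flatten` cannot: `ravel` returns a view when the memory layout allows it, whereas `flatten` always returns a copy. `np.shares_memory` makes the difference explicit; a small sketch:

```python
import numpy as np

a = np.arange(24).reshape(4, 6)
c = a.ravel()    # view: `a` is contiguous, so no copy is made
d = a.flatten()  # always a copy

print(np.shares_memory(a, c))  # True
print(np.shares_memory(a, d))  # False

c[1] = 99
print(a[0, 1])   # 99 -- writing through the view changed `a`
```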
BreastGAN/experiment2 | [
"2a1d15c1f479bbd6ca58af4e3b1379bf34b89f51"
] | [
"models/breast_cycle_gan/data_provider.py"
] | [
"# Copyright 2018 Lukas Jendele and Ondrej Skopek.\n# Adapted from The TensorFlow Authors, under the ASL 2.0.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains code for loading and preprocessing image data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport resources.synthetic_data as synth_data\n\n\ndef normalize_image(image, img_size):\n \"\"\"Rescale from range [0, 255] to [-1, 1].\"\"\"\n image = tf.expand_dims(image, axis=-1)\n image = tf.image.crop_to_bounding_box(image, 0, 0, img_size[0], img_size[1])\n image = tf.to_float(image) - tf.reduce_min(image)\n val = tf.reduce_max(image) / 2\n image = (image - val) / val\n image = tf.reshape(image, (img_size[0], img_size[1], 1))\n image.shape.assert_is_compatible_with([img_size[0], img_size[1], 1])\n return image\n\n\ndef undo_normalize_image(normalized_image):\n \"\"\"Convert to a numpy array that can be read by PIL.\"\"\"\n # Convert from NHWC to HWC.\n normalized_image = np.squeeze(normalized_image)\n return np.uint8(normalized_image * 127.5 + 127.5)\n\n\n# def _sample_patch(image, patch_size):\n# \"\"\"Crop image to square shape and resize it to `patch_size`.\n\n# Args:\n# image: A 3D `Tensor` of HWC format.\n# patch_size: A Python scalar. The output image size.\n\n# Returns:\n# A 3D `Tensor` of HWC format which has the shape of\n# [patch_size, patch_size, 3].\n# \"\"\"\n# image_shape = tf.shape(image)\n# height, width = image_shape[0], image_shape[1]\n# target_size = tf.minimum(height, width)\n# image = tf.image.resize_image_with_crop_or_pad(image, target_size,\n# target_size)\n# # tf.image.resize_area only accepts 4D tensor, so expand dims first.\n# image = tf.expand_dims(image, axis=0)\n# image = tf.image.resize_images(image, [patch_size, patch_size])\n# image = tf.squeeze(image, axis=0)\n# # Force image num_channels = 3\n# image = tf.tile(image, [1, 1, tf.maximum(1, 4 - tf.shape(image)[2])])\n# image = tf.slice(image, [0, 0, 0], [patch_size, patch_size, 1])\n# return image\n\n\ndef _provide_custom_dataset(image_file_pattern, batch_size, shuffle=True, num_threads=1, img_size=256):\n \"\"\"Provides batches of custom image data.\n\n Args:\n image_file_pattern: A string of glob pattern of image files.\n batch_size: The number of images in each batch.\n shuffle: Whether to shuffle the read images. Defaults to True.\n num_threads: Number of prefetching threads. Defaults to 1.\n img_size: Size of the image. 
Defaults to 256.\n\n Returns:\n A float `Tensor` of shape [batch_size, img_size, img_size, 3]\n representing a batch of images.\n \"\"\"\n filename_queue = tf.train.string_input_producer(\n tf.train.match_filenames_once(image_file_pattern), shuffle=shuffle, capacity=5 * batch_size)\n image_reader = tf.WholeFileReader()\n\n _, image_bytes = image_reader.read(filename_queue)\n image = tf.image.decode_image(image_bytes, channels=1)\n image_norm = normalize_image(image, (img_size, img_size))\n\n if shuffle:\n return tf.train.shuffle_batch([image_norm],\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=5 * batch_size,\n min_after_dequeue=batch_size)\n else:\n return tf.train.batch(\n [image_norm],\n batch_size=batch_size,\n num_threads=1, # no threads so it's deterministic\n capacity=5 * batch_size)\n\n\ndef provide_custom_datasets(image_file_patterns, batch_size, shuffle=True, num_threads=1, img_size=256):\n \"\"\"Provides multiple batches of custom image data.\n\n Args:\n image_file_patterns: A list of glob patterns of image files.\n batch_size: The number of images in each batch.\n shuffle: Whether to shuffle the read images. Defaults to True.\n num_threads: Number of prefetching threads. Defaults to 1.\n img_size: Size of the patch to extract from the image. Defaults to 256.\n\n Returns:\n A list of float `Tensor`s with the same size of `image_file_patterns`.\n Each of the `Tensor` in the list has a shape of\n [batch_size, img_size, img_size, 1] representing a batch of images.\n\n Raises:\n ValueError: If image_file_patterns is not a list or tuple.\n \"\"\"\n if not isinstance(image_file_patterns, (list, tuple)):\n raise ValueError('`image_file_patterns` should be either list or tuple, but was {}.'.format(\n type(image_file_patterns)))\n custom_datasets = []\n for pattern in image_file_patterns:\n custom_datasets.append(\n _provide_custom_dataset(\n pattern, batch_size=batch_size, shuffle=shuffle, num_threads=num_threads, img_size=img_size))\n return custom_datasets\n\n\ndef normalize_synth_image(image, img_size):\n \"\"\"Rescale to [-1, 1].\"\"\"\n # 2* ((res + min(res)) / max(res)) - 1\n image = tf.to_float(image)\n maxs = tf.reduce_max(image, axis=[0, 1])\n mins = tf.reduce_min(image, axis=[0, 1])\n res = (2 * (image + mins) / maxs) - 1\n res = tf.reshape(res, (img_size[0], img_size[1], 1))\n res.shape.assert_is_compatible_with([img_size[0], img_size[1], 1])\n return res\n\n\ndef provide_synth_dataset(batch_size, num_threads=1, img_size=(256, 256), max_thresh=2.5):\n img_size = list(img_size)\n yield_generator = synth_data.generate_synth(size=img_size, max_thresh=max_thresh)\n\n def generate_synth_image():\n img1, mask1, _ = next(yield_generator)\n img2, mask2, _ = next(yield_generator)\n # Expand dims\n img_size_c = img_size + [1]\n img1, mask1 = np.reshape(img1, img_size_c), np.reshape(mask1, img_size_c)\n img2, mask2 = np.reshape(img2, img_size_c), np.reshape(mask2, img_size_c)\n # Concat mask\n h = np.concatenate([img1, mask1], axis=2)\n c = np.concatenate([img2 + mask2, mask2], axis=2)\n # print(\"generated healthy shape:\", h.shape, \" generated cancer shape:\", c.shape)\n return h.astype(np.float32), c.astype(np.float32)\n\n healthy_img, cancer_img = tf.py_func(generate_synth_image, [], (tf.float32, tf.float32))\n\n img_size_c = img_size + [2]\n healthy_img = tf.reshape(healthy_img, img_size_c)\n cancer_img = tf.reshape(cancer_img, img_size_c)\n\n # No shuffling needed. 
Pictures are random anyway.\n healthy_dataset = tf.train.batch(\n [healthy_img],\n batch_size=batch_size,\n num_threads=1, # no threads so it's deterministic\n capacity=5 * batch_size)\n cancer_dataset = tf.train.batch(\n [cancer_img],\n batch_size=batch_size,\n num_threads=1, # no threads so it's deterministic\n capacity=5 * batch_size)\n return [healthy_dataset, cancer_dataset]\n\n\ndef parse_example(proto, img_size):\n features = {\n \"path\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"image\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"mask\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"width\": tf.FixedLenFeature((), tf.int64, default_value=0),\n \"height\": tf.FixedLenFeature((), tf.int64, default_value=0),\n \"label\": tf.FixedLenFeature((), tf.int64, default_value=0),\n }\n parsed_features = tf.parse_single_example(proto, features)\n\n def decode_img(img):\n img = tf.decode_raw(img, tf.float32)\n img = tf.reshape(img, img_size)\n print('final img', img.get_shape())\n return img\n\n path = parsed_features[\"path\"]\n image = decode_img(parsed_features[\"image\"])\n mask = decode_img(parsed_features[\"mask\"])\n print('image decoded', image.get_shape())\n print('mask decoded', mask.get_shape())\n imgs = [normalize_image(img, img_size) for img in [image, mask]]\n print('normalized', imgs[0].get_shape())\n concat = tf.concat(imgs, axis=-1)\n print('concat', concat.get_shape())\n return concat, image, mask, parsed_features['label'], path\n\n\ndef provide_cbis_dataset(datasets, batch_size, img_size=(256, 208), num_threads=1, max_thresh=2.5):\n\n def load_dataset(filename):\n dataset = tf.data.TFRecordDataset(\n filename, compression_type=\"GZIP\", num_parallel_reads=num_threads, buffer_size=buffer_size)\n dataset = dataset.map(lambda x: parse_example(x, img_size)[0], num_parallel_calls=num_threads)\n dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(shuffle_buffer_size, count=None, seed=42))\n dataset = dataset.batch(batch_size)\n # dataset = dataset.prefetch(1)\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n return features\n\n buffer_size = 100\n shuffle_buffer_size = 100\n\n return [load_dataset(x) for x in datasets]\n"
] | [
[
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"numpy.squeeze",
"numpy.concatenate",
"tensorflow.image.decode_image",
"tensorflow.train.batch",
"tensorflow.py_func",
"tensorflow.WholeFileReader",
"numpy.uint8",
"tensorflow.decode_raw",
"tensorflow.data.TFRecordDataset",
"numpy.reshape",
"tensorflow.contrib.data.shuffle_and_repeat",
"tensorflow.to_float",
"tensorflow.parse_single_example",
"tensorflow.train.shuffle_batch",
"tensorflow.reduce_max",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.train.match_filenames_once",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.reduce_min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
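The `normalize_image` / `undo_normalize_image` pair in the BreastGAN data provider above maps pixel intensities into [-1, 1] and back to uint8. A NumPy-only sketch of the same round trip, assuming an 8-bit input image with more than one distinct intensity (the TensorFlow graph details are omitted):

```python
import numpy as np

def normalize(img):
    """Shift so the minimum is 0, then rescale so values span [-1, 1]."""
    img = img.astype(np.float32) - img.min()
    half = img.max() / 2.0
    return (img - half) / half

def undo_normalize(norm):
    """Map [-1, 1] back to the uint8 range [0, 255]."""
    return np.uint8(norm * 127.5 + 127.5)

img = np.random.default_rng(0).integers(0, 256, size=(4, 4), dtype=np.uint8)
norm = normalize(img)
print(norm.min(), norm.max())      # -1.0 1.0
print(undo_normalize(norm).dtype)  # uint8
```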
Leandro-Bertoluzzi/pyspice-power-electronics | [
"960494b23e36c0fac61289744a64c991d62784a2"
] | [
"thyristor/half-wave-converter-RL.py"
] | [
"#r# ============================================\n#r# Controlled half-wave rectifier with an SCR\n#r# ============================================\n\n#r# This example shows the simulation of a controlled half-wave rectifier with an SCR with an RL load\n\n######################################### IMPORT MODULES #########################################\n\nimport matplotlib.pyplot as plt\nimport numpy\nfrom scipy.fft import fft, fftfreq\n\n######################################### IMPORT UTILITIES #########################################\n\nimport sys\nsys.path.insert(1, '../utilities/')\nfrom utilities import format_output\n\n#####################################################################################################\n\nimport PySpice.Logging.Logging as Logging\nlogger = Logging.setup_logging()\n\n#####################################################################################################\n\nfrom PySpice.Probe.Plot import plot\nfrom PySpice.Spice.Library import SpiceLibrary\nfrom PySpice.Spice.Netlist import Circuit\nfrom PySpice.Unit import *\n\n############################# LIBRARIES WITH DEFINITIONS OF COMPONENTS #############################\n\nlibraries_path = '..\\libraries'\nspice_library = SpiceLibrary(libraries_path)\n\n#####################################################################################################\n# DEFINING PLOTS\n#####################################################################################################\n\nfigure1, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 10))\nfigure2, (ax3, ax4) = plt.subplots(2, 1, figsize=(20, 10))\nfigure3, (ax5, ax6) = plt.subplots(2, 1, figsize=(20, 10))\n\n####################################################################################################\n# CIRCUIT DEFINITION\n####################################################################################################\n\ncircuit = Circuit('SCR half wave rectifier')\nR = 1@u_Ω\nL = 60@u_mH\nperiods = 20 # Amount of periods of source signal to show in plot\n\n####################################### UNFILTERED OUTPUT #######################################\n\n# Input voltage\nsource = circuit.SinusoidalVoltageSource('input', 'source', circuit.gnd, amplitude=220@u_V, frequency=50@u_Hz)\n# SCR gate triggering signal\nalpha = 0.5 # trigger angle [0; 1]\ndelay_time = (source.period/2) * alpha\npulse_width = (source.period/2) * (1- alpha)\ncircuit.PulseVoltageSource('trigger', 'gate', 'output', 0@u_V, 1@u_V, delay_time=delay_time, pulse_width=pulse_width, period=source.period, rise_time=1@u_ms, fall_time=1@u_ms)\n# SCR\ncircuit.include(spice_library['EC103D1'])\ncircuit.X('scr', 'EC103D1', 'source', 'gate', 'output')\n# Flyback diode Dm\ncircuit.include(spice_library['BAV21'])\ncircuit.X('Dm', 'BAV21', circuit.gnd, 'output')\n# Series RL load\ncircuit.R('load', 'output', 'RL_middle', R)\ncircuit.L('1', 'RL_middle', circuit.gnd, L)\n\n# Show the netlist\nprint('**** Circuit netlist: ****')\nprint(circuit)\n\n####################################################################################################\n# SIMULATION\n####################################################################################################\n\nsimulator = circuit.simulator(temperature=25, nominal_temperature=25)\nanalysis = simulator.transient(step_time=source.period/50000, end_time=source.period*periods)\n\n# Formatting results\nvoltages, currents = format_output(analysis, 'transient')\nv_source = voltages['source']\nv_gate = voltages['gate']\nv_output = 
voltages['output']\nt = voltages['time']\ni_load = currents['l1']\n\n#Voltages\nax1.set_title('Half-Wave Rectification - Voltage')\nax1.set_xlabel('Time [s]')\nax1.set_ylabel('Voltage [V]')\nax1.grid()\nax1.plot(t, v_source)\nax1.plot(t, v_gate)\nax1.plot(t, v_output)\nax1.legend(('source', 'gate', 'output'), loc=(.05,.1))\nax1.set_ylim(float(-source.amplitude*1.1), float(source.amplitude*1.1))\n\n# Current\nmax_current = i_load.max()\nmin_current = i_load.min()\n\nax2.set_title('Half-Wave Rectification - Current')\nax2.set_xlabel('Time [s]')\nax2.set_ylabel('Current [A]')\nax2.grid()\nax2.plot(t, i_load)\nax2.legend('l1', loc=(.05,.1))\nax2.set_ylim(float(1.1 * min_current), float(1.1 * max_current))\n\n####################################################################################################\n# FREQUENCY DOMAIN\n####################################################################################################\n\n# Number of samplepoints\nN = len(i_load)\nDURATION = source.period*periods\nSAMPLE_RATE = N / DURATION\n\nyf = fft(i_load)\nxf = fftfreq(N, 1 / SAMPLE_RATE)[:N//5000]\n\nax5.set_title('Half-Wave Rectification - Without filter')\nax5.set_xlabel('Frequency [Hz]')\nax5.set_ylabel('Amplitude')\nax5.grid()\nax5.plot(xf, 2.0/N * numpy.abs(yf[0:N//5000]))\n\n####################################################################################################\n# CIRCUIT DEFINITION - FILTERED\n####################################################################################################\n\n# We add a capacitor to filter the output voltage\ncircuit.C('1', 'output', circuit.gnd, 100@u_mF)\n\n# Show the netlist\nprint('**** Circuit netlist (with filter): ****')\nprint(circuit)\n\n####################################################################################################\n# SIMULATION\n####################################################################################################\n\nsimulator = circuit.simulator(temperature=25, nominal_temperature=25)\nsimulator.save_currents = True\nanalysis = simulator.transient(step_time=source.period/1000, end_time=source.period*periods)\n\n# Formatting results\nvoltages, currents = format_output(analysis, 'transient')\nv_source = voltages['source']\nv_gate = voltages['gate']\nv_output = voltages['output']\nt = voltages['time']\ni_load = currents['l1']\n\n# Voltages\nax3.set_title('Half-Wave Rectification with filtering')\nax3.set_xlabel('Time [s]')\nax3.set_ylabel('Voltage [V]')\nax3.grid()\nax3.plot(t, v_source)\nax3.plot(t, v_gate)\nax3.plot(t, v_output)\nax3.legend(('source', 'gate', 'output'), loc=(.05,.1))\nax3.set_ylim(float(-source.amplitude*1.1), float(source.amplitude*1.1))\n\n# Current\nmax_current = i_load.max()\nmin_current = i_load.min()\n\nax4.set_title('Half-Wave Rectification with filtering - Current')\nax4.set_xlabel('Time [s]')\nax4.set_ylabel('Current [A]')\nax4.grid()\nax4.plot(t, i_load)\nax4.legend('l1', loc=(.05,.1))\nax4.set_ylim(float(1.1 * min_current), float(1.1 * max_current))\n\n####################################################################################################\n# FREQUENCY DOMAIN\n####################################################################################################\n\nN = len(i_load)\nSAMPLE_RATE = N / DURATION\nyf = fft(i_load)\nxf = fftfreq(N, 1 / SAMPLE_RATE)[:N//100]\n\nax6.set_title('Half-Wave Rectification - Filtered')\nax6.set_xlabel('Frequency [Hz]')\nax6.set_ylabel('Amplitude')\nax6.grid()\nax6.plot(xf, 2.0/N * 
numpy.abs(yf[0:N//100]))\n\n####################################################################################################\n\n# Adjusts the spacing between subplots\nfigure1.tight_layout(pad=3.0)\nfigure2.tight_layout(pad=3.0)\nfigure3.tight_layout(pad=3.0)\n# Show plots\nplt.show()"
] | [
[
"numpy.abs",
"matplotlib.pyplot.subplots",
"scipy.fft.fftfreq",
"matplotlib.pyplot.show",
"scipy.fft.fft"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.7",
"1.8"
],
"tensorflow": []
}
] |
yienxu/counting-strokes-in-chinese-characters | [
"6b8d532b78ec37842c4cf028a48271c6bb735cbf"
] | [
"scripts/df_visualization.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\ndf = pd.read_csv('data/dataset.csv', encoding='utf-8')\n\n\ndef plot_df_counts(df):\n x_ticks = np.asarray(list(set(df['count'])))\n xx = np.arange(np.max(x_ticks) + 1)\n yy = np.bincount(df['count'])\n\n for x, y in zip(xx, yy):\n print(\"{}->{}\\t\".format(x, y), end='')\n\n plt.bar(xx, yy)\n plt.title('Stroke Counts of Characters')\n plt.xlabel('Number of Strokes')\n plt.ylabel('Number of Characters')\n # plt.savefig('counts.eps')\n plt.show()\n\n\nprint('numdata = {}\\n'.format(np.sum((df['count'] > 30) | (df['count'] == 1))))\n\ndf = df[(df['count'] <= 30) & (df['count'] != 1)]\nplot_df_counts(df)\nprint(df.shape)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.max",
"numpy.bincount",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mhashas/intelligent-systems | [
"a108887fa5b13bbc2343423f21a94ca20093ae5a"
] | [
"bots/smt/kb.py"
] | [
"import sys\nimport numpy as np\nimport scipy.optimize as opt\n\nclass Symbol(object):\n \"\"\"\n A class representing a single unit in the boolean SAT problem. This can either refer to an atomic boolean, or a\n constraint based on integer variables\n \"\"\"\n pass\n\nclass Boolean(Symbol):\n\n def __init__(self, name):\n self.__name = name\n\n def name(self):\n return self.__name\n\n def __invert__(self):\n # type: () -> Boolean\n \"\"\"\n\n :return:\n \"\"\"\n return _NegBoolean(self)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.name() == other.name()\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.name())\n\n def __repr__(self):\n return self.name()\n\nclass _NegBoolean(Boolean):\n\n def __init__(self, symbol):\n self.__symbol = symbol\n\n def name(self):\n return self.__symbol.name()\n\n def __invert__(self):\n return self.__symbol\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.name() == other.name()\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.name(), False)\n\n def __repr__(self):\n return '~' + self.name()\n\nclass Constraint(Symbol):\n\n def __init__(self, left, right):\n self._left = left\n self._right = right\n\n if not isinstance(self._right, Constant):\n self._left = Sum(self._left, - self._right)\n self._right = Constant(0)\n\n # Cluster the symbols on the left\n symbols = {None: 0}\n self.cluster(self._left, symbols)\n\n # create new left and right\n self._right = Constant(self._right.value() - symbols[None])\n\n nwterms = []\n for name, mult in symbols.iteritems():\n if name is not None:\n nwterms.append(Integer(name, mult))\n\n self._left = Sum(*nwterms)\n\n def cluster(self, term, symbols):\n\n if isinstance(term, Constant):\n symbols[None] += term.value()\n return\n\n if isinstance(term, Integer):\n if term.name() not in symbols:\n symbols[term.name()] = 0\n symbols[term.name()] += term.mult()\n return\n\n if isinstance(term, Sum):\n for subterm in term.terms():\n self.cluster(subterm, symbols)\n return\n\n raise ValueError('Encountered element {} of type {}. 
Arithmetic expressions should contain only KB objects or integers.'.format(term, term.__class__))\n\n\n def symbol(self):\n return '?'\n\n def __repr__(self):\n return '[' + str(self._left) + ' ' + self.symbol() + ' ' + str(self._right) + ']'\n\n def symbols(self):\n '''\n Returns a list of all integer symbols appearing in this constraint\n :return:\n '''\n\n return union(self._left.symbols(), self._right.symbols())\n\nclass GT(Constraint):\n def __init__(self, left, right):\n super(GT, self).__init__(left, right)\n\n def symbol(self):\n return '>'\n\n def __invert__(self):\n return LEQ(self._left, self._right)\n\n def canonical(self):\n \"\"\"\n Convert to a LEQ relation\n \"\"\"\n return LEQ(self._right, self._left - 1)\n\nclass GEQ(Constraint):\n def __init__(self, left, right):\n super(GEQ, self).__init__(left, right)\n\n def symbol(self):\n return '>='\n\n def __invert__(self):\n return LT(self._left, self._right)\n\n def canonical(self):\n \"\"\"\n Convert to a LEQ relation\n \"\"\"\n return LEQ(self._right, self._left)\n\n\nclass LT(Constraint):\n def __init__(self, left, right):\n super(LT, self).__init__(left, right)\n\n def symbol(self):\n return '<'\n\n def __invert__(self):\n return GEQ(self._left, self._right)\n\n def canonical(self):\n \"\"\"\n Convert to a LEQ relation\n \"\"\"\n return LEQ(self._left, self._right - 1)\n\n\nclass LEQ(Constraint):\n def __init__(self, left, right):\n super(LEQ, self).__init__(left, right)\n\n def symbol(self):\n return '<='\n\n def __invert__(self):\n return GT(self._left, self._right)\n\n def canonical(self):\n \"\"\"\n Convert to a LEQ relation\n \"\"\"\n return self\n\n\nclass EQ(Constraint):\n def __init__(self, left, right):\n super(EQ, self).__init__(left, right)\n\n def symbol(self):\n return '=='\n\n def canonical(self):\n \"\"\"\n The canonical for of an EQ relation is itself.\n \"\"\"\n return self\n\n# Not used, as it makes the LP problem nonconvex\n#\n# class NEQ(Constraint):\n# def __init__(self, left, right):\n# super(NEQ, self).__init__(left, right)\n#\n# def symbol(self):\n# return '!='\n#\n# def __invert__(self):\n# return EQ(self._left, self._right)\n\nclass IntSymbol:\n \"\"\"\n A symbolic expression representing an integer: either an atomic symbol like 'x', a constant\n like 15 or a compound expression like 'x + 15 - y'\n \"\"\"\n\n def __lt__(self, other):\n other = self.check(other)\n return LT(self, other)\n\n def __gt__(self, other):\n other = self.check(other)\n return GT(self, other)\n\n def __le__(self, other):\n other = self.check(other)\n return LEQ(self, other)\n\n def __ge__(self, other):\n other = self.check(other)\n return GEQ(self, other)\n\n def __eq__(self, other):\n other = self.check(other)\n return EQ(self, other)\n\n # def __ne__(self, other):\n # other = self.check(other)\n # return NEQ(self, other)\n\n def __add__(self, other):\n other = self.check(other)\n return Sum(self, other)\n __radd__ = __add__\n\n def __sub__(self, other):\n other = self.check(other)\n return Sum(self, - other)\n __rub__ = __sub__\n\n\n def check(self, other):\n if not isinstance(other, IntSymbol):\n if isinstance(other, int):\n return Constant(other)\n raise ValueError('You can only use KB objects or ints in comparisons. 
Encountered: {} {}'.format(other, other.__class__))\n return other\n\n\nclass Sum(IntSymbol):\n\n def __init__(self, *terms):\n self.__terms = terms\n for term in self.__terms:\n if isinstance(term, int):\n raise ValueError('Unwrapped int {}, {}'.format(term, term.__class__))\n\n self.__name = ''\n for i, term in enumerate(terms):\n self.__name += ('' if i == 0 else ' + ') + str(term)\n\n def name(self):\n return self.__name\n\n def terms(self):\n return self.__terms\n\n def allterms(self):\n return self.__terms\n\n def __neg__(self):\n neg_terms = []\n\n for term in self.__terms:\n neg_terms.append(- term)\n\n return Sum(*neg_terms)\n\n def __hash__(self):\n return hash(self.name())\n\n def __repr__(self):\n return self.__name\n\n def symbols(self):\n '''\n Returns a set of all integer symbols appearing in this constraint\n :return:\n '''\n return union(*[term.symbols() for term in self.__terms])\n\nclass Integer(IntSymbol):\n\n def __init__(self, name, mult = 1):\n \"\"\"\n\n :rtype: object\n \"\"\"\n self.__name = name\n self.__mult = mult\n\n def name(self):\n return self.__name\n\n def mult(self):\n return self.__mult\n\n def __neg__(self):\n return Integer(self.name(), - self.__mult)\n\n def __hash__(self):\n return hash(self.name())\n\n def __mul__(self, other):\n if not isinstance(other, int):\n raise ValueError('Can only multiply number symbol by int.')\n\n return Integer(self.__name, other)\n __rmul__ = __mul__\n\n def __repr__(self):\n if self.__mult == 1:\n return self.name()\n if self.__mult == -1:\n return '(-{})'.format(self.name())\n if self.__mult < 0:\n return '({}{})'.format(self.__mult, self.name())\n return '{}{}'.format(self.__mult, self.name())\n\n def allterms(self):\n '''\n Returns a flat representation of this sum (ie. all elements returned are\n Integers or Constants). May return multiple copies of the same integer if\n the sum has not been simplified.\n :return:\n '''\n result = []\n for term in self.__terms:\n result.extend(term.allterms())\n\n return result\n\n\n def symbols(self):\n return [Integer(self.__name)]\n\nclass Constant(Integer):\n \"\"\"\n An integer with a fixed value\n \"\"\"\n def __init__(self, value):\n if not isinstance(value, int):\n raise ValueError('Constant should be instantiated with an integer value')\n\n self.__value = value\n\n def name(self):\n return str(self.__value)\n\n def value(self):\n return self.__value\n\n def __neg__(self):\n return Constant(-self.__value)\n\n def __hash__(self):\n return hash(self.__value)\n\n def __repr__(self):\n return self.name()\n\n def symbols(self):\n return []\n\n def allterms(self):\n return [self]\n\nclass KB(object):\n \"\"\"\n A class representing a knowledge base.\n \"\"\"\n\n def __init__(self):\n self._symbols = []\n self._clauses = []\n self._pos_occurrences = {}\n self._neg_occurrences = {}\n\n def add_clause(self, *symbols):\n \"\"\"\n Adds a clause. A clause is a disjunction of atomic symbols or theiur negations. For instance:\n ```\n A = Symbol('A')\n B = Symbol('B')\n C = Symbol('C')\n\n kb = KB()\n kb.add_clause(A, B, ~C) # A or B or not C\n kb.add_clause(A, ~B) # A or not B\n ```\n\n :param symbols:\n :return:\n \"\"\"\n\n clause = list(symbols)\n\n # Check the types of the input\n for elem in clause:\n if not (isinstance(elem, Boolean) or isinstance(elem, Constraint)):\n raise ValueError('Only constraints or boolean values can be part of clauses. 
Encountered {} of type {}'.format(elem, elem.__class__))\n\n if isinstance(elem, EQ) and len(clause) != 1:\n raise ValueError(\n 'Equality constraints may only occur in unit clauses (so kb.add_clause(x == 5, y > 3) is not allowed). Encountered clause {}'.format(clause))\n\n\n index = len(self._clauses)\n self._clauses.append(clause)\n\n for symbol in symbols:\n\n raw_symbol = ~symbol if isinstance(symbol, _NegBoolean) else symbol\n\n if raw_symbol not in self._symbols:\n self._symbols.append(raw_symbol)\n\n # Map symbols to the clauses they occur in\n if raw_symbol not in self._neg_occurrences:\n self._neg_occurrences[raw_symbol] = []\n if raw_symbol not in self._pos_occurrences:\n self._pos_occurrences[raw_symbol] = []\n\n if isinstance(symbol, _NegBoolean):\n self._neg_occurrences[raw_symbol].append(index)\n else:\n self._pos_occurrences[raw_symbol].append(index)\n\n def satisfiable(self):\n \"\"\"\n :return: True if there is a way to assign values to the variables in this knowledge base with\n creating inconsistencies.\n \"\"\"\n first = next(self.models(), None)\n\n return first is not None\n\n def models(self, check_theory=True):\n \"\"\"\n Generator for the models satisfying the current knowledge base\n :return:\n \"\"\"\n fringe = [_Node(self)]\n\n while len(fringe) > 0:\n head = fringe.pop()\n\n if head.consistent():\n if head.finished():\n # the SAT problem returned a model,\n # check if the underlying theory is satisfiable\n sat_model = head.model()\n\n if (not check_theory) or is_feasible(sat_model):\n yield sat_model\n else:\n fringe.extend(head.children())\n\n def __repr__(self):\n return 'symbols: {}, clauses {}'.format(self._symbols, self._clauses)\n\nclass _Node:\n \"\"\"\n Node in the KB's search tree.\n \"\"\"\n __assignments = {}\n __clauses = []\n __kb = None\n __consistent = True\n\n def __init__(self,\n kb # type: KB\n ):\n \"\"\"\n Creates a root node for the given knowledge base\n :param kb:\n \"\"\"\n self.__kb = kb\n\n self.__clauses = list(kb._clauses)\n\n def child(self, symbol, value):\n # type: (Symbol, bool) -> _Node\n \"\"\"\n Return the node reached by setting the given symbol to the given value\n \"\"\"\n\n # Copy the node\n child = _Node(self.__kb)\n child.__assignments = dict(self.__assignments)\n child.__clauses = list(self.__clauses)\n\n # Perform unit propagation\n nw_assignments = {symbol: value}\n while len(nw_assignments) > 0:\n nw_symbol, nw_value = nw_assignments.popitem()\n\n # Move the unit clause to the assignments\n child.__assignments[nw_symbol] = nw_value\n\n # Rewrite the knowledge base with the new information\n for index in child.__kb._pos_occurrences[nw_symbol]:\n if nw_value:\n child.__clauses[index] = None # Remove the clause\n else:\n # Remove the symbol from the clause\n if child.__clauses[index] is not None: # Clause was already removed earlier\n\n clause = list(child.__clauses[index])\n clause.remove(nw_symbol)\n\n if len(clause) == 0: # Empty clauses indicates inconsistency\n child.__consistent = False\n return child\n\n if len(clause) == 1: # New unit clause created\n s = clause[0]\n if isinstance(s, _NegBoolean):\n nw_assignments[~ s] = False\n else:\n nw_assignments[s] = True\n child.__clauses[index] = None\n\n child.__clauses[index] = clause\n\n for index in self.__kb._neg_occurrences[nw_symbol]:\n if nw_value:\n # Remove the symbol from the clause\n if child.__clauses[index] is not None: # Clause was already removed earlier\n\n clause = list(child.__clauses[index])\n clause.remove(~ nw_symbol)\n\n if len(clause) == 0: # 
Empty clauses indicates inconsistency\n child.__consistent = False\n return child\n\n if len(clause) == 1: # New unit clause created\n s = clause[0]\n if isinstance(s, _NegBoolean):\n nw_assignments[~ s] = False\n else:\n nw_assignments[s] = True\n child.__clauses[index] = None\n\n child.__clauses[index] = clause\n else:\n child.__clauses[index] = None # Remove the clause\n\n return child\n\n def children(self):\n if not self.consistent():\n return []\n\n next_symbol = next(self.free(), None)\n if not next_symbol:\n return []\n\n return self.child(next_symbol, True), self.child(next_symbol, False)\n\n def free(self):\n for symbol in self.__kb._symbols:\n if symbol not in self.__assignments:\n yield symbol\n\n def consistent(self):\n return self.__consistent\n\n def finished(self):\n \"\"\"\n :return: True if the current node represents a complete model, with all symbols\n assigned definite values.\n \"\"\"\n return len(self.__kb._symbols) == len(self.__assignments.keys())\n\n def model(self):\n if not self.finished():\n return None\n else:\n return self.__assignments\n\n def __repr__(self):\n return str(self.__assignments) + (' finished' if self.finished() else ' incomplete') \\\n + ' ' + (' consistent' if self.consistent() else ' inconsistent') \\\n + ', clauses:' + str(self.__clauses)\n\ndef optimize(*constraints):\n \"\"\"\n Minimizes the given set of symbols under the given linear arithmetical constraints\n :param constraint:\n :return:\n \"\"\"\n\n # Gather all symbols\n symbols = union(*[c.symbols() for c in constraints])\n symbols = [s.name() for s in symbols]\n n = len(symbols)\n\n # Canonicalize the constraints, and sort by equalities ad inequalities\n equalities = []\n inequalities = []\n\n for constraint in constraints:\n canonical = constraint.canonical()\n if isinstance(canonical, LEQ):\n inequalities.append(canonical)\n elif isinstance(canonical, EQ):\n equalities.append(canonical)\n else:\n raise ValueError('Encountered constraint that did not canonize to LEQ or EQ: {}, canonical class {}'.format(canonical, canonical.__class__))\n\n # Create matrices, add constraints\n A_ub = np.zeros((len(inequalities), len(symbols)))\n A_eq = np.zeros((len(equalities), len(symbols)))\n\n b_ub = np.zeros((len(inequalities)))\n b_eq = np.zeros((len(equalities)))\n\n c = np.ones((len(symbols)))\n\n for i, constraint in enumerate(inequalities):\n b_ub[i] = constraint._right.value()\n\n for term in constraint._left.allterms():\n if not isinstance(term, Constant):\n name = term.name()\n mult = term.mult()\n j = symbols.index(name)\n\n A_ub[i, j] += mult\n else:\n raise ValueError('Unexpected state: the left part of a constraint should not contain constants.')\n\n for i, constraint in enumerate(equalities):\n b_eq[i] = constraint._right.value()\n\n for term in constraint._left.allterms():\n if not isinstance(term, Constant):\n symbol = term.name()\n mult = term.mult()\n j = symbols.index(symbol)\n\n A_eq[i, j] += mult\n else:\n raise ValueError(\n 'Unexpected state: the left part of a constraint should not contain constants.')\n\n result = opt.linprog(c, A_ub, b_ub, A_eq, b_eq, bounds = [(None, None)] * n)\n\n return result\n\n\ndef is_feasible(model):\n\n constraints = []\n for symbol, value in model.iteritems():\n if isinstance(symbol, Constraint):\n if value:\n constraints.append(symbol)\n else:\n if isinstance(symbol, EQ):\n raise ValueError('Something went wrong. The SAT solver should not assign False to EQ constraints. 
Encountered model {}.'.format(model))\n constraints.append(~ symbol)\n if len(constraints) == 0:\n return True\n\n return optimize(*constraints).status != 2\n\ndef union(*lists):\n '''\n We can't store the Integer objects in sets, because we overwrote __eq__. So we'll store them\n in lists instead, and do unions this way.\n :param lists: Lists cotaining integers and constants\n :return:\n '''\n result = []\n seen = set()\n\n for list in lists:\n for symbol in list:\n if symbol.name() not in seen:\n seen.add(symbol.name())\n result.append(symbol)\n\n return result\n\n"
] | [
[
"scipy.optimize.linprog"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
colesbury/fairo | [
"9e50a3aa7369c68c80e84d80abd5fcdee8a9277a",
"9e50a3aa7369c68c80e84d80abd5fcdee8a9277a",
"9e50a3aa7369c68c80e84d80abd5fcdee8a9277a",
"9e50a3aa7369c68c80e84d80abd5fcdee8a9277a",
"9e50a3aa7369c68c80e84d80abd5fcdee8a9277a"
] | [
"polymetis/tests/scripts/5_multithreaded.py",
"droidlet/lowlevel/rotation.py",
"perception/sandbox/eyehandcal/src/eyehandcal/utils.py",
"droidlet/interpreter/craftassist/tests/test_conditions.py",
"droidlet/interpreter/robot/spatial_reasoning.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport time\nimport threading\n\nimport torch\n\nfrom polymetis import RobotInterface\nfrom utils import check_episode_log\n\n\nsuccess = []\nexceptions = []\n\n\ndef connect_and_send_policy():\n try:\n # Initialize robot interface\n robot = RobotInterface(\n ip_address=\"localhost\",\n )\n hz = robot.metadata.hz\n robot.go_home()\n time.sleep(0.5)\n\n # Get joint positions\n joint_pos = robot.get_joint_positions()\n print(f\"Initial joint positions: {joint_pos}\")\n\n # Go to joint positions\n print(\"=== RobotInterface.move_to_joint_positions ===\")\n delta_joint_pos_desired = torch.Tensor([0.0, 0.0, 0.0, 0.5, 0.0, -0.5, 0.0])\n joint_pos_desired = joint_pos + delta_joint_pos_desired\n\n time_to_go = 4.0\n state_log = robot.move_to_joint_positions(\n joint_pos_desired, time_to_go=time_to_go\n )\n check_episode_log(state_log, int(time_to_go * hz))\n\n joint_pos = robot.get_joint_positions()\n assert torch.allclose(joint_pos, joint_pos_desired, atol=0.01)\n\n success.append(True)\n except Exception as e:\n exceptions.append(e)\n\n\nif __name__ == \"__main__\":\n thread = threading.Thread(target=connect_and_send_policy)\n thread.start()\n thread.join()\n\n assert success, f\"Exception: {exceptions[0]}\"\n",
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\nimport math\nimport numpy as np\nfrom numpy import sin, cos\n\nDIRECTIONS = {\n \"AWAY\": np.array([0, 0, 1]),\n \"FRONT\": np.array([0, 0, 1]),\n \"BACK\": np.array([0, 0, -1]),\n \"LEFT\": np.array([-1, 0, 0]),\n \"RIGHT\": np.array([1, 0, 0]),\n \"DOWN\": np.array([0, -1, 0]),\n \"UP\": np.array([0, 1, 0]),\n}\n\n# FIXME add the xz_only option for mc also, shouldn't use yaw for determining \"up\"\ndef transform(direction, yaw, pitch, inverted=False, xz_only=False):\n \"\"\"Coordinate transforms with respect to yaw/pitch of the viewer direction\n should be relative to the viewer *before* pitch/yaw transform If we want to\n transform any of DIRECTIONS back, then it would be inverted=True that is:\n inverted=True finds the vector in canonical coords pointing towards\n direction where direction is specified if you were facing yaw, pitch.\n\n conversely, inverted=False takes a direction in canonical\n coordinates and converts to coordinates where FRONT (z+) is yaw=0\n and pitch=0 yaw is assumed to be in the range [-pi, pi], and\n increasing yaw moves *counterclockwise* pitch is assumed to be in\n the range [-pi/2, pi/2]. pi/2 is down, -pi/2 is up.\n \"\"\"\n\n # 0 yaw is z axis\n # z+\n # +yaw | -yaw\n # |\n # |\n # |\n # x-___________|___________x+\n # |\n # |\n # z-\n\n # fmt: off\n ryaw = np.array([[cos(yaw), 0, -sin(yaw)],\n [0, 1, 0 ],\n [sin(yaw), 0, cos(yaw)]])\n\n rpitch = np.array([[1, 0, 0 ],\n [0, cos(-pitch), sin(-pitch)],\n [0, -sin(-pitch), cos(-pitch)]])\n\n # fmt: on\n\n # canonical world coords:\n # ^ y+\n # | z+\n # | /\n # | /\n # 0 -----> x+\n if not inverted:\n trans_mat = rpitch @ ryaw\n else:\n trans_mat = np.linalg.inv(rpitch @ ryaw)\n return trans_mat @ direction\n\n\ndef yaw_pitch(look_vec):\n xz_dir = np.array([look_vec[0], look_vec[2]])\n xz_dir = xz_dir / np.linalg.norm(xz_dir)\n yaw = np.arctan2(-xz_dir[0], xz_dir[1])\n\n # get the pitch value/tilt angle\n pitch = -np.arctan2(look_vec[1], np.sqrt(look_vec[0] ** 2 + look_vec[2] ** 2))\n\n yaw = yaw % (2 * np.pi)\n if yaw > np.pi:\n yaw = yaw - 2 * np.pi\n return yaw, pitch\n\n\n# this should invert yaw_pitch (up to norm)\ndef look_vec(yaw, pitch):\n # yaw = deg2rad(yaw)\n # pitch = deg2rad(pitch)\n x = -cos(pitch) * sin(yaw)\n y = sin(pitch)\n z = cos(pitch) * cos(yaw)\n return np.array([x, y, z])\n\n\ndef rotation_matrix_x(a):\n ar = float(a) * math.pi / 180.0\n cos = math.cos\n sin = math.sin\n return np.array([[1, 0, 0], [0, cos(ar), -sin(ar)], [0, sin(ar), cos(ar)]])\n\n\ndef rotation_matrix_y(a):\n ar = float(a) * math.pi / 180.0\n cos = math.cos\n sin = math.sin\n return np.array([[cos(ar), 0, sin(ar)], [0, 1, 0], [-sin(ar), 0, cos(ar)]])\n\n\ndef rotation_matrix_z(a):\n ar = float(a) * math.pi / 180.0\n cos = math.cos\n sin = math.sin\n return np.array(\n [\n [cos(ar), -sin(ar), 0],\n [sin(ar), cos(ar), 0],\n [0, 0, 1],\n ]\n )\n\n\nif __name__ == \"__main__\":\n A = (4, 0, 1)\n B = (4, 4, 4)\n print(transform(DIRECTIONS[\"RIGHT\"], 45, 0, inverted=True))\n print(\"yaw_pitch(look_vec(3.1, -1.0))\")\n print(yaw_pitch(look_vec(3.1, -1.0)))\n print(\"look_vec(*yaw_pitch(np.array((-2,1,1))))\")\n print(look_vec(*yaw_pitch(np.array((-2, 1, 1)))))\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport cv2\nimport math\n\n\ndef detect_corners(data, target_idx=9):\n \"\"\"\n data: [{'img': [np.ndarray]}]\n return: [{'corners', [(x,y)]}]\n \"\"\"\n aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)\n aruco_param = cv2.aruco.DetectorParameters_create()\n aruco_param.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX\n for i,d in enumerate(data):\n d['corners'] = []\n for j, img in enumerate(d['imgs']):\n result=cv2.aruco.detectMarkers(img, dictionary=aruco_dict, parameters=aruco_param)\n corners, idx, rej = result\n if idx is not None and target_idx in idx:\n corner_i = idx.squeeze().tolist().index(target_idx)\n target_corner=corners[corner_i][0,0,:].tolist()\n d['corners'].append(target_corner)\n else:\n d['corners'].append(None)\n return data\n\n\ndef skewsym(v):\n \"\"\"\n pytorch backwark() compatible\n \"\"\"\n zero=torch.tensor([0.])\n return torch.stack([\n zero, -v[2:3], v[1:2],\n v[2:3], zero, -v[0:1],\n -v[1:2], v[0:1], zero\n ]).reshape(3,3)\n\n\ndef quat2rotvec(v):\n u = v[:3]\n u = u / u.norm()\n theta = 2 * torch.acos(v[3])\n return u * theta\n\n\ndef rotmat(v):\n assert len(v)==3\n v_ss = skewsym(v)\n return torch.matrix_exp(v_ss)\n\n\n\n# TODO: use fairotag.camera.Camera._intrinsic\ndef build_proj_matrix(fx, fy, ppx, ppy, coeff=None):\n # consider handle distortion here\n return torch.DoubleTensor([[fx, 0., ppx],\n [0., fy, ppy],\n [0., 0., 1.]])\n\n\ndef marker_proj(param, pos_ee_base, ori_ee_base, K):\n camera_base_ori = param[:3]\n camera_base_pos = param[3:6]\n p_marker_ee = param[6:9]\n p_marker_camera = rotmat(-camera_base_ori).matmul(\n (rotmat(ori_ee_base).matmul(p_marker_ee) + pos_ee_base)-camera_base_pos)\n p_marker_image = K.matmul(p_marker_camera)\n return p_marker_image[:2]/p_marker_image[2]\n\n\n\ndef pointloss(param, obs_marker_2d, pos_ee_base, ori_ee_base, K):\n proj_marker_2d = marker_proj(param, pos_ee_base, ori_ee_base, K)\n return (obs_marker_2d - proj_marker_2d).norm()\n\n\n\ndef mean_loss(data, param, K):\n losses = []\n for d in data:\n corner = d[0]\n ee_base_pos = d[1]\n ee_base_ori = d[2]\n ploss = pointloss(param, corner, ee_base_pos, ee_base_ori, K)\n losses.append(ploss)\n return torch.stack(losses).mean()\n\ndef find_parameter(param, obs_data_std, K):\n optimizer=torch.optim.LBFGS([param], max_iter=1000, lr=1, line_search_fn='strong_wolfe')\n def closure():\n optimizer.zero_grad()\n loss=mean_loss(obs_data_std, param, K)\n loss.backward()\n return loss\n \n L=optimizer.step(closure)\n return param.detach()\n\n\ndef sim_data(n, K, noise_std=0):\n from torchcontrol.transform import Rotation as R\n from torchcontrol.transform import Transformation as T\n # z marker\n # ^ /\n # | / \n # (q0)----L---(q1)ee\n # |\n # H\n # | > camera\n # +--------------------+--->x\n # \n L=0.3\n D=2.0 #camera-distance to robot base\n H=0.2\n get_ee = lambda q: torch.DoubleTensor([L * math.cos(q), L * math.sin(q), H])\n p_marker_0=torch.DoubleTensor([0., 0., 0.2]) #marker position on ee frame\n T_camera_ee = T.from_rot_xyz(\n rotation=R.from_rotvec(torch.DoubleTensor([-math.pi/2, 0, 0])) * R.from_rotvec(torch.DoubleTensor([0, -math.pi/2, 0])), # camera orientation\n translation=torch.DoubleTensor([D, 0., 0.])) # camera position\n gt_param = torch.cat([T_camera_ee.rotation().as_rotvec(), T_camera_ee.translation(), p_marker_0])\n 
data=[]\n for i in range(n):\n q0 = i * 2* math.pi / n\n q1 = i * 2* math.pi / n * 2\n ee = get_ee(q0)\n obs_Tee_base = T.from_rot_xyz(\n rotation=(R.from_rotvec(torch.DoubleTensor([0., 0., q1])) * R.from_rotvec(torch.DoubleTensor([q1, 0., 0.]))),\n translation=ee)\n p_marker_camera = (T_camera_ee.inv() * obs_Tee_base).apply(p_marker_0)\n p_marker_proj = K.matmul(p_marker_camera)\n p_marker_image = p_marker_proj[:2] / p_marker_proj[2]\n data.append([\n p_marker_image + torch.randn_like(p_marker_image) * noise_std,\n obs_Tee_base.translation(),\n obs_Tee_base.rotation().as_rotvec()])\n # print('q', [q0, q1], '\\n'\n # 'ee', ee, '\\n'\n # 'marker_camera', p_marker_camera, '\\n',\n # 'marker_image', p_marker_image)\n \n return data, gt_param\n",
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nimport unittest\nimport numpy as np\n\nfrom droidlet.interpreter.tests.all_test_commands import *\nfrom droidlet.shared_data_structs import TICKS_PER_SEC\nfrom agents.craftassist.tests.base_craftassist_test_case import BaseCraftassistTestCase\nfrom droidlet.lowlevel.minecraft.pyworld.fake_mobs import LoopMob, make_mob_opts\n\n\ndef add_sequence_mob(test, mobname, sequence):\n m = LoopMob(make_mob_opts(mobname), sequence)\n m.add_to_world(test.world)\n\n\nclass MoveDirectionUntilTest(BaseCraftassistTestCase):\n def setUp(self):\n super().setUp()\n\n def test_move_till_condition(self):\n cow_look = (0.0, 0.0)\n x = -5\n z = -5\n cow_move_sequence = [((x, 63, z), cow_look)]\n # speaker_pos = [5, 63, 5]\n for i in range(20):\n while x < 10:\n x += 1\n cow_move_sequence.append(((x, 63, z), cow_look))\n z += 1\n cow_move_sequence.append(((x, 63, z), cow_look))\n while x > 1:\n x -= 1\n cow_move_sequence.append(((x, 63, z), cow_look))\n z -= 1\n cow_move_sequence.append(((x, 63, z), cow_look))\n\n add_sequence_mob(self, \"cow\", cow_move_sequence)\n cow = self.agent.world.mobs[0]\n look_at_target = (1, 63, -2)\n self.set_looking_at(look_at_target)\n\n d = STOP_CONDITION_COMMANDS[\"go left until that cow is closer than 2 steps to me\"]\n self.handle_logical_form(d, max_steps=1000)\n\n # check stopped when cow was close:\n self.assertLessEqual(((5 - cow.pos[0]) ** 2 + (5 - cow.pos[2]) ** 2) ** 0.5, 2)\n\n # check agent went left:\n player_lv = np.array(look_at_target) - np.array((5, 63, 5))\n player_lv = player_lv / np.linalg.norm(player_lv)\n player_left = player_lv.copy()\n player_left[0] = player_lv[2]\n player_left[2] = -player_lv[0]\n agent_mv = self.agent.pos - np.array((0, 63, 0))\n agent_mv_n = agent_mv / np.linalg.norm(agent_mv)\n self.assertGreaterEqual(agent_mv_n @ player_left, 0.8)\n\n # check that player stopped first time cow was close:\n self.assertLessEqual(self.agent.world.count, 30)\n\n\nclass FollowUntilTest(BaseCraftassistTestCase):\n def setUp(self):\n super().setUp()\n\n def test_move_till_condition(self):\n cow_look = (0.0, 0.0)\n cow_move_sequence = [((0, 63, 0), cow_look)]\n x = 0\n for i in range(20):\n while x < 10:\n x += 1\n for j in [-2, -3, -2, -3]:\n cow_move_sequence.append(((x, 63, j), cow_look))\n while x > 1:\n x -= 1\n for j in [-2, -3, -2, -3]:\n cow_move_sequence.append(((x, 63, j), cow_look))\n\n add_sequence_mob(self, \"cow\", cow_move_sequence)\n cow = self.agent.world.mobs[0]\n self.set_looking_at((1, 63, -2))\n\n d = deepcopy(STOP_CONDITION_COMMANDS[\"follow the cow for 18 seconds\"])\n start_time = self.agent.get_time()\n self.handle_logical_form(d, max_steps=5000)\n end_time = self.agent.get_time()\n time_elapsed = (end_time - start_time) / TICKS_PER_SEC\n self.assertLessEqual(\n abs(self.agent.pos[0] - cow.pos[0]) + abs(self.agent.pos[2] - cow.pos[2]), 1.01\n )\n self.assertLessEqual(time_elapsed, 20)\n self.assertGreaterEqual(time_elapsed, 16)\n\n d = deepcopy(STOP_CONDITION_COMMANDS[\"follow the cow for 2 minutes\"])\n start_time = self.agent.get_time()\n self.handle_logical_form(d, max_steps=5000)\n end_time = self.agent.get_time()\n time_elapsed = (end_time - start_time) / TICKS_PER_SEC\n self.assertLessEqual(\n abs(self.agent.pos[0] - cow.pos[0]) + abs(self.agent.pos[2] - cow.pos[2]), 1.01\n )\n self.assertLessEqual(time_elapsed, 130)\n self.assertGreaterEqual(time_elapsed, 110)\n\n\n# # FIXME!!!! 
this test is flaky....\n# # TODO make sure cow is moving in x positive direction from below 5 when the test starts\n# # it is now if everything else works, but should force it\n# d = deepcopy(\n# STOP_CONDITION_COMMANDS[\"follow the cow for 18 seconds after it has x greater than 5\"]\n# )\n# start_time = self.agent.get_time()\n# self.handle_logical_form(d, max_steps=5000)\n# end_time = self.agent.get_time()\n# time_elapsed = (end_time - start_time) / TICKS_PER_SEC\n# self.assertEqual(cow_move_sequence[self.agent.world.count - 1 - 18][0][0], 6)\n# self.assertLessEqual(\n# abs(self.agent.pos[0] - cow.pos[0]) + abs(self.agent.pos[2] - cow.pos[2]), 1.01\n# )\n\n\nclass DigRemoveConditionTest(BaseCraftassistTestCase):\n def setUp(self):\n super().setUp()\n\n def test_dig_n_times(self):\n s = deepcopy(STOP_CONDITION_COMMANDS[\"dig a hole 2 times\"])\n changes = self.handle_logical_form(s)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nimport numpy as np\n\nDEFAULT_NUM_STEPS = 1\n\n\nclass ComputeLocations:\n def __call__(\n self,\n interpreter,\n speaker,\n mems,\n steps,\n reldir,\n repeat_num=1,\n repeat_dir=None,\n objects=[],\n padding=(1, 1, 1),\n ):\n repeat_num = max(repeat_num, len(objects))\n origin = compute_location_heuristic(mems, steps, reldir, interpreter.memory)\n if repeat_num > 1:\n raise NotImplementedError\n else:\n offsets = [(0, 0, 0)]\n # offsets = [post_process_loc(o, interpreter) for o in offsets]\n return origin, offsets\n\n\n# FIXME this can be merged with the MC version without too much work,\n# main difference is speaker vs agent frame (but that is in agent.default_frame)\n# There will be at least one mem in mems\ndef compute_location_heuristic(mems, steps, reldir, memory):\n loc = mems[0].get_pos()\n self_mem = memory.get_mem_by_id(memory.self_memid)\n if reldir is not None:\n steps = steps or DEFAULT_NUM_STEPS\n if reldir == \"BETWEEN\":\n loc = tuple((np.add(mems[0].get_pos(), mems[1].get_pos())) / 2)\n elif reldir == \"INSIDE\":\n raise NotImplementedError\n elif reldir == \"NEAR\":\n pass\n elif reldir == \"AROUND\":\n pass\n else: # LEFT, RIGHT, etc...\n reldir_vec = memory.coordinate_transforms.DIRECTIONS[reldir]\n yaw, _ = self_mem.get_yaw_pitch() # work in agent frame\n # we are converting a agent-frame reldir to absolute frame so we set inverted=True\n dir_vec = memory.coordinate_transforms.transform(reldir_vec, yaw, 0, inverted=True)\n loc = steps * np.array(dir_vec) + np.array(loc)\n elif steps is not None:\n loc = np.add(loc, [0, 0, steps])\n return tuple(loc)\n"
] | [
[
"torch.allclose",
"torch.Tensor"
],
[
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.array"
],
[
"torch.randn_like",
"torch.matrix_exp",
"torch.tensor",
"torch.optim.LBFGS",
"torch.acos",
"torch.stack",
"torch.DoubleTensor"
],
[
"numpy.array",
"numpy.linalg.norm"
],
[
"numpy.add",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bf108/elo_package | [
"ec87eba14362ff1de1c4830be2761ef23a32c6e4"
] | [
"src/elopackage/preprocess.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom ast import literal_eval\n\n\ndef convert_scores_literal(score):\n try:\n return literal_eval(score)\n except:\n return f'Error: {score}'\n\n\ndef pts_diff(row):\n try:\n winner_pts = sum(row['winning_team_scores_lst'])\n loser_pts = sum(row['losing_team_scores_lst'])\n\n return winner_pts - loser_pts\n except:\n return np.nan\n\n\ndef game_pts_diff(win, lsr):\n winner_pts = np.array(win)\n loser_pts = np.array(lsr)\n\n return winner_pts - loser_pts\n\n\ndef preprocess_tour_data(csv_file):\n p = Path(csv_file).absolute()\n df = pd.read_csv(p)\n # Convert match date to datetime\n df['match_date_dt'] = pd.to_datetime(df['match_date'], errors='coerce')\n\n # Drop matches where score not present\n df = df[(df['losing_team_scores'] != 'n/a') & (df['winning_team_scores'] != 'n/a')]\n\n # Convert scores to list of int and get pts diff - Drop any rows with missing pts diff\n df['losing_team_scores_lst'] = df['losing_team_scores'].apply(lambda x: convert_scores_literal(x))\n df['winning_team_scores_lst'] = df['winning_team_scores'].apply(lambda x: convert_scores_literal(x))\n df['pts_diff'] = df.apply(lambda x: pts_diff(x), axis=1)\n\n df.dropna(subset=['pts_diff'], inplace=True)\n\n # Get score diff for winner of each game\n gme_pts_diff = []\n for row in df.itertuples():\n gme_pts_diff.append(game_pts_diff(row.winning_team_scores_lst, row.losing_team_scores_lst))\n\n df['gme_pts_diff'] = gme_pts_diff\n\n # Add a flag for doubles games\n df['Doubles'] = ~df.losing_team_p2.isna()\n\n df.sort_values(by=['match_date_dt'], ascending=True, inplace=True)\n\n df.reset_index(drop=True, inplace=True)\n\n return df\n\n\ndef briers_score(predictions, actual):\n return sum([(ft - ot) ** 2 for ft, ot in zip(predictions, actual)]) / len(predictions)\n\n"
] | [
[
"numpy.array",
"pandas.to_datetime",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
vkola-lab/multi-task | [
"6a61db4223e1812744f13028747b07e2f840cc0b"
] | [
"lookupcsv/derived_tables/FHS/get_T1_MRI.py"
] | [
"from glob import glob\nimport pydicom\nfrom pydicom import dcmread\nimport os\nimport numpy as np\nimport nibabel as nib\n\n\ndef dicom_to_nifti(folder):\n dicoms = None\n return\n\n\ndef read_a_dicom(dicom_file):\n ds = dcmread(dicom_file)\n\n\ndef get_array(dcm_file):\n ds = dcmread(dcm_file)\n location = float(ds[0x0020, 0x1041].value)\n array = ds.pixel_array\n return (array, location)\n\n\ndef stack_dicoms(dicoms):\n pool = []\n for file in dicoms:\n pool.append(get_array(file))\n pool.sort(key=lambda x : x[1])\n return np.stack([x[0] for x in pool], axis=0)\n\n\ndef find_subfolders(root_dir):\n count = 0\n for root, dirs, files in os.walk(root_dir):\n if 't1' in root and len(files) and '.dcm' in files[0]:\n full_path = [os.path.join(root, file) for file in files]\n count += 1\n identifier = full_path[0].split('/')[5]\n print(identifier)\n try:\n volume = stack_dicoms(full_path)\n print(volume.shape)\n nifti = nib.Nifti1Image(volume, affine=np.eye(4))\n nib.save(nifti, '/data_2/FHS/nifti/{}.nii'.format(identifier))\n except ValueError:\n print('value error')\n pass\n except RuntimeError:\n print('runtime error')\n pass\n print(count)\n\n\nif __name__ == \"__main__\":\n path = '/data_2/FHS/20200819/'\n find_subfolders(path)\n"
] | [
[
"numpy.eye",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
getzlab/SMM_clustering_2020 | [
"0a00e11fbde38a1da82167e1c924234a79cba4a5"
] | [
"funcs/plot.py"
] | [
"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport os\nfrom typing import Union\nimport pandas as pd\nimport signatureanalyzer as sa\n\n# Cluster Color map\nCOLORMAP={\n 0:'#0073C2FF',\n 1:'#EFC000FF',\n 2:'#868686FF',\n 3:'#CD534CFF',\n 4:'#7AA6DCFF',\n 5:'#003C67FF',\n}\n\nCOLORMAP2={\n \"C1\":'#0073C2FF',\n \"C2\":'#EFC000FF',\n \"C3\":'#868686FF',\n \"C4\":'#CD534CFF',\n \"C5\":'#7AA6DCFF',\n \"C6\":'#003C67FF'\n}\n\n# Plotting funcs\ndef plot_scatter(df: pd.DataFrame, group: str, ax: plt.Axes, pal=None):\n \"\"\"\n Generic scatterplot.\n ------------------------------\n Inputs:\n * df: dataframe wtih columns as axes\n * group: column in dataframe that defines group colors\n * ax: matplotlib axes\n * pal: dictionary mapping groups to colors\n\n Output:\n * none\n \"\"\"\n groups = list(set(df[group]))\n\n if pal is None:\n groups_cdict = {groups[i]:x for i,x, in enumerate(sns.color_palette(\"hls\", len(set(df[group]))))}\n else:\n groups_cdict = pal\n\n _ = ax.scatter(\n df['PC1'],\n df['PC2'],\n alpha=0.8,\n c=df[group].apply(lambda x: groups_cdict[x]),\n label=None,\n s=70,\n edgecolor='black',\n linewidth=0.5\n )\n\n ax.set_xlabel(\"PC1\", fontsize=14)\n ax.set_ylabel(\"PC2\", fontsize=14)\n\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n\n for k,v in groups_cdict.items():\n try:\n m = ax.scatter(-1e4, -1e4, alpha=.8, c=np.array(v)[np.newaxis,:], label=k)\n except:\n m = ax.scatter(-1e4, -1e4, alpha=.8, c=v, label=k)\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n ax.legend(loc='lower left')\n ax.set_title(group, fontsize=14, fontweight='bold', x=0.05, y=0.025, ha='left')\n\ndef plot_metric_hm(X, sample_n, k_n, title: str = None, figsize: tuple = (8,6), ax: plt.Axes = None):\n \"\"\"\n Metrics heatmap for downsampling analysis.\n ------------------------------\n Inputs:\n * X\n * sample_n\n * k_n\n * title:\n * figsize: tuple of\n * ax: plt.Axes\n\n Outputs:\n * none\n \"\"\"\n if ax is None:\n fig,ax = plt.subplots(figsize=figsize)\n\n sns.heatmap(\n pd.DataFrame(X.mean(2), index=[\"n={}\".format(str(x)) for x in sample_n], columns=[str(x) for x in k_n]),\n annot=True,\n cmap='coolwarm',\n ax=ax,\n linewidth=0.1,\n linecolor='black',\n fmt='.2f'\n )\n\n [ax.spines[sp].set_visible(True) for sp in ['top','right','left','bottom']]\n\n ax.set_yticklabels(ax.get_yticklabels(), rotation = 0, fontsize=12, fontstyle='italic')\n ax.set_xlabel(\"K Factors\", fontsize=16)\n\n if title is not None:\n ax.set_title(title, fontsize=18)\n\ndef plot_dist_per_metric(X, k, sample_n, k_n, figsize=(8,6), ax=None, title=None, s=10):\n \"\"\"\n Plot distribution per for a given K.\n \"\"\"\n if ax is None:\n fig,ax = plt.subplots(figsize=figsize)\n\n X = pd.DataFrame(X[:,k-min(k_n),:], index=[\"n={}\".format(str(x)) for x in sample_n]).T\n\n sns.stripplot(\n data=X,\n s=s,\n ax=ax,\n linewidth=.25,\n alpha=0.25,\n rasterized=True\n )\n\n sns.violinplot(\n data=X,\n ax=ax,\n linewidth=1,\n alpha=0.6,\n color='White'\n )\n\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right', fontsize=14)\n ax.set_title(\"K = {}\".format(k), fontsize=14)\n\ndef plot_dist_per_metric_grid(X, sample_n, k_n, y_label=None):\n \"\"\"\n Plot distribution grid for a given metric.\n \"\"\"\n\n fig,axes = plt.subplots(3,3,figsize=(12,12), sharex=True)\n\n c=0\n for i in range(3):\n for j in range(3):\n plot_dist_per_metric(X, k_n[c], sample_n, k_n, ax=axes[i,j], s=8)\n c+=1\n if j==0: axes[i,j].set_ylabel(y_label, fontsize=14)\n\n plt.tight_layout()\n\ndef 
plot_marker_heatmap(\n X: pd.DataFrame,\n signatures: pd.DataFrame,\n order_series: pd.Series,\n signatures_idx: str = 'max_id',\n subset_genes: Union[pd.Series,None] = None,\n diff: float = 0.5,\n max_norm: float = 0.5,\n figsize: tuple = (16,12),\n cmap: str =\"YlGnBu\",\n vmax: float = None,\n vmin: float = None,\n cohort_s: Union[pd.Series,None] = None\n ):\n \"\"\"\n Plot marker map.\n -----------------------------\n Args:\n * X: pd.DataFrame of input sample x feature matrix\n * signatures: pd.DataFrame signatures output;\n this bundles information about the weightings of each feature (ex. gene) and\n what signature they map to\n * order_series: series of samples mapping to subgroups\n index: X.index\n values: subgrouping\n * signatures_idx: string for signature grouping\n * subset_series: a pd.Series with the index as the gene name or ID that\n matches the marker matrix & has a \"Subgroup\" column for labeling\n * diff: difference of loading for called signature vs. rest\n * max_norm: strength of loading for called signature\n * figsize: size of figure\n * cmap: colormap for plot\n * display_y: whether or not to display feature names\n * vmax: colorbar max\n * vmin: colorbar min\n * cohort_s: cohort_series dataframe (added on top of plot)\n * y_hm_label: label of y-axis on heatmap (ex. Genes, Protein LFC, etc.)\n * cbar_hm_label: label of heatmap colorbar\n Returns:\n * plt.Figure\n \"\"\"\n from scipy.cluster import hierarchy\n import scipy.cluster.hierarchy as shc\n from sklearn.cluster import AgglomerativeClustering\n\n import signatureanalyzer as sa\n\n # Remove signatures with no marker genes associated\n order_series = order_series[order_series.isin(set(signatures[signatures_idx].astype(int)))]\n\n # Filter X matrix\n sample_markers = X.loc[signatures.index, order_series.sort_values().index]\n\n # Set horizontal lines\n hz_lines = np.unique(sample_markers.join(signatures).loc[:,signatures_idx].values, return_index=True)[1]\n\n fig, ax = plt.subplots(figsize=figsize)\n\n x0 = ax.get_position().x0\n x1 = ax.get_position().x1\n y0 = ax.get_position().y0\n y1 = ax.get_position().y1\n buf = y1*0.01\n\n sns.heatmap(sample_markers, ax=ax, cmap=cmap, rasterized=True, vmax=vmax, vmin=vmin, cbar=False)\n v,c = np.unique(order_series, return_counts=True)\n\n # plot horizontal lines\n _c = np.cumsum(c)\n _ci = np.roll(_c,2)\n _ci[0] = 0\n _ci[1] = 0\n ax.hlines(hz_lines, _ci, _c, rasterized=True)\n\n # plot vertical lines\n _h = list(hz_lines)\n _h.append(ax.get_ylim()[0])\n ax.vlines(np.cumsum(c)[:-1], _h[:-2], _h[2:], rasterized=True)\n ax.vlines(np.cumsum(c)[:-1], *ax.get_ylim(), alpha=0.4, rasterized=True, linewidth=1)\n\n # Set yticks\n ax.yaxis.tick_right()\n ax.set_yticks(np.arange(sample_markers.index.values.shape[0])+0.5)\n ax.set_yticklabels(sample_markers.index.values, fontsize=7.5, rasterized=True, rotation=0, va=\"center\")\n\n # --------------cluster annot-------------------\n clust_ax = fig.add_axes([x0, y1+buf, x1*.861, 2*buf])\n\n clust_ax.set_xticks([])\n clust_ax.set_yticks([])\n\n colors_conversion, meta_colormap = sa.pl.series_to_colors(order_series.loc[sample_markers.columns])\n meta_colormap_inv = dict([[v,k] for k,v in meta_colormap.items()])\n meta_colormap_inv = {(k[0],k[1],k[2]):v for k,v in meta_colormap_inv.items()}\n\n mat,cmap = sa.pl.color_list_to_matrix_and_cmap(colors_conversion)\n\n sns.heatmap(\n mat,\n cmap=cmap,\n ax=clust_ax,\n yticklabels=False,\n xticklabels=False,\n cbar=False\n )\n\n [spine.set_visible(True) for _, spine in 
clust_ax.spines.items()]\n\n clust_ax.yaxis.set_label_position(\"right\")\n clust_ax.set_ylabel(\"Consensus NMF\", rotation=0, va='center', ha='left')\n # --------------cluster annot-------------------\n\n\n # --------------sample annot-------------------\n if cohort_s is not None:\n cdict = {'Low': 'Green', 'Intermediate':'Yellow', 'High': 'Red'}\n order_dict = {'Green': 0, 'Yellow': 1, 'Red': 2}\n\n # Get ordering and samples\n cohort_s = cohort_s.loc[sample_markers.columns]\n\n # Create axis\n cs_ax = fig.add_axes([x0, y1+4*buf, x1*.861, 2*buf])\n cs_ax.set_xticks([])\n cs_ax.set_yticks([])\n\n cbar_cs_ax = fig.add_axes([x0, y1+7*buf, x1*.25, 2*buf])\n\n colors_conversion, meta_colormap = sa.pl.series_to_colors(cohort_s, cdict=cdict)\n meta_colormap_inv = dict([[v,k] for k,v in meta_colormap.items()])\n\n mat,cmap = sa.pl.color_list_to_matrix_and_cmap(colors_conversion, order_dict=order_dict)\n\n sns.heatmap(\n mat,\n cmap=cmap,\n ax=cs_ax,\n yticklabels=False,\n xticklabels=False,\n cbar=True,\n cbar_ax=cbar_cs_ax,\n cbar_kws={\"orientation\": \"horizontal\"}\n )\n\n cb_ticks = [float(t.get_text().replace('−','-')) for t in cbar_cs_ax.get_yticklabels()]\n\n color_value_mapping = dict([[v,k] for k,v in order_dict.items()])\n\n cbar_cs_ax.get_xaxis().set_ticks([])\n\n n_labels = len(list(color_value_mapping.keys()))\n\n # FIX THIS\n vals = [x * ((n_labels)/(n_labels+1)) + 0.5 * ((n_labels)/(n_labels+1)) for x in list(color_value_mapping.keys())]\n #cbar_cs_ax.get_xaxis().set_ticks(vals)\n\n cbar_cs_ax.get_xaxis().set_ticks([0.375, 1, 1.675])\n cbar_cs_ax.get_xaxis().set_ticklabels(list(cdict.keys()))\n cbar_cs_ax.xaxis.set_ticks_position('top')\n\n cbar_cs_ax.set_frame_on(True)\n [spine.set_visible(True) for _, spine in cs_ax.spines.items()]\n\n cs_ax.yaxis.set_label_position(\"right\")\n cs_ax.set_ylabel(\"Risk\", rotation=0, va='center', ha='left')\n\n # --------------sample annot-------------------\n\n # --------------pval barplot-------------------\n p_ax = fig.add_axes([x1+12*buf, y0, 10*buf, y1-y0])\n p_ax.set_yticks([])\n\n log10_pval_adj = -np.log10(signatures.loc[sample_markers.index]['pval_adj'])\n\n p_ax.barh(np.arange(signatures.shape[0]), log10_pval_adj[::-1], edgecolor='black', linewidth=1, color='darkblue')\n plt.margins(y=0)\n p_ax.axvline(1, linewidth=1, color='red')\n\n p_ax.spines['top'].set_visible(False)\n p_ax.spines['right'].set_visible(False)\n p_ax.set_xticks([0,5,10,15,20])\n p_ax.set_xlabel(\"$-log_{10}$ (adj. p-val)\")\n # --------------pval barplot-------------------\n\n ax.set_title('')\n ax.set_xlabel('')\n ax.set_ylabel('')\n\n [spine.set_visible(True) for _, spine in ax.spines.items()]\n\n # Set xticks\n ax.set_xticks([])\n ax.set_xticklabels([])\n ax.set_xticks(np.cumsum(c)-c/2)\n ax.set_xticklabels(v, rotation=360, fontsize=14)\n ax.tick_params(axis='x', which=u'both',length=0)\n\n return fig\n\ndef plot_consensus_matrix(\n cmatrix: pd.DataFrame,\n metric: str = 'euclidean',\n method: str = 'ward',\n n_clusters: int = 10,\n color_thresh_scale: float = 0.3,\n figsize: tuple = (8,8),\n p: int = 30,\n metas: Union[list, None] = None,\n vmax: Union[float, None] = None,\n vmin: Union[float, None] = None,\n cbar_label: str = 'ARD-NMF \\nMembership',\n cmap: Union[str, None] = None,\n plot_cluster_lines: bool = False\n ):\n \"\"\"\n Plot consensus matrix.\n -----------------------\n Args:\n * cmatrix: consensus matrix. 
This may be generated by calling:\n df, assign_p = consensus_cluster_ardnmf(filepath)\n * metric: distance metric\n * method: method of clustering\n * n_clusters: number of clusters for agglomerative clustering\n * color_thresh_scale: asthetic scale for coloring of dendrogram\n * figsize: figsize\n * p: parameter for dendrogram\n * meta: list of pd.Series that includes a variable of interest to plot\n to left of plot; must be categorical in nature\n Returns:\n * fig\n \"\"\"\n from matplotlib.pyplot import cm\n import matplotlib as mpl\n from scipy.cluster import hierarchy\n import scipy.cluster.hierarchy as shc\n from sklearn.cluster import AgglomerativeClustering\n # -------------\n # Heatmap\n # -------------\n fig,ax = plt.subplots(figsize=figsize)\n cbar_ax = fig.add_axes([ax.get_position().x1 + ax.get_position().x1*0.1, ax.get_position().y0, .025, .1])\n\n # Compute initial linkage to grab ordering\n d_linkage = shc.linkage(cmatrix, metric=metric, method=method)\n dres = shc.dendrogram(d_linkage, p=p, no_plot=True)\n dgram_idx = list(map(int, dres['ivl']))\n\n # Create heatmap\n if vmax is None:\n cbar_top_lim = np.max(cmatrix.values)\n else:\n cbar_top_lim = vmax\n\n if vmin is None:\n cbar_bottom_lim = 0\n else:\n cbar_bottom_lim = vmin\n\n # Create heatmap\n sns.heatmap(\n cmatrix.iloc[dgram_idx,dgram_idx].values,\n ax=ax,\n square=True,\n cbar_ax=cbar_ax,\n cbar_kws = {'ticks':[cbar_bottom_lim, cbar_top_lim]},\n rasterized=True,\n vmax=vmax,\n vmin=vmin,\n cmap=cmap\n )\n\n cbar_ax.set_ylabel(cbar_label, fontsize=10,rotation=90)\n ax.set_xticks([])\n ax.set_yticks([])\n\n x0 = ax.get_position().x0\n x1 = ax.get_position().x1\n y0 = ax.get_position().y0\n y1 = ax.get_position().y1\n\n buf = y1*0.015\n\n # -------------\n # Clustering\n # -------------\n cluster = AgglomerativeClustering(\n n_clusters=n_clusters,\n affinity=metric,\n linkage=method\n )\n\n clusters = cluster.fit_predict(cmatrix.iloc[dgram_idx,dgram_idx])\n cluster_color_list, _ = sa.pl.series_to_colors(pd.Series(clusters), cdict=COLORMAP)\n\n # -------------\n # Dendrogram\n # -------------\n cmap = cm.rainbow(np.linspace(0, 1, 10))\n hierarchy.set_link_color_palette([mpl.colors.rgb2hex(rgb[:3]) for rgb in cmap])\n\n dax = fig.add_axes([x0, y1+buf, x1-x0, 0.15])\n\n dres = shc.dendrogram(\n d_linkage,\n p=p,\n ax=dax,\n above_threshold_color=\"grey\",\n color_threshold=color_thresh_scale*max(d_linkage[:,2])\n )\n\n dax.set_xticks([])\n dax.set_yticks([])\n [dax.spines[x].set_visible(False) for x in ['top','right','bottom','left']]\n\n # -------------\n # Metadata Axes\n # -------------\n if plot_cluster_lines:\n hz_lines = np.sort(np.unique(pd.Series(clusters), return_index=True)[1])\n v,c = np.unique(clusters, return_counts=True)\n\n _c = hz_lines\n _c = np.roll(hz_lines, 1)\n _c[0] = 0\n _c[1] = 0\n\n _ci = hz_lines[1:]\n _ci = np.append(_ci, clusters.shape[0])\n\n for idx, hz in enumerate(hz_lines):\n ax.hlines(hz, _c[idx], _ci[idx], rasterized=True)\n ax.vlines(hz, _c[idx], _ci[idx], rasterized=True)\n\n # Add axes\n # Plots agglomerative clustering results\n if metas is None:\n lax = fig.add_axes([x0-3*buf, y0, 2*buf, y1-y0])\n mat, cmap = sa.pl.color_list_to_matrix_and_cmap(cluster_color_list)\n sns.heatmap(mat.T, cmap=cmap, ax=lax, xticklabels=False, yticklabels=False, cbar=False, rasterized=True)\n\n uniq, idx, num_vals = np.unique(clusters.T, return_index=True, return_counts=True)\n y_locs = idx + num_vals / 2\n\n for idx,u in enumerate(uniq):\n lax.text(x0-50*buf, y_locs[idx], u, ha='center')\n\n for 
idx,u in enumerate(uniq):\n ax.text(\n mat.shape[1]+0.01*mat.shape[1],\n y_locs[idx],\n \"n={}\".format(num_vals[idx]),\n ha='left',\n fontsize=14\n )\n\n for _, spine in lax.spines.items():\n spine.set_visible(True)\n\n lax.set_xlabel(\"Consensus\", rotation=90)\n\n else:\n for idx,meta in enumerate(metas):\n new_ax = [x0-(idx+3)*buf-(idx*2)*buf, y0, 2*buf, y1-y0]\n lax = fig.add_axes(new_ax)\n\n if isinstance(meta, str) and meta=='aggr':\n mat, cmap = sa.pl.color_list_to_matrix_and_cmap(cluster_color_list)\n sns.heatmap(mat.T, cmap=cmap, ax=lax, xticklabels=False, yticklabels=False, cbar=False, rasterized=True)\n\n uniq, idx, num_vals = np.unique(clusters.T, return_index=True, return_counts=True)\n y_locs = idx + num_vals / 2\n\n for idx,u in enumerate(uniq):\n lax.text(0.5, y_locs[idx], \"C{}\".format(u+1), ha='center', color='white')\n\n for idx,u in enumerate(uniq):\n ax.text(\n mat.shape[1]+0.01*mat.shape[1],\n y_locs[idx],\n \"n={}\".format(num_vals[idx]),\n ha='left',\n fontsize=14\n )\n\n #lax.set_xlabel(\"Consensus\", rotation=90)\n\n else:\n meta = meta.loc[cmatrix.index[dgram_idx]].fillna(0).astype(int)\n cdict={1:'purple',0:'white'}\n\n cluster_color_list, _ = sa.pl.series_to_colors(meta, cdict=cdict)\n mat,cmap = sa.pl.color_list_to_matrix_and_cmap(cluster_color_list)\n sns.heatmap(mat.T, cmap=cmap, ax=lax, yticklabels=False, xticklabels=False, cbar=False)\n lax.set_xlabel(meta.name, rotation=90)\n\n for _, spine in lax.spines.items():\n spine.set_visible(True)\n\n rs = pd.DataFrame(clusters, index=cmatrix.index[dgram_idx]).rename(columns={0:'clusters'})\n\n for _, spine in ax.spines.items():\n spine.set_visible(True)\n\n ax.set_xlabel(\"Samples\", fontsize=14)\n\n return fig, rs\n\ndef plot_marker_heatmap_fig1(\n X: pd.DataFrame,\n signatures: pd.DataFrame,\n order_series: pd.Series,\n signatures_idx: str = 'max_id',\n figsize: tuple = (16,13),\n vmax: float = None,\n vmin: float = None,\n metas: Union[pd.Series,None] = None,\n order_x: Union[pd.Series,None] = None,\n ):\n \"\"\"\n Plot marker map.\n -----------------------------\n Args:\n * X: pd.DataFrame of input sample x feature matrix\n * signatures: pd.DataFrame signatures output;\n this bundles information about the weightings of each feature (ex. gene) and\n what signature they map to\n * order_series: series of samples mapping to subgroups\n index: X.index\n values: subgrouping\n * signatures_idx: string for signature grouping\n * subset_series: a pd.Series with the index as the gene name or ID that\n matches the marker matrix & has a \"Subgroup\" column for labeling\n * diff: difference of loading for called signature vs. rest\n * max_norm: strength of loading for called signature\n * figsize: size of figure\n * cmap: colormap for plot\n * display_y: whether or not to display feature names\n * vmax: colorbar max\n * vmin: colorbar min\n * cohort_s: cohort_series dataframe (added on top of plot)\n * y_hm_label: label of y-axis on heatmap (ex. 
Genes, Protein LFC, etc.)\n * cbar_hm_label: label of heatmap colorbar\n Returns:\n * plt.Figure\n \"\"\"\n from matplotlib.colors import ListedColormap\n from scipy.cluster import hierarchy\n import scipy.cluster.hierarchy as shc\n from sklearn.cluster import AgglomerativeClustering\n\n cmap = ListedColormap(['white','darkblue'])\n\n # Remove signatures with no marker genes associated\n order_series = order_series[order_series.isin(set(signatures[signatures_idx]))]\n\n # Filter X matrix\n if order_x is None:\n order_series = order_series.sort_values()\n else:\n order_series = order_series.loc[order_x]\n\n sample_markers = X.loc[signatures.index, order_series.sort_values().index]\n\n # Set horizontal lines\n hz_lines = np.unique(sample_markers.join(signatures).loc[:,signatures_idx].values, return_index=True)[1]\n\n # Create figure\n fig, ax = plt.subplots(figsize=figsize)\n\n x0 = ax.get_position().x0\n x1 = ax.get_position().x1\n y0 = ax.get_position().y0\n y1 = ax.get_position().y1\n buf = y1*0.01\n\n sns.heatmap(sample_markers, ax=ax, cmap=cmap, rasterized=True, vmax=vmax, vmin=vmin, cbar=False)\n v,c = np.unique(order_series, return_counts=True)\n\n # plot horizontal lines\n _c = np.cumsum(c)\n _ci = np.roll(_c,2)\n _ci[0] = 0\n _ci[1] = 0\n ax.hlines(hz_lines, _ci, _c, rasterized=True, color='k')\n\n # plot vertical lines\n _h = list(hz_lines)\n _h.append(ax.get_ylim()[0])\n ax.vlines(np.cumsum(c)[:-1], _h[:-2], _h[2:], rasterized=True , color='k')\n ax.vlines(np.cumsum(c)[:-1], *ax.get_ylim(), alpha=0.8, rasterized=True, linewidth=1 , color='lightgrey')\n\n # Set yticks\n ax.yaxis.tick_right()\n ax.set_yticks(np.arange(sample_markers.index.values.shape[0])+0.5)\n ax.set_yticklabels(sample_markers.index.values, fontsize=7.5, rasterized=True, rotation=0, va=\"center\")\n\n # --------------cluster annot-------------------\n for idx,meta in enumerate(metas):\n if meta.unique().shape[0]==2 and meta.dtype=='int64':\n meta = meta.astype(bool)\n cdict={True:'purple',False:'white'}\n elif meta.name =='Consensus':\n cdict=COLORMAP2\n else:\n cdict=None\n\n new_ax = [x0, y1+buf*(idx*3+1), x1*.861, 2*buf]\n clust_ax = fig.add_axes(new_ax)\n\n clust_ax.set_xticks([])\n clust_ax.set_yticks([])\n\n colors_conversion, meta_colormap = sa.pl.series_to_colors(meta.loc[sample_markers.columns],cdict=cdict)\n meta_colormap_inv = dict([[v,k] for k,v in meta_colormap.items()])\n meta_colormap_inv = {(k[0],k[1],k[2]):v for k,v in meta_colormap_inv.items()}\n\n mat,cmap = sa.pl.color_list_to_matrix_and_cmap(colors_conversion)\n\n sns.heatmap(\n mat,\n cmap=cmap,\n ax=clust_ax,\n yticklabels=False,\n xticklabels=False,\n cbar=False\n )\n\n [spine.set_visible(True) for _, spine in clust_ax.spines.items()]\n\n clust_ax.yaxis.set_label_position(\"right\")\n clust_ax.set_ylabel(meta.name, rotation=0, va='center', ha='left')\n # --------------cluster annot-------------------\n\n # --------------pval barplot-------------------\n p_ax = fig.add_axes([x1+12*buf, y0, 10*buf, y1-y0])\n p_ax.set_yticks([])\n\n log10_pval_adj = -np.log10(signatures.loc[sample_markers.index]['pval_adj'])\n\n p_ax.barh(np.arange(signatures.shape[0]), log10_pval_adj[::-1], edgecolor='black', linewidth=1, color='purple')\n plt.margins(y=0)\n p_ax.axvline(1, linewidth=1, color='red')\n\n p_ax.spines['top'].set_visible(False)\n p_ax.spines['right'].set_visible(False)\n p_ax.set_xticks([0,5,10,15,20])\n p_ax.set_xlabel(\"$-log_{10}$ (adj. 
p-val)\")\n # --------------pval barplot-------------------\n\n ax.set_title('')\n ax.set_xlabel('')\n ax.set_ylabel('')\n\n [spine.set_visible(True) for _, spine in ax.spines.items()]\n\n # Set xticks\n ax.set_xticks([])\n ax.set_xticklabels([])\n ax.set_xticks(np.cumsum(c)-c/2)\n v = [\"{}\\n(n={})\".format(x,y) for x,y in zip(*np.unique(order_series, return_counts=True))]\n ax.set_xticklabels(v, rotation=360, fontsize=12)\n ax.tick_params(axis='x', which=u'both',length=0)\n\n return fig\n\ndef plot_rnaseqc_metrics_grid(metrics: pd.DataFrame, median_exon_cv_cutoff: float, figsize: tuple = (9,9)):\n \"\"\"\n Plot RNASeQC Metrics grid.\n \"\"\"\n def _format_ax(ax):\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n for axis in ['bottom','left',]:\n ax.spines[axis].set_linewidth(1)\n\n # Plot figure\n fig,axes = plt.subplots(2, 2, figsize=figsize, sharey=True)\n cax = fig.add_axes([0.15, 0.6, 0.1, 0.01])\n\n im = axes[0,0].scatter(\n metrics[\"Duplicate Rate of Mapped\"],\n metrics['Genes Detected'],\n s=80,\n alpha=0.8,\n c=np.log(metrics['Unique Mapping, Vendor QC Passed Reads']),\n edgecolor='black',\n linewidth=0.4,\n cmap='coolwarm'\n )\n\n fig.colorbar(im, cax=cax, orientation='horizontal')\n cax.set_title(\"log Unique Mapping\", fontsize=8)\n\n axes[0,0].set_ylabel('Genes Detected',fontsize=16)\n\n _format_ax(axes[0,0])\n\n im = axes[0,1].scatter(\n metrics[\"Median Exon CV\"],\n metrics['Genes Detected'],\n s=80,\n alpha=0.8,\n c=np.log(metrics['Unique Mapping, Vendor QC Passed Reads']),\n edgecolor='black',\n linewidth=0.4,\n cmap='coolwarm'\n )\n _format_ax(axes[0,1])\n\n axes[1,0].set_xlabel('Duplicate Rate',fontsize=16)\n axes[1,1].set_xlabel('Median Exon CV',fontsize=16)\n\n im = axes[1,0].scatter(\n metrics[\"Duplicate Rate of Mapped\"],\n metrics['Genes Detected'],\n s=80,\n alpha=0.8,\n c=metrics['filter'].apply(lambda x: 'lightgrey' if x else 'red'),\n edgecolor='black',\n linewidth=0.4,\n cmap='coolwarm'\n )\n\n axes[1,0].set_ylabel('Genes Detected',fontsize=16)\n\n _format_ax(axes[1,0])\n\n im = axes[1,1].scatter(\n metrics[\"Median Exon CV\"],\n metrics['Genes Detected'],\n s=80,\n alpha=0.8,\n c=metrics['filter'].apply(lambda x: 'lightgrey' if x else 'red'),\n edgecolor='black',\n linewidth=0.4,\n cmap='coolwarm'\n )\n _format_ax(axes[1,1])\n\n axes[1,0].set_xlim(*axes[1,0].get_xlim())\n axes[1,0].set_ylim(*axes[1,0].get_ylim())\n\n axes[1,0].scatter(-1,0,c='red',label='Filter')\n axes[1,0].scatter(-1,0,c='lightgrey',label='Keep')\n axes[1,0].legend(loc='lower left')\n\n axes[1,1].axvspan(.8, axes[1,1].get_xlim()[1], zorder=0, alpha=0.1, color='lightgrey')\n axes[0,1].axvspan(.8, axes[1,1].get_xlim()[1], zorder=0, alpha=0.1, color='lightgrey')\n\n plt.tight_layout()\n"
] | [
[
"pandas.Series",
"numpy.linspace",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.max",
"sklearn.cluster.AgglomerativeClustering",
"numpy.roll",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.arange",
"scipy.cluster.hierarchy.linkage",
"numpy.log",
"matplotlib.pyplot.margins",
"numpy.append",
"numpy.log10",
"matplotlib.colors.ListedColormap",
"scipy.cluster.hierarchy.dendrogram",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.colors.rgb2hex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
TeeKay53/Dialogue-systems-for-language-learning | [
"28e226a579b8f22bebf5ec133985d7d86aef6606",
"28e226a579b8f22bebf5ec133985d7d86aef6606"
] | [
"chatbot/models/hier_model.py",
"utils/iterator_utils_test.py"
] | [
"\"\"\"\nA dialogue system meant to be used for language learning.\n\nThis is based on Google Neural Machine Tranlation model\nhttps://github.com/tensorflow/nmt\nwhich is based on Thang Luong's thesis on\nNeural Machine Translation: https://github.com/lmthang/thesis\n\nAnd on the paper Building End-To-End Dialogue Systems\nUsing Generative Hierarchical Neural Network Models:\nhttps://arxiv.org/pdf/1507.04808.pdf\n\nCreated by Tudor Paraschivescu for the Cambridge UROP project\n\"Dialogue systems for language learning\"\n\nThe hierarchical model with dynamic RNN support.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom chatbot.models.base_model import BaseModel\n\nimport utils.misc_utils as utils\nfrom chatbot.models import model_helper\n\nutils.check_tensorflow_version(version=\"1.3.0\")\n\n\nclass HierarchicalModel(BaseModel):\n \"\"\"\n Sequence-to-sequence hierarchical model.\n\n This class implements a multi-layer recurrent neural network as encoder,\n a multi-layer recurrent neural network as a context encoder\n and a multi-layer recurrent neural network decoder.\n \"\"\"\n\n def _build_encoder(self, hparams):\n \"\"\"Build an encoder\"\"\"\n encoder_num_layers = hparams.num_layers\n encoder_num_residual_layers = hparams.num_residual_layers\n\n context_num_layers = hparams.context_num_layers\n context_num_residual_layers = hparams.context_num_residual_layers\n\n iterator = self.iterator\n sources = iterator.source # shape=[batch_size, dialogue_len src_max_len]\n sources = tf.transpose(sources, perm=[1, 0, 2]) # shape=[dialogue_len, batch_size, src_max_len]\n\n sequence_lengths = tf.transpose(iterator.source_sequence_length) # shape=[dialogue_len, batch_size]\n\n with tf.variable_scope(\"encoder\") as encoder_scope:\n dtype = encoder_scope.dtype\n if self.verbose:\n utils.print_out(\" Building encoder cell: num_layers = %d, num_residual_layers=%d\" %\n (encoder_num_layers, encoder_num_residual_layers))\n # Build the encoder cell. Decided to leave the default base gpu\n encoder_cell = self._build_encoder_cell(hparams,\n encoder_num_layers,\n encoder_num_residual_layers)\n if self.verbose:\n utils.print_out(\" Building context cell: num_layers = %d, num_residual_layers=%d\" %\n (encoder_num_layers, encoder_num_residual_layers))\n context_cell = self._build_encoder_cell(hparams,\n context_num_layers,\n context_num_residual_layers)\n max_dialogue_length = tf.shape(sources)[0]\n # Initialize the state using the current batch size\n current_batch_size = tf.shape(sources)[1]\n initial_state = context_cell.zero_state(current_batch_size, dtype=dtype)\n\n # Define the body and the condition for the while loop\n def body(context_state, counter):\n source = tf.gather(sources, counter)\n\n if self.time_major:\n source = tf.transpose(source) # [max_time, batch_size]\n\n seq_len = tf.gather(sequence_lengths, counter, name='get_current_source')\n encoder_emb_inp = tf.nn.embedding_lookup(\n self.embeddings, source)\n # Create RNN. Performs fully dynamic unrolling of inputs\n encoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n cell=encoder_cell,\n inputs=encoder_emb_inp,\n sequence_length=seq_len,\n dtype=dtype,\n time_major=self.time_major,\n )\n # The encoder_state is a tuple. 
(cell state, memory state), aka (c, h).\n # Use the cell state as input.\n context_input = encoder_state[0]\n\n output, next_state = context_cell(inputs=context_input, state=context_state, scope=\"context\")\n\n return [next_state, tf.add(counter, 1, name='increment_counter')]\n\n def condition(context_state, counter):\n return tf.less(counter, max_dialogue_length, name='condition')\n\n # Initialize the counter\n counter = tf.Variable(0, name='counter', trainable=False, dtype=tf.int32)\n\n # Create the while loop, filling the encoder_states list\n final_context_state, _ = tf.while_loop(cond=condition, body=body,\n loop_vars=[initial_state, counter])\n\n return final_context_state\n\n def _build_decoder_cell(self, hparams, encoder_state):\n \"\"\"Build an RNN cell that can be used by decoder.\"\"\"\n # We only make use of encoder_outputs in attention-based models\n\n\n num_layers = hparams.num_layers\n num_residual_layers = hparams.num_residual_layers\n decoder_cell = model_helper.create_rnn_cell(\n unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=num_layers,\n num_residual_layers=num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=hparams.num_gpus,\n mode=self.mode,\n verbose=self.verbose\n )\n\n # For beam search, we need to replicate encoder infos beam_width times\n if self.mode == tf.contrib.learn.ModeKeys.INFER and hparams.beam_width > 0:\n # Tile them along the batch_size. [batch_size, etc.] to [batch_size * multiplier, etc]\n # by copying each t[i], i in [0, batch_size - 1] 'multiplier' times\n decoder_initial_state = tf.contrib.seq2seq.tile_batch(\n encoder_state, multiplier=hparams.beam_width\n )\n else:\n decoder_initial_state = encoder_state\n\n return decoder_cell, decoder_initial_state\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for iterator_utils.py\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import lookup_ops\n\nfrom utils import iterator_utils\n\n\nclass IteratorUtilsTest(tf.test.TestCase):\n def testGetIterator(self):\n vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.contrib.data.Dataset.from_tensor_slices(\n tf.constant([\"c c a\", \"c a\", \"d\", \"f e a g\"]))\n tgt_dataset = tf.contrib.data.Dataset.from_tensor_slices(\n tf.constant([\"a b\", \"b c\", \"\", \"c c\"]))\n hparams = tf.contrib.training.HParams(\n random_seed=3,\n num_buckets=5,\n source_reverse=False,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n iterator = iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n vocab_table=vocab_table,\n batch_size=batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n src_reverse=hparams.source_reverse,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n src_max_len=src_max_len)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n target_input = iterator.target_input\n target_output = iterator.target_output\n src_seq_len = iterator.source_sequence_length\n tgt_seq_len = iterator.target_sequence_length\n self.assertEqual([None, None], source.shape.as_list())\n self.assertEqual([None, None], target_input.shape.as_list())\n self.assertEqual([None, None], target_output.shape.as_list())\n self.assertEqual([None], src_seq_len.shape.as_list())\n self.assertEqual([None], tgt_seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[-1, -1, 0], # \"f\" == unknown, \"e\" == unknown, a\n [2, 0, 3]], # c a eos -- eos is padding\n source_v)\n self.assertAllEqual([3, 2], src_len_v)\n self.assertAllEqual(\n [[4, 2, 2], # sos c c\n [4, 1, 2]], # sos b c\n target_input_v)\n self.assertAllEqual(\n [[2, 2, 3], # c c eos\n [1, 2, 3]], # b c eos\n target_output_v)\n self.assertAllEqual([3, 3], tgt_len_v)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[2, 2, 0]], # c c a\n source_v)\n self.assertAllEqual([3], src_len_v)\n self.assertAllEqual(\n [[4, 0, 1]], # sos a b\n target_input_v)\n self.assertAllEqual(\n [[0, 1, 3]], # a b eos\n target_output_v)\n self.assertAllEqual([3], tgt_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n def 
testGetIteratorWithSkipCount(self):\n vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.contrib.data.Dataset.from_tensor_slices(\n tf.constant([\"c c a\", \"c a\", \"d\", \"f e a g\"]))\n tgt_dataset = tf.contrib.data.Dataset.from_tensor_slices(\n tf.constant([\"a b\", \"b c\", \"\", \"c c\"]))\n hparams = tf.contrib.training.HParams(\n random_seed=3,\n num_buckets=5,\n source_reverse=False,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n skip_count = tf.placeholder(shape=(), dtype=tf.int64)\n iterator = iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n vocab_table=vocab_table,\n batch_size=batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n src_reverse=hparams.source_reverse,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n src_max_len=src_max_len,\n skip_count=skip_count)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n target_input = iterator.target_input\n target_output = iterator.target_output\n src_seq_len = iterator.source_sequence_length\n tgt_seq_len = iterator.target_sequence_length\n self.assertEqual([None, None], source.shape.as_list())\n self.assertEqual([None, None], target_input.shape.as_list())\n self.assertEqual([None, None], target_output.shape.as_list())\n self.assertEqual([None], src_seq_len.shape.as_list())\n self.assertEqual([None], tgt_seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer, feed_dict={skip_count: 3})\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[-1, -1, 0]], # \"f\" == unknown, \"e\" == unknown, a\n source_v)\n self.assertAllEqual([3], src_len_v)\n self.assertAllEqual(\n [[4, 2, 2]], # sos c c\n target_input_v)\n self.assertAllEqual(\n [[2, 2, 3]], # c c eos\n target_output_v)\n self.assertAllEqual([3], tgt_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n # Re-init iterator with skip_count=0.\n sess.run(iterator.initializer, feed_dict={skip_count: 0})\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[-1, -1, 0], # \"f\" == unknown, \"e\" == unknown, a\n [2, 0, 3]], # c a eos -- eos is padding\n source_v)\n self.assertAllEqual([3, 2], src_len_v)\n self.assertAllEqual(\n [[4, 2, 2], # sos c c\n [4, 1, 2]], # sos b c\n target_input_v)\n self.assertAllEqual(\n [[2, 2, 3], # c c eos\n [1, 2, 3]], # b c eos\n target_output_v)\n self.assertAllEqual([3, 3], tgt_len_v)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[2, 2, 0]], # c c a\n source_v)\n self.assertAllEqual([3], src_len_v)\n self.assertAllEqual(\n [[4, 0, 1]], # sos a b\n target_input_v)\n self.assertAllEqual(\n [[0, 1, 3]], # a b eos\n target_output_v)\n self.assertAllEqual([3], tgt_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n def testGetInferIterator(self):\n vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.contrib.data.Dataset.from_tensor_slices(\n tf.constant([\"c c a\", \"c a\", \"d\", \"f e a g\"]))\n hparams = 
tf.contrib.training.HParams(\n random_seed=3,\n source_reverse=False,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n iterator = iterator_utils.get_infer_iterator(\n dataset=src_dataset,\n vocab_table=vocab_table,\n batch_size=batch_size,\n eos=hparams.eos,\n src_reverse=hparams.source_reverse,\n src_max_len=src_max_len)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n seq_len = iterator.source_sequence_length\n self.assertEqual([None, None], source.shape.as_list())\n self.assertEqual([None], seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer)\n\n (source_v, seq_len_v) = sess.run((source, seq_len))\n self.assertAllEqual(\n [[2, 2, 0], # c c a\n [2, 0, 3]], # c a eos\n source_v)\n self.assertAllEqual([3, 2], seq_len_v)\n\n (source_v, seq_len_v) = sess.run((source, seq_len))\n self.assertAllEqual(\n [[-1, 3, 3], # \"d\" == unknown, eos eos\n [-1, -1, 0]], # \"f\" == unknown, \"e\" == unknown, a\n source_v)\n self.assertAllEqual([1, 3], seq_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run((source, seq_len))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.nn.dynamic_rnn",
"tensorflow.transpose",
"tensorflow.while_loop",
"tensorflow.Variable",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.contrib.seq2seq.tile_batch",
"tensorflow.gather",
"tensorflow.add",
"tensorflow.variable_scope",
"tensorflow.nn.embedding_lookup"
],
[
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.placeholder",
"tensorflow.tables_initializer",
"tensorflow.contrib.training.HParams"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
}
] |
Drkun/ZSTCI | [
"93543ee843dac680c33e34de5d61ba048ef1f6d3",
"93543ee843dac680c33e34de5d61ba048ef1f6d3"
] | [
"models/inception.py",
"losses/DistWeightLoss.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n__all__ = ['Inception3', 'inception_v3']\n\ndef inception_v3(**kwargs):\n r\"\"\"Inception v3 model architecture from\n `\"Rethinking the Inception Architecture for Computer Vision\" <http://arxiv.org/abs/1512.00567>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n # \"\"\"\n\n return Inception3(**kwargs)\n\n\nclass Inception3(nn.Module):\n\n def __init__(self, Embed_dim=512, num_classes=100, dropout=0.5, aux_logits=False, transform_input=False):\n super(Inception3, self).__init__()\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n self.dropout = dropout\n self.Embed_dim = Embed_dim\n self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)\n self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)\n self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)\n self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)\n self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)\n self.Mixed_5b = InceptionA(192, pool_features=32)\n self.Mixed_5c = InceptionA(256, pool_features=64)\n self.Mixed_5d = InceptionA(288, pool_features=64)\n self.Mixed_6a = InceptionB(288)\n self.Mixed_6b = InceptionC(768, channels_7x7=128)\n self.Mixed_6c = InceptionC(768, channels_7x7=160)\n self.Mixed_6d = InceptionC(768, channels_7x7=160)\n self.Mixed_6e = InceptionC(768, channels_7x7=192)\n if aux_logits:\n self.AuxLogits = InceptionAux(768, num_classes)\n self.Mixed_7a = InceptionD(768)\n self.Mixed_7b = InceptionE(1280)\n self.Mixed_7c = InceptionE(2048)\n\n if self.Embed_dim > 0:\n self.Embed = Embedding(2048, self.Embed_dim)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n stddev = m.stddev if hasattr(m, 'stddev') else 0.001\n X = stats.truncnorm(-2, 2, scale=stddev)\n values = torch.Tensor(X.rvs(m.weight.data.numel()))\n m.weight.data.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n if self.transform_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n # 299 x 299 x 3\n x = self.Conv2d_1a_3x3(x)\n # 149 x 149 x 32\n x = self.Conv2d_2a_3x3(x)\n # 147 x 147 x 32\n x = self.Conv2d_2b_3x3(x)\n # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 73 x 73 x 64\n x = self.Conv2d_3b_1x1(x)\n # 73 x 73 x 80\n x = self.Conv2d_4a_3x3(x)\n # 71 x 71 x 192\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 35 x 35 x 192\n x = self.Mixed_5b(x)\n # 35 x 35 x 256\n x = self.Mixed_5c(x)\n # 35 x 35 x 288\n x = self.Mixed_5d(x)\n # 35 x 35 x 288\n x = self.Mixed_6a(x)\n # 17 x 17 x 768\n x = self.Mixed_6b(x)\n # 17 x 17 x 768\n x = self.Mixed_6c(x)\n # 17 x 17 x 768\n x = self.Mixed_6d(x)\n # 17 x 17 x 768\n x = self.Mixed_6e(x)\n # 17 x 17 x 768\n if self.training and self.aux_logits:\n aux = self.AuxLogits(x)\n # 17 x 17 x 768\n x = self.Mixed_7a(x)\n # 8 x 8 x 1280\n x = self.Mixed_7b(x)\n # 8 x 8 x 2048\n x = self.Mixed_7c(x)\n # 8 x 8 x 2048\n x = F.adaptive_avg_pool2d(x, output_size=1)\n # 1 x 1 x 2048\n x = F.dropout(x, training=self.training)\n # 1 x 1 x 2048\n x = x.view(x.size(0), -1)\n # 2048\n if self.Embed_dim > 0:\n x = self.Embed(x)\n return x\n\n\nclass Embedding(nn.Module):\n def __init__(self, in_dim, out_dim, dropout=None, normalized=True):\n 
super(Embedding, self).__init__()\n self.bn = nn.BatchNorm2d(in_dim, eps=0.001)\n self.linear = nn.Linear(in_features=in_dim, out_features=out_dim)\n self.dropout = dropout\n self.normalized = normalized\n\n def forward(self, x):\n x = self.bn(x)\n x = F.relu(x, inplace=True)\n if self.dropout is not None:\n x = nn.Dropout(p=self.dropout)(x, inplace=True)\n x = self.linear(x)\n if self.normalized:\n norm = x.norm(dim=1, p=2, keepdim=True)\n x = x.div(norm.expand_as(x))\n return x\n\n\nclass InceptionA(nn.Module):\n\n def __init__(self, in_channels, pool_features):\n super(InceptionA, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)\n\n self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)\n self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)\n\n self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch5x5 = self.branch5x5_1(x)\n branch5x5 = self.branch5x5_2(branch5x5)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionB(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionB, self).__init__()\n self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)\n\n def forward(self, x):\n branch3x3 = self.branch3x3(x)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)\n\n outputs = [branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionC(nn.Module):\n\n def __init__(self, in_channels, channels_7x7):\n super(InceptionC, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)\n\n c7 = channels_7x7\n self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)\n self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))\n\n self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)\n self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))\n\n self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch7x7 = self.branch7x7_1(x)\n branch7x7 = self.branch7x7_2(branch7x7)\n branch7x7 = self.branch7x7_3(branch7x7)\n\n branch7x7dbl = self.branch7x7dbl_1(x)\n branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)\n branch7x7dbl = 
self.branch7x7dbl_4(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionD(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionD, self).__init__()\n self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)\n self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)\n\n self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)\n self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)\n\n def forward(self, x):\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = self.branch3x3_2(branch3x3)\n\n branch7x7x3 = self.branch7x7x3_1(x)\n branch7x7x3 = self.branch7x7x3_2(branch7x7x3)\n branch7x7x3 = self.branch7x7x3_3(branch7x7x3)\n branch7x7x3 = self.branch7x7x3_4(branch7x7x3)\n\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)\n outputs = [branch3x3, branch7x7x3, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionE(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionE, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)\n\n self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)\n self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)\n self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [\n self.branch3x3_2a(branch3x3),\n self.branch3x3_2b(branch3x3),\n ]\n branch3x3 = torch.cat(branch3x3, 1)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = [\n self.branch3x3dbl_3a(branch3x3dbl),\n self.branch3x3dbl_3b(branch3x3dbl),\n ]\n branch3x3dbl = torch.cat(branch3x3dbl, 1)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionAux(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super(InceptionAux, self).__init__()\n self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)\n self.conv1 = BasicConv2d(128, 768, kernel_size=5)\n self.conv1.stddev = 0.01\n self.fc = nn.Linear(768, num_classes)\n self.fc.stddev = 0.001\n\n def forward(self, x):\n # 17 x 17 x 768\n x = F.avg_pool2d(x, kernel_size=5, stride=3)\n # 5 x 5 x 768\n x = self.conv0(x)\n # 5 x 5 x 128\n x = self.conv1(x)\n # 1 x 1 x 768\n x = x.view(x.size(0), -1)\n # 768\n x = self.fc(x)\n # 1000\n return x\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def 
forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\n",
"from __future__ import absolute_import\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\n# import numpy as np\n\n\ndef similarity(inputs_):\n # Compute similarity mat of deep feature\n # n = inputs_.size(0)\n sim = torch.matmul(inputs_, inputs_.t())\n return sim\n\n\nclass DistWeightLoss(nn.Module):\n def __init__(self, margin=0.02):\n super(DistWeightLoss, self).__init__()\n self.margin = margin\n\n def forward(self, inputs, targets):\n n = inputs.size(0)\n # Compute similarity matrix\n sim_mat = similarity(inputs)\n # print(sim_mat)\n targets = targets.cuda()\n # split the positive and negative pairs\n eyes_ = Variable(torch.eye(n, n)).cuda()\n # eyes_ = Variable(torch.eye(n, n))\n pos_mask = targets.expand(n, n).eq(targets.expand(n, n).t())\n neg_mask = eyes_.eq(eyes_) - pos_mask\n pos_mask = pos_mask - eyes_.eq(1)\n\n pos_sim = torch.masked_select(sim_mat, pos_mask)\n neg_sim = torch.masked_select(sim_mat, neg_mask)\n\n num_instances = len(pos_sim)//n + 1\n num_neg_instances = n - num_instances\n\n pos_sim = pos_sim.resize(len(pos_sim)//(num_instances-1), num_instances-1)\n neg_sim = neg_sim.resize(\n len(neg_sim) // num_neg_instances, num_neg_instances)\n\n # clear way to compute the loss first\n loss = list()\n err = 0\n\n for i, pos_pair in enumerate(pos_sim):\n # print(i)\n pos_pair = torch.sort(pos_pair)[0]\n print(pos_pair)\n sampled_index = torch.multinomial(torch.exp(5*pos_pair), 1)\n # print(torch.exp(5*pos_pair))\n # print('sampled pos is : ', sampled_index)\n neg_pair = torch.sort(neg_sim[i])[0]\n pos_min = pos_pair[sampled_index]\n neg_pair = torch.masked_select(neg_pair, neg_pair > pos_min - 0.01)\n # print('neg_pair is :', neg_pair)\n if len(neg_pair) > 0:\n loss.append(torch.mean(neg_pair) - pos_min + 0.01)\n err += 1\n\n if len(loss) == 0:\n loss = 0.0 * (torch.mean(pos_min))\n else:\n loss = torch.sum(torch.cat(loss))/n\n\n prec = 1 - float(err)/n\n neg_d = torch.mean(neg_sim).data[0]\n pos_d = torch.mean(pos_sim).data[0]\n\n return loss, prec, pos_d, neg_d\n\n\ndef main():\n data_size = 32\n input_dim = 3\n output_dim = 2\n num_class = 4\n # margin = 0.5\n x = Variable(torch.rand(data_size, input_dim), requires_grad=False)\n # print(x)\n w = Variable(torch.rand(input_dim, output_dim), requires_grad=True)\n inputs = x.mm(w)\n y_ = 8*list(range(num_class))\n targets = Variable(torch.IntTensor(y_))\n\n print(DistWeightLoss()(inputs, targets))\n\n\nif __name__ == '__main__':\n main()\n print('Congratulations to you!')\n\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"scipy.stats.truncnorm",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d",
"torch.nn.functional.max_pool2d"
],
[
"torch.mean",
"torch.cat",
"torch.eye",
"torch.exp",
"torch.rand",
"torch.sort",
"torch.IntTensor",
"torch.masked_select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ziatdinovmax/sidpy | [
"299147bfc22741b5170aa00e92b34159dfc910c5"
] | [
"sidpy/hdf/dtype_utils.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nUtilities for transforming and validating data types\n\nGiven that many of the data transformations involve copying the data, they should\nideally happen in a lazy manner to avoid memory issues.\n\nCreated on Tue Nov 3 21:14:25 2015\n\n@author: Suhas Somnath, Chris Smith\n\"\"\"\n\nfrom __future__ import division, absolute_import, unicode_literals, print_function\nimport sys\nfrom warnings import warn\nimport h5py\nimport numpy as np\nimport dask.array as da\n\n__all__ = ['flatten_complex_to_real', 'get_compound_sub_dtypes', 'flatten_compound_to_real', 'check_dtype',\n 'stack_real_to_complex', 'validate_dtype', 'is_complex_dtype',\n 'stack_real_to_compound', 'stack_real_to_target_dtype', 'flatten_to_real']\n\nfrom sidpy.hdf.hdf_utils import lazy_load_array\n\nif sys.version_info.major == 3:\n unicode = str\n\n\ndef flatten_complex_to_real(dataset, lazy=False):\n \"\"\"\n Stacks the real values followed by the imaginary values in the last dimension of the given N dimensional matrix.\n Thus a complex matrix of shape (2, 3, 5) will turn into a matrix of shape (2, 3, 10)\n\n Parameters\n ----------\n dataset : array-like or :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`\n Dataset of complex data type\n lazy : bool, optional. Default = False\n If set to True, will use lazy Dask arrays instead of in-memory numpy arrays\n\n Returns\n -------\n retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`\n real valued dataset\n \"\"\"\n if not isinstance(dataset, (h5py.Dataset, np.ndarray, da.core.Array)):\n raise TypeError('dataset should either be a h5py.Dataset or numpy / dask array')\n if not is_complex_dtype(dataset.dtype):\n raise TypeError(\"Expected a complex valued dataset\")\n\n if isinstance(dataset, da.core.Array):\n lazy = True\n\n xp = np\n if lazy:\n dataset = lazy_load_array(dataset)\n xp = da\n\n axis = xp.array(dataset).ndim - 1\n if axis == -1:\n return xp.hstack([xp.real(dataset), xp.imag(dataset)])\n else: # along the last axis\n return xp.concatenate([xp.real(dataset), xp.imag(dataset)], axis=axis)\n\n\ndef flatten_compound_to_real(dataset, lazy=False):\n \"\"\"\n Flattens the individual components in a structured array or compound valued hdf5 dataset along the last axis to form\n a real valued array. Thus a compound h5py.Dataset or structured numpy matrix of shape (2, 3, 5) having 3 components\n will turn into a real valued matrix of shape (2, 3, 15), assuming that all the sub-dtypes of the matrix are real\n valued. ie - this function does not handle structured dtypes having complex values\n\n\n Parameters\n ----------\n dataset : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`\n Numpy array that is a structured array or a :class:`h5py.Dataset` of compound dtype\n lazy : bool, optional. Default = False\n If set to True, will use lazy Dask arrays instead of in-memory numpy arrays\n\n Returns\n -------\n retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`\n real valued dataset\n \"\"\"\n if isinstance(dataset, h5py.Dataset):\n if len(dataset.dtype) == 0:\n raise TypeError(\"Expected compound h5py dataset\")\n\n if lazy:\n xp = da\n dataset = lazy_load_array(dataset)\n else:\n xp = np\n warn('HDF5 datasets will be loaded as Dask arrays in the future. 
ie - kwarg lazy will default to True in future releases of sidpy')\n\n return xp.concatenate([xp.array(dataset[name]) for name in dataset.dtype.names], axis=len(dataset.shape) - 1)\n\n elif isinstance(dataset, (np.ndarray, da.core.Array)):\n if isinstance(dataset, da.core.Array):\n lazy = True\n\n xp = np\n if lazy:\n dataset = lazy_load_array(dataset)\n xp = da\n\n if len(dataset.dtype) == 0:\n raise TypeError(\"Expected structured array\")\n if dataset.ndim > 0:\n return xp.concatenate([dataset[name] for name in dataset.dtype.names], axis=dataset.ndim - 1)\n else:\n return xp.hstack([dataset[name] for name in dataset.dtype.names])\n elif isinstance(dataset, np.void):\n return np.hstack([dataset[name] for name in dataset.dtype.names])\n else:\n raise TypeError('Datatype {} not supported'.format(type(dataset)))\n\n\ndef flatten_to_real(ds_main, lazy=False):\n \"\"\"\n Flattens complex / compound / real valued arrays to real valued arrays\n\n Parameters\n ----------\n ds_main : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`\n Compound, complex or real valued numpy array or HDF5 dataset\n lazy : bool, optional. Default = False\n If set to True, will use lazy Dask arrays instead of in-memory numpy arrays\n\n Returns\n ----------\n ds_main : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`\n Array raveled to a float data type\n \"\"\"\n if not isinstance(ds_main, (h5py.Dataset, np.ndarray, da.core.Array)):\n ds_main = np.array(ds_main)\n if is_complex_dtype(ds_main.dtype):\n return flatten_complex_to_real(ds_main, lazy=lazy)\n elif len(ds_main.dtype) > 0:\n return flatten_compound_to_real(ds_main, lazy=lazy)\n else:\n return ds_main\n\n\ndef get_compound_sub_dtypes(struct_dtype):\n \"\"\"\n Returns a dictionary of the dtypes of each of the fields in the given structured array dtype\n\n Parameters\n ----------\n struct_dtype : :class:`numpy.dtype`\n dtype of a structured array\n\n Returns\n -------\n dtypes : dict\n Dictionary whose keys are the field names and values are the corresponding dtypes\n \"\"\"\n if not isinstance(struct_dtype, np.dtype):\n raise TypeError('Provided object must be a structured array dtype')\n dtypes = dict()\n for field_name in struct_dtype.fields:\n dtypes[field_name] = struct_dtype.fields[field_name][0]\n return dtypes\n\n\ndef check_dtype(h5_dset):\n \"\"\"\n Checks the datatype of the input HDF5 dataset and provides the appropriate\n function calls to convert it to a float\n\n Parameters\n ----------\n h5_dset : :class:`h5py.Dataset`\n Dataset of interest\n\n Returns\n -------\n func : callable\n function that will convert the dataset to a float\n is_complex : bool\n is the input dataset complex?\n is_compound : bool\n is the input dataset compound?\n n_features : Unsigned int\n Unsigned integer - the length of the 2nd dimension of the data after `func` is called on it\n type_mult : Unsigned int\n multiplier that converts from the typesize of the input :class:`~numpy.dtype` to the\n typesize of the data after func is run on it\n \"\"\"\n if not isinstance(h5_dset, h5py.Dataset):\n raise TypeError('h5_dset should be a h5py.Dataset object')\n is_complex = False\n is_compound = False\n in_dtype = h5_dset.dtype\n # TODO: avoid assuming 2d shape - why does one even need n_samples!? 
We only care about the last dimension!\n n_features = h5_dset.shape[-1]\n if is_complex_dtype(h5_dset.dtype):\n is_complex = True\n new_dtype = np.real(h5_dset[0, 0]).dtype\n type_mult = new_dtype.itemsize * 2\n func = flatten_complex_to_real\n n_features *= 2\n elif len(h5_dset.dtype) > 0:\n \"\"\"\n Some form of structured numpy is in use\n We only support real scalars for the component types at the current time\n \"\"\"\n is_compound = True\n # TODO: Avoid hard-coding to float32\n new_dtype = np.float32\n type_mult = len(in_dtype) * new_dtype(0).itemsize\n func = flatten_compound_to_real\n n_features *= len(in_dtype)\n else:\n if h5_dset.dtype not in [np.float32, np.float64]:\n new_dtype = np.float32\n else:\n new_dtype = h5_dset.dtype.type\n\n type_mult = new_dtype(0).itemsize\n\n func = new_dtype\n\n return func, is_complex, is_compound, n_features, type_mult\n\n\ndef stack_real_to_complex(ds_real, lazy=False):\n \"\"\"\n Puts the real and imaginary sections of the provided matrix (in the last axis) together to make complex matrix\n\n Parameters\n ------------\n ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`\n n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, 2 x features],\n where the first half of the features are the real component and the\n second half contains the imaginary components\n lazy : bool, optional. Default = False\n If set to True, will use lazy Dask arrays instead of in-memory numpy arrays\n\n Returns\n ----------\n ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`\n 2D complex array arranged as [sample, features]\n \"\"\"\n if not isinstance(ds_real, (np.ndarray, da.core.Array, h5py.Dataset)):\n if not isinstance(ds_real, (tuple, list)):\n raise TypeError(\"Expected at least an iterable like a list or tuple\")\n ds_real = np.array(ds_real)\n if len(ds_real.dtype) > 0:\n raise TypeError(\"Array cannot have a compound dtype\")\n if is_complex_dtype(ds_real.dtype):\n raise TypeError(\"Array cannot have complex dtype\")\n\n if ds_real.shape[-1] / 2 != ds_real.shape[-1] // 2:\n raise ValueError(\"Last dimension must be even sized\")\n half_point = ds_real.shape[-1] // 2\n\n if isinstance(ds_real, da.core.Array):\n lazy = True\n\n if lazy and not isinstance(ds_real, da.core.Array):\n ds_real = lazy_load_array(ds_real)\n\n return ds_real[..., :half_point] + 1j * ds_real[..., half_point:]\n\n\ndef stack_real_to_compound(ds_real, compound_type, lazy=False):\n \"\"\"\n Converts a real-valued dataset to a compound dataset (along the last axis) of the provided compound d-type\n\n Parameters\n ------------\n ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`\n n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, features]\n compound_type : :class:`numpy.dtype`\n Target complex data-type\n lazy : bool, optional. 
Default = False\n If set to True, will use lazy Dask arrays instead of in-memory numpy arrays\n\n Returns\n ----------\n ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`\n N-dimensional complex-valued array arranged as [sample, features]\n \"\"\"\n if lazy or isinstance(ds_real, da.core.Array):\n raise NotImplementedError('Lazy operation not available due to absence of Dask support')\n if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):\n if not isinstance(ds_real, (list, tuple)):\n raise TypeError(\"Expected at least an iterable like a list or tuple\")\n ds_real = np.array(ds_real)\n if len(ds_real.dtype) > 0:\n raise TypeError(\"Array cannot have a compound dtype\")\n elif is_complex_dtype(ds_real.dtype):\n raise TypeError(\"Array cannot have complex dtype\")\n if not isinstance(compound_type, np.dtype):\n raise TypeError('Provided object must be a structured array dtype')\n\n new_spec_length = ds_real.shape[-1] / len(compound_type)\n if new_spec_length % 1:\n raise ValueError('Provided compound type was not compatible by number of elements')\n\n new_spec_length = int(new_spec_length)\n new_shape = list(ds_real.shape) # Make mutable\n new_shape[-1] = new_spec_length\n\n xp = np\n kwargs = {}\n \"\"\"\n if isinstance(ds_real, h5py.Dataset) and not lazy:\n warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')\n if isinstance(ds_real, da.core.Array):\n lazy = True \n if lazy:\n xp = da\n ds_real = lazy_load_array(ds_real)\n kwargs = {'chunks': 'auto'}\n \"\"\"\n\n ds_compound = xp.empty(new_shape, dtype=compound_type, **kwargs)\n for name_ind, name in enumerate(compound_type.names):\n i_start = name_ind * new_spec_length\n i_end = (name_ind + 1) * new_spec_length\n ds_compound[name] = ds_real[..., i_start:i_end]\n\n return ds_compound.squeeze()\n\n\ndef stack_real_to_target_dtype(ds_real, new_dtype, lazy=False):\n \"\"\"\n Transforms real data into the target dtype\n\n Parameters\n ----------\n ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array` or :class:`h5py.Dataset`\n n dimensional real-valued numpy array or HDF5 dataset\n new_dtype : :class:`numpy.dtype`\n Target data-type\n\n Returns\n ----------\n ret_val : :class:`numpy.ndarray` or :class:`dask.array.core.Array`\n N-dimensional array of the target data-type\n \"\"\"\n if is_complex_dtype(new_dtype):\n return stack_real_to_complex(ds_real, lazy=lazy)\n try:\n if len(new_dtype) > 0:\n return stack_real_to_compound(ds_real, new_dtype, lazy=lazy)\n except TypeError:\n return new_dtype(ds_real)\n\n # catching all other cases, such as np.dtype('<f4')\n return new_dtype.type(ds_real)\n\n\ndef validate_dtype(dtype):\n \"\"\"\n Checks the provided object to ensure that it is a valid dtype that can be written to an HDF5 file.\n Raises a type error if invalid. 
Returns True if the object passed the tests\n\n Parameters\n ----------\n dtype : object\n Object that is hopefully a :class:`h5py.Datatype`, or :class:`numpy.dtype` object\n\n Returns\n -------\n status : bool\n True if the object was a valid data-type\n \"\"\"\n if isinstance(dtype, (h5py.Datatype, np.dtype)):\n pass\n elif isinstance(np.dtype(dtype), np.dtype):\n # This should catch all those instances when dtype is something familiar like - np.float32\n pass\n else:\n raise TypeError('dtype should either be a numpy or h5py dtype')\n return True\n\n\ndef is_complex_dtype(dtype):\n \"\"\"\n Checks if the provided dtype is a complex dtype\n\n Parameters\n ----------\n dtype : object\n Object that is a class:`h5py.Datatype`, or :class:`numpy.dtype` object\n\n Returns\n -------\n is_complex : bool\n True if the dtype was a complex dtype. Else returns False\n \"\"\"\n validate_dtype(dtype)\n if dtype in [np.complex, np.complex64, np.complex128]:\n return True\n return False\n"
] | [
[
"numpy.hstack",
"numpy.real",
"numpy.array",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brandontrabucco/jetpack | [
"aa60488788b2e6fe1d09727943be043158a7af09",
"aa60488788b2e6fe1d09727943be043158a7af09"
] | [
"mineral/algorithms/tuners/entropy_tuner.py",
"mineral/relabelers/reachability_relabeler.py"
] | [
"\"\"\"Author: Brandon Trabucco, Copyright 2019\"\"\"\n\n\nimport tensorflow as tf\nfrom mineral.algorithms.tuners.tuner import Tuner\n\n\nclass EntropyTuner(Tuner):\n\n def __init__(\n self,\n policy,\n **kwargs\n ):\n Tuner.__init__(self, **kwargs)\n self.policy = policy\n\n def update_algorithm(\n self,\n observations,\n actions,\n rewards,\n terminals\n ):\n def loss_function():\n policy_actions = self.policy.get_expected_value(\n observations[:, :(-1), ...])\n policy_entropy = -terminals[:, :(-1)] * self.policy.get_log_probs(\n policy_actions,\n observations[:, :(-1), ...])\n entropy_error = policy_entropy - self.target\n entropy_loss = self.tuning_variable * tf.stop_gradient(entropy_error)\n self.record(\n \"entropy_tuning_variable\",\n self.tuning_variable)\n self.record(\n \"entropy_error_mean\",\n tf.reduce_mean(entropy_error))\n self.record(\n \"entropy_error_max\",\n tf.reduce_max(entropy_error))\n self.record(\n \"entropy_error_min\",\n tf.reduce_min(entropy_error))\n self.record(\n \"entropy\",\n tf.reduce_mean(policy_entropy))\n self.record(\n \"entropy_loss\",\n tf.reduce_mean(entropy_loss))\n return tf.reduce_mean(entropy_loss)\n self.optimizer.minimize(\n loss_function, [self.tuning_variable])\n",
"\"\"\"Author: Brandon Trabucco, Copyright 2019\"\"\"\r\n\r\n\r\nimport tensorflow as tf\r\nfrom mineral.relabelers.relabeler import Relabeler\r\n\r\n\r\nclass ReachabilityRelabeler(Relabeler):\r\n \r\n def __init__(\r\n self,\r\n policy,\r\n *args,\r\n observation_selector=(lambda x: x[\"proprio_observation\"]),\r\n reward_scale=1.0,\r\n **kwargs\r\n ):\r\n Relabeler.__init__(self, *args, **kwargs)\r\n self.policy = policy\r\n self.observation_selector = observation_selector\r\n self.reward_scale = reward_scale\r\n\r\n def relabel(\r\n self,\r\n observations,\r\n actions,\r\n rewards,\r\n terminals\r\n ):\r\n induced_observations = [\r\n self.observation_selector(x)\r\n for x in observations[\"induced_observations\"]]\r\n\r\n cumulative_distances = 0.0\r\n for lower_observation in induced_observations:\r\n error = lower_observation[:, 1:, ...] - actions\r\n cumulative_distances += self.reward_scale * tf.linalg.norm(\r\n tf.reshape(error, [tf.shape(error)[1], tf.shape(error)[1], -1]),\r\n ord=self.order, axis=(-1))\r\n\r\n relabel_condition = self.relabel_probability >= tf.random.uniform(\r\n tf.shape(rewards),\r\n maxval=1.0,\r\n dtype=tf.float32)\r\n\r\n rewards = tf.where(\r\n relabel_condition, -cumulative_distances, rewards)\r\n return (\r\n observations,\r\n actions,\r\n rewards,\r\n terminals)\r\n"
] | [
[
"tensorflow.reduce_max",
"tensorflow.stop_gradient",
"tensorflow.reduce_min",
"tensorflow.reduce_mean"
],
[
"tensorflow.where",
"tensorflow.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
hassanmohsin/chemprop | [
"b6d84f064f682d6ff7fa42e1f231d7467a87105e"
] | [
"biasdb/split.py"
] | [
"import numpy as np\nfrom copy import deepcopy\n\n\n\ndef stratify(data, classes, ratios, one_hot=False):\n \"\"\"Stratifying procedure.\n\n data is a list of lists: a list of labels, for each sample.\n Each sample's labels should be ints, if they are one-hot encoded, use one_hot=True\n \n classes is the list of classes each label can take\n\n ratios is a list, summing to 1, of how the dataset should be split\n\n \"\"\"\n # one-hot decoding\n if one_hot:\n temp = [[] for _ in range(len(data))]\n indexes, values = np.where(np.array(data).astype(int) == 1)\n for k, v in zip(indexes, values):\n temp[k].append(v)\n data = temp\n\n # Organize data per label: for each label l, per_label_data[l] contains the list of samples\n # in data which have this label\n per_label_data = {c: set() for c in classes}\n for i, d in enumerate(data):\n for l in d:\n per_label_data[l].add(i)\n\n # number of samples\n size = len(data)\n\n # In order not to compute lengths each time, they are tracked here.\n subset_sizes = [r * size for r in ratios]\n target_subset_sizes = deepcopy(subset_sizes)\n per_label_subset_sizes = {\n c: [r * len(per_label_data[c]) for r in ratios]\n for c in classes\n }\n\n # For each subset we want, the set of sample-ids which should end up in it\n stratified_data_ids = [set() for _ in range(len(ratios))]\n\n # For each sample in the data set\n while size > 0:\n # Compute |Di|\n lengths = {\n l: len(label_data)\n for l, label_data in per_label_data.items()\n }\n try:\n # Find label of smallest |Di|\n label = min(\n {k: v for k, v in lengths.items() if v > 0}, key=lengths.get\n )\n except ValueError:\n # If the dictionary in `min` is empty we get a Value Error. \n # This can happen if there are unlabeled samples.\n # In this case, `size` would be > 0 but only samples without label would remain.\n # \"No label\" could be a class in itself: it's up to you to format your data accordingly.\n break\n current_length = lengths[label]\n\n # For each sample with label `label`\n while per_label_data[label]:\n # Select such a sample\n current_id = per_label_data[label].pop()\n\n subset_sizes_for_label = per_label_subset_sizes[label]\n # Find argmax clj i.e. 
subset in greatest need of the current label\n largest_subsets = np.argwhere(\n subset_sizes_for_label == np.amax(subset_sizes_for_label)\n ).flatten()\n\n if len(largest_subsets) == 1:\n subset = largest_subsets[0]\n # If there is more than one such subset, find the one in greatest need\n # of any label\n else:\n largest_subsets = np.argwhere(\n subset_sizes == np.amax(subset_sizes)\n ).flatten()\n if len(largest_subsets) == 1:\n subset = largest_subsets[0]\n else:\n # If there is more than one such subset, choose at random\n subset = np.random.choice(largest_subsets)\n\n # Store the sample's id in the selected subset\n stratified_data_ids[subset].add(current_id)\n\n # There is one fewer sample to distribute\n size -= 1\n # The selected subset needs one fewer sample\n subset_sizes[subset] -= 1\n\n # In the selected subset, there is one more example for each label\n # the current sample has\n for l in data[current_id]:\n per_label_subset_sizes[l][subset] -= 1\n \n # Remove the sample from the dataset, meaning from all per_label dataset created\n for l, label_data in per_label_data.items():\n if current_id in label_data:\n label_data.remove(current_id)\n\n # Create the stratified dataset as a list of subsets, each containing the orginal labels\n stratified_data_ids = [sorted(strat) for strat in stratified_data_ids]\n stratified_data = [\n [data[i] for i in strat] for strat in stratified_data_ids\n ]\n\n # Return both the stratified indexes, to be used to sample the `features` associated with your labels\n # And the stratified labels dataset\n return stratified_data_ids, stratified_data\n"
] | [
[
"numpy.amax",
"numpy.array",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |