repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
Sunshine352/adversarial-robustness-toolbox | [
"070bf751aee40eb1b723fa5e24cde55d17978f62"
] | [
"art/defences/spatial_smoothing_unittest.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\n\nfrom art.defences.spatial_smoothing import SpatialSmoothing\n\nlogger = logging.getLogger('testLogger')\n\n\nclass TestLocalSpatialSmoothing(unittest.TestCase):\n def test_ones(self):\n m, n = 10, 2\n x = np.ones((1, m, n, 3))\n\n # Start to test\n for window_size in range(1, 20):\n preprocess = SpatialSmoothing()\n smoothed_x = preprocess(x, window_size)\n self.assertTrue((smoothed_x == 1).all())\n\n def test_fix(self):\n x = np.array([[[[1], [2], [3]], [[7], [8], [9]], [[4], [5], [6]]]])\n\n # Start to test\n preprocess = SpatialSmoothing()\n smooth_x = preprocess(x, window_size=3)\n self.assertTrue((smooth_x == np.array(\n [[[[2], [3], [3]], [[4], [5], [6]], [[5], [6], [6]]]])).all())\n\n smooth_x = preprocess(x, window_size=1)\n self.assertTrue((smooth_x == x).all())\n\n smooth_x = preprocess(x, window_size=2)\n self.assertTrue((smooth_x == np.array(\n [[[[1], [2], [3]], [[7], [7], [8]], [[7], [7], [8]]]])).all())\n\n def test_channels(self):\n x = np.arange(9).reshape(1, 1, 3, 3)\n preprocess = SpatialSmoothing(channel_index=1)\n smooth_x = preprocess(x)\n\n new_x = np.arange(9).reshape(1, 3, 3, 1)\n preprocess = SpatialSmoothing()\n new_smooth_x = preprocess(new_x)\n\n self.assertTrue((smooth_x[0, 0] == new_smooth_x[0, :, :, 0]).all())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brokencuph/diff_pd | [
"e491668995a163b8ff7542d99f0b4e0c0f4ed2df",
"e491668995a163b8ff7542d99f0b4e0c0f4ed2df",
"2c30ecfa39762c5fc78dea9c7a226000e9fc5c15",
"2c30ecfa39762c5fc78dea9c7a226000e9fc5c15"
] | [
"python/example/print_quadruped_3d.py",
"python/example/print_bunny_3d.py",
"python/example/rolling_sphere_3d.py",
"python/example/pd_forward.py"
] | [
"import sys\nsys.path.append('../')\n\nfrom pathlib import Path\nimport pickle\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\n\nfrom py_diff_pd.common.common import print_info, print_error\n\ndef format_axes(fig):\n for i, ax in enumerate(fig.axes):\n ax.text(0.5, 0.5, \"ax%d\" % (i+1), va=\"center\", ha=\"center\")\n ax.tick_params(labelbottom=False, labelleft=False)\n\nif __name__ == '__main__':\n folder = Path('quadruped_3d')\n try:\n data = pickle.load(open(folder / 'data_0008_threads.bin', 'rb'))\n except:\n print_error('Log file not found.')\n loss_l, loss_h = data['loss_range']\n # For this quadruped, loss_l is not 0 but the performance of PD.\n loss_l = data['pd_eigen'][-1]['loss']\n print_info('Loss range: {:3f}, {:3f}'.format(loss_l, loss_h))\n def normalize_loss(unnormalized_loss):\n return (unnormalized_loss - loss_l) / (loss_h - loss_l)\n\n for thread_ct in [8,]:\n data_file = folder / 'data_{:04d}_threads.bin'.format(thread_ct)\n if data_file.exists():\n print_info('Loading {}'.format(data_file))\n data = pickle.load(open(data_file, 'rb'))\n for method in ['newton_pcg', 'newton_cholesky', 'pd_eigen']:\n total_time = 0\n avg_forward = 0\n average_backward = 0\n for d in data[method]:\n d['loss'] = normalize_loss(d['loss'])\n print('loss: {:8.3f}, |grad|: {:8.3f}, forward time: {:6.3f}s, backward time: {:6.3f}s'.format(\n d['loss'], np.linalg.norm(d['grad']), d['forward_time'], d['backward_time']))\n total_time += d['forward_time'] + d['backward_time']\n average_backward += d['backward_time']\n avg_forward += d['forward_time']\n avg_forward /= len(data[method])\n average_backward /= len(data[method])\n print_info('Optimizing with {} finished in {:6.3f}s in {:d} iterations. Average Backward time: {:6.3f}s, Average Forward Time = {:6.3f}s'.format(\n method, total_time, len(data[method]), average_backward, avg_forward))\n\n plt.rc('pdf', fonttype=42)\n plt.rc('font', size=30) # Controls default text sizes.\n plt.rc('axes', titlesize=36) # Fontsize of the axes title.\n plt.rc('axes', labelsize=36) # Fontsize of the x and y labels.\n plt.rc('xtick', labelsize=36) # Fontsize of the tick labels.\n plt.rc('ytick', labelsize=36) # Fontsize of the tick labels.\n plt.rc('legend', fontsize=36) # Legend fontsize.\n plt.rc('figure', titlesize=36) # Fontsize of the figure title.\n\n acts = {}\n losses = {}\n for method in ['newton_pcg', 'newton_cholesky', 'pd_eigen']:\n acts[method] = [np.linalg.norm(d['x']) for d in data[method]]\n losses[method] = [d['loss'] for d in data[method]]\n\n fig = plt.figure(figsize=(20, 10))\n\n ax_act = fig.add_subplot(121)\n\n ax_loss= fig.add_subplot(122)\n\n titles = ['muscle actuation', 'loss']\n for title, ax, y in zip(titles, (ax_act, ax_loss), (acts, losses)):\n\n if 'muscle' in title:\n ax.set_ylabel(\"|actuation|\")\n ax.grid(True, which='both')\n else:\n ax.set_ylabel(\"loss\")\n ax.grid(True)\n ax.set_xlabel('function evaluations')\n for method, method_ref_name, color in zip(['newton_pcg', 'newton_cholesky', 'pd_eigen'],\n ['PCG', 'Cholesky', 'Ours'], ['tab:blue', 'tab:red', 'tab:green']):\n ax.plot(y[method], color=color, label=method_ref_name, linewidth=4)\n ax.set_title(title, pad=25)\n handles, labels = ax.get_legend_handles_labels()\n\n plt.subplots_adjust(bottom = 0.25, wspace=0.3)\n # Share legends.\n fig.legend(handles, labels, loc='lower center', ncol=3)#, bbox_to_anchor=(0.5, 0.17))\n\n fig.savefig(folder / 'quadruped.pdf')\n plt.show()",
"import sys\nsys.path.append('../')\n\nfrom pathlib import Path\nimport pickle\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\n\nfrom py_diff_pd.common.common import print_info\n\ndef format_axes(fig):\n for i, ax in enumerate(fig.axes):\n ax.text(0.5, 0.5, \"ax%d\" % (i+1), va=\"center\", ha=\"center\")\n ax.tick_params(labelbottom=False, labelleft=False)\n\nif __name__ == '__main__':\n folder = Path('bunny_3d')\n try:\n data = pickle.load(open(folder / 'data_0008_threads.bin', 'rb'))\n except:\n print_error('Log file not found.')\n loss_l, loss_h = data['loss_range']\n print_info('Loss range: {:3f}, {:3f}'.format(loss_l, loss_h))\n def normalize_loss(unnormalized_loss):\n return (unnormalized_loss - loss_l) / (loss_h - loss_l)\n\n for thread_ct in [8,]:\n data_file = Path('bunny_3d') / 'data_{:04d}_threads.bin'.format(thread_ct)\n if data_file.exists():\n print_info('Loading {}'.format(data_file))\n data = pickle.load(open(data_file, 'rb'))\n for method in ['newton_pcg', 'newton_cholesky', 'pd_eigen']:\n total_time = 0\n avg_forward = 0\n average_backward = 0\n for d in data[method]:\n d['loss'] = normalize_loss(d['loss'])\n print('loss: {:8.3f}, |grad|: {:8.3f}, forward time: {:6.3f}s, backward time: {:6.3f}s'.format(\n d['loss'], np.linalg.norm(d['grad']), d['forward_time'], d['backward_time']))\n total_time += d['forward_time'] + d['backward_time']\n average_backward += d['backward_time']\n avg_forward += d['forward_time']\n avg_forward /= len(data[method])\n average_backward /= len(data[method])\n print_info('Optimizing with {} finished in {:6.3f}s in {:d} iterations. Average Backward time: {:6.3f}s, Average Forward Time = {:6.3f}s'.format(\n method, total_time, len(data[method]), average_backward, avg_forward))\n\n plt.rc('pdf', fonttype=42)\n plt.rc('font', size=30) # Controls default text sizes.\n plt.rc('axes', titlesize=24) # Fontsize of the axes title.\n plt.rc('axes', labelsize=24) # Fontsize of the x and y labels.\n plt.rc('xtick', labelsize=22) # Fontsize of the tick labels.\n plt.rc('ytick', labelsize=22) # Fontsize of the tick labels.\n plt.rc('legend', fontsize=20) # Legend fontsize.\n plt.rc('figure', titlesize=16) # Fontsize of the figure title.\n com_qs = {}\n rpys = {}\n com_vs = {}\n losses = {}\n for method in ['newton_pcg', 'newton_cholesky', 'pd_eigen']:\n rpys[method] = [np.linalg.norm(d['x'][:3]) for d in data[method]]\n com_qs[method] = [np.linalg.norm(d['x'][3:6]) for d in data[method]]\n com_vs[method] = [np.linalg.norm(d['x'][6:9]) for d in data[method]]\n losses[method] = [d['loss'] for d in data[method]]\n\n fig = plt.figure(figsize=(18, 13))\n\n ax_com = fig.add_subplot(221)\n ax_com.set_position((0.05, 0.27, 0.20, 0.6))\n\n ax_rpy = fig.add_subplot(222)\n ax_rpy.set_position((0.295, 0.27, 0.20, 0.6))\n\n ax_v = fig.add_subplot(223)\n ax_v.set_position((0.5425, 0.27, 0.20, 0.6))\n\n ax_loss = fig.add_subplot(224)\n ax_loss.set_position((0.795, 0.27, 0.20, 0.6))\n\n titles = ['initial position', 'initial pose', 'initial velocity', 'loss']\n for title, ax, y in zip(titles, (ax_com, ax_rpy, ax_v, ax_loss), (com_qs, rpys, com_vs, losses)):\n if 'position' in title:\n ax.set_ylabel(\"|initial position|\")\n ax.grid(True, which='both')\n ax.set_yticks([0.19, 0.20, 0.21])\n elif 'pose' in title:\n ax.set_ylabel(\"|Euler angles|\")\n ax.grid(True, which='both')\n elif 'velocity' in title:\n ax.set_ylabel(\"|initial velocity|\")\n ax.grid(True, which='both')\n else:\n 
ax.set_ylabel(\"loss\")\n ax.set_yscale('log')\n ax.grid(True)\n ax.set_xlabel('function evaluations')\n for method, method_ref_name, color in zip(['newton_pcg', 'newton_cholesky', 'pd_eigen'],\n ['PCG', 'Cholesky', 'Ours'], ['tab:blue', 'tab:red', 'tab:green']):\n ax.plot(y[method], color=color, label=method_ref_name, linewidth=4)\n handles, labels = ax.get_legend_handles_labels()\n\n plt.subplots_adjust(wspace=0.3, hspace=0.4)\n # Share legends.\n fig.legend(handles, labels, loc='lower center', ncol=3)#, bbox_to_anchor=(0.5, 0.17))\n\n fig.savefig(folder / 'bunny.pdf')\n plt.show()\n",
"import sys\nsys.path.append('../')\n\nimport os\nfrom pathlib import Path\nimport time\nimport numpy as np\nimport scipy.optimize\nimport pickle\n\nfrom py_diff_pd.common.common import ndarray, create_folder, rpy_to_rotation, rpy_to_rotation_gradient\nfrom py_diff_pd.common.common import print_info, print_ok, print_error, PrettyTabular\nfrom py_diff_pd.common.grad_check import check_gradients\nfrom py_diff_pd.core.py_diff_pd_core import StdRealVector\nfrom py_diff_pd.env.rolling_sphere_env_3d import RollingSphereEnv3d\nfrom py_diff_pd.common.display import export_mp4\n\ndef test_rolling_sphere(verbose):\n seed = 42\n folder = Path('rolling_sphere_3d')\n refinement = 10\n youngs_modulus = 2e6\n poissons_ratio = 0.4\n env = RollingSphereEnv3d(seed, folder, { 'refinement': refinement,\n 'youngs_modulus': youngs_modulus,\n 'poissons_ratio': poissons_ratio })\n deformable = env.deformable()\n\n # Setting thread number.\n thread_cts = [2, 4, 8]\n\n methods = ('newton_pcg', 'newton_cholesky', 'pd_eigen', 'pd_no_acc')\n opts = ({ 'max_newton_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4 },\n { 'max_newton_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4 },\n { 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4,\n 'use_bfgs': 1, 'bfgs_history_size': 10 },\n { 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4,\n 'use_bfgs': 1, 'bfgs_history_size': 10, 'use_acc': 0 })\n\n dt = 5e-3\n frame_num = 100\n\n # Initial state.\n dofs = deformable.dofs()\n act_dofs = deformable.act_dofs()\n q0 = env.default_init_position() + np.random.normal(scale=0.001, size=dofs)\n radius = env.radius()\n pivot = ndarray([radius, radius, 0])\n omega = ndarray([0, 10.0, 0])\n omega_x, omega_y, omega_z = omega\n omega_skewed = ndarray([\n [0, -omega_z, omega_y],\n [omega_z, 0, -omega_x],\n [-omega_y, omega_x, 0]\n ])\n v0 = (q0.reshape((-1, 3)) @ -omega_skewed).ravel()\n a0 = np.zeros(act_dofs)\n f0 = np.zeros(dofs)\n\n # Visualization.\n if verbose:\n for method, opt in zip(methods, opts):\n _, _, info = env.simulate(dt, frame_num, 'pd_eigen' if method == 'pd_no_acc' else method,\n opt, q0, v0, [a0 for _ in range(frame_num)],\n [f0 for _ in range(frame_num)], require_grad=True, vis_folder=method)\n print('{}: forward: {:3.3f}s; backward: {:3.3f}s'.format(method, info['forward_time'], info['backward_time']))\n export_mp4(folder / method, '{}.mp4'.format(str(folder / method)), fps=12)\n\n # Benchmark time.\n print('Reporting time cost. 
DoFs: {:d}, Contact DoFs: {:d}, frames: {:d}, dt: {:3.3e}'.format(dofs,\n env.contact_dofs(), frame_num, dt))\n rel_tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]\n forward_backward_times = {}\n forward_times = {}\n backward_times = {}\n losses = {}\n grads = {}\n for method in methods:\n for thread_ct in thread_cts:\n meth_thread_num = '{}_{}threads'.format(method, thread_ct)\n forward_backward_times[meth_thread_num] = []\n forward_times[meth_thread_num] = []\n backward_times[meth_thread_num] = []\n losses[meth_thread_num] = []\n grads[meth_thread_num] = []\n\n for rel_tol in rel_tols:\n print_info('rel_tol: {:3.3e}'.format(rel_tol))\n tabular = PrettyTabular({\n 'method': '{:^30s}',\n 'forward and backward (s)': '{:3.3f}',\n 'forward only (s)': '{:3.3f}',\n 'loss': '{:3.3f}',\n '|grad|': '{:3.3f}'\n })\n print_info(tabular.head_string())\n\n for method, opt in zip(methods, opts):\n opt['rel_tol'] = rel_tol\n for thread_ct in thread_cts:\n opt['thread_ct'] = thread_ct\n meth_thread_num = '{}_{}threads'.format(method, thread_ct)\n loss, grad, info = env.simulate(dt, frame_num, 'pd_eigen' if method == 'pd_no_acc' else method,\n opt, q0, v0, [a0 for _ in range(frame_num)],\n [f0 for _ in range(frame_num)], require_grad=True, vis_folder=None)\n grad_q, grad_v, grad_a, grad_f = grad\n grad = np.zeros(q0.size + v0.size + a0.size + f0.size)\n grad[:dofs] = grad_q\n grad[dofs:2 * dofs] = grad_v\n grad[2 * dofs:2 * dofs + act_dofs] = np.sum(ndarray(grad_a), axis=0)\n grad[2 * dofs + act_dofs:] = np.sum(ndarray(grad_f), axis=0)\n l, g, forward_time, backward_time = loss, grad, info['forward_time'], info['backward_time']\n print(tabular.row_string({\n 'method': meth_thread_num,\n 'forward and backward (s)': forward_time + backward_time,\n 'forward only (s)': forward_time,\n 'loss': l,\n '|grad|': np.linalg.norm(g) }))\n forward_backward_times[meth_thread_num].append(forward_time + backward_time)\n forward_times[meth_thread_num].append(forward_time)\n backward_times[meth_thread_num].append(backward_time)\n losses[meth_thread_num].append(l)\n grads[meth_thread_num].append(g)\n pickle.dump((rel_tols, forward_times, backward_times, losses, grads), open(folder / 'table.bin', 'wb'))\n\nif __name__ == '__main__':\n verbose = True\n test_rolling_sphere(verbose)",
"import sys\nsys.path.append('../')\n\nimport os\nfrom pathlib import Path\nimport time\nimport scipy.optimize\nimport numpy as np\n\nfrom py_diff_pd.core.py_diff_pd_core import QuadMesh2d, QuadDeformable, StdRealVector\nfrom py_diff_pd.common.common import create_folder, ndarray, print_info, print_error, print_ok\nfrom py_diff_pd.env.cantilever_env_2d import CantileverEnv2d\nfrom py_diff_pd.env.circle_env_2d import CircleEnv2d\n\ndef test_pd_forward(verbose):\n seed = 42\n folder = Path('pd_forward')\n\n def test_env(env_class_name):\n env = env_class_name(seed, folder, { 'refinement': 6 })\n\n methods = ['newton_pcg', 'pd_eigen']\n opts = [{ 'max_newton_iter': 100, 'max_ls_iter': 10, 'abs_tol': 1e-4, 'rel_tol': 1e-6, 'verbose': 0, 'thread_ct': 4 },\n { 'max_pd_iter': 100, 'max_ls_iter': 10, 'abs_tol': 1e-4, 'rel_tol': 1e-6, 'verbose': 0, 'thread_ct': 4,\n 'use_bfgs': 1, 'bfgs_history_size': 10 }]\n # Check if Pardiso is available\n pardiso_available = 'PARDISO_LIC_PATH' in os.environ\n if pardiso_available:\n methods.append('pd_pardiso')\n opts.append(opts[-1])\n\n # Forward simulation.\n dt = 0.01\n frame_num = 50\n deformable = env.deformable()\n dofs = deformable.dofs()\n act_dofs = deformable.act_dofs()\n a0 = np.random.uniform(size=(frame_num, act_dofs))\n q = {}\n for method, opt in zip(methods, opts):\n loss, info = env.simulate(dt, frame_num, method, opt, act=a0, vis_folder=method if verbose else None)\n if verbose:\n print_info('{} finishes in {:3.3f} seconds. Loss: {:3.3f}'.format(method, info['forward_time'], loss))\n q[method] = info['q']\n\n # Compare results.\n atol = 1e-4\n rtol = 5e-3\n for qn, qp in zip(q['newton_pcg'], q['pd_eigen']):\n state_equal = np.linalg.norm(qn - qp) < rtol * np.linalg.norm(qn) + atol\n if not state_equal:\n if verbose:\n print_error(np.linalg.norm(qn - qp), np.linalg.norm(qn))\n return False\n\n if pardiso_available:\n for qn, qp in zip(q['newton_pcg'], q['pd_pardiso']):\n state_equal = np.linalg.norm(qn - qp) < rtol * np.linalg.norm(qn) + atol\n if not state_equal:\n if verbose:\n print_error(np.linalg.norm(qn - qp), np.linalg.norm(qn))\n return False\n\n # Visualize results.\n if verbose:\n print_info('PD and Newton solutions are the same.')\n for method in methods:\n print_info('Showing {} gif...'.format(method))\n os.system('eog {}/{}.gif'.format(folder, method))\n\n return True\n\n if not test_env(CantileverEnv2d): return False\n if not test_env(CircleEnv2d): return False\n return True\n\nif __name__ == '__main__':\n verbose = True\n test_pd_forward(verbose)"
] | [
[
"matplotlib.pyplot.rc",
"numpy.linalg.norm",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.rc",
"numpy.linalg.norm",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.random.normal",
"numpy.zeros",
"numpy.linalg.norm"
],
[
"numpy.random.uniform",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weihs/hashtag_co-occurrence_network | [
"b6aaa664d4cd42fd7dfd2d2dc2350568e0c6b08e"
] | [
"inconsistency_graph_construction.py"
] | [
"import json\nimport copy\nimport yaml\nimport sys\nimport numpy as np\nimport networkx as nx\nfrom scipy import linalg\n\n\ndef merge_cooccurrence_matrix(number_of_days, origin_directory,result_directory,origin_prefix,result_filename):\n postfix='.npy'\n for i in range(1,1+number_of_days):#build combine co_occurrence matrix\n filename=origin_directory+origin_prefix+str(i)+postfix\n if i==1:\n combine_matrix=np.load(filename)\n else:\n new_matrix=np.load(filename)\n combine_matrix=linalg.block_diag(combine_matrix,new_matrix)\n\n result_file=result_directory+result_filename\n np.save(result_file,combine_matrix)\n return combine_matrix\n\n\ndef construct_graphml(number_of_days,combine_matrix,origin_directory,origin_prefix,hashtag_frequency_prefix):\n G=nx.from_numpy_matrix(combine_matrix)\n prenode=0\n for i in range(1,1+number_of_days):#add node attributes\n daily_matrix_filename=origin_directory+origin_prefix+str(i)+'.npy'#get the number of hashtag\n matrix=np.load(daily_matrix_filename)\n number_of_hashtag=matrix.shape[0]\n\n filename=origin_directory+hashtag_frequency_prefix+str(i)+'.json'#construct graph and set node attributes\n with open(filename, mode='r') as f:\n hashtag_frequency=json.load(f)\n for j in range(number_of_hashtag):\n G.node[prenode+j]['text']=hashtag_frequency[j]['_id']\n G.node[prenode+j]['frequency']=hashtag_frequency[j]['frequency']\n G.node[prenode+j]['timeinterval']=i\n prenode+=j+1\n\n\n for v in G.nodes():#connect the same node in two closet period\n text=G.node[v]['text']\n same_text_nodelist=[u for u in G.nodes() if G.node[u]['text']==text and u>v]\n if len(same_text_nodelist)==0:\n continue\n else:\n u=min(same_text_nodelist)\n G.add_edge(u,v)\n G.edge[u][v]['type']=1\n G.edge[u][v]['weight']=10\n for u,v in G.edges():# set type attributes for vertical edges and remove self-loop\n if 'type' not in G.edge[u][v]:\n G.edge[u][v]['type']=0\n if u==v:\n G.remove_edge(u,v)\n return G\n\nwith open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\nnumber_of_days=cfg['number_of_days']\ndata_directory=cfg['data_directory']\n\nif sys.argv[1]=='without_aggregation':\n origin_prefix=cfg['origin_aggregation_matrix']\n hashtag_frequency_prefix=cfg['origin_aggregation_list']\n graphml_filename=data_directory+str(number_of_days)+cfg['without_aggregation_graphml_filename']\n result_filename=cfg['without_aggregation_combine_matrix']\nelse:\n origin_prefix=cfg['result_aggregation_matrix']\n hashtag_frequency_prefix=cfg['result_aggregation_list']\n graphml_filename=data_directory+str(number_of_days)+cfg['with_aggregation_graphml_filename']\n result_filename=cfg['with_aggregation_combine_matrix']\n\n\n\n\ncombine_matrix=merge_cooccurrence_matrix(number_of_days, data_directory, data_directory, origin_prefix, result_filename)\nG=construct_graphml(number_of_days, combine_matrix, data_directory,origin_prefix,hashtag_frequency_prefix)\n\nwith open(graphml_filename,mode='w') as f:\n nx.write_graphml(G,f)\n"
] | [
[
"numpy.load",
"scipy.linalg.block_diag",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
lbaiao/sys-simulator-2 | [
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6",
"94f00d43309fe7b56dac5099bd4024695ba317b6"
] | [
"scripts_a2c/script5.py",
"scripts_dql/script44.py",
"sys_simulator/sinr/sinr.py",
"scripts_a2c/evaluate3.py",
"scripts_gym/script7_windows.py",
"plot_scripts/channels.py",
"sys_simulator/q_learning/environments/completeEnvironment5dB.py",
"data/benchmarks/script1/20210205-134052/script1.py",
"data/rainbow/script1/20210217-121832/script1.py",
"scripts_a2c/script15.py",
"tests/test_channel.py",
"scratch_dql/scratch8.py",
"scripts_dql/script17.py",
"scratch_dql/scratch2_3.py"
] | [
"# Similar to script 1 but with discrete-value actions.\n# It uses CompleteEnvironmentA2C2\n\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.environments.completeEnvironmentA2C2 \\\n import CompleteEnvironmentA2C2\nfrom sys_simulator.q_learning.rewards import dis_reward_tensor\nfrom sys_simulator.parameters.parameters import EnvironmentParameters\nfrom sys_simulator.a2c.agent import Agent\nfrom sys_simulator.a2c import ActorCriticDiscrete, compute_gae_returns\nfrom torch import optim, nn\nimport torch\nimport os\nimport pickle\nimport random\n# from copy import deepcopy\n\n\ndef run():\n # environment physical parameters\n n_mues = 1 # number of mues\n n_d2d = 2 # number of d2d pairs\n n_rb = n_mues # number of RBs\n bs_radius = 500 # bs radius in m\n rb_bandwidth = 180*1e3 # rb bandwidth in Hz\n d2d_pair_distance = 50 # d2d pair distance in m\n p_max = 23 # max tx power in dBm\n noise_power = -116 # noise power per RB in dBm\n bs_gain = 17 # macro bs antenna gain in dBi\n user_gain = 4 # user antenna gain in dBi\n sinr_threshold_train = 6 # mue sinr threshold in dB for training\n mue_margin = .5e4\n # conversions from dB to pow\n p_max = p_max - 30\n p_max = gen.db_to_power(p_max)\n noise_power = noise_power - 30\n noise_power = gen.db_to_power(noise_power)\n bs_gain = gen.db_to_power(bs_gain)\n user_gain = gen.db_to_power(user_gain)\n sinr_threshold_train = gen.db_to_power(sinr_threshold_train)\n # ai training parameters\n STEPS_PER_EPISODE = 20\n MAX_NUM_EPISODES = 2700 * 1\n # STEPS_PER_EPISODE = 10\n # MAX_NUM_EPISODES = 2\n # C = 8000 # C constant for the improved reward function\n C = 80 # C constant for the improved reward function\n MAX_NUMBER_OF_AGENTS = 10\n NUM_ACTIONS = 5\n HIDDEN_SIZE = 256\n LEARNING_RATE = 3e-2\n BETA = 1e-2\n # mu = 0.82*p_max/5/2000\n # std = mu/6\n mu = 0\n std = 0.1\n # torch device\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # parameters classes initialization\n env_params = EnvironmentParameters(\n rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin)\n # environment initialization\n reward_function = dis_reward_tensor\n environment = CompleteEnvironmentA2C2(env_params, reward_function)\n # a2c initialization\n a2c = ActorCriticDiscrete(environment.state_space_size,\n NUM_ACTIONS, HIDDEN_SIZE, mu, std)\n actor_optimizer = optim.Adam(a2c.actor.parameters(), lr=LEARNING_RATE)\n critic_optimizer = optim.Adam(a2c.critic.parameters(), lr=LEARNING_RATE)\n # training loop\n episode = 0\n d2d_spectral_effs = []\n mue_spectral_effs = []\n actions = [i*0.82*p_max/5/1000 for i in range(NUM_ACTIONS)] # best result\n while episode < MAX_NUM_EPISODES:\n # entropy = 0\n aux_range = range(MAX_NUMBER_OF_AGENTS+1)[1:]\n n_agents = random.choice(aux_range)\n agents = [Agent() for _ in range(n_agents)]\n environment.build_scenario(agents)\n obs = [environment.get_state(a) for a in agents]\n log_probs = torch.zeros((n_agents, STEPS_PER_EPISODE)).to(device)\n values = torch.zeros((n_agents, STEPS_PER_EPISODE+1)).to(device)\n rewards = torch.zeros((n_agents, STEPS_PER_EPISODE)).to(device)\n entropy = torch.zeros((n_agents, STEPS_PER_EPISODE)).to(device)\n i = 0\n done = False\n # actions = [] # used for debug purposes\n while not done and i < STEPS_PER_EPISODE:\n # agents choose their actions\n # actions_t = [] # used for debug purposes\n for j, agent in enumerate(agents):\n action_index, dist, value = 
agent.act_discrete(a2c, obs[j])\n agent.set_action(actions[action_index.item()])\n # actions_t.append(action) # used for debug purposes\n log_prob = dist.log_prob(action_index)\n # entropy += dist.entropy().mean()\n log_probs[j][i] = log_prob\n values[j][i] = value\n entropy[j][i] = dist.entropy()\n # perform a environment step\n next_obs_t, rewards_t, done = environment.step(agents)\n rewards[:, i] = torch.FloatTensor(rewards_t)\n # actions.append(actions_t) # used for debug purposes\n i += 1\n # last_states = deepcopy(obs) # used for debug purposes\n obs = next_obs_t\n # gae and returns\n next_obs_t = torch.cat(obs, 0).to(device)\n for j, agent in enumerate(agents):\n _, _, next_value_t = agents[0].act(a2c, next_obs_t[j])\n values[j][i] = next_value_t\n advantages, returns = compute_gae_returns(device, rewards, values)\n # update critic\n values_critic = values[:, :-1].reshape(1, -1).to(device)\n returns_critic = returns.view(1, -1).to(device)\n critic_loss = nn.functional.mse_loss(values_critic, returns_critic)\n critic_optimizer.zero_grad()\n critic_loss.backward()\n critic_optimizer.step()\n # update actor\n aux = torch.mul(advantages, log_probs)\n aux -= BETA * entropy\n aux = torch.sum(aux, axis=1)\n actor_loss = -torch.mean(aux)\n actor_optimizer.zero_grad()\n actor_loss.backward()\n actor_optimizer.step()\n # print training info\n episode += 1\n m_reward = torch.mean(rewards).item()\n d2d_spectral_effs.append(environment.d2d_spectral_eff)\n mue_spectral_effs.append(environment.mue_spectral_eff)\n print(\"Episode#:{} mean reward:{}\".format(\n episode, m_reward))\n # save training data into a file\n cwd = os.getcwd()\n data = {}\n data['d2d_spectral_effs'] = d2d_spectral_effs\n data['mue_spectral_effs'] = mue_spectral_effs\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n filename_model = filename\n filename = f'{cwd}/data/a2c/{filename}.pickle'\n # save the a2c models\n torch.save(\n a2c.state_dict(),\n f'{cwd}/models/a2c/{filename_model}.pt')\n with open(filename, 'wb') as f:\n pickle.dump(data, f)\n",
"# Similar to script 36\n# Uses CompleteEnvironment10dB.\n# Single episode convergence. Everything is in dB.\n# Central DQN controls all agents.\nfrom copy import deepcopy\nfrom itertools import product\nfrom sys_simulator.general import db_to_power, power_to_db\nfrom sys_simulator.plots import plot_positions_actions_pie\nfrom sys_simulator.channels import BANChannel, UrbanMacroLOSWinnerChannel\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.environments.completeEnvironment10dB \\\n import CompleteEnvironment10dB\nfrom sys_simulator.dqn.agents.dqnAgent import CentralDQNAgent, ExternalDQNAgent\nfrom sys_simulator.dqn.externalDQNFramework import ExternalDQNFramework\nfrom sys_simulator.parameters.parameters import \\\n EnvironmentParameters, TrainingParameters, DQNAgentParameters\nfrom sys_simulator.q_learning.rewards import dis_reward_tensor_db\nimport torch\nimport numpy as np\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\n\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\ncarrier_frequency = 2.4 # carrier frequency in GHz\nbs_radius = 500 # bs radius in m\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\ndevice_height = 1.5 # mobile devices height in m\nbs_height = 25 # BS antenna height in m\np_max = 40 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_train = 6 # mue sinr threshold in dB for training\nmue_margin = 200 # mue margin in dB\n# conversions from dBm to dB\np_max = p_max - 30\nnoise_power = noise_power - 30\n# channel parameters\nCHANNEL_RND = False\n# q-learning parameters\n# training\nNUMBER = 1\nSTEPS_PER_EPISODE = 4000\n# STEPS_PER_EPISODE = 10\nTEST_STEPS_PER_EPISODE = 200\n# common\nEPSILON_INITIAL = 1\nEPSILON_MIN = .05\nEPSILON_DECAY = 1.1 / STEPS_PER_EPISODE # fast training\nGAMMA = 0.5 # Discount factor\nC = 8 # C constant for the improved reward function\nTARGET_UPDATE = 10\nMAX_NUMBER_OF_AGENTS = 4\nREPLAY_MEMORY_SIZE = 10000\nBATCH_SIZE = 128\nHIDDEN_SIZE = 256\nNUM_HIDDEN_LAYERS = 2\nLEARNING_RATE = 1e-2\nREWARD_PENALTY = 1.5\nENVIRONMENT_MEMORY = 2\nmax_d2d = MAX_NUMBER_OF_AGENTS\n# more parameters\n# linear discretization\n# actions = power_to_db(np.linspace(\n# db_to_power(p_max-20), db_to_power(p_max-10), 10\n# ))\n# db discretization\nNUM_ACTIONS = 10\nactions = power_to_db(\n np.linspace(\n 1e-6, db_to_power(p_max-10), NUM_ACTIONS\n )\n)\nenv_params = EnvironmentParameters(\n rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin\n)\nparams = TrainingParameters(1, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(\n EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,\n BATCH_SIZE, GAMMA\n)\nreward_function = dis_reward_tensor_db\nchannel_to_devices = BANChannel(rnd=CHANNEL_RND)\nchannel_to_bs = UrbanMacroLOSWinnerChannel(\n rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height\n)\nref_env = CompleteEnvironment10dB(\n env_params,\n reward_function,\n channel_to_bs,\n channel_to_devices,\n reward_penalty=REWARD_PENALTY,\n memory=ENVIRONMENT_MEMORY,\n bs_height=bs_height\n)\nfoo_agents = [ExternalDQNAgent(agent_params, [1]) for a in range(4)]\nfoo_env = deepcopy(ref_env)\nfoo_env.build_scenario(foo_agents)\n_, _ = foo_env.step(foo_agents)\nenv_state_size = 
foo_env.get_state_size(foo_agents[0])\npairs_positions = [\n ((-400, 0, device_height), (-450, 0, device_height)),\n ((100, 0, device_height), (150, 0, device_height)),\n ((225, 225, device_height), (275, 225, device_height)),\n ((55, -55, device_height), (55, -5, device_height)),\n]\nmue_position = (0, 200, device_height)\nn_agents = len(pairs_positions)\nactions_tuples = \\\n list(product(range(NUM_ACTIONS), repeat=n_agents))\nframework = ExternalDQNFramework(\n agent_params,\n env_state_size * n_agents,\n len(actions_tuples),\n HIDDEN_SIZE,\n NUM_HIDDEN_LAYERS,\n LEARNING_RATE\n)\n\n\ndef calculate_interferences(env: CompleteEnvironment10dB):\n bs = env.bs\n mue = env.mue\n d2d_pairs = env.d2d_pairs\n txs = [mue]\n txs += [p[0] for p in d2d_pairs]\n rxs = [bs]\n rxs += [p[1] for p in d2d_pairs]\n interferences = np.zeros((len(txs), len(rxs)))\n for i, tx in enumerate(txs):\n for j, (rx, interfered) in enumerate(zip(rxs, txs)):\n if tx == interfered:\n interf = tx.power_at_receiver\n elif tx == mue:\n interf = interfered.received_mue_interference\n elif rx == bs:\n interf = tx.caused_mue_interference\n else:\n interf = [\n power_to_db(i[1]) for i in interfered.interferences\n if i[0] == tx.id\n ][0]\n interferences[i][j] = interf\n tx_labels = [d.id for d in txs]\n rx_labels = [d.id for d in rxs]\n return interferences, tx_labels, rx_labels\n\n\ndef train():\n global actions\n env = deepcopy(ref_env)\n best_reward = float('-inf')\n mue_spectral_eff_bag = list()\n d2d_spectral_eff_bag = list()\n rewards_bag = list()\n # aux_range = range(max_d2d+1)[1:]\n epsilon = agent_params.start_epsilon\n # n_agents = np.random.choice(aux_range)\n agents = [ExternalDQNAgent(agent_params, actions)\n for _ in range(n_agents)] # 1 agent per d2d tx\n central_agent = CentralDQNAgent(agent_params, actions, n_agents)\n central_agent.set_epsilon(epsilon)\n env.set_scenario(pairs_positions, mue_position, agents)\n obs_aux, _ = env.step(agents)\n obs = torch.cat(obs_aux).view(1, -1).float()\n # env.build_scenario(agents)\n # obs = [env.get_state(a).float() for a in agents]\n reward = 0.0\n i = 0\n bag = list()\n while True:\n if i >= params.steps_per_episode:\n break\n else:\n tuple_index = central_agent.get_action(framework, obs).item()\n action_tuple = actions_tuples[tuple_index]\n for j, agent in enumerate(agents):\n agent.set_action(action_tuple[j], actions[action_tuple[j]])\n # # debugging\n # if len(agents) == 2:\n # print('debugging')\n next_obs_aux, rewards = env.step(agents)\n next_obs = torch.cat(next_obs_aux).view(1, -1).float()\n reward = np.sum(rewards)\n i += 1\n framework.replay_memory.push(\n obs, tuple_index, next_obs, reward\n )\n framework.learn()\n bag.append(reward)\n obs = next_obs\n if i % TARGET_UPDATE == 0:\n framework.target_net.load_state_dict(\n framework.policy_net.state_dict()\n )\n if reward > best_reward:\n best_reward = reward\n print(\"Step#:{} sum reward:{} best_sum_reward:{} eps:{}\".format(\n i, reward, best_reward, central_agent.epsilon)\n )\n # mue spectral eff\n mue_spectral_eff_bag.append(\n (env.mue_spectral_eff, n_agents)\n )\n # average d2d spectral eff\n d2d_spectral_eff_bag.append(\n (env.d2d_spectral_eff/n_agents, n_agents)\n )\n rewards_bag.append(env.reward)\n epsilon = central_agent.epsilon\n # Return the trained policy\n mue_spectral_effs = mue_spectral_eff_bag\n d2d_spectral_effs = d2d_spectral_eff_bag\n spectral_effs = zip(mue_spectral_effs, d2d_spectral_effs)\n avg_q_values = framework.bag\n # # saving the data and the model\n cwd = os.getcwd()\n filename = 
gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n filename_model = filename\n filename = f'{cwd}/data/dql/{filename}_training.pt'\n torch.save(framework.policy_net.state_dict(),\n f'{cwd}/models/dql/{filename_model}.pt')\n torch.save(spectral_effs, filename)\n with open(\n f'{cwd}/data/dql/{filename_model}_avg_q_values.pickle',\n 'wb'\n ) as p_file:\n pickle.dump(avg_q_values, p_file)\n with open(\n f'{cwd}/data/dql/{filename_model}_rewards.pickle',\n 'wb'\n ) as p_file:\n pickle.dump(rewards_bag, p_file)\n\n\ndef print_stuff(actions, env: CompleteEnvironment10dB):\n actions = [f'{i:.2f}' for i in actions]\n sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]\n print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')\n print(f'D2D Power levels [dBW]: {actions}')\n print(f'D2D SINR [dB]: {sinr_d2ds}')\n print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')\n\n\ndef test():\n env = deepcopy(ref_env)\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n framework.policy_net.load_state_dict(\n torch.load(f'models/dql/{filename}.pt')\n )\n framework.policy_net.eval()\n mue_spectral_effs = [list() for _ in range(max_d2d+1)]\n d2d_spectral_effs = [list() for _ in range(max_d2d+1)]\n # jain_index = [list() for _ in range(max_d2d+1)]\n # done = False\n bag = list()\n # aux_range = range(max_d2d+1)[1:]\n # n_agents = np.random.choice(aux_range)\n agents = [ExternalDQNAgent(agent_params, actions)\n for i in range(n_agents)] # 1 agent per d2d tx\n central_agent = CentralDQNAgent(agent_params, actions, n_agents)\n env.set_scenario(pairs_positions, mue_position, agents)\n # env.build_scenario(agents)\n # done = False\n # obs = [env.get_state(a) for a in agents]\n obs_aux, _ = env.step(agents)\n obs = torch.cat(obs_aux).view(1, -1).float()\n total_reward = 0.0\n i = 0\n while True:\n tuple_index = central_agent.act(framework, obs)\n action_tuple = actions_tuples[tuple_index]\n actions_index = actions_tuples[tuple_index]\n bag.append(tuple_index)\n for j, agent in enumerate(agents):\n agent.set_action(action_tuple[j], actions[action_tuple[j]])\n next_obs_aux, rewards = env.step(agents)\n next_obs = torch.cat(next_obs_aux).view(1, -1).float()\n obs = next_obs\n total_reward += sum(rewards)\n # saving stuff\n mue_spectral_effs[n_agents].append(env.mue_spectral_eff.item())\n d2d_spectral_effs[n_agents].append(env.d2d_spectral_eff.item())\n i += 1\n if i >= TEST_STEPS_PER_EPISODE:\n break\n d2d_txs, d2d_rxs = zip(*env.d2d_pairs)\n # D2D interference on the MUE, in dB\n d2d_interferences = np.array([\n d.caused_mue_interference for d in d2d_txs\n ])\n d2d_interferences_mag = db_to_power(d2d_interferences)\n d2d_total_interference = np.sum(d2d_interferences_mag)\n percentage_interferences = d2d_interferences_mag / d2d_total_interference\n interferences, tx_labels, rx_labels = calculate_interferences(env)\n if d2d_total_interference != 0:\n plot_positions_actions_pie(\n env.bs, env.mue, d2d_txs, d2d_rxs,\n actions_index, percentage_interferences,\n env.mue.sinr > sinr_threshold_train, sinr_threshold_train,\n env.reward, interferences, tx_labels, rx_labels\n )\n print_stuff(actions, env)\n plt.show()\n # jain_index[n_agents].append(gen.jain_index(env.sinr_d2ds))\n mue_success_rate = list()\n for i, m in enumerate(mue_spectral_effs):\n mue_success_rate.append(\n np.average(m > np.log2(1 + sinr_threshold_train))\n )\n d2d_speffs_avg = list()\n for i, d in enumerate(d2d_spectral_effs):\n d2d_speffs_avg.append(np.average(d))\n # jain_index_avg = list()\n # for i, j in 
enumerate(jain_index):\n # jain_index_avg.append(np.average(j))\n log = list()\n for i, d in enumerate(zip(d2d_speffs_avg, mue_success_rate)):\n log.append(f'NUMBER OF D2D_USERS: {i+1}')\n log.append(f'D2D SPECTRAL EFFICIENCY - SCRIPT: {d[0]}')\n log.append(f'MUE SUCCESS RATE - SCRIPT: {d[1]}')\n log.append('-------------------------------------')\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n log_path = f'logs/dql/{filename}.txt'\n file = open(log_path, 'w')\n for lg in log:\n file.write(f'{lg}\\n')\n file.close()\n data_path = f'data/dql/{filename}.pickle'\n data = {\n 'd2d_speffs_avg_total': d2d_spectral_effs,\n 'mue_success_rate': mue_success_rate,\n 'chosen_actions': bag,\n 'd2d_speffs': d2d_spectral_effs,\n 'mue_speffs': mue_spectral_effs\n }\n with open(data_path, 'wb') as file:\n pickle.dump(data, file)\n\n\nif __name__ == '__main__':\n train()\n test()\n",
"from sys_simulator.general import db_to_power, power_to_db\nfrom typing import List\nfrom sys_simulator import pathloss\nfrom sys_simulator.devices.devices \\\n import d2d_user, mobile_user, d2d_node_type, base_station\nfrom scipy.spatial.distance import euclidean\n\n\ndef sinr_d2d(d2d_tx: d2d_user, d2d_rx: d2d_user, d2d_devices: List[d2d_user],\n mue: mobile_user, noise_power: float, user_gain: float):\n d2d_tx_contrib = d2d_tx.tx_power / \\\n pathloss.pathloss_users_db(d2d_tx.distance_d2d/1000) * user_gain**2\n d2d_rx_mue_distance = euclidean(d2d_rx.position, mue.position)\n mue_interference = mue.tx_power / \\\n pathloss.pathloss_users(d2d_rx_mue_distance/1000) * user_gain**2\n d2d_interferers = [d for d in d2d_devices if (\n d.id != d2d_tx.id\n and d.type == d2d_node_type.TX and d.rb == d2d_tx.rb)]\n d2d_interference = sum(\n [d.tx_power * user_gain**2 / pathloss.pathloss_users(\n euclidean(d2d_rx.position, d.position)/1000)\n for d in d2d_interferers]\n )\n sinr = d2d_tx_contrib / (noise_power + mue_interference + d2d_interference)\n return sinr\n\n\ndef sinr_d2d_db(\n d2d_tx: d2d_user, d2d_rx: d2d_user, d2d_devices: List[d2d_user],\n mue: mobile_user, noise_power: float, user_gain: float\n):\n d2d_tx_contrib = d2d_tx.tx_power - \\\n pathloss.pathloss_users_db(d2d_tx.distance_d2d/1000) + 2 * user_gain\n d2d_rx_mue_distance = euclidean(d2d_rx.position, mue.position)\n mue_interference = mue.tx_power - \\\n pathloss.pathloss_users_db(d2d_rx_mue_distance/1000) + 2 * user_gain\n d2d_interferers = [d for d in d2d_devices if (\n d.id != d2d_tx.id\n and d.type == d2d_node_type.TX and d.rb == d2d_tx.rb)]\n d2d_interference = sum(\n [\n db_to_power(d.tx_power) *\n db_to_power(2 * user_gain) /\n pathloss.pathloss_users_db(\n euclidean(d2d_rx.position, d.position)/1000\n )\n for d in d2d_interferers\n ]\n )\n sinr = d2d_tx_contrib - \\\n power_to_db(\n db_to_power(noise_power) +\n db_to_power(mue_interference) +\n d2d_interference\n )\n return sinr\n\n\ndef sinr_mue(mue: mobile_user, d2d_devices: List[d2d_user], bs: base_station,\n noise_power: float, bs_gain: float, user_gain: float):\n mue_contrib = mue.tx_power * user_gain * bs_gain / \\\n pathloss.pathloss_bs_users(mue.distance_to_bs/1000)\n d2d_interferers = [d for d in d2d_devices if (\n d.type == d2d_node_type.TX and d.rb == mue.rb)]\n d2d_interference = sum(\n [d.tx_power * user_gain * bs_gain / pathloss.pathloss_bs_users(\n euclidean(d.position, bs.position)/1000) for d in d2d_interferers]\n )\n sinr = mue_contrib / (noise_power + d2d_interference)\n return sinr\n\n\ndef sinr_mue_db(mue: mobile_user, d2d_devices: List[d2d_user],\n bs: base_station,\n noise_power: float, bs_gain: float, user_gain: float):\n mue_contrib = mue.tx_power + user_gain + bs_gain \\\n - pathloss.pathloss_bs_users_db(mue.distance_to_bs/1000)\n d2d_interferers = [d for d in d2d_devices if (\n d.type == d2d_node_type.TX and d.rb == mue.rb)]\n d2d_interference = sum(\n [\n db_to_power(d.tx_power) *\n db_to_power(user_gain) *\n db_to_power(bs_gain) /\n pathloss.pathloss_bs_users(euclidean(d.position, bs.position)/1000)\n for d in d2d_interferers\n ]\n )\n sinr = \\\n mue_contrib - power_to_db(db_to_power(noise_power) + d2d_interference)\n return sinr\n\n\ndef sinr_d2d_tensor(d2d_tx: d2d_user, d2d_rx: d2d_user,\n d2d_devices: List[d2d_user], mue: mobile_user,\n noise_power: float, user_gain: float):\n d2d_tx_contrib = d2d_tx.tx_power / \\\n pathloss.pathloss_users(d2d_tx.distance_d2d/1000) * user_gain**2\n d2d_rx_mue_distance = euclidean(d2d_rx.position, 
mue.position)\n mue_interference = mue.tx_power / \\\n pathloss.pathloss_users(d2d_rx_mue_distance/1000) * user_gain**2\n d2d_interferers = [d for d in d2d_devices if (\n d.id != d2d_tx.id\n and d.type == d2d_node_type.TX and d.rb == d2d_tx.rb)]\n d2d_interference = sum(\n [d.tx_power * user_gain**2 / pathloss.pathloss_users(\n euclidean(d2d_rx.position, d.position)/1000)\n for d in d2d_interferers]\n )\n sinr = d2d_tx_contrib / (noise_power + mue_interference + d2d_interference)\n return sinr\n\n\ndef sinr_mue_tensor(mue: mobile_user, d2d_devices: List[d2d_user],\n bs: base_station, noise_power: float, bs_gain: float,\n user_gain: float):\n mue_contrib = mue.tx_power * user_gain * bs_gain / \\\n pathloss.pathloss_bs_users(mue.distance_to_bs/1000)\n d2d_interferers = [d for d in d2d_devices if (\n d.type == d2d_node_type.TX and d.rb == mue.rb)]\n d2d_interference = sum(\n [d.tx_power * user_gain * bs_gain / pathloss.pathloss_bs_users(\n euclidean(d.position, bs.position)/1000)\n for d in d2d_interferers]\n )\n sinr = mue_contrib / (noise_power + d2d_interference)\n return sinr\n",
"from copy import deepcopy\nfrom shutil import copyfile\nfrom sys_simulator.a2c.framework import A2CDiscreteFramework\nfrom typing import List\nfrom time import time\nfrom sys_simulator.a2c.agent import A2CAgent, A2CCentralAgent\nfrom sys_simulator.general.actions_discretizations import db_six\nfrom sys_simulator.q_learning.environments.completeEnvironment12 import CompleteEnvironment12\nimport sys_simulator.general as gen\nfrom sys_simulator.general import load_with_pickle, print_evaluate3, random_seed, save_with_pickle\nfrom sys_simulator.ddpg.framework import Framework\nimport torch\n\n\n# parameters\nALGO_NAME = 'a2c'\nBASE_PATH = '/home/lucas/dev/sys-simulator-2'\nAGENTS_RANGE = range(6)[1:]\nMODELS_PATHS = [\n f'{BASE_PATH}/data/a2c/script16/20210510-005348/last_model.pt',\n f'{BASE_PATH}/data/a2c/script16/20210510-071418/last_model.pt',\n f'{BASE_PATH}/data/a2c/script16/20210510-080641/last_model.pt',\n f'{BASE_PATH}/data/a2c/script16/20210510-192953/last_model.pt',\n f'{BASE_PATH}/data/a2c/script16/20210510-204338/last_model.pt',\n]\nENVS_PATHS = [\n f'{BASE_PATH}/data/a2c/script16/20210510-005348/env.pickle',\n f'{BASE_PATH}/data/a2c/script16/20210510-071418/env.pickle',\n f'{BASE_PATH}/data/a2c/script16/20210510-080641/env.pickle',\n f'{BASE_PATH}/data/a2c/script16/20210510-192953/env.pickle',\n f'{BASE_PATH}/data/a2c/script16/20210510-204338/env.pickle',\n]\n# TEST_NUM_EPISODES = 10000\nTEST_NUM_EPISODES = 1000\nEVAL_STEPS_PER_EPISODE = 10\nPRINT_EVERY = 100\n# env parameters\nRND_SEED = True\nSEED = 42\nCHANNEL_RND = True\n# writer\nfilename = gen.path_leaf(__file__)\nfilename = filename.split('.')[0]\ndir_path = f'data/{ALGO_NAME}/{filename}'\ndata_path, _ = gen.make_dir_timestamp(dir_path)\nif RND_SEED:\n random_seed(SEED)\ntorch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\na_min = -90\na_max = 60\na_offset = -10\nframeworks = []\nfor p in MODELS_PATHS:\n f = torch.load(p, map_location=torch_device)\n frameworks.append(f)\n# envs = [load_with_pickle(p) for p in ENVS_PATHS]\np_min = -90\np_max = 40 # max tx power in dBm\np_max = p_max - 30\nactions = db_six(p_min, p_max)\ncentral_agent = A2CCentralAgent(torch_device)\nenvs = [load_with_pickle(e) for e in ENVS_PATHS]\n\ndef test(framework: A2CDiscreteFramework, env: CompleteEnvironment12,\n surr_agents: List[A2CAgent], start: float):\n framework.a2c.actor.eval()\n framework.a2c.critic.eval()\n mue_availability = []\n mue_sinrs = []\n d2d_sinrs = []\n rewards_bag = []\n for ep in range(TEST_NUM_EPISODES):\n if ep % PRINT_EVERY == 0:\n now = (time() - start) / 60\n print_evaluate3(ep, TEST_NUM_EPISODES, now, len(surr_agents))\n env.reset()\n env.build_scenario(surr_agents, motion_model='random')\n obs, _, _, _ = env.step(surr_agents)\n i = 0\n done = False\n ep_availability = []\n ep_rewards = []\n ep_mue_sinrs = []\n ep_d2d_sinrs = []\n while not done and i < EVAL_STEPS_PER_EPISODE:\n # actions = np.zeros(MAX_NUMBER_OF_AGENTS) + 1e-9\n # db_actions = power_to_db(actions)\n for j, agent in enumerate(agents):\n agent.act(obs[j], framework)\n next_obs, reward, done, _ = env.step(surr_agents)\n obs = next_obs\n ep_availability.append(env.mue.sinr > env.params.sinr_threshold)\n ep_rewards.append(reward)\n ep_mue_sinrs.append(env.mue.sinr)\n ep_d2d_sinrs.append([p[0].sinr for p in env.d2d_pairs])\n i += 1\n rewards_bag += ep_rewards\n mue_sinrs += ep_mue_sinrs\n d2d_sinrs += ep_d2d_sinrs\n mue_availability += ep_availability\n all_bags = {\n 'rewards': rewards_bag,\n 'mue_sinrs': mue_sinrs,\n 'd2d_sinrs': 
d2d_sinrs,\n 'mue_availability': mue_availability\n }\n return all_bags\n\n\nif __name__ == '__main__':\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n dir_path = f'data/{ALGO_NAME}/{filename}'\n data_path, _ = gen.make_dir_timestamp(dir_path)\n start = time()\n results = []\n for f, i, e in zip(frameworks, AGENTS_RANGE, envs):\n agents = [\n A2CAgent(torch_device, actions)\n for _ in range(i)\n ]\n r = test(f, e, agents, start)\n results.append(r)\n # save stuff\n now = (time() - start) / 60\n data_file_path = f'{data_path}/log.pickle'\n save_with_pickle(results, data_file_path)\n copyfile(__file__, f'{data_path}/{filename}.py')\n print(f'done. Elapsed time: {now} minutes.')\n\n",
"from sys_simulator.a2c.agent import A2CAgent\nfrom sys_simulator.a2c.framework import A2CDiscreteFramework\nimport torch\nimport gym\n\nALGO_NAME = 'a2c'\nNUM_ENVS = 8\nENV_NAME = 'CartPole-v1'\nHIDDEN_SIZE = 256\nNUM_HIDDEN_LAYERS = 1\nLEARNING_RATE = 3E-4\nMAX_STEPS = 200000\nSTEPS_PER_EPISODE = 300\nTHRESHOLD_REWARD = 450\nBETA = .001\nGAMMA = .99\nLBDA = .95\nEVAL_NUM_EPISODES = 10\nEVAL_EVERY = int(MAX_STEPS / 20)\n\n\ntorch_device = torch.device(\"cpu\")\nagent = A2CAgent(torch_device)\n\n\ndef test_video(\n framework: A2CDiscreteFramework,\n num_episodes: int,\n steps_per_episode: int\n):\n env = gym.make(f'{ENV_NAME}')\n for _ in range(num_episodes):\n obs = env.reset()\n env.render()\n done = False\n i = 0\n while not done and i < steps_per_episode:\n action, _, _, _ = agent.act(obs, framework)\n next_obs, _, done, _ = env.step(action.item())\n obs = next_obs\n env.render()\n",
"import os\nfrom sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\n\n\nsns.set_style(\"darkgrid\")\n# sns.set_palette(\"viridis\")\n# sns.set_palette(\"rocket\")\n# sns.set_theme(style=\"whitegrid\")\nN_SAMPLES = int(1e5)\nN_BINS = int(2e2)\n# N_SAMPLES = int(1e3)\n# N_BINS = int(1e2)\nd = np.linspace(1e-9, 500, N_SAMPLES)\nban_channel = BANChannel()\nurban_channel = UrbanMacroNLOSWinnerChannel(sigma=8.0, small_sigma=4.0)\n# get channel data\nban_pathlosses = ban_channel.pathloss(d)\nban_large_scale = [ban_channel.large_scale() for _ in range(N_SAMPLES)]\nban_small_scale = [ban_channel.small_scale() for _ in range(N_SAMPLES)]\nurban_pathlosses = urban_channel.pathloss(d)\nurban_large_scale = [urban_channel.large_scale() for _ in range(N_SAMPLES)]\nurban_small_scale = [urban_channel.small_scale() for _ in range(N_SAMPLES)]\n# data\nban_dict = {\n 'channel': ['BAN' for _ in range(N_SAMPLES)],\n 'pathloss': ban_pathlosses,\n 'large_scale': ban_large_scale,\n 'small_scale': ban_small_scale,\n 'distances': d,\n}\nurban_dict = {\n 'channel': ['URBAN' for _ in range(N_SAMPLES)],\n 'pathloss': urban_pathlosses,\n 'large_scale': urban_large_scale,\n 'small_scale': urban_small_scale,\n 'distances': d,\n}\ndf = pd.DataFrame.from_dict(ban_dict)\ndf = df.append(pd.DataFrame.from_dict(urban_dict))\n# fonts config\nx_font = {\n 'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 16,\n}\ny_font = {\n 'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 16,\n}\nticks_font = {\n 'fontfamily': 'serif',\n 'fontsize': 13\n}\nlegends_font = {\n 'size': 13,\n 'family': 'serif'\n}\n# pathlosses fig\nplt.figure()\nsns.lineplot(\n data=df,\n hue='channel',\n x='distances',\n y='pathloss',\n)\nplt.xlabel('Distance [m]', fontdict=x_font)\nplt.ylabel('Path Loss [dB]', fontdict=y_font)\nplt.xticks(**ticks_font)\nplt.yticks(**ticks_font)\nplt.legend(prop=legends_font)\nsvg_path = '/home/lucas/dev/sys-simulator-2/figs/channels/channel_pathlosses.svg'\neps_path = '/home/lucas/dev/sys-simulator-2/figs/channels/channel_pathlosses.eps'\nplt.savefig(svg_path)\nos.system(f'magick convert {svg_path} {eps_path}')\n# shadowings fig\nplt.figure()\nsns.kdeplot(\n data=df,\n hue='channel',\n x='large_scale',\n # multiple='stack',\n)\nplt.xlabel('Loss [dB]', fontdict=x_font)\nplt.ylabel('Probability Density', fontdict=y_font)\nplt.xticks(**ticks_font)\nplt.yticks(**ticks_font)\n# plt.legend(prop=legends_font)\nsvg_path = '/home/lucas/dev/sys-simulator-2/figs/channels/channel_shadowings.svg'\neps_path = '/home/lucas/dev/sys-simulator-2/figs/channels/channel_shadowings.eps'\nplt.savefig(svg_path)\nos.system(f'magick convert {svg_path} {eps_path}')\n# small scale fig\nplt.figure()\nsns.kdeplot(\n data=df,\n hue='channel',\n x='small_scale',\n)\nplt.xlabel('Loss [dB]', fontdict=x_font)\nplt.ylabel('Probability Density', fontdict=y_font)\nplt.xticks(**ticks_font)\nplt.yticks(**ticks_font)\n# plt.legend(prop=legends_font)\nsvg_path = '/home/lucas/dev/sys-simulator-2/figs/channels/channel_small_scale_fadings.svg'\neps_path = '/home/lucas/dev/sys-simulator-2/figs/channels/channel_small_scale_fadings.eps'\nplt.savefig(svg_path)\nos.system(f'magick convert {svg_path} {eps_path}')\nplt.show()\n",
"from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent\nfrom sys_simulator.channels import Channel\nfrom sys_simulator.general import db_to_power, power_to_db\nfrom sys_simulator.devices.devices \\\n import base_station, mobile_user, d2d_user, d2d_node_type\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.agents.distanceAgent import DistanceAgent\nfrom sys_simulator.q_learning.environments.environment import RLEnvironment\nfrom typing import List\nfrom sys_simulator.parameters.parameters import EnvironmentParameters\nfrom scipy.spatial.distance import euclidean\nfrom typing import Tuple\nimport numpy as np\nimport torch\n\n\nclass CompleteEnvironment5dB(RLEnvironment):\n \"\"\"\n Same as CompleteEnvironment5, but everything is in dB.\n \"\"\"\n def __init__(self, params: EnvironmentParameters,\n reward_function, channel: Channel, **kwargs):\n self.params = params\n super(CompleteEnvironment5dB, self).__init__(params,\n reward_function, **kwargs)\n self.states = [0, 0, 1]\n self.device = \\\n torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.sinr_d2ds: float = []\n self.channel: Channel = channel\n self.pathlosses_are_calculated = False\n self.n_closest_devices = 2\n self.memory = 2\n self.dummy_d2d_pair = (\n self.make_dummy_d2d_device(d2d_node_type.TX),\n self.make_dummy_d2d_device(d2d_node_type.RX)\n )\n self.diff = 0\n\n def build_scenario(self, agents: List[ExternalDQNAgent]):\n # declaring the bs, mues and d2d pairs\n self.sinr_d2ds = []\n self.bs = base_station((0, 0),\n radius=self.params.bs_radius)\n self.bs.set_gain(self.params.bs_gain)\n self.mue = mobile_user(0, self.params.p_max)\n self.mue.set_gain(self.params.user_gain)\n self.d2d_pairs = [(d2d_user(x, d2d_node_type.TX, self.params.p_max),\n d2d_user(x, d2d_node_type.RX, self.params.p_max))\n for x in range(len(agents))]\n self.rb = 1\n # distributing nodes in the bs radius\n gen.distribute_nodes([self.mue], self.bs)\n for p in self.d2d_pairs:\n gen.distribute_pair_fixed_distance(\n p, self.bs, self.params.d2d_pair_distance\n )\n for d in p:\n d.set_distance_d2d(self.params.d2d_pair_distance)\n d.set_gain(self.params.user_gain)\n\n self.mue.set_rb(self.rb)\n\n for p in self.d2d_pairs:\n p[0].set_rb(self.rb)\n p[1].set_rb(self.rb)\n\n # TODO: como determinar a potencia de transmissao do mue?\n # vou setar pmax por enquanto\n self.mue.set_tx_power(self.params.p_max)\n\n for i in range(len(agents)):\n agents[i].set_d2d_tx_id(self.d2d_pairs[i][0].id)\n agents[i].set_d2d_tx(self.d2d_pairs[i][0])\n # set diff\n diff = self.n_closest_devices - len(self.d2d_pairs) + 1\n self.diff = 0 if diff < 0 else diff\n # reset sets\n self.reset_sets()\n\n def set_scenario(self, pairs_positions: List[Tuple],\n mue_position: Tuple, agents: List[ExternalDQNAgent]):\n if len(pairs_positions) != len(agents):\n raise Exception('Different `pair_positions` and `agents` lengths.')\n # declaring the bs, mues and d2d pairs\n self.sinr_d2ds = []\n self.rb = 1\n self.bs = base_station((0, 0), radius=self.params.bs_radius)\n self.bs.set_gain(self.params.bs_gain)\n # mue stuff\n self.mue = mobile_user(0)\n self.mue.set_gain(self.params.user_gain)\n self.mue.set_position(mue_position)\n self.mue.set_rb(self.rb)\n self.mue.set_tx_power(self.params.p_max)\n self.mue.set_distance_to_bs(euclidean(mue_position, self.bs.position))\n # instantiate d2d_pairs\n self.d2d_pairs = [(d2d_user(x, d2d_node_type.TX, self.params.p_max),\n d2d_user(x, d2d_node_type.RX, self.params.p_max))\n for x in range(len(agents))]\n 
self.distances = [1/10*i*self.bs.radius for i in range(11)]\n # distributing nodes in the bs radius\n if euclidean(mue_position, self.bs.position) <= self.params.bs_radius:\n self.mue.set_position(mue_position)\n else:\n raise Exception(\n 'Node distance to BS is greater than the BS radius.'\n )\n for pair, position in zip(self.d2d_pairs, pairs_positions):\n # check if node is inside the BS radius\n if euclidean(position, self.bs.position) <= self.params.bs_radius:\n # set tx position\n pair[0].set_position(position)\n # distribute rx around tx, in a fixed distance\n gen.distribute_rx_fixed_distance(\n pair, self.bs, self.params.d2d_pair_distance\n )\n # set tx distances\n for d in pair:\n d.set_distance_d2d(self.params.d2d_pair_distance)\n d.set_distance_to_bs(euclidean(d.position,\n self.bs.position))\n d.set_gain(self.params.user_gain)\n d.set_rb(self.rb)\n else:\n raise Exception(\n 'Node distance to BS is greater than the BS radius.'\n )\n for i in range(len(agents)):\n # register d2d device to a RL agent\n agents[i].set_d2d_tx_id(self.d2d_pairs[i][0].id)\n agents[i].set_d2d_tx(self.d2d_pairs[i][0])\n # set diff: the amount of devices must be >= than\n # `self.n_closest_devices` in order to build the environment states\n diff = self.n_closest_devices - len(self.d2d_pairs) + 1\n self.diff = 0 if diff < 0 else diff\n # reset sets\n self.reset_sets()\n\n def get_state(self, agent: ExternalDQNAgent):\n # calculates all pathlosses\n if not self.pathlosses_are_calculated:\n # d2d pathlosses\n self.calculate_d2d_pathlosses()\n # mue pathloss\n self.calculate_mue_pathloss()\n sinr = self.sinr_mue(\n self.mue, list(zip(*self.d2d_pairs))[0],\n self.params.noise_power,\n self.params.bs_gain, self.params.user_gain\n )\n d2d_tx = agent.d2d_tx\n close_devices = self.get_n_closest_transmitters(\n d2d_tx, self.n_closest_devices\n ) if len(self.d2d_pairs) > 1 else []\n for _ in range(self.diff):\n # append a dummy pair\n close_devices.append(self.dummy_d2d_pair[0])\n close_devices_x = [d.position[0] for d in close_devices]\n close_devices_y = [d.position[1] for d in close_devices]\n last_mue_powers = self.mue.past_actions[:self.memory].tolist()\n mue_sinrs = self.mue.past_sinrs[:self.memory].tolist()\n device_sinrs = d2d_tx.past_sinrs[:self.memory]\n device_powers = d2d_tx.past_actions[:self.memory].tolist()\n d2d_pathloss = d2d_tx.pathloss_d2d\n close_devs_powers = []\n close_devs_sinrs = []\n for d in close_devices:\n close_devs_powers += d.past_actions[:self.memory].tolist()\n close_devs_sinrs += d.past_sinrs[:self.memory].tolist()\n device_contrib = d2d_tx.past_actions[0] - d2d_tx.past_bs_losses[0]\n # + d2d_tx.gain + self.bs.gain\n bs_interference = self.mue.past_actions[0] \\\n - self.mue.past_bs_losses[0] - self.mue.past_sinrs[0]\n # + self.mue.gain + self.bs.gain\n device_contrib_pct = db_to_power(device_contrib - bs_interference)\n d2d_tx.set_interference_contrib_pct(device_contrib_pct)\n recent_d2d_pathloss = d2d_tx.pathloss_d2d\n recent_bs_pathloss = d2d_tx.pathloss_to_bs\n number_of_d2d_pairs = len(self.d2d_pairs)\n interference_indicator = sinr > self.params.sinr_threshold\n # normalization\n device_sinrs = [gen.ceil(i, 30) for i in device_sinrs]\n close_devs_sinrs = [gen.ceil(i, 30) for i in close_devs_sinrs]\n # state\n state = [\n number_of_d2d_pairs / 10,\n d2d_tx.position[0] / self.bs.radius,\n d2d_tx.position[1] / self.bs.radius,\n self.mue.position[0] / self.bs.radius,\n self.mue.position[1] / self.bs.radius,\n agent.action / 30,\n self.mue.tx_power / 30,\n 
int(interference_indicator),\n int(not interference_indicator),\n ]\n state += (np.array(close_devices_x) / self.bs.radius).tolist()\n state += (np.array(close_devices_y) / self.bs.radius).tolist()\n state += (np.array(last_mue_powers) / 30).tolist()\n state += (np.array(mue_sinrs) / 30).tolist()\n state += (np.array(device_sinrs) / 30).tolist()\n state += (np.array(device_powers) / 30).tolist()\n state.append(d2d_pathloss / 30)\n state += (np.array(close_devs_powers) / 30).tolist()\n state += (np.array(close_devs_sinrs) / 30).tolist()\n state.append(device_contrib / 30)\n state.append(device_contrib_pct / 30)\n state.append(recent_d2d_pathloss / 30)\n state.append(recent_bs_pathloss / 30)\n state = torch.tensor([state]).to(self.device)\n # end\n self.reset_sets()\n return state\n\n def get_other_devices_mean_positions(self, tx: d2d_user):\n other_devices = [d[0] for d in self.d2d_pairs if d[0] != tx]\n x_mean = np.mean([d.position[0] for d in other_devices])\n y_mean = np.mean([d.position[1] for d in other_devices])\n return x_mean, y_mean\n\n def get_other_devices_std_positions(self, tx: d2d_user):\n other_devices = [d[0] for d in self.d2d_pairs if d[0] != tx]\n x_std = np.std([d.position[0] for d in other_devices])\n y_std = np.std([d.position[1] for d in other_devices])\n return x_std, y_std\n\n def step(self, agents: List[DistanceAgent]):\n # allocate agents tx power\n for agent in agents:\n for pair in self.d2d_pairs:\n if agent.id == pair[0].id:\n pair[0].set_tx_power(agent.action)\n # mue_tx_power\n mue_tx_power = self.mue.get_tx_power_db(\n self.bs, self.params.sinr_threshold, self.params.noise_power,\n self.params.mue_margin, self.params.p_max)\n self.mue.set_tx_power(mue_tx_power)\n # mue sinr\n sinr_m = self.sinr_mue(\n self.mue, list(zip(*self.d2d_pairs))[0],\n self.params.noise_power,\n self.params.bs_gain, self.params.user_gain\n )\n self.mue.set_sinr(sinr_m)\n # d2d pairs sinr\n for p in self.d2d_pairs:\n if p[0].rb == self.rb:\n _ = self.sinr_d2d(\n p[0], p[1], list(zip(*self.d2d_pairs))[0], self.mue,\n self.params.noise_power, self.params.user_gain\n )\n # get the states\n states = [self.get_state(a) for a in agents]\n # rewards\n rewards = [self.calculate_reward(a) for a in agents]\n # total reward\n self.reward = np.sum(rewards)\n # spectral efficiencies\n self.mue_spectral_eff = np.log2(1 + db_to_power(self.mue.sinr))\n self.d2d_spectral_eff = np.sum(\n [np.log2(1 + db_to_power(d[0].sinr)) for d in self.d2d_pairs]\n )\n # end\n return states, rewards\n\n def calculate_d2d_pathlosses(self):\n for tx in [d[0] for d in self.d2d_pairs]:\n pathloss_to_bs = self.channel.step(\n euclidean(tx.position, self.bs.position)\n )\n pathloss_to_rx = self.channel.step(self.params.d2d_pair_distance)\n tx.set_pathloss_to_bs(pathloss_to_bs)\n tx.set_pathloss_d2d(pathloss_to_rx)\n\n def calculate_mue_pathloss(self):\n pathloss_to_bs = self.channel.step(\n euclidean(self.mue.position, self.bs.position)\n )\n self.mue.set_pathloss_to_bs(pathloss_to_bs)\n\n def set_n_d2d(self, n_d2d):\n self.n_d2d = n_d2d\n\n def sinr_mue(self, mue: mobile_user, d2d_devices: List[d2d_user],\n noise_power: float,\n bs_gain: float, user_gain: float):\n mue_contrib = mue.tx_power + user_gain + bs_gain \\\n - mue.pathloss_to_bs\n d2d_interferers = [d for d in d2d_devices if (\n d.type == d2d_node_type.TX and d.rb == mue.rb)]\n d2d_interference = sum(\n [\n db_to_power(d.tx_power) *\n db_to_power(user_gain) *\n db_to_power(bs_gain) /\n db_to_power(d.pathloss_to_bs)\n for d in d2d_interferers\n ]\n )\n sinr = 
mue_contrib - power_to_db(\n db_to_power(noise_power) + d2d_interference\n )\n return sinr\n\n def sinr_d2d(self, d2d_tx: d2d_user, d2d_rx: d2d_user,\n d2d_devices: List[d2d_user], mue: mobile_user,\n noise_power: float, user_gain: float):\n d2d_tx_contrib = d2d_tx.tx_power - \\\n self.channel.step(d2d_tx.distance_d2d) + 2 * user_gain\n d2d_rx_mue_distance = euclidean(d2d_rx.position, mue.position)\n mue_interference = mue.tx_power - \\\n self.channel.step(d2d_rx_mue_distance) + 2 * user_gain\n d2d_interferers = [d for d in d2d_devices if (\n d.id != d2d_tx.id\n and d.type == d2d_node_type.TX and d.rb == d2d_tx.rb\n )]\n d2d_interference = sum(\n [\n db_to_power(d.tx_power) *\n db_to_power(2 * user_gain) /\n db_to_power(self.channel.step(\n euclidean(d2d_rx.position, d.position)\n ))\n for d in d2d_interferers\n ]\n )\n sinr = d2d_tx_contrib - \\\n power_to_db(\n db_to_power(noise_power) +\n db_to_power(mue_interference) +\n d2d_interference\n )\n d2d_tx.set_sinr(sinr)\n return sinr\n\n def reset_sets(self):\n self.pathlosses_are_calculated = False\n self.mue.reset_set_flags()\n for d in self.d2d_pairs:\n d[0].reset_set_flags()\n\n def get_n_closest_transmitters(\n self,\n device: d2d_user,\n n: int\n ) -> List[d2d_user]:\n transmitters = [\n d[0] for d in self.d2d_pairs if d[0].id != device.id\n ]\n distances = [\n euclidean(device.position, t.position) for t in transmitters\n ]\n aux = [i for i in zip(transmitters, distances)]\n aux.sort(key=lambda x: x[1])\n length = n if n <= len(transmitters) else len(transmitters)\n sorted_txs, _ = zip(*aux)\n return list(sorted_txs[:length])\n\n def get_state_size(self, foo_agent: ExternalDQNAgent):\n foo = self.get_state(foo_agent)\n return foo.shape[1]\n\n def rewards(self, agents: List[ExternalDQNAgent]):\n flag = self.mue.sinr >= self.params.sinr_threshold\n d2d_txs = [a.d2d_tx for a in agents]\n d2d_sinrs, d2d_powers, d2d_bs_pathlosses = zip(\n *[(d.sinr, d.tx_power, d.pathloss_to_bs) for d in d2d_txs]\n )\n d2ds_speffs = np.log2(1 + db_to_power(d2d_sinrs))\n d2d_interferences = \\\n np.log2(1 + db_to_power(d2d_powers - d2d_bs_pathlosses))\n beta = 10\n if flag:\n beta = 1\n rewards = d2ds_speffs - beta * d2d_interferences\n return rewards\n\n def calculate_reward(self, agent: ExternalDQNAgent) -> float:\n flag = self.mue.sinr < self.params.sinr_threshold\n pct = agent.d2d_tx.interference_contrib_pct\n d2d_tx = agent.d2d_tx\n beta = 1\n alpha = 1\n if flag:\n if pct >= 0 and pct < .05:\n beta = 1\n alpha = 1\n elif pct >= .05 and pct < .5:\n beta = 100\n alpha = .1\n elif pct >= .5 and pct < 1:\n beta = 500\n alpha = .01\n else:\n raise Exception('Invalid pct.')\n # with pathloss to bs\n # reward = alpha * np.log2(1 + db_to_power(d2d_tx.sinr)) - beta * \\\n # np.log2(1 + db_to_power(d2d_tx.tx_power - d2d_tx.pathloss_to_bs))\n # without pathloss to bs\n reward = alpha * np.log2(1 + db_to_power(d2d_tx.sinr)) - beta * \\\n np.log2(1 + db_to_power(d2d_tx.tx_power))\n return reward\n\n def make_dummy_d2d_device(self, d2d_type: d2d_node_type) -> d2d_user:\n device = d2d_user(99, d2d_type)\n device.set_distance_to_bs(1000)\n device.set_distance_to_mue(1000)\n device.set_position((1000, 0))\n device.set_rb(1)\n device.set_sinr(30)\n device.set_tx_power(-1000)\n device.set_pathloss_to_bs(1000)\n device.set_distance_d2d(0)\n return device\n",
"# Uses CompleteEnvironment10dB\n# Random power allocation\n# Simulates many times, for different number of agents, and take the averages.\n# There are different channels to the BS and to the devices.\nimport random\nfrom shutil import copyfile\nfrom itertools import product\nfrom time import time\nfrom typing import List\nfrom sys_simulator.general \\\n import db_to_power, make_dir_timestamp, power_to_db, save_with_pickle\nfrom sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.environments.completeEnvironment10dB \\\n import CompleteEnvironment10dB\nfrom sys_simulator.dqn.agents.dqnAgent import CentralDQNAgent, ExternalDQNAgent\nfrom sys_simulator.dqn.externalDQNFramework import ExternalDQNFramework\nfrom sys_simulator.parameters.parameters import \\\n EnvironmentParameters, TrainingParameters, DQNAgentParameters\nfrom sys_simulator.q_learning.rewards import dis_reward_tensor_db\nfrom copy import deepcopy\nimport torch\nimport numpy as np\n\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\ncarrier_frequency = 2.4 # carrier frequency in GHz\nbs_radius = 500 # bs radius in m\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\ndevice_height = 1.5 # mobile devices height in m\nbs_height = 25 # BS antenna height in m\np_max = 40 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_train = 6 # mue sinr threshold in dB for training\nmue_margin = 200 # mue margin in dB\n# conversions from dBm to dB\np_max = p_max - 30\nnoise_power = noise_power - 30\n# channel parameters\nCHANNEL_RND = True\n# q-learning parameters\n# training\nNUMBER = 1\nSTEPS_PER_EPISODE = 500\nTEST_STEPS_PER_EPISODE = 1000\n# STEPS_PER_EPISODE = 2\n# TEST_STEPS_PER_EPISODE = 2\n# common\nEPSILON_INITIAL = 1\nEPSILON_MIN = .05\nEPSILON_DECAY = 1.3 / STEPS_PER_EPISODE # fast training\nGAMMA = 0.5 # Discount factor\nC = 8 # C constant for the improved reward function\nTARGET_UPDATE = 10\nREPLAY_MEMORY_SIZE = 10000\nBATCH_SIZE = 64\nHIDDEN_SIZE = 128\nNUM_HIDDEN_LAYERS = 3\nLEARNING_RATE = 1e-2\nREWARD_PENALTY = 1.5\nENVIRONMENT_MEMORY = 2\nMAX_NUMBER_OF_AGENTS = 5\nITERATIONS_PER_NUM_AGENTS = 50\nNUM_ACTIONS = 10\nmax_d2d = MAX_NUMBER_OF_AGENTS\n# more parameters\n# linear discretization\n# actions = power_to_db(np.linspace(\n# db_to_power(p_max-20), db_to_power(p_max-10), 10\n# ))\n# db discretization\nactions = power_to_db(\n np.linspace(\n 1e-6, db_to_power(p_max-10), NUM_ACTIONS\n )\n)\nenv_params = EnvironmentParameters(\n rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin\n)\nparams = TrainingParameters(1, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(\n EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,\n BATCH_SIZE, GAMMA\n)\nreward_function = dis_reward_tensor_db\nchannel_to_devices = BANChannel(rnd=CHANNEL_RND)\nchannel_to_bs = UrbanMacroNLOSWinnerChannel(\n rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height\n)\nref_env = CompleteEnvironment10dB(\n env_params,\n reward_function,\n channel_to_bs,\n channel_to_devices,\n reward_penalty=REWARD_PENALTY,\n memory=ENVIRONMENT_MEMORY,\n bs_height=bs_height\n)\n# foo env and foo agents stuff\nfoo_env = 
deepcopy(ref_env)\nfoo_agents = [ExternalDQNAgent(agent_params, [1]) for _ in range(4)]\nfoo_env.build_scenario(foo_agents)\n_, _ = foo_env.step(foo_agents)\nenv_state_size = foo_env.get_state_size(foo_agents[0])\n\n\ndef calculate_interferences(env: CompleteEnvironment10dB):\n bs = env.bs\n mue = env.mue\n d2d_pairs = env.d2d_pairs\n txs = [mue]\n txs += [p[0] for p in d2d_pairs]\n rxs = [bs]\n rxs += [p[1] for p in d2d_pairs]\n interferences = np.zeros((len(txs), len(rxs)))\n for i, tx in enumerate(txs):\n for j, (rx, interfered) in enumerate(zip(rxs, txs)):\n if tx == interfered:\n interf = tx.power_at_receiver\n elif tx == mue:\n interf = interfered.received_mue_interference\n elif rx == bs:\n interf = tx.caused_mue_interference\n else:\n interf = [\n i[1] for i in interfered.interferences\n if i[0] == tx.id\n ][0]\n interferences[i][j] = interf\n tx_labels = [d.id for d in txs]\n rx_labels = [d.id for d in rxs]\n return interferences, tx_labels, rx_labels\n\n\ndef train(n_agents, env):\n global actions\n actions_tuples = \\\n list(product(range(len(actions)), repeat=n_agents))\n framework = ExternalDQNFramework(\n agent_params,\n env_state_size * n_agents,\n len(actions_tuples),\n HIDDEN_SIZE,\n NUM_HIDDEN_LAYERS,\n LEARNING_RATE\n )\n best_reward = float('-inf')\n mue_spectral_eff_bag = list()\n d2d_spectral_eff_bag = list()\n rewards_bag = list()\n # aux_range = range(max_d2d+1)[1:]\n epsilon = agent_params.start_epsilon\n # n_agents = np.random.choice(aux_range)\n agents = [ExternalDQNAgent(agent_params, actions)\n for _ in range(n_agents)] # 1 agent per d2d tx\n central_agent = CentralDQNAgent(agent_params, actions, n_agents)\n central_agent.set_epsilon(epsilon)\n for a in agents:\n a.set_epsilon(epsilon)\n env.build_scenario(agents)\n obs_aux, _ = env.step(agents)\n obs = torch.cat(obs_aux).view(1, -1).float()\n # env.build_scenario(agents)\n # obs = [env.get_state(a).float() for a in agents]\n total_reward = 0.0\n i = 0\n bag = list()\n while True:\n if i >= params.steps_per_episode:\n break\n else:\n tuple_index = central_agent.get_action(framework, obs).item()\n action_tuple = actions_tuples[tuple_index]\n for j, agent in enumerate(agents):\n agent.set_action(action_tuple[j], actions[action_tuple[j]])\n next_obs_aux, rewards = env.step(agents)\n total_reward = np.sum(rewards)\n next_obs = torch.cat(next_obs_aux).view(1, -1).float()\n i += 1\n framework.replay_memory.push(\n obs, tuple_index, next_obs, total_reward\n )\n framework.learn()\n bag.append(total_reward.item())\n obs = next_obs\n if i % TARGET_UPDATE == 0:\n framework.target_net.load_state_dict(\n framework.policy_net.state_dict()\n )\n if total_reward > best_reward:\n best_reward = total_reward\n # mue spectral eff\n mue_spectral_eff_bag.append(env.mue_spectral_eff)\n # average d2d spectral eff\n d2d_spectral_eff_bag.append(env.d2d_spectral_eff)\n rewards_bag.append(env.reward)\n # print(\"Step#:{} sum reward:{} best_sum_reward:{} eps:{}\".format(\n # i, total_reward, best_reward, agents[0].epsilon)\n # )\n epsilon = central_agent.epsilon\n # Return the trained policy\n return framework, central_agent, agents, actions_tuples\n\n\ndef print_stuff(actions, env: CompleteEnvironment10dB):\n actions = [f'{i:.2f}' for i in actions]\n sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]\n print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')\n print(f'D2D Power levels [dBW]: {actions}')\n print(f'D2D SINR [dB]: {sinr_d2ds}')\n print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')\n\n\ndef test(\n agents: 
List[ExternalDQNAgent],\n):\n global actions\n n_agents = len(agents)\n actions_tuples = \\\n list(product(range(len(actions)), repeat=n_agents))\n mue_spectral_effs = []\n d2d_spectral_effs = []\n rewards_bag = []\n test_env = deepcopy(ref_env)\n test_env.build_scenario(agents)\n total_reward = 0.0\n i = 0\n while True:\n action_tuple = random.choice(actions_tuples)\n for j, agent in enumerate(agents):\n agent.set_action(action_tuple[j], actions[action_tuple[j]])\n _, rewards = test_env.step(agents)\n total_reward = sum(rewards)\n # saving stuff\n rewards_bag.append(total_reward)\n mue_spectral_effs.append(test_env.mue_spectral_eff.item())\n d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())\n i += 1\n if i >= TEST_STEPS_PER_EPISODE:\n break\n mue_success_rate = np.mean(\n np.array(mue_spectral_effs) > np.log2(\n 1 + db_to_power(sinr_threshold_train)\n )\n )\n # jain_index_avg = list()\n # for i, j in enumerate(jain_index):\n # jain_index_avg.append(np.average(j))\n # save data\n return mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards\n\n\ndef run():\n mue_sucess_rate_total = []\n mue_spectral_effs_total = []\n d2d_spectral_effs_total = []\n rewards_total = []\n start = time()\n for n in range(1, MAX_NUMBER_OF_AGENTS+1, 1):\n mue_suc_rates = []\n mue_speff_rates = []\n d2d_speff_rates = []\n rews = []\n for it in range(ITERATIONS_PER_NUM_AGENTS):\n now = (time() - start) / 60\n print(\n f'Number of agents: {n}/{MAX_NUMBER_OF_AGENTS}. ' +\n f'Iteration: {it}/{ITERATIONS_PER_NUM_AGENTS-1}. ' +\n f'Elapsed time: {now} minutes.'\n )\n agents = [ExternalDQNAgent(agent_params, actions)\n for _ in range(n)]\n mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards = \\\n test(agents)\n mue_suc_rates.append(mue_success_rate)\n mue_speff_rates.append(mue_spectral_effs)\n d2d_speff_rates.append(d2d_spectral_effs)\n rews.append(rewards)\n mue_sucess_rate_total.append(mue_suc_rates)\n mue_spectral_effs_total.append(mue_speff_rates)\n d2d_spectral_effs_total.append(d2d_speff_rates)\n rewards_total.append(rews)\n # save stuff\n now = (time() - start) / 60\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n dir_path = f'data/benchmarks/{filename}'\n data_path = make_dir_timestamp(dir_path)\n data_file_path = f'{data_path}/log.pickle'\n data = {\n 'mue_success_rate': mue_sucess_rate_total,\n 'd2d_speffs': d2d_spectral_effs_total,\n 'mue_speffs': mue_spectral_effs_total,\n 'rewards': rewards_total,\n 'mue_sinr_threshold': sinr_threshold_train,\n 'elapsed_time': now\n }\n save_with_pickle(data, data_file_path)\n copyfile(__file__, f'{data_path}/{filename}.py')\n print(f'done. Elapsed time: {now} minutes.')\n\n\nif __name__ == '__main__':\n run()\n",
"# Similar to script .\n# Uses CompleteEnvironment10dB\n# Centralized Learning-Distributed Execution\n# Simulates many times, for different number of agents, and take the averages.\n# There are different channels to the BS and to the devices.\n# Multiple episodes convergence. Everything is in dB.\n# One NN is trained and copied to each agent.\nfrom shutil import copyfile\nfrom sys_simulator.general import make_dir_timestamp, save_with_pickle\nimport matplotlib.pyplot as plt\nfrom sys_simulator.plots import plot_positions_actions_pie\nfrom time import time\nfrom sys_simulator.general import db_to_power, power_to_db\nfrom sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.environments.completeEnvironment10dB \\\n import CompleteEnvironment10dB\nfrom sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent\nfrom sys_simulator.dqn.externalDQNFramework \\\n import ExternalDQNFramework, RainbowFramework\nfrom sys_simulator.parameters.parameters import \\\n EnvironmentParameters, TrainingParameters, DQNAgentParameters\nfrom sys_simulator.q_learning.rewards import dis_reward_tensor_db\nfrom copy import deepcopy\nimport torch\nimport numpy as np\nimport pickle\n\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\ncarrier_frequency = 2.4 # carrier frequency in GHz\nbs_radius = 500 # bs radius in m\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\ndevice_height = 1.5 # mobile devices height in m\nbs_height = 25 # BS antenna height in m\np_max = 40 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_train = 6 # mue sinr threshold in dB for training\nmue_margin = 200 # mue margin in dB\n# conversions from dBm to dB\np_max = p_max - 30\nnoise_power = noise_power - 30\n# channel parameters\nCHANNEL_RND = True\n# q-learning parameters\n# training\nNUMBER = 1\nREWARD_FUNCTION = 'classic'\n# exec params\nSTEPS_PER_EPISODE = 25\nTEST_STEPS_PER_EPISODE = 25\nMAX_NUM_EPISODES = 1000 # medium training\nITERATIONS_PER_NUM_AGENTS = 100\nEVAL_EVERY = 150\nEVAL_NUM_EPISODES = 100\nEVAL_STEPS_PER_EPISODE = 5\n# debug params\n# STEPS_PER_EPISODE = 2\n# TEST_STEPS_PER_EPISODE = 2\n# MAX_NUM_EPISODES = 10\n# ITERATIONS_PER_NUM_AGENTS = 10\n# EVAL_EVERY = 1000\n# EVAL_NUM_EPISODES = 2\n# EVAL_STEPS_PER_EPISODE = 2\n# common\nEPSILON_INITIAL = 1\nEPSILON_MIN = .05\n# EPSILON_DECAY = .9*1e-4 # medium training\nEPSILON_DECAY = 1.3/(MAX_NUM_EPISODES*STEPS_PER_EPISODE) # medium training\nPRIO_BETA_ITS = int(.8*MAX_NUM_EPISODES*STEPS_PER_EPISODE)\nGAMMA = 0.9 # Discount factor\nC = 8 # C constant for the improved reward function\nTARGET_UPDATE = 20\nREPLAY_MEMORY_SIZE = 100000\nBATCH_SIZE = 512\nHIDDEN_SIZE = 128\nNUM_HIDDEN_LAYERS = 1\nLEARNING_RATE = 1e-2\nREWARD_PENALTY = 1.5\nENVIRONMENT_MEMORY = 10\nMAX_NUMBER_OF_AGENTS = 5\nmax_d2d = MAX_NUMBER_OF_AGENTS\nrange_n_d2d = range(1, max_d2d + 1, 1)\n# more parameters\n# linear discretization\n# actions = power_to_db(np.linspace(\n# db_to_power(p_max-20), db_to_power(p_max-10), 10\n# ))\n# db discretization\nactions = power_to_db(\n np.linspace(\n 1e-6, db_to_power(p_max-10), 10\n )\n)\nenv_params = EnvironmentParameters(\n rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, 
mue_margin=mue_margin\n)\nparams = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(\n EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,\n BATCH_SIZE, GAMMA\n)\nreward_function = dis_reward_tensor_db\nchannel_to_devices = BANChannel(rnd=CHANNEL_RND)\nchannel_to_bs = UrbanMacroNLOSWinnerChannel(\n rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height\n)\nref_env = CompleteEnvironment10dB(\n env_params,\n channel_to_bs,\n channel_to_devices,\n reward_penalty=REWARD_PENALTY,\n memory=ENVIRONMENT_MEMORY,\n bs_height=bs_height,\n reward_function=REWARD_FUNCTION\n)\n# foo env and foo agents stuff\nfoo_env = deepcopy(ref_env)\nfoo_agents = [ExternalDQNAgent(agent_params, [1]) for _ in range(4)]\nfoo_env.build_scenario(foo_agents)\n_, _ = foo_env.step(foo_agents)\nenv_state_size = foo_env.get_state_size(foo_agents[0])\n\n\ndef train(start):\n global actions\n framework = RainbowFramework(\n agent_params,\n env_state_size,\n len(actions),\n HIDDEN_SIZE,\n PRIO_BETA_ITS,\n NUM_HIDDEN_LAYERS,\n LEARNING_RATE,\n )\n best_reward = float('-inf')\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n mue_spectral_eff_bag = list()\n d2d_spectral_eff_bag = list()\n rewards_bag = list()\n # aux_range = range(max_d2d+1)[1:]\n epsilon = agent_params.start_epsilon\n for episode in range(MAX_NUM_EPISODES):\n env = deepcopy(ref_env)\n n_agents = np.random.choice(range_n_d2d)\n now = (time() - start) / 60\n print(\n 'Training. ' +\n f'Number of agents: {n_agents}. ' +\n f'Episode: {episode}/{MAX_NUM_EPISODES-1}. ' +\n f'Epsilon: {epsilon}. ' +\n f'Prio_Beta: {framework.replay_memory._beta}. ' +\n f'Elapsed time: {now} minutes.'\n )\n agents = [ExternalDQNAgent(agent_params, actions)\n for _ in range(n_agents)] # 1 agent per d2d tx\n for a in agents:\n a.set_epsilon(epsilon)\n env.build_scenario(agents)\n obs, _ = env.step(agents)\n total_reward = 0.0\n i = 0\n bag = list()\n while True:\n if i >= params.steps_per_episode:\n break\n else:\n past_actions = torch.zeros([len(agents)], device=device)\n for j, agent in enumerate(agents):\n agent.get_action(framework, obs[j].float())\n past_actions[j] = agent.action_index\n # # debugging\n # if len(agents) == 2:\n # print('debugging')\n # aux1 = agents[0].action_index == 9\n # aux2 = agents[1].action_index == 5\n # aux = [aux1, aux2]\n # if np.mean(aux) == 1:\n # print('debugging')\n next_obs, rewards = env.step(agents)\n i += 1\n for j, agent in enumerate(agents):\n framework.replay_memory.push(\n obs[j].cpu(), past_actions[j].cpu(),\n rewards[j], next_obs[j].cpu(), 0\n )\n framework.learn()\n total_reward = np.sum(rewards)\n bag.append(total_reward.item())\n obs = next_obs\n if i % TARGET_UPDATE == 0:\n framework.target_net.load_state_dict(\n framework.policy_net.state_dict()\n )\n if total_reward > best_reward:\n best_reward = total_reward\n epsilon = agents[0].epsilon\n if episode % EVAL_EVERY == 0:\n r, d_speff, m_speff = in_training_test(framework)\n rewards_bag.append(r)\n # average d2d spectral eff\n d2d_spectral_eff_bag.append(d_speff)\n # mue spectral eff\n mue_spectral_eff_bag.append(m_speff)\n # save stuff\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n data_path = f'models/dql/{filename}.pt'\n torch.save(framework.policy_net.state_dict(), data_path)\n # Return the trained policy\n return framework, rewards_bag, d2d_spectral_eff_bag, mue_spectral_eff_bag, epsilon # noqa\n\n\ndef test(n_agents, test_env, framework):\n 
framework.policy_net.eval()\n mue_spectral_effs = []\n d2d_spectral_effs = []\n rewards_bag = []\n # jain_index = [list() for _ in range(max_d2d+1)]\n bag = list()\n agents = [ExternalDQNAgent(agent_params, actions)\n for i in range(n_agents)] # 1 agent per d2d tx\n test_env.build_scenario(agents)\n obs, _ = test_env.step(agents)\n total_reward = 0.0\n i = 0\n while True:\n actions_index = list()\n for j, agent in enumerate(agents):\n aux = agent.act(framework, obs[j].float()).max(1)\n agent.set_action(aux[1].long(),\n agent.actions[aux[1].item()])\n bag.append(aux[1].item())\n actions_index.append(aux[1].item())\n next_obs, rewards = test_env.step(agents)\n obs = next_obs\n total_reward = sum(rewards)\n # saving stuff\n rewards_bag.append(total_reward)\n mue_spectral_effs.append(test_env.mue_spectral_eff.item())\n d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())\n i += 1\n if i >= TEST_STEPS_PER_EPISODE:\n break\n mue_success_rate = np.mean(\n np.array(mue_spectral_effs) > np.log2(\n 1 + db_to_power(sinr_threshold_train)\n )\n )\n # jain_index_avg = list()\n # for i, j in enumerate(jain_index):\n # jain_index_avg.append(np.average(j))\n # save data\n return mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards\n\n\ndef in_training_test(framework: ExternalDQNFramework):\n mue_spectral_eff_bag = list()\n d2d_spectral_eff_bag = list()\n rewards_bag = list()\n for _ in range(EVAL_NUM_EPISODES):\n env = deepcopy(ref_env)\n n_agents = np.random.choice(range_n_d2d)\n agents = [ExternalDQNAgent(agent_params, actions)\n for _ in range(n_agents)] # 1 agent per d2d tx\n env.build_scenario(agents)\n obs, _ = env.step(agents)\n for _ in range(EVAL_STEPS_PER_EPISODE):\n for j, agent in enumerate(agents):\n aux = agent.act(framework, obs[j].float()).max(1)\n agent.set_action(aux[1].long(),\n agent.actions[aux[1].item()])\n next_obs, _ = env.step(agents)\n obs = next_obs\n # mue spectral eff\n mue_spectral_eff_bag.append(env.mue_spectral_eff)\n # average d2d spectral eff\n d2d_spectral_eff_bag.append(env.d2d_spectral_eff)\n rewards_bag.append(env.reward)\n mean_mue_speff = np.mean(mue_spectral_eff_bag)\n mean_d2d_speff = np.mean(d2d_spectral_eff_bag)\n mean_reward = np.mean(rewards_bag)\n return mean_reward, mean_d2d_speff, mean_mue_speff\n\n\ndef run(framework=None):\n mue_sucess_rate_total = []\n mue_spectral_effs_total = []\n d2d_spectral_effs_total = []\n rewards_total = []\n start = time()\n r, d_speffs, m_speffs, epsilon = 0, 0, 0, 1\n if framework is None:\n framework, r, d_speffs, m_speffs, epsilon = train(start)\n for n in range(1, MAX_NUMBER_OF_AGENTS+1, 1):\n mue_suc_rates = []\n mue_speff_rates = []\n d2d_speff_rates = []\n rews = []\n for it in range(ITERATIONS_PER_NUM_AGENTS):\n now = (time() - start) / 60\n print(\n 'Testing. ' +\n f'Number of agents: {n}/{MAX_NUMBER_OF_AGENTS}. ' +\n f'Iteration: {it}/{ITERATIONS_PER_NUM_AGENTS-1}. 
' +\n f'Elapsed time: {now} minutes.'\n )\n test_env = deepcopy(ref_env)\n mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards = \\\n test(n, test_env, framework)\n mue_suc_rates.append(mue_success_rate)\n mue_speff_rates.append(mue_spectral_effs)\n d2d_speff_rates.append(d2d_spectral_effs)\n rews.append(rewards)\n mue_sucess_rate_total.append(mue_suc_rates)\n mue_spectral_effs_total.append(mue_speff_rates)\n d2d_spectral_effs_total.append(d2d_speff_rates)\n rewards_total.append(rews)\n # save stuff\n now = (time() - start) / 60\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n dir_path = f'data/rainbow/{filename}'\n data_path = make_dir_timestamp(dir_path)\n data_file_path = f'{data_path}/log.pickle'\n data = {\n 'mue_success_rate': mue_sucess_rate_total,\n 'd2d_speffs': d2d_spectral_effs_total,\n 'mue_speffs': mue_spectral_effs_total,\n 'rewards': rewards_total,\n 'mue_sinr_threshold': sinr_threshold_train,\n 'elapsed_time': now,\n 'training_rewards': r,\n 'training_d2d_speffs': d_speffs,\n 'training_mue_speffs': m_speffs,\n 'eval_every': EVAL_EVERY,\n 'final_epsilon': epsilon,\n }\n save_with_pickle(data, data_file_path)\n copyfile(__file__, f'{data_path}/{filename}.py')\n print(f'done. Elapsed time: {now} minutes.')\n\n\ndef run_test():\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n data_path = f'models/dql/{filename}.pt'\n framework = torch.load(data_path)\n run(framework)\n\n\ndef test_exec():\n # environment\n test_env = deepcopy(ref_env)\n # load framework\n framework = ExternalDQNFramework(\n agent_params,\n env_state_size,\n len(actions),\n HIDDEN_SIZE,\n NUM_HIDDEN_LAYERS,\n LEARNING_RATE\n )\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n data_path = f'models/dql/{filename}.pt'\n state_dict = torch.load(data_path)\n framework.policy_net.load_state_dict(state_dict)\n framework.policy_net.eval()\n # simulation stuff\n mue_spectral_effs = []\n d2d_spectral_effs = []\n rewards_bag = []\n # devices positions\n pairs_positions = [\n ((-400, 0, device_height), (-450, 0, device_height)),\n ((100, 0, device_height), (150, 0, device_height)),\n ((225, 225, device_height), (275, 225, device_height)),\n ((55, -55, device_height), (55, -5, device_height)),\n ]\n mue_position = (0, 200, device_height)\n # jain_index = [list() for _ in range(max_d2d+1)]\n n_agents = len(pairs_positions)\n bag = list()\n agents = [ExternalDQNAgent(agent_params, actions)\n for i in range(n_agents)] # 1 agent per d2d tx\n test_env.set_scenario(pairs_positions, mue_position, agents)\n obs, _ = test_env.step(agents)\n total_reward = 0.0\n i = 0\n while True:\n actions_index = list()\n for j, agent in enumerate(agents):\n aux = agent.act(framework, obs[j].float()).max(1)\n agent.set_action(aux[1].long(),\n agent.actions[aux[1].item()])\n bag.append(aux[1].item())\n actions_index.append(aux[1].item())\n next_obs, rewards = test_env.step(agents)\n obs = next_obs\n total_reward = sum(rewards)\n # saving stuff\n rewards_bag.append(total_reward)\n mue_spectral_effs.append(test_env.mue_spectral_eff.item())\n d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())\n i += 1\n if i >= TEST_STEPS_PER_EPISODE:\n break\n d2d_txs, d2d_rxs = zip(*test_env.d2d_pairs)\n # D2D interference on the MUE, in dB\n d2d_interferences = np.array([\n d.caused_mue_interference for d in d2d_txs\n ])\n d2d_interferences_mag = db_to_power(d2d_interferences)\n d2d_total_interference = np.sum(d2d_interferences_mag)\n percentage_interferences = 
d2d_interferences_mag / d2d_total_interference\n interferences, tx_labels, rx_labels = calculate_interferences(test_env)\n if d2d_total_interference != 0:\n plot_positions_actions_pie(\n test_env.bs, test_env.mue, d2d_txs, d2d_rxs,\n actions_index, percentage_interferences,\n test_env.mue.sinr > sinr_threshold_train, sinr_threshold_train,\n test_env.reward, interferences, tx_labels, rx_labels\n )\n # jain_index[n_agents].append(gen.jain_index(test_env.sinr_d2ds))\n mue_success_rate = np.mean(\n np.array(mue_spectral_effs) > np.log2(\n 1 + db_to_power(sinr_threshold_train)\n )\n )\n # jain_index_avg = list()\n # for i, j in enumerate(jain_index):\n # jain_index_avg.append(np.average(j))\n # save data\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n data_path = f'data/rainbow/{filename}_exec.pickle'\n data = {\n 'd2d_speffs_avg_total': d2d_spectral_effs,\n 'mue_success_rate': mue_success_rate,\n 'chosen_actions': bag,\n 'd2d_speffs': d2d_spectral_effs,\n 'mue_speffs': mue_spectral_effs,\n 'rewards': rewards_bag,\n 'mue_sinr_threshold': sinr_threshold_train,\n }\n with open(data_path, 'wb') as file:\n pickle.dump(data, file)\n # plot\n print_stuff(actions, test_env)\n plt.show()\n\n\ndef calculate_interferences(env: CompleteEnvironment10dB):\n bs = env.bs\n mue = env.mue\n d2d_pairs = env.d2d_pairs\n txs = [mue]\n txs += [p[0] for p in d2d_pairs]\n rxs = [bs]\n rxs += [p[1] for p in d2d_pairs]\n interferences = np.zeros((len(txs), len(rxs)))\n for i, tx in enumerate(txs):\n for j, (rx, interfered) in enumerate(zip(rxs, txs)):\n if tx == interfered:\n interf = tx.power_at_receiver\n elif tx == mue:\n interf = interfered.received_mue_interference\n elif rx == bs:\n interf = tx.caused_mue_interference\n else:\n interf = [\n power_to_db(i[1]) for i in interfered.interferences\n if i[0] == tx.id\n ][0]\n interferences[i][j] = interf\n tx_labels = [d.id for d in txs]\n rx_labels = [d.id for d in rxs]\n return interferences, tx_labels, rx_labels\n\n\ndef print_stuff(actions, env: CompleteEnvironment10dB):\n actions = [f'{i:.2f}' for i in actions]\n sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]\n print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')\n print(f'D2D Power levels [dBW]: {actions}')\n print(f'D2D SINR [dB]: {sinr_d2ds}')\n print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')\n\n\nif __name__ == '__main__':\n run()\n",
"# Similar to script5. The same environment\n# is passed from training to testing, after being reset.\n# Distributed Learning-Distributed execution\n# Uses CompleteEnvironment10dB.\n# Simulates many times, for different number of agents, and take the averages.\n# There are different channels to the BS and to the devices.\n# Single episode convergence. Everything is in dB. One NN for each agent.\nfrom shutil import copyfile\nfrom sys_simulator.a2c.framework import DiscreteFramework\nfrom sys_simulator.a2c.agent import Agent\nfrom sys_simulator.a2c import ActorCriticDiscrete\nfrom time import time\nfrom typing import List\nfrom sys_simulator.general import db_to_power, make_dir_timestamp, power_to_db, save_with_pickle\nfrom sys_simulator.channels import BANChannel, UrbanMacroLOSWinnerChannel\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.environments.completeEnvironment10dB \\\n import CompleteEnvironment10dB\nfrom sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent\nfrom sys_simulator.parameters.parameters import \\\n EnvironmentParameters, TrainingParameters, DQNAgentParameters\nfrom sys_simulator.q_learning.rewards import dis_reward_tensor_db\nfrom copy import deepcopy\nimport torch\nimport numpy as np\n\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\ncarrier_frequency = 2.4 # carrier frequency in GHz\nbs_radius = 500 # bs radius in m\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\ndevice_height = 1.5 # mobile devices height in m\nbs_height = 25 # BS antenna height in m\np_max = 40 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_train = 6 # mue sinr threshold in dB for training\nmue_margin = 200 # mue margin in dB\n# conversions from dBm to dB\np_max = p_max - 30\nnoise_power = noise_power - 30\n# channel parameters\nCHANNEL_RND = True\n# q-learning parameters\n# training\nNUMBER = 1\n# run params\nSTEPS_PER_EPISODE = 10\nTEST_STEPS_PER_EPISODE = 100\nMAX_NUM_EPISODES = 50\n# debugging params\n# STEPS_PER_EPISODE = 10\n# TEST_STEPS_PER_EPISODE = 10\n# MAX_NUM_EPISODES = 2\n# common\nEPSILON_INITIAL = 1\nEPSILON_MIN = .05\nEPSILON_DECAY = 1.3 / STEPS_PER_EPISODE # fast training\nGAMMA = 0.5 # Discount factor\nC = 8 # C constant for the improved reward function\nTARGET_UPDATE = 10\nREPLAY_MEMORY_SIZE = 10000\nBATCH_SIZE = 64\nHIDDEN_SIZE = 128\nNUM_HIDDEN_LAYERS = 1\nLEARNING_RATE = 1e-2\nBETA = 1e-2\nREWARD_PENALTY = 1.5\nENVIRONMENT_MEMORY = 2\nMAX_NUMBER_OF_AGENTS = 5\nITERATIONS_PER_NUM_AGENTS = 50\n# ITERATIONS_PER_NUM_AGENTS = 10\nmax_d2d = MAX_NUMBER_OF_AGENTS\n# more parameters\n# linear discretization\n# actions = power_to_db(np.linspace(\n# db_to_power(p_max-20), db_to_power(p_max-10), 10\n# ))\n# db discretization\nactions = power_to_db(\n np.linspace(\n 1e-6, db_to_power(p_max-10), 10\n )\n)\nenv_params = EnvironmentParameters(\n rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin\n)\nparams = TrainingParameters(1, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(\n EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,\n BATCH_SIZE, GAMMA\n)\nreward_function = dis_reward_tensor_db\nchannel_to_devices = BANChannel(rnd=CHANNEL_RND)\nchannel_to_bs = UrbanMacroLOSWinnerChannel(\n rnd=CHANNEL_RND, 
f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height\n)\nref_env = CompleteEnvironment10dB(\n env_params,\n channel_to_bs,\n channel_to_devices,\n reward_penalty=REWARD_PENALTY,\n memory=ENVIRONMENT_MEMORY,\n bs_height=bs_height,\n reward_function='classic'\n)\n# foo env and foo agents stuff\nfoo_env = deepcopy(ref_env)\nfoo_agents = [ExternalDQNAgent(agent_params, [1]) for _ in range(4)]\nfoo_env.build_scenario(foo_agents)\n_, _ = foo_env.step(foo_agents)\nenv_state_size = foo_env.get_state_size(foo_agents[0])\n\n\ndef calculate_interferences(env: CompleteEnvironment10dB):\n bs = env.bs\n mue = env.mue\n d2d_pairs = env.d2d_pairs\n txs = [mue]\n txs += [p[0] for p in d2d_pairs]\n rxs = [bs]\n rxs += [p[1] for p in d2d_pairs]\n interferences = np.zeros((len(txs), len(rxs)))\n for i, tx in enumerate(txs):\n for j, (rx, interfered) in enumerate(zip(rxs, txs)):\n if tx == interfered:\n interf = tx.power_at_receiver\n elif tx == mue:\n interf = interfered.received_mue_interference\n elif rx == bs:\n interf = tx.caused_mue_interference\n else:\n interf = [\n i[1] for i in interfered.interferences\n if i[0] == tx.id\n ][0]\n interferences[i][j] = interf\n tx_labels = [d.id for d in txs]\n rx_labels = [d.id for d in rxs]\n return interferences, tx_labels, rx_labels\n\n\ndef train(n_agents, env):\n global actions\n frameworks = [\n DiscreteFramework(\n env_state_size,\n len(actions),\n HIDDEN_SIZE,\n NUM_HIDDEN_LAYERS,\n 1,\n STEPS_PER_EPISODE,\n LEARNING_RATE,\n BETA,\n GAMMA\n )\n for _ in range(n_agents)\n ]\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n agents = [Agent() for _ in range(n_agents)]\n env.build_scenario(agents)\n for _ in range(MAX_NUM_EPISODES):\n obs, _ = env.step(agents)\n best_reward = 0\n for i in range(STEPS_PER_EPISODE):\n for j, (agent, f) in enumerate(zip(agents, frameworks)):\n action_index, dist, value = agent.act_discrete(f, obs[j])\n agent.set_action(actions[action_index.item()])\n log_prob = dist.log_prob(action_index)\n f.log_probs[0][i] = log_prob\n f.values[0][i] = value\n f.entropy[0][i] = dist.entropy()\n # perform an environment step\n next_obs_t, rewards_t = env.step(agents)\n for r, f in zip(rewards_t, frameworks):\n f.rewards[0][i] = r\n total_reward = np.sum(rewards_t)\n best_reward = \\\n total_reward if total_reward > best_reward else best_reward\n obs = next_obs_t\n # gae and returns\n next_obs_t = torch.cat(obs, 0).to(device)\n for j, (agent, f) in enumerate(zip(agents, frameworks)):\n _, _, next_value_t = agent.act_discrete(f, next_obs_t[j])\n f.values[0][STEPS_PER_EPISODE] = next_value_t\n f.learn()\n # Return the trained policy\n return frameworks, agents\n\n\ndef print_stuff(actions, env: CompleteEnvironment10dB):\n actions = [f'{i:.2f}' for i in actions]\n sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]\n print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')\n print(f'D2D Power levels [dBW]: {actions}')\n print(f'D2D SINR [dB]: {sinr_d2ds}')\n print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')\n\n\ndef test(\n test_env: CompleteEnvironment10dB,\n frameworks: List[ActorCriticDiscrete],\n agents: List[Agent]\n):\n for f in frameworks:\n f.a2c.eval()\n mue_spectral_effs = []\n d2d_spectral_effs = []\n rewards_bag = []\n # jain_index = [list() for _ in range(max_d2d+1)]\n bag = list() # 1 agent per d2d tx\n test_env.reset_before_build_set()\n obs, _ = test_env.step(agents)\n total_reward = 0.0\n i = 0\n while True:\n for j, (agent, framework) in enumerate(zip(agents, frameworks)):\n action_index, _, _ = 
agent.act_discrete(framework, obs[j])\n agent.set_action(actions[action_index.item()])\n bag.append(action_index.item())\n next_obs, rewards = test_env.step(agents)\n obs = next_obs\n total_reward = sum(rewards)\n # saving stuff\n rewards_bag.append(total_reward)\n mue_spectral_effs.append(test_env.mue_spectral_eff.item())\n d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())\n i += 1\n if i >= TEST_STEPS_PER_EPISODE:\n break\n mue_success_rate = np.mean(\n np.array(mue_spectral_effs) > np.log2(\n 1 + db_to_power(sinr_threshold_train)\n )\n )\n return mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards\n\n\ndef run():\n mue_sucess_rate_total = []\n mue_spectral_effs_total = []\n d2d_spectral_effs_total = []\n rewards_total = []\n start = time()\n for n in range(1, MAX_NUMBER_OF_AGENTS+1, 1):\n mue_suc_rates = []\n mue_speff_rates = []\n d2d_speff_rates = []\n rews = []\n for it in range(ITERATIONS_PER_NUM_AGENTS):\n now = (time() - start) / 60\n print(\n f'Number of agents: {n}/{MAX_NUMBER_OF_AGENTS}. ' +\n f'Iteration: {it}/{ITERATIONS_PER_NUM_AGENTS-1}. ' +\n f'Elapsed time: {now} minutes.'\n )\n env = deepcopy(ref_env)\n frameworks, agents = train(n, env)\n mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards = \\\n test(env, frameworks, agents)\n mue_suc_rates.append(mue_success_rate)\n mue_speff_rates.append(mue_spectral_effs)\n d2d_speff_rates.append(d2d_spectral_effs)\n rews.append(rewards)\n mue_sucess_rate_total.append(mue_suc_rates)\n mue_spectral_effs_total.append(mue_speff_rates)\n d2d_spectral_effs_total.append(d2d_speff_rates)\n rewards_total.append(rews)\n # save stuff\n now = (time() - start) / 60\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n dir_path = f'data/a2c/{filename}'\n data_path = make_dir_timestamp(dir_path)\n data_file_path = f'{data_path}/log.pickle'\n data = {\n 'mue_success_rate': mue_sucess_rate_total,\n 'd2d_speffs': d2d_spectral_effs_total,\n 'mue_speffs': mue_spectral_effs_total,\n 'rewards': rewards_total,\n 'mue_sinr_threshold': sinr_threshold_train,\n 'elapsed_time': now\n }\n save_with_pickle(data, data_file_path)\n copyfile(__file__, f'{data_path}/{filename}.py')\n print(f'done. Elapsed time: {now} minutes.')\n\n\nif __name__ == '__main__':\n run()\n",
"from sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel\nimport numpy as np\n\n\ndef test_ban_channel():\n channel1 = BANChannel(env='office', rnd=False)\n channel2 = BANChannel(env='ferry', rnd=False)\n d = 10\n loss1 = channel1.step(d)\n loss2 = channel2.step(d)\n loss1_mag = 10**(loss1/10)\n loss2_mag = 10**(loss2/10)\n assert (loss1, loss1_mag) == (49.1, 81283.05161640995)\n assert (loss2, loss2_mag) == (42.099999999999994, 16218.100973589266)\n\n\ndef test_winner_channel():\n carrier_frequency = 2.4 # carrier frequency in GHz\n bs_height = 25 # BS antenna height in m\n device_height = 1.5 # mobile devices height in m\n channel_to_bs = UrbanMacroNLOSWinnerChannel(\n rnd=True, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height\n )\n # np.random.seed(42)\n x1 = channel_to_bs.step(500)\n np.random.seed(42)\n x2 = channel_to_bs.step(500)\n x3 = channel_to_bs.step(500)\n print(x1)\n print(x2)\n print(x3)\n\n\nif __name__ == '__main__':\n np.random.seed(42)\n test_winner_channel()\n",
"# Same as scratch5, but using the trained external policy, from script14\n\nimport sys\nimport os\n\nlucas_path = os.environ['LUCAS_PATH']\nsys.path.insert(1, lucas_path)\n\nfrom general import general as gen\nfrom devices.devices import node, base_station, mobile_user, d2d_user, d2d_node_type\nfrom pathloss import pathloss\nfrom plots.plots import plot_positions, plot_spectral_effs\nfrom q_learning.environments.completeEnvironment import CompleteEnvironment\nfrom dqn.agents.dqnAgent import DQNAgent\nfrom dqn.dqn import DQN\nfrom q_learning.q_table import DistributedQTable\nfrom q_learning import rewards\nfrom parameters.parameters import EnvironmentParameters, TrainingParameters, DQNAgentParameters, LearningParameters\nfrom typing import List\nfrom matplotlib import pyplot as plt\n\nimport torch\nimport math\nimport numpy as np\nimport os\n\ndef test(agents: List[DQNAgent], env: CompleteEnvironment, num_episodes: int, episode_steps: int):\n mue_spectral_effs = list()\n d2d_spectral_effs = list() \n device = torch.device('cuda')\n done = False\n bag = list()\n for _ in range(num_episodes):\n env.build_scenario(agents)\n done = False\n obs = [env.get_state(a) for a in agents] \n total_reward = 0.0\n i = 0\n while not done: \n for j, agent in enumerate(agents):\n aux = agent.act(obs[j]).max(1)\n agent.set_action(aux[1].long(), agent.actions[aux[1]])\n bag.append(aux[1].item())\n next_obs, rewards, done = env.step(agents)\n obs = next_obs\n total_reward += sum(rewards)\n i +=1\n if i >= episode_steps:\n break\n mue_spectral_effs.append(env.mue_spectral_eff)\n d2d_spectral_effs.append(env.d2d_spectral_eff)\n return total_reward, mue_spectral_effs, d2d_spectral_effs, bag\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\nbs_radius = 500 # bs radius in m\n\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\np_max = 23 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_mue = 6 # true mue sinr threshold in dB\nmue_margin = .5e4\n\n\n# conversions from dB to pow\np_max = p_max - 30\np_max = gen.db_to_power(p_max)\nnoise_power = noise_power - 30\nnoise_power = gen.db_to_power(noise_power)\nbs_gain = gen.db_to_power(bs_gain)\nuser_gain = gen.db_to_power(user_gain)\nsinr_threshold_mue = gen.db_to_power(sinr_threshold_mue)\n\n# q-learning parameters\n# MAX_NUM_EPISODES = 2500\n# MAX_NUM_EPISODES = 8000\n# MAX_NUM_EPISODES = int(1.2e4)\n# MAX_NUM_EPISODES = int(6e3)\nSTEPS_PER_EPISODE = 4000\n# STEPS_PER_EPISODE = 200\n# STEPS_PER_EPISODE = 1000\nEPSILON_MIN = 0.01\n# MAX_NUM_STEPS = 50\n# EPSILON_DECAY = 4e-2 * EPSILON_MIN / STEPS_PER_EPISODE\nEPSILON_DECAY = 100 * EPSILON_MIN / STEPS_PER_EPISODE\nMAX_NUM_EPISODES = int(1.2/EPSILON_DECAY)\n# EPSILON_DECAY = 8e-1 * EPSILON_MIN / STEPS_PER_EPISODE\n# EPSILON_DECAY = 2 * EPSILON_MIN / MAX_NUM_STEPS\nALPHA = 0.05 # Learning rate\nGAMMA = 0.98 # Discount factor\nC = 8000 # C constant for the improved reward function\nTARGET_UPDATE = 10\n\n# more parameters\nenv_params = EnvironmentParameters(rb_bandwidth, d2d_pair_distance, p_max, noise_power, bs_gain, user_gain, sinr_threshold_mue,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin)\ntrain_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(EPSILON_MIN, EPSILON_DECAY, 1, 128, GAMMA)\n\nactions = torch.tensor([i*p_max/10/1000 + 1e-9 for 
i in range(11)])\nreward_function = rewards.dis_reward_tensor\nenvironment = CompleteEnvironment(env_params, reward_function, early_stop=1e-6, tolerance=10)\n\ncwd = os.getcwd()\n\nreward_function = rewards.dis_reward_tensor\n\n# policy 5 test\nt_agents = [DQNAgent(agent_params, actions) for i in range(n_d2d)] # 1 agent per d2d tx\nfor i, a in enumerate(t_agents):\n a.policy_net.load_state_dict(torch.load(f'{cwd}/models/ext_model_dqn_agent.pt'))\ntotal_reward, mue_spectral_effs, d2d_spectral_effs, bag = test(t_agents, environment, 1000, 50)\n\nmue_spectral_effs = torch.tensor(mue_spectral_effs)\nmue_spectral_effs = torch.reshape(mue_spectral_effs, (1, torch.prod(torch.tensor(mue_spectral_effs.shape))))\n\nd2d_spectral_effs = torch.tensor(d2d_spectral_effs)\nd2d_spectral_effs = torch.reshape(d2d_spectral_effs, (1, torch.prod(torch.tensor(d2d_spectral_effs.shape))))\n\nd2d_speffs_avg = torch.sum(d2d_spectral_effs)/d2d_spectral_effs.shape[1]\n\nmue_success_rate = torch.sum(mue_spectral_effs > np.log2(1 + sinr_threshold_mue)).float() / mue_spectral_effs.shape[1]\n\nlog = list()\nlog.append(f'D2D SPECTRAL EFFICIENCY - SCRIPT: {d2d_speffs_avg}')\nlog.append(f'MUE SUCCESS RATE - SCRIPT: {mue_success_rate}')\n\nfilename = gen.path_leaf(__file__)\nfilename = filename.split('.')[0]\nfilename = f'{lucas_path}/logs/{filename}.txt'\nfile = open(filename, 'w')\n\nfor l in log:\n file.write(f'{l}\\n')\nfile.close()\n\nplt.figure(1)\nplt.hist(bag)\n\n# plt.figure(1)\n# plt.plot(list(range(len(d2d_spectral_effs))), d2d_spectral_effs, '.', label='Script')\n# plt.title('D2D spectral efficiencies')\n# plt.legend()\n\n# plt.figure(2)\n# threshold_eff = np.log2(1 + sinr_threshold_mue) * np.ones(len(mue_spectral_effs))\n# plt.plot(list(range(len(mue_spectral_effs))), mue_spectral_effs, '.', label='Script ')\n# plt.plot(list(range(len(mue_spectral_effs))), threshold_eff, label='Threshold') \n\n# plt.title('MUE spectral efficiencies')\n# plt.legend()\n\n\nplt.show()\n\n\n\n\n\n\n\n\n\n",
"# Same as script 15, but there are only 5 actions options, hence the DQN has a smaller output layer.\n\nimport sys\nimport os\n\nlucas_path = os.environ['LUCAS_PATH']\nsys.path.insert(1, lucas_path)\n\nfrom general import general as gen\nfrom devices.devices import node, base_station, mobile_user, d2d_user, d2d_node_type\nfrom pathloss import pathloss\nfrom plots.plots import plot_positions, plot_spectral_effs\nfrom q_learning.environments.completeEnvironment import CompleteEnvironment\nfrom dqn.agents.dqnAgent import ExternalDQNAgent\nfrom dqn.externalDQNFramework import ExternalDQNFramework\nfrom dqn.replayMemory import ReplayMemory\nfrom dqn.dqn import DQN\nfrom q_learning.q_table import DistributedQTable\nfrom q_learning import rewards\nfrom parameters.parameters import EnvironmentParameters, TrainingParameters, DQNAgentParameters, LearningParameters\nfrom typing import List\nfrom matplotlib import pyplot as plt\n\nimport torch\nimport math\nimport numpy as np\nimport os\nimport pickle\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\nbs_radius = 500 # bs radius in m\n\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\np_max = 23 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_train = 6 # mue sinr threshold in dB for training\nsinr_threshold_mue = 6 # true mue sinr threshold in dB\nmue_margin = .5e4\n\n# conversions from dB to pow\np_max = p_max - 30\np_max = gen.db_to_power(p_max)\nnoise_power = noise_power - 30\nnoise_power = gen.db_to_power(noise_power)\nbs_gain = gen.db_to_power(bs_gain)\nuser_gain = gen.db_to_power(user_gain)\nsinr_threshold_train = gen.db_to_power(sinr_threshold_train)\n\n# q-learning parameters\nSTEPS_PER_EPISODE = 25\nEPSILON_MIN = 0.05\n# MAX_NUM_STEPS = 50\n# EPSILON_DECAY = 0.4045*1e-4 # super long training\n# EPSILON_DECAY = 0.809*1e-4 # long training\n# EPSILON_DECAY = 0.809*1e-4 # medium training\nEPSILON_DECAY = 3.35*1e-4 # medium training\n# EPSILON_DECAY = 8.09*1e-4 # short training\n# MAX_NUM_EPISODES = 40000 # super long training\n# MAX_NUM_EPISODES = 20000 # long training\nMAX_NUM_EPISODES = 480 # medium training\n# MAX_NUM_EPISODES = 480 # medium training\n# MAX_NUM_EPISODES = 2000 # short training\nALPHA = 0.05 # Learning rate\nGAMMA = 0.98 # Discount factor\n# C = 8000 # C constant for the improved reward function\nC = 80 # C constant for the improved reward function\nTARGET_UPDATE = 10\nMAX_NUMBER_OF_AGENTS = 20\n\n# more parameters\nenv_params = EnvironmentParameters(rb_bandwidth, d2d_pair_distance, p_max, noise_power, bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin)\ntrain_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(EPSILON_MIN, EPSILON_DECAY, 1, 512, GAMMA)\n\next_framework = ExternalDQNFramework(agent_params)\n# actions = [i*p_max/10/1000 for i in range(21)] # worst\n# actions = [i*0.80*p_max/10/1000 for i in range(21)] # best histogram\nreward_function = rewards.dis_reward_tensor\n# environment = CompleteEnvironment(env_params, reward_function, early_stop=1e-6, tolerance=10)\nenvironment = CompleteEnvironment(env_params, reward_function)\n\n\n# training function\n# TODO: colocar agente e d2d_device na mesma classe? 
fazer propriedade d2d_device no agente?\ndef train(framework: ExternalDQNFramework, env: CompleteEnvironment, params: TrainingParameters, agent_params: DQNAgentParameters, max_d2d: int): \n best_reward = float('-inf')\n device = torch.device('cuda')\n mue_spectral_eff_bag = list()\n d2d_spectral_eff_bag = list()\n aux_range = range(max_d2d)[1:]\n epsilon = agent_params.start_epsilon\n for episode in range(params.max_episodes):\n # TODO: atualmente redistribuo os usuarios aleatoriamente a cada episodio. Isto é o melhor há se fazer? \n # Simular deslocamento dos usuários?\n actions = [i*0.82*p_max/5/1000 for i in range(5)] # best result\n n_agents = np.random.choice(aux_range)\n agents = [ExternalDQNAgent(agent_params, actions) for i in range(n_agents)] # 1 agent per d2d tx\n counts = np.zeros(len(agents))\n awaits = list()\n await_steps = [2,3,4]\n for a in agents:\n awaits.append(np.random.choice(await_steps))\n a.set_action(torch.tensor(0).long().cuda(), a.actions[0])\n a.set_epsilon(epsilon)\n\n env.build_scenario(agents)\n done = False\n obs = [env.get_state(a) for a in agents] \n total_reward = 0.0\n i = 0\n bag = list()\n while not done:\n if i >= params.steps_per_episode:\n break\n else:\n actions = torch.zeros([len(agents)], device=device)\n for j, agent in enumerate(agents):\n if counts[j] < awaits[j]:\n counts[j] += 1\n else:\n agent.get_action(framework, obs[j])\n actions[j] = agent.action_index \n counts[j] = 0\n awaits[j] = np.random.choice(await_steps)\n next_obs, rewards, done = env.step(agents) \n i += 1\n for j, agent in enumerate(agents):\n framework.replay_memory.push(obs[j], actions[j], next_obs[j], rewards[j])\n framework.learn()\n obs = next_obs\n total_reward += torch.sum(rewards) \n bag.append(total_reward.item()) \n obs = next_obs\n if episode % TARGET_UPDATE == 0:\n framework.target_net.load_state_dict(framework.policy_net.state_dict())\n if total_reward > best_reward:\n best_reward = total_reward\n print(\"Episode#:{} sum reward:{} best_sum_reward:{} eps:{}\".format(episode,\n total_reward, best_reward, agents[0].epsilon))\n\n # some statistics\n mue_spectral_eff_bag.append(env.mue_spectral_eff) # mue spectral eff\n d2d_spectral_eff_bag.append(env.d2d_spectral_eff/env.params.n_d2d) # average d2d spectral eff \n epsilon = agents[0].epsilon\n\n \n # Return the trained policy\n return mue_spectral_eff_bag, d2d_spectral_eff_bag\n\n \n# SCRIPT EXEC\n# training\nmue_spectral_effs, d2d_spectral_effs = train(ext_framework, environment, train_params, agent_params, MAX_NUMBER_OF_AGENTS)\nspectral_effs = zip(mue_spectral_effs, d2d_spectral_effs)\n\ncwd = os.getcwd()\n\nfilename = gen.path_leaf(__file__)\nfilename = filename.split('.')[0]\nfilename_model = filename\nfilename = f'{lucas_path}/data/{filename}.pickle'\ntorch.save(ext_framework.policy_net.state_dict(), f'{lucas_path}/models/{filename_model}.pt')\nwith open(filename, 'wb') as f:\n pickle.dump(spectral_effs, f)\n\nplt.figure(1)\nplt.plot(mue_spectral_effs, '.', label='MUEs')\nplt.plot(d2d_spectral_effs, '.', label='D2Ds')\nplt.xlabel('Iteration')\nplt.ylabel('Average Spectral Efficiencies')\nplt.legend()\n\nplt.show()\n\n\n",
"# Testing for script2_3.py\n\nimport sys\nimport os\n\nlucas_path = os.environ['LUCAS_PATH']\nsys.path.insert(1, lucas_path)\n\nfrom general import general as gen\nfrom devices.devices import node, base_station, mobile_user, d2d_user, d2d_node_type\nfrom pathloss import pathloss\nfrom plots.plots import plot_positions, plot_spectral_effs\nfrom q_learning.environments.simpleEnvironment import SimpleEnvironment\nfrom dqn.agents.dqnAgent import ExternalDQNAgent\nfrom dqn.externalDQNFramework import ExternalDQNFramework\nfrom q_learning.q_table import DistributedQTable\nfrom q_learning import rewards\nfrom parameters.parameters import EnvironmentParameters, TrainingParameters, DQNAgentParameters, LearningParameters\nfrom typing import List\nfrom matplotlib import pyplot as plt\n\nimport torch\nimport math\nimport numpy as np\nimport os\n\ndef test(env: SimpleEnvironment, agents: List[ExternalDQNAgent], frameworks: List[ExternalDQNFramework], num_episodes: int):\n bag = list() \n env.build_scenario(agents)\n obs = env.get_state()\n mue_spectral_effs = list()\n d2d_spectral_effs = list()\n for _ in range(num_episodes):\n total_reward = 0.0\n for i, agent in enumerate(agents):\n aux = agent.act(frameworks[i], obs).max(1)\n agent.set_action(aux[1].long(), agent.actions[aux[1]])\n bag.append(aux[1].item())\n next_obs, rewards = env.step(agents)\n obs = next_obs\n total_reward += sum(rewards)\n mue_spectral_effs.append(env.mue_spectral_eff.item())\n d2d_spectral_effs.append(env.d2d_spectral_eff.item())\n return total_reward, mue_spectral_effs, d2d_spectral_effs, bag\n\nn_mues = 1 # number of mues\nn_d2d = 2 # number of d2d pairs\nn_rb = n_mues # number of RBs\nbs_radius = 500 # bs radius in m\n\nrb_bandwidth = 180*1e3 # rb bandwidth in Hz\nd2d_pair_distance = 50 # d2d pair distance in m\np_max = 23 # max tx power in dBm\nnoise_power = -116 # noise power per RB in dBm\nbs_gain = 17 # macro bs antenna gain in dBi\nuser_gain = 4 # user antenna gain in dBi\nsinr_threshold_mue = 6 # true mue sinr threshold in dB\nmue_margin = .5e4\n\n\n# conversions from dB to pow\np_max = p_max - 30\np_max = gen.db_to_power(p_max)\nnoise_power = noise_power - 30\nnoise_power = gen.db_to_power(noise_power)\nbs_gain = gen.db_to_power(bs_gain)\nuser_gain = gen.db_to_power(user_gain)\nsinr_threshold_mue = gen.db_to_power(sinr_threshold_mue)\n\n# q-learning parameters\n# MAX_NUM_EPISODES = 2500\n# MAX_NUM_EPISODES = 8000\n# MAX_NUM_EPISODES = int(1.2e4)\n# MAX_NUM_EPISODES = int(6e3)\nSTEPS_PER_EPISODE = 4000\n# STEPS_PER_EPISODE = 200\n# STEPS_PER_EPISODE = 1000\nEPSILON_MIN = 0.01\n# MAX_NUM_STEPS = 50\n# EPSILON_DECAY = 4e-2 * EPSILON_MIN / STEPS_PER_EPISODE\nEPSILON_DECAY = 100 * EPSILON_MIN / STEPS_PER_EPISODE\nMAX_NUM_EPISODES = int(1.2/EPSILON_DECAY)\n# EPSILON_DECAY = 8e-1 * EPSILON_MIN / STEPS_PER_EPISODE\n# EPSILON_DECAY = 2 * EPSILON_MIN / MAX_NUM_STEPS\nALPHA = 0.05 # Learning rate\nGAMMA = 0.98 # Discount factor\n# C = 8000 # C constant for the improved reward function\nC = 80 # C constant for the improved reward function\nTARGET_UPDATE = 10\nMAX_NUMBER_OF_AGENTS = 20\nREPLAY_MEMORY_SIZE = 128\nBATCH_SIZE = 32\n\n\n# more parameters\ncwd = os.getcwd()\n\nenv_params = EnvironmentParameters(rb_bandwidth, d2d_pair_distance, p_max, noise_power, bs_gain, user_gain, sinr_threshold_mue,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin)\ntrain_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)\nagent_params = DQNAgentParameters(EPSILON_MIN, EPSILON_DECAY, 1, REPLAY_MEMORY_SIZE, 
BATCH_SIZE, GAMMA)\n\nreward_function = rewards.dis_reward_tensor2\n\n\nfilename = gen.path_leaf(__file__)\nfilename = filename.split('.')[0]\nscratch_name = filename\n\nmodels_path = [p.path for p in os.scandir(f'{lucas_path}/models/script2_3')]\n\nframeworks = [ExternalDQNFramework(agent_params) for _ in models_path]\nfor path, framework in zip(models_path, frameworks):\n framework.policy_net.load_state_dict(torch.load(path))\n\nreward_function = rewards.dis_reward_tensor\n\n# policy 5 test\nenvironment = SimpleEnvironment(env_params, reward_function)\nactions = torch.tensor([i*0.82*p_max/5/1000 for i in range(5)])\nagents = [ExternalDQNAgent(agent_params, actions) for _ in models_path]\ntotal_reward, mue_spectral_effs, d2d_spectral_effs, bag = test(environment, agents, framework, 1000)\n\nmue_success_rate = np.average(np.array(mue_spectral_effs) > np.log2(1 + sinr_threshold_mue))\nd2d_speffs_avg = np.average(d2d_spectral_effs)\n\nlog = list()\nlog.append(f'NUMBER OF D2D_USERS: {n_d2d}')\nlog.append(f'D2D SPECTRAL EFFICIENCY - SCRIPT: {d2d_speffs_avg}')\nlog.append(f'MUE SUCCESS RATE - SCRIPT: {mue_success_rate}')\nlog.append(f'-------------------------------------')\n\n\nfilename = gen.path_leaf(__file__)\nfilename = filename.split('.')[0]\nfilename = f'{lucas_path}/logs/{filename}.txt'\nfile = open(filename, 'w')\n\nfor l in log:\n file.write(f'{l}\\n')\nfile.close()\n\nplt.figure(1)\nplt.hist(bag)\nplt.xlabel('Actions')\nplt.ylabel('Number of occurrences')\n\n# fig2, ax1 = plt.subplots()\n# ax1.set_xlabel('Number of D2D pairs in the RB')\n# ax1.set_ylabel('D2D Average Spectral Efficiency [bps/Hz]', color='tab:blue')\n# ax1.plot(d2d_speffs_avg, '.', color='tab:blue')\n\n# ax2 = ax1.twinx()\n# ax2.set_ylabel('MUE Success Rate', color='tab:red')\n# ax2.plot(mue_success_rate, '.', color='tab:red')\n# fig2.tight_layout()\n\n\nplt.show()\n\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.mean",
"torch.zeros",
"torch.cat",
"torch.sum",
"torch.nn.functional.mse_loss",
"torch.mul",
"torch.FloatTensor",
"torch.cuda.is_available"
],
[
"numpy.log2",
"torch.load",
"torch.cat",
"numpy.average",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show",
"torch.save"
],
[
"scipy.spatial.distance.euclidean"
],
[
"torch.cuda.is_available",
"torch.load"
],
[
"torch.device"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"scipy.spatial.distance.euclidean",
"torch.tensor",
"numpy.std",
"numpy.mean",
"torch.cuda.is_available",
"numpy.array",
"numpy.sum"
],
[
"numpy.array",
"numpy.sum",
"torch.cat"
],
[
"torch.load",
"numpy.random.choice",
"numpy.mean",
"torch.cuda.is_available",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show"
],
[
"numpy.array",
"numpy.sum",
"torch.cuda.is_available",
"torch.cat"
],
[
"numpy.random.seed"
],
[
"numpy.log2",
"torch.load",
"torch.sum",
"torch.tensor",
"torch.device",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"numpy.random.choice",
"matplotlib.pyplot.figure",
"torch.sum",
"torch.tensor",
"matplotlib.pyplot.plot",
"torch.device",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.log2",
"torch.load",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.average",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ndawlab/seqanx | [
"de44aa1baeb10646d538c185f0428d53b00db4b5",
"de44aa1baeb10646d538c185f0428d53b00db4b5"
] | [
"sisyphus/mdp/_dp.py",
"sisyphus/envs/_prey.py"
] | [
"\"\"\"Dynamic programming module\"\"\"\n\nimport numpy as np\nfrom copy import deepcopy\nfrom ._misc import check_params, softmax, pessimism\nfrom warnings import warn\n\nclass ValueIteration(object):\n \"\"\"Q-value iteration algorithm.\n \n Parameters\n ----------\n policy : max | min | softmax | pessimism (default = pessimism)\n Learning rule.\n gamma : float (default = 0.9)\n Temporal discounting factor.\n beta : float (default = 10.0)\n Inverse temperature for future choice (ignored if policy not softmax).\n w : float (default = 1.0)\n Pessimism weight (ignored if policy not pessimism).\n tol : float, default: 1e-4\n Tolerance for stopping criteria.\n max_iter : int, default: 100\n Maximum number of iterations taken for the solvers to converge.\n\n References\n ----------\n 1. Sutton, R. S., & Barto, A. G. (2018). Reinforcement learning: An introduction. MIT press.\n \"\"\"\n \n def __init__(self, policy='pessimism', gamma=0.9, beta=10.0, w=1.0, tol=0.0001, max_iter=100):\n\n ## Define choice policy.\n self.policy = policy\n if policy == 'max': self._policy = np.max\n elif policy == 'min': self._policy = np.min\n elif policy == 'softmax': self._policy = lambda arr: arr @ softmax(arr * self.beta)\n elif policy == 'pessimism': self._policy = lambda arr: pessimism(arr, self.w)\n else: raise ValueError('Policy \"%s\" not valid!' %self.policy)\n \n ## Check parameters.\n self.gamma = gamma\n self.beta = beta\n self.w = w\n check_params(gamma=self.gamma, beta=self.beta, w=self.w)\n \n ## Set convergence criteria.\n self.tol = tol\n self.max_iter = max_iter\n \n def __repr__(self):\n return '<Q-value iteration>'\n \n def copy(self):\n \"\"\"Return copy of agent.\"\"\"\n return deepcopy(self)\n \n def _q_solve(self, info, Q=None):\n \"\"\"Solve for Q-values iteratively.\"\"\"\n \n ## Initialize Q-values.\n if Q is None: Q = np.zeros(info.shape[0], dtype=float)\n assert np.equal(Q.shape, info.shape[0])\n copy = info.copy()\n \n ## Main loop.\n for k in range(self.max_iter):\n \n ## Make copy.\n q = Q.copy()\n \n ## Precompute successor value. \n copy['Q'] = q\n V_prime = copy.groupby('S').Q.apply(self._policy).values\n\n ## Compute Q-values.\n for i in range(info.shape[0]):\n \n ## Update Q-value.\n Q[i] = sum(info.loc[i,\"T\"] * (info.loc[i,\"R\"] + self.gamma * V_prime[info.loc[i,\"S'\"]]))\n\n ## Compute delta.\n delta = np.abs(Q - q)\n\n ## Check for termination.\n if np.all(delta < self.tol): break\n \n return Q, k + 1\n \n def _v_solve(self, info):\n \"\"\"Compute state value from Q-table.\"\"\"\n \n ## Copy info and append Q-values.\n copy = info.copy()\n copy['Q'] = self.Q\n \n ## Identify max by state.\n return copy.groupby('S').Q.max().values\n \n def _pi_solve(self, gym):\n \"\"\"Compute policy from Q-table.\"\"\"\n \n ## Precompute optimal q(s,a).\n copy = gym.info.copy()\n copy['Q'] = self.Q\n copy = copy.iloc[copy.groupby('S').Q.idxmax().values]\n copy[\"S'\"] = copy[\"S'\"].apply(lambda arr: arr[0])\n \n ## Initialize policy from initial state.\n policy = [gym.start]\n \n ## Iterately append.\n while True:\n\n ## Termination check.\n s = policy[-1]\n if s in gym.terminal: break\n \n ## Observe successor.\n s_prime, = copy.loc[copy[\"S\"]==s, \"S'\"].values\n \n ## Terminate on loops. 
Otherwise append.\n if s_prime in policy: break\n policy.append(s_prime)\n \n return policy\n \n def fit(self, gym, Q=None, verbose=True): \n \"\"\"Solve for optimal policy.\n \n Parameters\n ----------\n gym : GridWorld instance\n Simulation environment.\n \n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n \n ## Solve for Q-values.\n self.Q, self.n_iter = self._q_solve(gym.info, Q)\n if np.equal(self.n_iter, self.max_iter) and verbose:\n warn('Reached maximum iterations.')\n \n ## Solve for values.\n self.V = self._v_solve(gym.info)\n \n ## Compute policy.\n self.pi = self._pi_solve(gym)\n \n return self",
"import numpy as np\nfrom ._base import GraphWorld\n\nclass SleepingPredator(GraphWorld):\n \"\"\"Sleeping predator (behavioral inhibition) task.\n \n Parameters\n ----------\n p : float\n Probability of predation.\n \n Attributes\n ----------\n states : array, shape = (n,)\n Indices of states.\n n_states : int\n Total number of states.\n viable_states : array\n Indices of viable states.\n n_viable_states : int\n Number of viable states.\n info : DataFrame\n Pandas DataFrame storing the dynamics of the Markov decision process.\n Rows correspond to each viable Q-value, whereas each column contains\n its associated information.\n \n References\n ----------\n 1. Bach DR (2015) Anxiety-Like Behavioural Inhibition Is Normative under Environmental\n Threat-Reward Correlations. PLoS Comput Biol 11:e1004646.\n 2. Bach DR (2017) The cognitive architecture of anxiety-like behavioral inhibition. \n J Exp Psychol Hum Percept Perform 43:18–29.\n \"\"\"\n \n def __init__(self, p=0.1):\n \n ## Define one-step transition matrix.\n n = 7\n T = np.zeros((n+2,n+2)) * np.nan\n T[np.arange(n),np.arange(n)+1] = 1 # Corridor transitions\n T[:n,n] = 1 # Safety transition\n T[:n,n+1] = 0 # Danger transition\n T[[n,n+1],[n,n+1]] = 1 # Terminal states \n\n ## Define rewards.\n R = np.copy(T)\n R[np.arange(n),np.arange(n)+1] = 0 # Corridor transitions\n R[:n,n] = np.arange(n) # Safety transition\n R[:n,n+1] = -np.arange(n) # Danger transition\n R[[n,n+1],[n,n+1]] = 0 # Terminal states \n \n ## Define start/terminal states.\n start = 0\n terminal = []\n for s in range(T.shape[0]):\n s_prime, = np.nonzero(~np.isnan(T[s]))\n if np.all(s_prime == s): terminal.append(s)\n\n ## Initialize GraphWorld.\n GraphWorld.__init__(self, T, R, start, terminal, epsilon=0)\n \n ## Remove masochistic Q-values (i.e. agent cannot elect to be eaten).\n bps = self.n_states - 1\n sane_ix = [np.logical_or(arr[0]!=bps, arr.size==1) for arr in self.info[\"S'\"].values]\n self.info = self.info[sane_ix].reset_index(drop=True)\n \n ## Update probability of being eaten. \n for i, row in self.info.iterrows():\n s, s_prime = row[\"S\"], row[\"S'\"][0]\n if not s_prime in self.terminal:\n self.info.at[i,'T'] = np.array([1-p, 0, p])\n \n def __repr__(self):\n return '<GraphWorld | Sleeping Predator Task>'"
] | [
[
"numpy.all",
"numpy.abs",
"numpy.zeros",
"numpy.equal"
],
[
"numpy.isnan",
"numpy.arange",
"numpy.all",
"numpy.logical_or",
"numpy.copy",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
phenomax/praktipy | [
"58d1f74e6d128b0d22bfa0fcaf754c9b8b9e8c23"
] | [
"praktiplot.py"
] | [
"# Setting matplotlib layout to match tex settings \n\nimport matplotlib\nmatplotlib.use('pgf')\nimport matplotlib.pyplot as plt\nfrom os.path import dirname, abspath\nimport locale\nmatplotlib.rcParams.update({\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n 'pgf.texsystem': 'lualatex',\n 'pgf.preamble': r'\\input{'+dirname(abspath(__file__)).replace(\" \", r\"\\ \")+r'//matplotlib_header.tex'+r'}',\n})\n# use german locale settings for printing 3.4 as 3,4\ntry:\n locale.setlocale(locale.LC_ALL, 'de_DE.UTF8')\nexcept locale.Error:\n print(\"Could not set the language settings! 3.5 will not be written as 3,5! SO SAD!\")\n \nplt.ticklabel_format(useLocale=True)"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.ticklabel_format"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
reubensgithub/covid-19-dashboard | [
"f0d8de85a989597fe38ea771439a70fc9e0bd21f"
] | [
"coursework_project_dec_2021/covid_data_handler.py"
] | [
"\"\"\"This module has various functions inside it that will allow\r\n the processing and handling of covid data, whether from a\r\n CSV file or returned from an API\"\"\"\r\nimport sched\r\nimport time\r\nimport logging\r\nimport pandas as pd\r\nfrom typing import List\r\nfrom uk_covid19 import Cov19API\r\n\r\nlogging.basicConfig(filename='covid_log.log', level=logging.DEBUG,\r\n format='%(levelname)s: %(asctime)s %(message)s')\r\n\r\ndata_list_exeter = []\r\ndata_list_england = []\r\n\r\n\r\ndef parse_csv_data(csv_filename: str) -> list:\r\n \"\"\"This function will take the csv data from the csv file and return it as a list \"\"\"\r\n dataframe = pd.read_csv(csv_filename)\r\n return dataframe.values.tolist()\r\n\r\n# parse_csv_data(\"nation_2021-10-28.csv\")\r\n\r\n\r\ndef process_covid_csv_data(covid_csv_data: object) -> int:\r\n \"\"\"This function will take the returned list of data from parse_csv_data()\r\n (converted to a dataframe here for convenience in accessing values) and will return\r\n the necessary statistics back to the user \"\"\"\r\n covid_csv_data = pd.DataFrame(covid_csv_data)\r\n num_cases_7_days = int(covid_csv_data[6].head(9).sum(axis=0, skipna=True) -\r\n covid_csv_data._get_value(1, 6, takeable=True))\r\n current_num_hosp_cases = int(covid_csv_data._get_value(0, 5, takeable=True))\r\n cum_num_deaths = int(covid_csv_data._get_value(13, 4, takeable=True))\r\n\r\n return num_cases_7_days, current_num_hosp_cases, cum_num_deaths\r\n\r\n# process_covid_csv_data(covid_csv_data=parse_csv_data(\"nation_2021-10-28.csv\"))\r\n\r\n\r\ndef covid_API_request(location: str, location_type: str) -> List[dict]:\r\n \"\"\"This function will use the Cov19API provided by\r\n Public Health England and return all of the values of the given\r\n fields, from the start date up to the current date. 
This data\r\n is returned in a JSON format.\"\"\"\r\n location_data = [\r\n 'areaType='+str(location_type),\r\n 'areaName='+str(location)\r\n ]\r\n covid_data = {\r\n \"date\": \"date\",\r\n \"areaName\": \"areaName\",\r\n \"areaCode\": \"areaCode\",\r\n \"newCasesByPublishDate\": \"newCasesByPublishDate\",\r\n \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\r\n \"hospitalCases\": \"hospitalCases\",\r\n \"newDeaths28DaysByDeathDate\": \"newDeaths28DaysByDeathDate\",\r\n \"cumDeaths28DaysByDeathDate\": \"cumDeaths28DaysByDeathDate\",\r\n \"cumDeathsByPublishDate\": \"cumDeathsByPublishDate\"\r\n }\r\n\r\n api_object = Cov19API(filters=location_data, structure=covid_data)\r\n data = api_object.get_json()\r\n return data\r\n\r\n# covid_API_request()\r\n\r\nexe = covid_API_request(location=\"Exeter\", location_type=\"ltla\")\r\neng = covid_API_request(location=\"England\", location_type=\"nation\")\r\nfor piece in exe['data']:\r\n data_list_exeter.append(piece)\r\nfor info in eng['data']:\r\n data_list_england.append(info)\r\n\r\ndf_exeter = pd.DataFrame(data_list_exeter)\r\ndf_england = pd.DataFrame(data_list_england)\r\n\r\n# print(data_list_england)\r\n\r\nnational_7day_infections = int(df_england['newCasesByPublishDate'].head(7).sum(axis=0, skipna=True))\r\nlocal_7day_infections = int(df_exeter['newCasesByPublishDate'].head(7).sum(axis=0, skipna=True))\r\n# print(national_7day_infections)\r\n# print(local_7day_infections)\r\n\r\nscheduler = sched.scheduler(time.time, time.sleep)\r\n\r\ndef schedule_covid_updates(update_interval: int, update_name: str, repeat=False) -> List[object]:\r\n \"\"\"This function allows the user to schedule updates\r\n for when they want the Cov19API to get values at.\"\"\"\r\n if not repeat:\r\n event1 = scheduler.enter(update_interval, 1, covid_API_request, kwargs=update_name)\r\n logging.info(f\"\"\"Covid update for {update_name} has been scheduled\"\"\")\r\n return event1\r\n if repeat:\r\n for i in range(100000):\r\n rep = 0\r\n event2 = scheduler.enter(update_interval + rep, 2, covid_API_request,\r\n argument=repeat, kwargs=update_name)\r\n rep += 86400\r\n i += 1\r\n logging.info(f\"\"\"Repeating covid update for update {update_name}\"\"\")\r\n return event2\r\n scheduler.run(blocking=False)\r\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
vpolisky/pymc3 | [
"87cdd712c86321121c2ed3150764f3d847f5083c",
"87cdd712c86321121c2ed3150764f3d847f5083c",
"87cdd712c86321121c2ed3150764f3d847f5083c"
] | [
"pymc3/distributions/discrete.py",
"pymc3/tests/test_glm.py",
"pymc3/tests/models.py"
] | [
"from functools import partial \nimport numpy as np\nimport theano\nimport theano.tensor as tt\nfrom scipy import stats\n\nfrom .dist_math import bound, factln, binomln, betaln, logpow\nfrom .distribution import Discrete, draw_values, generate_samples, reshape_sampled\n\n__all__ = ['Binomial', 'BetaBinomial', 'Bernoulli', 'DiscreteWeibull',\n 'Poisson', 'NegativeBinomial', 'ConstantDist', 'Constant',\n 'ZeroInflatedPoisson', 'ZeroInflatedNegativeBinomial',\n 'DiscreteUniform', 'Geometric', 'Categorical']\n\n\nclass Binomial(Discrete):\n R\"\"\"\n Binomial log-likelihood.\n\n The discrete probability distribution of the number of successes\n in a sequence of n independent yes/no experiments, each of which\n yields success with probability p.\n\n .. math:: f(x \\mid n, p) = \\binom{n}{x} p^x (1-p)^{n-x}\n\n ======== ==========================================\n Support :math:`x \\in \\{0, 1, \\ldots, n\\}`\n Mean :math:`n p`\n Variance :math:`n p (1 - p)`\n ======== ==========================================\n\n Parameters\n ----------\n n : int\n Number of Bernoulli trials (n >= 0).\n p : float\n Probability of success in each trial (0 < p < 1).\n \"\"\"\n\n def __init__(self, n, p, *args, **kwargs):\n super(Binomial, self).__init__(*args, **kwargs)\n self.n = n = tt.as_tensor_variable(n)\n self.p = p = tt.as_tensor_variable(p)\n self.mode = tt.cast(tt.round(n * p), self.dtype)\n\n def random(self, point=None, size=None, repeat=None):\n n, p = draw_values([self.n, self.p], point=point)\n return generate_samples(stats.binom.rvs, n=n, p=p,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n n = self.n\n p = self.p\n\n return bound(\n binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),\n 0 <= value, value <= n,\n 0 <= p, p <= 1)\n\n\nclass BetaBinomial(Discrete):\n R\"\"\"\n Beta-binomial log-likelihood.\n\n Equivalent to binomial random variable with success probability\n drawn from a beta distribution.\n\n .. math::\n\n f(x \\mid \\alpha, \\beta, n) =\n \\binom{n}{x}\n \\frac{B(x + \\alpha, n - x + \\beta)}{B(\\alpha, \\beta)}\n\n ======== =================================================================\n Support :math:`x \\in \\{0, 1, \\ldots, n\\}`\n Mean :math:`n \\dfrac{\\alpha}{\\alpha + \\beta}`\n Variance :math:`n \\dfrac{\\alpha \\beta}{(\\alpha+\\beta)^2 (\\alpha+\\beta+1)}`\n ======== =================================================================\n\n Parameters\n ----------\n n : int\n Number of Bernoulli trials (n >= 0).\n alpha : float\n alpha > 0.\n beta : float\n beta > 0.\n \"\"\"\n\n def __init__(self, alpha, beta, n, *args, **kwargs):\n super(BetaBinomial, self).__init__(*args, **kwargs)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.beta = beta = tt.as_tensor_variable(beta)\n self.n = n = tt.as_tensor_variable(n)\n self.mode = tt.cast(tt.round(alpha / (alpha + beta)), 'int8')\n\n def _random(self, alpha, beta, n, size=None):\n size = size or 1\n p = np.atleast_1d(stats.beta.rvs(a=alpha, b=beta, size=np.prod(size)))\n # Sometimes scipy.beta returns nan. 
Ugh.\n while np.any(np.isnan(p)):\n i = np.isnan(p)\n p[i] = stats.beta.rvs(a=alpha, b=beta, size=np.sum(i))\n # Sigh...\n _n, _p, _size = np.atleast_1d(n).flatten(), p.flatten(), np.prod(size)\n samples = np.reshape(stats.binom.rvs(n=_n, p=_p, size=_size), size)\n return samples\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta, n = \\\n draw_values([self.alpha, self.beta, self.n], point=point)\n return generate_samples(self._random, alpha=alpha, beta=beta, n=n,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n return bound(binomln(self.n, value)\n + betaln(value + alpha, self.n - value + beta)\n - betaln(alpha, beta),\n value >= 0, value <= self.n,\n alpha > 0, beta > 0)\n\n\nclass Bernoulli(Discrete):\n R\"\"\"Bernoulli log-likelihood\n\n The Bernoulli distribution describes the probability of successes\n (x=1) and failures (x=0).\n\n .. math:: f(x \\mid p) = p^{x} (1-p)^{1-x}\n\n ======== ======================\n Support :math:`x \\in \\{0, 1\\}`\n Mean :math:`p`\n Variance :math:`p (1 - p)`\n ======== ======================\n\n Parameters\n ----------\n p : float\n Probability of success (0 < p < 1).\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super(Bernoulli, self).__init__(*args, **kwargs)\n self.p = p = tt.as_tensor_variable(p)\n self.mode = tt.cast(tt.round(p), 'int8')\n\n def random(self, point=None, size=None, repeat=None):\n p = draw_values([self.p], point=point)\n return generate_samples(stats.bernoulli.rvs, p,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n p = self.p\n return bound(\n tt.switch(value, tt.log(p), tt.log(1 - p)),\n value >= 0, value <= 1,\n p >= 0, p <= 1)\n\n\nclass DiscreteWeibull(Discrete):\n R\"\"\"Discrete Weibull log-likelihood\n\n The discrete Weibull distribution is a flexible model of count data that\n can handle both over- and under-dispersion.\n\n .. math:: f(x \\mid q, \\beta) = q^{x^{\\beta}} - q^{(x + 1)^{\\beta}}\n\n ======== ======================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\mu = \\sum_{x = 1}^{\\infty} q^{x^{\\beta}}`\n Variance :math:`2 \\sum_{x = 1}^{\\infty} x q^{x^{\\beta}} - \\mu - \\mu^2`\n ======== ======================\n \"\"\"\n def __init__(self, q, beta, *args, **kwargs):\n super(DiscreteWeibull, self).__init__(*args, defaults=['median'], **kwargs)\n \n self.q = q\n self.beta = beta\n\n self.median = self._ppf(0.5)\n \n def logp(self, value):\n q = self.q\n beta = self.beta\n \n return bound(tt.log(tt.power(q, tt.power(value, beta)) - tt.power(q, tt.power(value + 1, beta))),\n 0 <= value,\n 0 < q, q < 1,\n 0 < beta)\n\n def _ppf(self, p):\n \"\"\"\n The percentile point function (the inverse of the cumulative\n distribution function) of the discrete Weibull distribution.\n \"\"\"\n q = self.q\n beta = self.beta\n\n return (tt.ceil(tt.power(tt.log(1 - p) / tt.log(q), 1. / beta)) - 1).astype('int64')\n\n def _random(self, q, beta, size=None):\n p = np.random.uniform(size=size)\n\n return np.ceil(np.power(np.log(1 - p) / np.log(q), 1. / beta)) - 1\n\n def random(self, point=None, size=None, repeat=None):\n q, beta = draw_values([self.q, self.beta], point=point)\n\n return generate_samples(self._random, q, beta,\n dist_shape=self.shape,\n size=size)\n\n\nclass Poisson(Discrete):\n R\"\"\"\n Poisson log-likelihood.\n\n Often used to model the number of events occurring in a fixed period\n of time when the times at which events occur are independent.\n\n .. 
math:: f(x \\mid \\mu) = \\frac{e^{-\\mu}\\mu^x}{x!}\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\mu`\n Variance :math:`\\mu`\n ======== ==========================\n\n Parameters\n ----------\n mu : float\n Expected number of occurrences during the given interval\n (mu >= 0).\n\n Notes\n -----\n The Poisson distribution can be derived as a limiting case of the\n binomial distribution.\n \"\"\"\n\n def __init__(self, mu, *args, **kwargs):\n super(Poisson, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.mode = tt.floor(mu).astype('int32')\n\n def random(self, point=None, size=None, repeat=None):\n mu = draw_values([self.mu], point=point)\n return generate_samples(stats.poisson.rvs, mu,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n log_prob = bound(\n logpow(mu, value) - factln(value) - mu,\n mu >= 0, value >= 0)\n # Return zero when mu and value are both zero\n return tt.switch(1 * tt.eq(mu, 0) * tt.eq(value, 0),\n 0, log_prob)\n\n\nclass NegativeBinomial(Discrete):\n R\"\"\"\n Negative binomial log-likelihood.\n\n The negative binomial distribution describes a Poisson random variable\n whose rate parameter is gamma distributed.\n\n .. math::\n\n f(x \\mid \\mu, \\alpha) =\n \\frac{\\Gamma(x+\\alpha)}{x! \\Gamma(\\alpha)}\n (\\alpha/(\\mu+\\alpha))^\\alpha (\\mu/(\\mu+\\alpha))^x\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\mu`\n ======== ==========================\n\n Parameters\n ----------\n mu : float\n Poission distribution parameter (mu > 0).\n alpha : float\n Gamma distribution parameter (alpha > 0).\n \"\"\"\n\n def __init__(self, mu, alpha, *args, **kwargs):\n super(NegativeBinomial, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.mode = tt.floor(mu).astype('int32')\n\n def random(self, point=None, size=None, repeat=None):\n mu, alpha = draw_values([self.mu, self.alpha], point=point)\n g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,\n dist_shape=self.shape,\n size=size)\n g[g == 0] = np.finfo(float).eps # Just in case\n return reshape_sampled(stats.poisson.rvs(g), size, self.shape)\n\n def logp(self, value):\n mu = self.mu\n alpha = self.alpha\n negbinom = bound(binomln(value + alpha - 1, value)\n + logpow(mu / (mu + alpha), value)\n + logpow(alpha / (mu + alpha), alpha),\n value >= 0, mu > 0, alpha > 0)\n\n # Return Poisson when alpha gets very large.\n return tt.switch(1 * (alpha > 1e10),\n Poisson.dist(self.mu).logp(value),\n negbinom)\n\n\nclass Geometric(Discrete):\n R\"\"\"\n Geometric log-likelihood.\n\n The probability that the first success in a sequence of Bernoulli\n trials occurs on the x'th trial.\n\n .. 
math:: f(x \\mid p) = p(1-p)^{x-1}\n\n ======== =============================\n Support :math:`x \\in \\mathbb{N}_{>0}`\n Mean :math:`\\dfrac{1}{p}`\n Variance :math:`\\dfrac{1 - p}{p^2}`\n ======== =============================\n\n Parameters\n ----------\n p : float\n Probability of success on an individual trial (0 < p <= 1).\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super(Geometric, self).__init__(*args, **kwargs)\n self.p = p = tt.as_tensor_variable(p)\n self.mode = 1\n\n def random(self, point=None, size=None, repeat=None):\n p = draw_values([self.p], point=point)\n return generate_samples(np.random.geometric, p,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n p = self.p\n return bound(tt.log(p) + logpow(1 - p, value - 1),\n 0 <= p, p <= 1, value >= 1)\n\n\nclass DiscreteUniform(Discrete):\n R\"\"\"\n Discrete uniform distribution.\n\n .. math:: f(x \\mid lower, upper) = \\frac{1}{upper-lower}\n\n ======== ===============================================\n Support :math:`x \\in {lower, lower + 1, \\ldots, upper}`\n Mean :math:`\\dfrac{lower + upper}{2}`\n Variance :math:`\\dfrac{(upper - lower)^2}{12}`\n ======== ===============================================\n\n Parameters\n ----------\n lower : int\n Lower limit.\n upper : int\n Upper limit (upper > lower).\n \"\"\"\n\n def __init__(self, lower, upper, *args, **kwargs):\n super(DiscreteUniform, self).__init__(*args, **kwargs)\n self.lower = tt.floor(lower).astype('int32')\n self.upper = tt.floor(upper).astype('int32')\n self.mode = tt.maximum(\n tt.floor((upper - lower) / 2.).astype('int32'), self.lower)\n\n def _random(self, lower, upper, size=None):\n # This way seems to be the only to deal with lower and upper\n # as array-like.\n samples = stats.uniform.rvs(lower, upper - lower - np.finfo(float).eps,\n size=size)\n return np.floor(samples).astype('int32')\n\n def random(self, point=None, size=None, repeat=None):\n lower, upper = draw_values([self.lower, self.upper], point=point)\n return generate_samples(self._random,\n lower, upper,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n upper = self.upper\n lower = self.lower\n return bound(-tt.log(upper - lower + 1),\n lower <= value, value <= upper)\n\n\nclass Categorical(Discrete):\n R\"\"\"\n Categorical log-likelihood.\n\n The most general discrete distribution.\n\n .. math:: f(x \\mid p) = p_x\n\n ======== ===================================\n Support :math:`x \\in \\{1, 2, \\ldots, |p|\\}`\n ======== ===================================\n\n Parameters\n ----------\n p : array of floats\n p > 0 and the elements of p must sum to 1. 
They will be automatically\n rescaled otherwise.\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super(Categorical, self).__init__(*args, **kwargs)\n try:\n self.k = tt.shape(p)[-1].tag.test_value\n except AttributeError:\n self.k = tt.shape(p)[-1]\n self.p = p = tt.as_tensor_variable(p)\n self.p = (p.T / tt.sum(p, -1)).T\n self.mode = tt.argmax(p)\n\n def random(self, point=None, size=None, repeat=None):\n def random_choice(k, *args, **kwargs):\n if len(kwargs['p'].shape) > 1:\n return np.asarray(\n [np.random.choice(k, p=p)\n for p in kwargs['p']]\n )\n else:\n return np.random.choice(k, *args, **kwargs)\n\n p, k = draw_values([self.p, self.k], point=point)\n return generate_samples(partial(random_choice, np.arange(k)),\n p=p,\n broadcast_shape=p.shape[:-1] or (1,),\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n p = self.p\n k = self.k\n\n # Clip values before using them for indexing\n value_clip = tt.clip(value, 0, k - 1)\n\n sumto1 = theano.gradient.zero_grad(\n tt.le(abs(tt.sum(p, axis=-1) - 1), 1e-5))\n\n if p.ndim > 1:\n a = tt.log(p[tt.arange(p.shape[0]), value_clip])\n else:\n a = tt.log(p[value_clip])\n\n return bound(a, value >= 0, value <= (k - 1), sumto1)\n\n\nclass Constant(Discrete):\n \"\"\"\n Constant log-likelihood.\n\n Parameters\n ----------\n value : float or int\n Constant parameter.\n \"\"\"\n\n def __init__(self, c, *args, **kwargs):\n super(Constant, self).__init__(*args, **kwargs)\n self.mean = self.median = self.mode = self.c = c = tt.as_tensor_variable(c)\n\n def random(self, point=None, size=None, repeat=None):\n c = draw_values([self.c], point=point)\n dtype = np.array(c).dtype\n\n def _random(c, dtype=dtype, size=None):\n return np.full(size, fill_value=c, dtype=dtype)\n\n return generate_samples(_random, c=c, dist_shape=self.shape,\n size=size).astype(dtype)\n\n def logp(self, value):\n c = self.c\n return bound(0, tt.eq(value, c))\n\n\ndef ConstantDist(*args, **kwargs):\n import warnings\n warnings.warn(\"ConstantDist has been deprecated. In future, use Constant instead.\",\n DeprecationWarning)\n return Constant(*args, **kwargs)\n\n\nclass ZeroInflatedPoisson(Discrete):\n R\"\"\"\n Zero-inflated Poisson log-likelihood.\n\n Often used to model the number of events occurring in a fixed period\n of time when the times at which events occur are independent.\n\n .. 
math::\n\n f(x \\mid \\theta, \\psi) = \\left\\{ \\begin{array}{l}\n (1-\\psi) + \\psi e^{-\\theta}, \\text{if } x = 0 \\\\\n \\psi \\frac{e^{-\\theta}\\theta^x}{x!}, \\text{if } x=1,2,3,\\ldots\n \\end{array} \\right.\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\psi\\theta`\n Variance :math:`\\theta + \\frac{1-\\psi}{\\psi}\\theta^2`\n ======== ==========================\n\n Parameters\n ----------\n theta : float\n Expected number of occurrences during the given interval\n (theta >= 0).\n psi : float\n Expected proportion of Poisson variates (0 < psi < 1)\n\n \"\"\"\n\n def __init__(self, theta, psi, *args, **kwargs):\n super(ZeroInflatedPoisson, self).__init__(*args, **kwargs)\n self.theta = theta = tt.as_tensor_variable(theta)\n self.psi = psi = tt.as_tensor_variable(psi)\n self.pois = Poisson.dist(theta)\n self.mode = self.pois.mode\n\n def random(self, point=None, size=None, repeat=None):\n theta, psi = draw_values([self.theta, self.psi], point=point)\n g = generate_samples(stats.poisson.rvs, theta,\n dist_shape=self.shape,\n size=size)\n sampled = g * (np.random.random(np.squeeze(g.shape)) < psi)\n return reshape_sampled(sampled, size, self.shape)\n\n def logp(self, value):\n return tt.switch(value > 0,\n tt.log(self.psi) + self.pois.logp(value),\n tt.log((1. - self.psi) + self.psi * tt.exp(-self.theta)))\n\n\nclass ZeroInflatedNegativeBinomial(Discrete):\n R\"\"\"\n Zero-Inflated Negative binomial log-likelihood.\n\n The Zero-inflated version of the Negative Binomial (NB).\n The NB distribution describes a Poisson random variable\n whose rate parameter is gamma distributed.\n\n .. math::\n\n f(x \\mid \\mu, \\alpha, \\psi) = \\left\\{ \\begin{array}{l}\n (1-\\psi) + \\psi \\left (\\frac{\\alpha}{\\alpha+\\mu} \\right) ^\\alpha, \\text{if } x = 0 \\\\\n \\psi \\frac{\\Gamma(x+\\alpha)}{x! \\Gamma(\\alpha)} \\left (\\frac{\\alpha}{\\mu+\\alpha} \\right)^\\alpha \\left( \\frac{\\mu}{\\mu+\\alpha} \\right)^x, \\text{if } x=1,2,3,\\ldots\n \\end{array} \\right.\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\psi\\mu`\n Var :math:`\\psi\\mu + \\left (1 + \\frac{\\mu}{\\alpha} + \\frac{1-\\psi}{\\mu} \\right)`\n ======== ==========================\n\n Parameters\n ----------\n mu : float\n Poission distribution parameter (mu > 0).\n alpha : float\n Gamma distribution parameter (alpha > 0).\n psi : float\n Expected proportion of NegativeBinomial variates (0 < psi < 1)\n \"\"\"\n\n def __init__(self, mu, alpha, psi, *args, **kwargs):\n super(ZeroInflatedNegativeBinomial, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.psi = psi = tt.as_tensor_variable(psi)\n self.nb = NegativeBinomial.dist(mu, alpha)\n self.mode = self.nb.mode\n\n def random(self, point=None, size=None, repeat=None):\n mu, alpha, psi = draw_values(\n [self.mu, self.alpha, self.psi], point=point)\n g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,\n dist_shape=self.shape,\n size=size)\n g[g == 0] = np.finfo(float).eps # Just in case\n sampled = stats.poisson.rvs(g) * (np.random.random(np.squeeze(g.shape)) < psi)\n return reshape_sampled(sampled, size, self.shape)\n\n def logp(self, value):\n return tt.switch(value > 0,\n tt.log(self.psi) + self.nb.logp(value),\n tt.log((1. - self.psi) + self.psi * (self.alpha / (self.alpha + self.mu))**self.alpha))\n",
"import numpy as np\n\nfrom .helpers import SeededTest\nfrom pymc3 import Model, Uniform, Normal, find_MAP, Slice, sample\nfrom pymc3 import families, GLM, LinearComponent\nimport pandas as pd\n\n# Generate data\ndef generate_data(intercept, slope, size=700):\n x = np.linspace(-1, 1, size)\n y = intercept + x * slope\n return x, y\n\n\nclass TestGLM(SeededTest):\n @classmethod\n def setUpClass(cls):\n super(TestGLM, cls).setUpClass()\n cls.intercept = 1\n cls.slope = 3\n cls.sd = .05\n x_linear, cls.y_linear = generate_data(cls.intercept, cls.slope, size=1000)\n cls.y_linear += np.random.normal(size=1000, scale=cls.sd)\n cls.data_linear = pd.DataFrame(dict(x=x_linear, y=cls.y_linear))\n\n x_logistic, y_logistic = generate_data(cls.intercept, cls.slope, size=3000)\n y_logistic = 1 / (1 + np.exp(-y_logistic))\n bern_trials = [np.random.binomial(1, i) for i in y_logistic]\n cls.data_logistic = dict(x=x_logistic, y=bern_trials)\n\n def test_linear_component(self):\n with Model() as model:\n lm = LinearComponent.from_formula('y ~ x', self.data_linear)\n sigma = Uniform('sigma', 0, 20)\n Normal('y_obs', mu=lm.y_est, sd=sigma, observed=self.y_linear)\n start = find_MAP(vars=[sigma])\n step = Slice(model.vars)\n trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)\n\n self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)\n self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)\n self.assertAlmostEqual(np.mean(trace['sigma']), self.sd, 1)\n\n def test_glm(self):\n with Model() as model:\n GLM.from_formula('y ~ x', self.data_linear)\n step = Slice(model.vars)\n trace = sample(500, step, progressbar=False, random_seed=self.random_seed)\n\n self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)\n self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)\n self.assertAlmostEqual(np.mean(trace['sd']), self.sd, 1)\n\n def test_glm_link_func(self):\n with Model() as model:\n GLM.from_formula('y ~ x', self.data_logistic,\n family=families.Binomial(link=families.logit))\n step = Slice(model.vars)\n trace = sample(1000, step, progressbar=False, random_seed=self.random_seed)\n\n self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)\n self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)\n\n def test_more_than_one_glm_is_ok(self):\n with Model():\n GLM.from_formula('y ~ x', self.data_logistic,\n family=families.Binomial(link=families.logit),\n name='glm1')\n GLM.from_formula('y ~ x', self.data_logistic,\n family=families.Binomial(link=families.logit),\n name='glm2')\n\n def test_from_xy(self):\n with Model():\n GLM(self.data_logistic['x'],\n self.data_logistic['y'],\n family=families.Binomial(link=families.logit),\n name='glm1')\n",
"from pymc3 import Model, Normal, Categorical, Metropolis\nimport numpy as np\nimport pymc3 as pm\nfrom itertools import product\nimport theano.tensor as tt\nfrom theano.compile.ops import as_op\n\n\ndef simple_model():\n mu = -2.1\n tau = 1.3\n with Model() as model:\n Normal('x', mu, tau=tau, shape=2, testval=tt.ones(2) * .1)\n\n return model.test_point, model, (mu, tau ** -1)\n\n\ndef simple_categorical():\n p = np.array([0.1, 0.2, 0.3, 0.4])\n v = np.array([0.0, 1.0, 2.0, 3.0])\n with Model() as model:\n Categorical('x', p, shape=3, testval=[1, 2, 3])\n\n mu = np.dot(p, v)\n var = np.dot(p, (v - mu) ** 2)\n return model.test_point, model, (mu, var)\n\n\ndef multidimensional_model():\n mu = -2.1\n tau = 1.3\n with Model() as model:\n Normal('x', mu, tau=tau, shape=(3, 2), testval=.1 * tt.ones((3, 2)))\n\n return model.test_point, model, (mu, tau ** -1)\n\n\ndef simple_arbitrary_det():\n @as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])\n def arbitrary_det(value):\n return value\n\n with Model() as model:\n a = Normal('a')\n b = arbitrary_det(a)\n Normal('obs', mu=b.astype('float64'), observed=np.array([1, 3, 5]))\n\n return model.test_point, model\n\n\ndef simple_init():\n start, model, moments = simple_model()\n step = Metropolis(model.vars, np.diag([1.]), model=model)\n return model, start, step, moments\n\n\ndef simple_2model():\n mu = -2.1\n tau = 1.3\n p = .4\n with Model() as model:\n x = pm.Normal('x', mu, tau=tau, testval=.1)\n pm.Deterministic('logx', tt.log(x))\n pm.Bernoulli('y', p)\n return model.test_point, model\n\n\ndef mv_simple():\n mu = np.array([-.1, .5, 1.1])\n p = np.array([\n [2., 0, 0],\n [.05, .1, 0],\n [1., -0.05, 5.5]])\n tau = np.dot(p, p.T)\n with pm.Model() as model:\n pm.MvNormal('x', tt.constant(mu), tau=tt.constant(tau),\n shape=3, testval=np.array([.1, 1., .8]))\n H = tau\n C = np.linalg.inv(H)\n return model.test_point, model, (mu, C)\n\n\ndef mv_simple_discrete():\n d = 2\n n = 5\n p = np.array([.15, .85])\n with pm.Model() as model:\n pm.Multinomial('x', n, tt.constant(p), shape=d, testval=np.array([1, 4]))\n mu = n * p\n # covariance matrix\n C = np.zeros((d, d))\n for (i, j) in product(range(d), range(d)):\n if i == j:\n C[i, i] = n * p[i] * (1 - p[i])\n else:\n C[i, j] = -n * p[i] * p[j]\n\n return model.test_point, model, (mu, C)\n\n\ndef non_normal(n=2):\n with pm.Model() as model:\n pm.Beta('x', 3, 3, shape=n, transform=None)\n return model.test_point, model, (np.tile([.5], n), None)\n\n\ndef exponential_beta(n=2):\n with pm.Model() as model:\n pm.Beta('x', 3, 1, shape=n, transform=None)\n pm.Exponential('y', 1, shape=n, transform=None)\n return model.test_point, model, None\n\n\ndef beta_bernoulli(n=2):\n with pm.Model() as model:\n pm.Beta('x', 3, 1, shape=n, transform=None)\n pm.Bernoulli('y', 0.5)\n return model.test_point, model, None\n"
] | [
[
"numpy.log",
"scipy.stats.poisson.rvs",
"numpy.isnan",
"numpy.random.choice",
"numpy.arange",
"numpy.squeeze",
"numpy.finfo",
"numpy.full",
"numpy.atleast_1d",
"numpy.prod",
"numpy.floor",
"scipy.stats.binom.rvs",
"numpy.random.uniform",
"numpy.array",
"numpy.sum"
],
[
"numpy.linspace",
"numpy.random.normal",
"numpy.mean",
"numpy.random.binomial",
"numpy.exp"
],
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.inv",
"numpy.tile",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zwx8981/DBCNN-Pytorch | [
"fa29f0307aa4533c4025c688ba5301cfddf9812f",
"fa29f0307aa4533c4025c688ba5301cfddf9812f"
] | [
"simple_demo.py",
"loss/cross_entropy_prob.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom DBCNN import DBCNN\nfrom PIL import Image\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ntest_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n])\n\noptions = {'fc': True}\nscnn_root = 'your path of SCNN model'\nmodel = nn.DataParallel(DBCNN(scnn_root, options), device_ids=[0]).cuda()\nmodel_name = type(model).__name__\nprint(model)\n\nckpt = \"your path of the checkpoint file\"\nimage_name = \"your path of test image\"\ncheckpoint = torch.load(ckpt)\nmodel.load_state_dict(checkpoint)\n\nmodel.eval()\n\nI = Image.open(image_name)\nI = test_transform(I)\nI = torch.unsqueeze(I, dim=0)\nI = I.to(device)\nwith torch.no_grad():\n score = model(I)\n\nformat_str = 'Prediction = %.4f'\nprint(format_str % score)\n\n\n\n\n\n\n",
"import torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass cross_entropy_prob(nn.Module):\n def __init__(self):\n super(cross_entropy_prob, self).__init__()\n\n def forward(self, pred, soft_targets):\n pred = F.log_softmax(pred)\n loss = torch.mean(torch.sum(- soft_targets * pred, 1))\n return loss\n"
] | [
[
"torch.cuda.is_available",
"torch.no_grad",
"torch.unsqueeze",
"torch.load"
],
[
"torch.sum",
"torch.nn.functional.log_softmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CVxTz/keras_model_aws_ec2 | [
"92a19f1c065ba7b19c0cd4e75b30f2935a7efacb"
] | [
"baseline.py"
] | [
"from glob import glob\n\nimport pandas as pd\nimport numpy as np # linear algebra\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\n\nfrom models import get_model_classif_nasnet\nfrom utils import data_gen, chunker, read_image\n\n\nlabeled_files = glob('/media/ml/data_ml/dogs-vs-cats/train/*.jpg')\ntest_files = glob('/media/ml/data_ml/dogs-vs-cats/test1/*.jpg')\n\ntrain, val = train_test_split(labeled_files, test_size=0.1, random_state=101010)\n\nmodel = get_model_classif_nasnet()\n\nbatch_size = 32\nh5_path = \"model.h5\"\ncheckpoint = ModelCheckpoint(h5_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n\n_ = model.fit_generator(\n data_gen(train, batch_size),\n validation_data=data_gen(val, batch_size),\n epochs=10, verbose=1,\n callbacks=[checkpoint],\n steps_per_epoch=len(train) // batch_size,\n validation_steps=len(val) // batch_size)\n\nmodel.load_weights(h5_path)\n\npreds = []\nids = []\n\nfor batch in chunker(test_files, batch_size):\n X = [preprocess_input(read_image(x)) for x in batch]\n X = np.array(X)\n preds_batch = model.predict(X).ravel().tolist()\n preds += preds_batch\n\ndf = pd.DataFrame({'id': test_files, 'label': preds})\ndf.to_csv(\"baseline_nasnet.csv\", index=False)\ndf.head()\n"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.array",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
hzxie/torch-points-kernels | [
"a52ea03bdd62e890320c592282ebd89de659534f"
] | [
"torch_points_kernels/chamfer_dist.py"
] | [
"import torch\n\nif torch.cuda.is_available():\n import torch_points_kernels.points_cuda as tpcuda\n\n\nclass ChamferFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, xyz1, xyz2):\n if not torch.cuda.is_available():\n raise NotImplementedError(\n \"CPU version is not available for Chamfer Distance\"\n )\n\n dist1, dist2, idx1, idx2 = tpcuda.chamfer_dist(xyz1, xyz2)\n ctx.save_for_backward(xyz1, xyz2, idx1, idx2)\n\n return dist1, dist2\n\n @staticmethod\n def backward(ctx, grad_dist1, grad_dist2):\n xyz1, xyz2, idx1, idx2 = ctx.saved_tensors\n grad_xyz1, grad_xyz2 = tpcuda.chamfer_dist_grad(\n xyz1, xyz2, idx1, idx2, grad_dist1, grad_dist2\n )\n return grad_xyz1, grad_xyz2\n\n\ndef chamfer_dist(xyz1, xyz2, ignore_zeros=False):\n r\"\"\"\n Calcuates the distance between B pairs of point clouds\n\n Parameters\n ----------\n xyz1 : torch.Tensor (dtype=torch.float32)\n (B, n1, 3) B point clouds containing n1 points\n xyz2 : torch.Tensor (dtype=torch.float32)\n (B, n2, 3) B point clouds containing n2 points\n ignore_zeros : bool\n ignore the point whose coordinate is (0, 0, 0) or not\n\n Returns\n -------\n dist: torch.Tensor\n (B, ): the distances between B pairs of point clouds\n \"\"\"\n if len(xyz1.shape) != 3 or xyz1.size(2) != 3 or len(xyz2.shape) != 3 or xyz2.size(2) != 3:\n raise ValueError('The input point cloud should be of size (B, n_pts, 3)')\n\n batch_size = xyz1.size(0)\n if batch_size == 1 and ignore_zeros:\n non_zeros1 = torch.sum(xyz1, dim=2).ne(0)\n non_zeros2 = torch.sum(xyz2, dim=2).ne(0)\n xyz1 = xyz1[non_zeros1].unsqueeze(dim=0)\n xyz2 = xyz2[non_zeros2].unsqueeze(dim=0)\n\n dist1, dist2 = ChamferFunction.apply(xyz1, xyz2)\n return torch.mean(dist1) + torch.mean(dist2)\n\n"
] | [
[
"torch.mean",
"torch.sum",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zaman-lab/brexitmeter-py | [
"a1ea66d7d747276679f8f4acdb2c2963517a0f04"
] | [
"test/storage_service_test.py"
] | [
"\nimport os\nimport tensorflow as tf\n\nfrom app.storage_service import weights_filepath, dictionaries_dirpath\n\ndef test_local_storage():\n local_filepaths = [\n\t\tweights_filepath(\"local\"),\n\t\tos.path.join(dictionaries_dirpath(\"local\"), \"dic.txt\"),\n\t\tos.path.join(dictionaries_dirpath(\"local\"), \"dic_s.txt\"),\n\t]\n for filepath in local_filepaths:\n assert os.path.isfile(filepath)\n\ndef test_remote_storage():\n remote_filepaths = [\n\t\tweights_filepath(\"remote\"),\n\t\tos.path.join(dictionaries_dirpath(\"remote\"), \"dic.txt\"),\n\t\tos.path.join(dictionaries_dirpath(\"remote\"), \"dic_s.txt\"),\n\t]\n for filepath in remote_filepaths:\n assert tf.io.gfile.exists(filepath)\n"
] | [
[
"tensorflow.io.gfile.exists"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NREL/Panel-Segmentation | [
"2270157fe87dc211f87d79b9ca38a4fbae967a1a"
] | [
"panel_segmentation/panel_detection.py"
] | [
"\"\"\"\nPanel detection class\n\"\"\"\n\nimport numpy as np\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import load_model\nimport cv2\nimport matplotlib.pyplot as plt\nfrom skimage.transform import hough_line, hough_line_peaks\nfrom matplotlib import cm\nimport requests\nfrom PIL import Image\nfrom os import path\n\npanel_seg_model_path = path.join(path.dirname(__file__), 'VGG16Net_ConvTranpose_complete.h5')\npanel_classification_model_path = path.join(path.dirname(__file__), 'VGG16_classification_model.h5')\n\nclass PanelDetection():\n '''\n A class for training a deep learning architecture, \n detecting solar arrays from a satellite image, performing spectral\n clustering, and predicting the Azimuth.\n '''\n def __init__(self, model_file_path = './VGG16Net_ConvTranpose_complete.h5', \n classifier_file_path = './VGG16_classification_model.h5'):\n \n #This is the model used for detecting if there is a panel or not\n self.classifier = load_model(classifier_file_path, \n custom_objects=None, \n compile=False)\n \n self.model = load_model(model_file_path, \n custom_objects=None, \n compile=False)\n \n \n def generateSatelliteImage(self,latitude, longitude, \n file_name_save, google_maps_api_key):\n \"\"\"\n Generates satellite image via Google Maps, using the passed lat-long coordinates.\n \n Parameters\n -----------\n latitude: Float. Latitude coordinate of the site.\n longitude: Float. Longitude coordinate of the site.\n file_name_save: String. File path that we want to save the image to. PNG file.\n google_maps_api_key: String. Google Maps API Key for automatically \n pulling satellite images.\n \n Returns\n ----------- \n Returned satellite image.\n \"\"\"\n #Check input variable for types\n if type(latitude) != float:\n raise TypeError(\"latitude variable must be of type float.\")\n if type(longitude) != float:\n raise TypeError(\"longitude variable must be of type float.\") \n if type(file_name_save) != str:\n raise TypeError(\"file_name_save variable must be of type string.\")\n if type(google_maps_api_key) != str:\n raise TypeError(\"google_maps_api_key variable must be of type string.\")\n #Build up the lat_long string from the latitude-longitude coordinates\n lat_long = str(latitude)+ \", \"+ str(longitude)\n # get method of requests module \n # return response object \n r = requests.get(\"https://maps.googleapis.com/maps/api/staticmap?maptype=satellite¢er=\" + lat_long + \"&zoom=18&size=35000x35000&key=\"+google_maps_api_key,\n verify= False) \n #Raise an exception if the satellite image is not successfully returned\n if r.status_code != 200:\n raise ValueError(\"Response status code \" + str(r.status_code) + \": Image not pulled successfully from API.\")\n # wb mode is stand for write binary mode \n f = open(file_name_save, 'wb') \n # r.content gives content, \n # in this case gives image \n f.write(r.content) \n # close method of file object \n # save and close the file \n f.close()\n #Read in the image and return it via the console\n return Image.open(file_name_save) \n\n\n def diceCoeff(self,y_true, y_pred, smooth=1):\n \"\"\"\n This function is used as the metric of similarity between the \n predicted mask and ground truth. \n \n Parameters\n -----------\n y_true - (numpy array of floats) \n the true mask of the image \n y_pred - (numpy array of floats) \n the predicted mask of the data\n smooth - (int): \n a parameter to ensure we are not dividing by zero and also a smoothing parameter. 
\n For back propagation. If the prediction is hard threshold to 0 and 1, it is difficult to back\n propagate the dice loss gradient. We add this parameter to actually smooth out the loss function, \n making it differentiable.\n \n Returns\n -----------\n dice: - float: retuns the metric of similarity between prediction and ground truth\n \"\"\"\n #Ensure that the inputs are of the correct type\n if type(y_true) != np.ndarray:\n raise TypeError(\"Variable y_true should be of type np.ndarray.\")\n if type(y_pred) != np.ndarray:\n raise TypeError(\"Variable y_pred should be of type np.ndarray.\")\n if type(smooth) != int:\n raise TypeError(\"Variable smooth should be of type int.\")\n #If variable types are correct, continue with function\n intersection = K.sum(y_true * y_pred, axis=[1,2,3])\n union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3])\n dice = K.mean((2. * intersection + smooth)/(union + smooth), axis=0)\n return dice\n\n \n def diceCoeffLoss(self, y_true, y_pred):\n \"\"\"\n This function is a loss function that can be used when training the segmentation model.\n This loss function can be used in place of binary crossentropy,\n which is the current loss function in the training stage \n \n Parameters\n -----------\n y_true - (numpy array of floats) \n the true mask of the image \n y_pred - (numpy array of floats)\n the predicted mask of the data\n \n Returns\n -----------\n float: retuns the loss metric between prediction and ground truth\n \n \"\"\"\n #Ensure that the inputs are of the correct type\n if type(y_true) != np.ndarray:\n raise TypeError(\"Variable y_true should be of type np.ndarray.\")\n if type(y_pred) != np.ndarray:\n raise TypeError(\"Variable y_pred should be of type np.ndarray.\")\n return 1-self.dice_coef(y_true, y_pred)\n \n\n def testBatch(self, test_data, test_mask=None, BATCH_SIZE = 16, model =None):\n \"\"\"\n This function is used to predict the mask of a batch of test satellite images.\n Use this to test a batch of images greater than 4\n \n Parameters\n -----------\n 'test_data': (nparray float) \n the satellite images \n 'test_mask': (nparray int/float) \n the mask ground truth corresponding to the test_data\n 'batch_size': (int) \n the batch size of the test_data. \n 'model': (tf.keras.model.object)\n a custom model can be provided as input or we can use the initialized model\n \n Returns\n -----------\n 'test_res': (nparray float) \n retuns the predicted masks.\n 'accuracy': (float) \n returns the accuarcy of prediction as compared with the ground truth if provided\n \"\"\"\n #Ensure that the inputs are of the correct type\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data should be of type np.ndarray.\")\n if type(BATCH_SIZE) != int:\n raise TypeError(\"Variable BATCH_SIZE should be of type int.\") \n test_datagen = image.ImageDataGenerator(rescale=1./255,dtype='float32')\n test_image_generator = test_datagen.flow(\n test_data,\n batch_size = BATCH_SIZE, shuffle=False)\n if model != None:\n test_res = model.predict(test_image_generator)\n else :\n test_res = self.model.predict(test_image_generator)\n if test_mask != None: \n test_mask = test_mask/np.max(test_mask)\n accuracy = self.dice_coef(test_mask,test_res) \n return test_res,accuracy\n else:\n return test_res\n\n def testSingle(self, test_data, test_mask=None, model =None):\n \"\"\"\n This function is used to predict the mask corresponding to a single test image. 
\n It takes as input the test_data (a required parameter) and two non-required parameters- test_mask and model\n \n Use this to test a single image.\n\n Parameters\n -----------\n 'test_data': (nparray int or float) \n the satellite image. dimension is (640,640,3) or (a,640,640,3) \n 'test_mask': (nparray int/flaot) \n the ground truth of what the mask should be \n 'model': (tf.keras model object) \n a custom model can be provided as input or we can use the initialized model\n \n Returns\n -----------\n 'test_res': (nparray float) \n retuns the predicted mask of the single image. The dimension is (640,640 or (a,640,640))\n 'accuracy': (float) \n returns the accuarcy of prediction as compared with the ground truth if provided\n \n \"\"\"\n #check that the inputs are correct\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data must be of type Numpy ndarray.\")\n #Test that the input array has 2 to 3 channels\n if (len(test_data.shape) > 3) | (len(test_data.shape) < 2):\n raise ValueError(\"Numpy array test_data shape should be 2 or 3 dimensions.\")\n #Once the array passes checks, run the sequence\n test_data = test_data/255\n test_data = test_data[np.newaxis, :]\n if model != None:\n test_res = model.predict(test_data)\n else :\n test_res = self.model.predict(test_data)\n test_res = (test_res[0].reshape(640,640))\n if test_mask != None: \n test_mask = test_mask/np.max(test_mask)\n accuracy = self.dice_coef(test_mask,test_res) \n return test_res,accuracy\n else:\n return test_res \n \n\n def hasPanels(self, test_data):\n \"\"\"\n This function is used to predict if there is a panel in an image or not. \n Note that it uses a saved classifier model we have trained and not the \n segmentation model. \n \n Parameters\n -----------\n test_data: (nparray float or int) \n the satellite image. The shape should be [a,640,640,3] where \n 'a' is the number of data or (640,640,3) if it is a single image\n \n Returns\n -----------\n Boolean. Returns True if solar array is detected in an image, and False otherwise.\n \"\"\"\n #Check that the input is correct\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data must be of type Numpy ndarray.\")\n #Test that the input array has 3 to 4 channels\n if (len(test_data.shape) > 4) | (len(test_data.shape) < 3):\n raise ValueError(\"Numpy array test_data shape should be 3 dimensions if a single image, or 4 dimensions if a batch of images.\") \n test_data = test_data/255\n #This ensures the first dimension is the number of test data to be predicted\n if test_data.ndim == 3:\n test_data = test_data[np.newaxis, :]\n prediction = self.classifier.predict(test_data)\n #index 0 is for no panels while index 1 is for panels\n if prediction[0][1] > prediction[0][0]:\n return True \n else:\n return False\n \n\n def detectAzimuth(self, in_img, number_lines=5):\n \"\"\"\n This function uses canny edge detection to first extract the edges of the input image. \n To use this function, you have to first predict the mask of the test image \n using testSingle function. Then use the cropPanels function to extract the solar \n panels from the input image using the predicted mask. 
Hence the input image to this \n function is the cropped image of solar panels.\n \n After edge detection, Hough transform is used to detect the most dominant lines in\n the input image and subsequently use that to predict the azimuth of a single image\n \n Parameters\n -----------\n in_img: (nparray uint8) \n The image containing the extracted solar panels with other pixels zeroed off. Dimension is [1,640,640,3]\n number_lines: (int) \n This variable tells the function the number of dominant lines it should examine.\n We currently inspect the top 10 lines.\n \n Returns\n -----------\n azimuth: (int) \n The azimuth of the panel in the image.\n \"\"\"\n #Check that the input variables are of the correct type\n if type(in_img) != np.ndarray:\n raise TypeError(\"Variable in_img must be of type Numpy ndarray.\")\n if type(number_lines) != int:\n raise TypeError(\"Variable number_lines must be of type int.\")\n #Run through the function\n edges = cv2.Canny(in_img[0],50,150,apertureSize=3)\n tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 360)\n h, theta, d = hough_line(edges, theta=tested_angles)\n origin = np.array((0, edges.shape[1]))\n ind =0\n azimuth = 0\n az = np.zeros((number_lines))\n # Classic straight-line Hough transform\n # Set a precision of 0.5 degree. \n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=number_lines, threshold =0.25*np.max(h))):\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n \n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n az[ind] = 90+deg_ang\n else:\n az[ind] = 270 + deg_ang\n ind =ind+1\n unique_elements, counts_elements = np.unique(az, return_counts=True)\n check = counts_elements[np.argmax(counts_elements)]\n if check == 1:\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=1, threshold =0.25*np.max(h))):\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n azimuth = 90+deg_ang\n else:\n azimuth = 270 + deg_ang\n else:\n azimuth = (unique_elements[np.argmax(counts_elements)])\n return azimuth \n\n \n def cropPanels(self, test_data, test_res):\n \"\"\"\n This function basically isolates regions with solar panels in a \n satellite image using the predicted mask. It zeros out other pixels that does not \n contain a panel.\n You can use this for a single test data or multiple test data. \n \n Parameters \n ----------\n test_data: (nparray float)\n This is the input test data. This can be a single image or multiple image. Hence the \n dimension can be (640,640,3) or (a,640,640,3)\n test_res: (nparray float) \n This is the predicted mask of the test images passed as an input and used to crop out the \n solar panels. dimension is (640,640)\n \n Returns \n ----------\n new_test_res: (nparray uint8) \n This returns images here the solar panels have been cropped out and the background zeroed. \n It has the same shape as test data. 
The dimension is [a,640,640,3] where a is the number of\n input images\n \n \"\"\"\n #Check that the input variables are of the correct type\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data must be of type Numpy ndarray.\")\n if type(test_res) != np.ndarray:\n raise TypeError(\"Variable test_res must be of type Numpy ndarray.\") \n #Convert the test_data array from 3D to 4D\n if test_data.ndim == 3:\n test_data = test_data[np.newaxis, :]\n new_test_res = np.uint8(np.zeros((test_data.shape[0],640,640,3)))\n for ju in np.arange(test_data.shape[0]):\n try:\n in_img = test_res[ju].reshape(640,640)\n except:\n in_img = test_res.reshape(640,640)\n in_img[in_img < 0.9] = 0\n in_img[in_img >= 0.9] = 1\n in_img = np.uint8(in_img)\n test2 = np.copy(test_data[ju])\n test2[(1-in_img).astype(bool),0] = 0\n test2[(1-in_img).astype(bool),1] = 0\n test2[(1-in_img).astype(bool),2] = 0\n new_test_res[ju] = test2 \n return new_test_res\n \n \n def plotEdgeAz(self, test_results, no_lines=5, \n no_figs=1, save_img_file_path = None,\n plot_show = False):\n \"\"\"\n This function is used to generate plots of the image with its azimuth\n It can generate three figures or one. For three figures, that include the \n input image, the hough transform space and the input images with detected lines.\n For single image, it only outputs the input image with detected lines.\n \n Parameters \n ----------\n test_results: (nparray float64 or unit8) \n 8-bit input image. This variable represents the predicted images from the segmentation model. Hence the \n dimension must be [a,b,c,d] where [a] is the number of images, [b,c] are the dimensions\n of the image - 640 x 640 in this case and [d] is 3 - RGB\n no_lines: (int) \n default is 10. This variable tells the function the number of dominant lines it should examine. \n no_figs: (int) \n 1 or 3. If the number of figs is 1. It outputs the mask with Hough lines and the predicted azimuth\n However, if the number of lines is 3, it gives three plots. \n 1. The input image,\n 2. Hough transform search space\n 3. 
Unput image with houghlines and the predicted azimuth\n \n save_img_file_path: (string) \n You can pass as input the location to save the plots\n plot_show: Boolen: If False, it will supress the plot as an output and just save the plots in a folder\n \n Returns \n ----------\n Plot of the masked image, with detected Hough Lines and azimuth estimate.\n \"\"\"\n #Check that the input variables are of the correct type\n if type(test_results) != np.ndarray:\n raise TypeError(\"Variable test_results must be of type Numpy ndarray.\")\n if type(no_lines) != int:\n raise TypeError(\"Variable no_lines must be of type int.\") \n if type(no_figs) != int:\n raise TypeError(\"Variable no_figs must be of type int.\") \n if type(plot_show) != bool:\n raise TypeError(\"Variable no_figs must be of type boolean.\") \n \n for ii in np.arange(test_results.shape[0]):\n #This changes the float64 to uint8\n if (test_results.dtype is np.dtype(np.float64)):\n in_img = test_results[ii].reshape(640,640)\n in_img[in_img < 0.9] = 0\n in_img[in_img >= 0.9] = 1\n in_img = np.uint8(in_img)\n\n in_img = test_results[ii]\n #Edge detection\n edges = cv2.Canny(in_img,50,150,apertureSize=3)\n tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 360)\n h, theta, d = hough_line(edges, theta=tested_angles)\n az = np.zeros((no_lines))\n origin = np.array((0, edges.shape[1]))\n ind =0\n # Generating figure 1 \n fig, ax = plt.subplots(1, no_figs, figsize=(10, 6))\n if no_figs == 1:\n ax.imshow(edges)# cmap=cm.gray)\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=no_lines, threshold =0.25*np.max(h))):\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n az[ind] = 90+deg_ang\n else:\n az[ind] = 270 + deg_ang\n ind =ind+1\n ax.plot(origin, (y0, y1), '-r')\n ax.set_xlim(origin)\n ax.set_ylim((edges.shape[0], 0))\n ax.set_axis_off()\n unique_elements, counts_elements = np.unique(az, return_counts=True)\n \n check = counts_elements[np.argmax(counts_elements)]\n \n if check == 1:\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=1, threshold =0.25*np.max(h))):\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n azimuth = 90+deg_ang\n else:\n azimuth = 270 + deg_ang\n else:\n azimuth = (unique_elements[np.argmax(counts_elements)])\n #print(np.asarray((unique_elements, counts_elements)))\n ax.set_title('Azimuth = %i' %azimuth)\n #save the image\n if save_img_file_path != None:\n plt.savefig(save_img_file_path + '/crop_mask_az_'+str(ii),\n dpi=300)\n #Show the plot if plot_show = True\n if plot_show == True:\n plt.tight_layout()\n plt.show() \n elif no_figs == 3:\n ax = ax.ravel()\n\n ax[0].imshow(in_img, cmap=cm.gray)\n ax[0].set_title('Input image')\n ax[0].set_axis_off()\n \n\n ax[1].imshow(np.log(1 + h),\n extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]), d[-1], d[0]],\n cmap=cm.gray, aspect=1/1.5)\n ax[1].set_title('Hough transform')\n ax[1].set_xlabel('Angles (degrees)')\n ax[1].set_ylabel('Distance (pixels)')\n ax[1].axis('image')\n\n ax[2].imshow(in_img)# cmap=cm.gray)\n origin = np.array((0, edges.shape[1]))\n ind =0\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=no_lines, threshold =0.25*np.max(h))):\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n \n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n az[ind] = 90+deg_ang\n else:\n az[ind] = 270 + deg_ang\n ind =ind+1\n ax.plot(origin, (y0, y1), '-r')\n ax[2].set_xlim(origin)\n ax[2].set_ylim((edges.shape[0], 0))\n ax[2].set_axis_off()\n 
unique_elements, counts_elements = np.unique(az, return_counts=True)\n \n check = counts_elements[np.argmax(counts_elements)]\n \n if check == 1:\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=1, threshold =0.25*np.max(h))):\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n azimuth = 90+deg_ang\n else:\n azimuth = 270 + deg_ang\n else:\n azimuth = (unique_elements[np.argmax(counts_elements)])\n #print(np.asarray((unique_elements, counts_elements)))\n ax[2].set_title('Azimuth = %i' %azimuth)\n #save the image\n if save_img_file_path != None:\n plt.savefig(save_img_file_path + '/crop_mask_az_'+str(ii),\n dpi=300)\n #Show the plot if plot_show = True\n if plot_show == True:\n plt.tight_layout()\n plt.show() \n else:\n print(\"Enter valid parameters\")\n \n\n def clusterPanels(self, test_mask, fig=False):\n '''\n This function uses connected component algorithm to cluster the panels\n\n Parameters\n ----------\n test_mask : (bool) or (float)\n The predicted mask. Dimension is (640,640) or can be converted to RGB (640,640,3)\n fig : (bool)\n shows the clustering image if fig = True\n\n Returns\n -------\n (uint8)\n Masked image containing detected clusters each of dimension(640,640,3)\n \n (uint8)\n The optimal number of clusters\n '''\n #Check that the input variables are of the correct type\n if type(test_mask) != np.ndarray:\n raise TypeError(\"Variable test_mask must be of type Numpy ndarray.\")\n if type(fig) != bool:\n raise TypeError(\"Variable fig must be of type bool.\") \n #Continue running through the function if all the inputs are correct\n if (len(test_mask.shape) < 3):\n test_mask = cv2.cvtColor(test_mask,cv2.COLOR_GRAY2RGB)\n test_mask = test_mask.reshape(640,640,3) \n # Converting those pixels with values 0-0.5 to 0 and others to 1\n img = cv2.threshold(test_mask, 0.5, 1, cv2.THRESH_BINARY)[1]\n # Applying cv2.connectedComponents() \n num_labels, labels = cv2.connectedComponents(img[:,:,2].reshape(640,640)) \n # Map component labels to hue val, 0-179 is the hue range in OpenCV\n label_hue = np.uint8(179*labels/np.max(labels))\n blank_ch = 255*np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n # Converting cvt to BGR\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n # set background label to black\n labeled_img[label_hue==0] = 0\n #Initialize each clusters\n clusters = np.uint8(np.zeros((num_labels-1, 640, 640,3)))\n #starting from 1 to ignore background\n for i in np.arange(1,num_labels):\n clus = np.copy(test_mask)\n c_mask = labels==i\n #clus_label = np.zeros((640,640,3))\n clus[(1-c_mask).astype(bool),0] = 0\n clus[(1-c_mask).astype(bool),1] = 0\n clus[(1-c_mask).astype(bool),2] = 0\n #clus_label = np.stack((clus_label,)*3, axis=-1)\n clusters[i-1] = clus\n # Loop through each cluster, and detect number of non-zero values\n # in each cluster.\n clusters_list_keep = []\n for cluster_number in range(clusters.shape[0]):\n cluster = clusters[cluster_number]\n # Get the number of non-zero values as a ratio of total pixels\n pixel_count = len(cluster[cluster>0])\n total_pixels = cluster.shape[0] * cluster.shape[1] * cluster.shape[2]\n # Must greater than 3% non-zero pixels or we omit the cluster\n print(pixel_count / total_pixels)\n if (pixel_count / total_pixels) >= 0.0015:\n clusters_list_keep.append(cluster_number)\n # Filter clusters\n clusters = clusters[clusters_list_keep]\n if fig == True:\n #Showing Image after Component Labeling\n plt.figure()\n plt.imshow(cv2.cvtColor(labeled_img, 
cv2.COLOR_BGR2RGB))\n plt.axis('off')\n plt.title(\"Image after Component Labeling\")\n plt.show()\n return len(clusters),clusters\n \n \n\n \n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.linspace",
"numpy.rad2deg",
"numpy.dtype",
"numpy.max",
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.unique",
"numpy.arange",
"numpy.uint8",
"numpy.sin",
"numpy.copy",
"numpy.argmax",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.title",
"tensorflow.keras.backend.sum",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.cos",
"tensorflow.keras.backend.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
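Editorial note (not part of the dataset row above): the diceCoeff method in the panel-detection code field reduces to a smoothed overlap-over-mass ratio computed with tensorflow.keras.backend; the short NumPy restatement below is an illustration of that formula only, assuming the same (N, H, W, C) mask layout and the smooth=1 default.

import numpy as np

def dice_coeff(y_true: np.ndarray, y_pred: np.ndarray, smooth: int = 1) -> float:
    # Per-sample overlap and total mask mass, summed over H, W, C as in the Keras version.
    intersection = np.sum(y_true * y_pred, axis=(1, 2, 3))
    union = np.sum(y_true, axis=(1, 2, 3)) + np.sum(y_pred, axis=(1, 2, 3))
    # The smoothing term keeps the ratio defined (and, in the TF version, differentiable)
    # when both masks are empty.
    return float(np.mean((2.0 * intersection + smooth) / (union + smooth)))

# Identical 640x640 single-channel masks score exactly 1.0.
mask = np.ones((1, 640, 640, 1), dtype=np.float32)
assert abs(dice_coeff(mask, mask) - 1.0) < 1e-6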
deka108/mars | [
"2cd39847c188bb690dd5e2d612a5cbe9f7b21eca"
] | [
"mars/web/session.py"
] | [
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport time\nimport logging\nimport pickle\nimport sys\nimport uuid\nfrom io import BytesIO\nfrom numbers import Integral\n\nimport numpy as np\n\nfrom ..config import options\nfrom ..core.operand import Fetch\nfrom ..errors import ResponseMalformed, ExecutionInterrupted, ExecutionFailed, \\\n ExecutionStateUnknown, ExecutionNotStopped\nfrom ..serialize import dataserializer\nfrom ..serialize.dataserializer import pyarrow\nfrom ..tensor.core import Indexes\nfrom ..utils import build_tileable_graph, sort_dataframe_result, \\\n numpy_dtype_from_descr_json, serialize_graph, serialize_serializable\n\nlogger = logging.getLogger(__name__)\n\n\nclass Session(object):\n def __init__(self, endpoint, session_id=None, req_session=None, verify_ssl=True,\n **session_kwargs):\n self._endpoint = endpoint.rstrip('/')\n self._session_id = session_id\n self._session_kwargs = session_kwargs\n # dict structure: {tileable_key -> graph_key, tileable_ids}\n # dict value is a tuple object which records graph key and tileable id\n self._executed_tileables = dict()\n\n self._serial_type = None\n self._pickle_protocol = pickle.HIGHEST_PROTOCOL\n\n if req_session:\n self._req_session = req_session\n else:\n import requests\n from requests.adapters import HTTPAdapter\n\n self._req_session = requests.Session()\n self._req_session.mount('http://stackoverflow.com', HTTPAdapter(max_retries=5))\n\n self._req_session.verify = verify_ssl\n if not verify_ssl:\n try:\n import urllib3\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n except ImportError: # pragma: no cover\n pass\n\n self._main()\n\n @property\n def session_id(self):\n return self._session_id\n\n @property\n def endpoint(self):\n return self._endpoint\n\n @endpoint.setter\n def endpoint(self, url):\n self._endpoint = url\n\n def _main(self):\n if pyarrow is None:\n self._serial_type = dataserializer.SerialType.PICKLE\n else:\n self._serial_type = dataserializer.SerialType(options.client.serial_type.lower())\n\n session_kw = self._session_kwargs.copy()\n session_kw['pyver'] = '.'.join(str(v) for v in sys.version_info[:3])\n session_kw['pickle_protocol'] = self._pickle_protocol\n if pyarrow is not None:\n session_kw['arrow_version'] = pyarrow.__version__\n\n if self._session_id is None:\n resp = self._req_session.post(self._endpoint + '/api/session', data=session_kw)\n\n if resp.status_code >= 400:\n raise SystemError('Failed to create mars session: ' + resp.reason)\n else:\n resp = self._req_session.get(\n self._endpoint + '/api/session/' + self._session_id, params=session_kw)\n if resp.status_code == 404:\n raise ValueError(f'The session with id = {self._session_id} doesn\\'t exist')\n if resp.status_code >= 400:\n raise SystemError('Failed to check mars session.')\n\n content = json.loads(resp.text)\n self._session_id = content['session_id']\n self._pickle_protocol = content.get('pickle_protocol', 
pickle.HIGHEST_PROTOCOL)\n\n # as pyarrow will use pickle.HIGHEST_PROTOCOL to pickle, we need to use\n # SerialType.PICKLE when pickle protocol between client and server\n # does not agree with each other\n if not content.get('arrow_compatible') or self._pickle_protocol != pickle.HIGHEST_PROTOCOL:\n self._serial_type = dataserializer.SerialType.PICKLE\n\n def _get_tileable_graph_key(self, tileable_key):\n return self._executed_tileables[tileable_key][0]\n\n def _set_tileable_graph_key(self, tileable, graph_key):\n tileable_key = tileable.key\n tileable_id = tileable.id\n if tileable_key in self._executed_tileables:\n self._executed_tileables[tileable_key][1].add(tileable_id)\n else:\n self._executed_tileables[tileable_key] = graph_key, {tileable_id}\n\n @staticmethod\n def _handle_json_response(resp, allow_empty=True, raises=True):\n try:\n resp_txt = resp.text\n if allow_empty:\n resp_txt = resp_txt or '{}'\n resp_json = json.loads(resp_txt)\n except json.JSONDecodeError:\n text_part = resp.text if len(resp.text) < 100 else resp.text[:100] + '...'\n raise ResponseMalformed(f'Failed to parse server response. Status={resp.status_code} '\n f'Response=\"{text_part}\"')\n\n if raises and resp.status_code >= 400:\n exc_info = pickle.loads(base64.b64decode(resp_json['exc_info']))\n raise exc_info[1].with_traceback(exc_info[2])\n return resp_json\n\n def _check_response_finished(self, graph_url, timeout=None):\n import requests\n try:\n resp = self._req_session.get(graph_url, params={'wait_timeout': timeout})\n except requests.ConnectionError as ex:\n err_msg = str(ex)\n if 'ConnectionResetError' in err_msg or 'Connection refused' in err_msg or \\\n 'Connection aborted' in err_msg:\n return False\n raise\n\n if resp.status_code == 504:\n logging.debug('Gateway Time-out, try again')\n return False\n if resp.status_code >= 400:\n raise SystemError(f'Failed to obtain execution status. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n\n resp_json = self._handle_json_response(resp, raises=False)\n if resp_json['state'] == 'succeeded':\n return True\n elif resp_json['state'] in ('running', 'preparing'):\n return False\n elif resp_json['state'] in ('cancelled', 'cancelling'):\n raise ExecutionInterrupted\n elif resp_json['state'] == 'failed':\n if 'exc_info' in resp_json:\n exc_info = pickle.loads(base64.b64decode(resp_json['exc_info']))\n exc = exc_info[1].with_traceback(exc_info[2])\n raise ExecutionFailed('Graph execution failed.') from exc\n else:\n raise ExecutionFailed('Graph execution failed with unknown reason.')\n raise ExecutionStateUnknown('Unknown graph execution state ' + resp_json['state'])\n\n def run(self, *tileables, **kw):\n timeout = kw.pop('timeout', -1)\n compose = kw.pop('compose', True)\n fetch = kw.pop('fetch', True)\n name = kw.pop('name', None)\n if kw:\n raise TypeError(f'run got unexpected key arguments {kw!r}')\n\n # those executed tileables should fetch data directly, submit the others\n run_tileables = [t for t in tileables if t.key not in self._executed_tileables]\n\n if name is not None:\n if not isinstance(name, (list, tuple)):\n name = [name]\n if len(name) != len(tileables):\n raise TypeError('Name must match execute tileables')\n name = ','.join(name)\n\n graph = build_tileable_graph(run_tileables, set(self._executed_tileables.keys()))\n targets = [t.key for t in run_tileables]\n\n if len(graph) > 0:\n targets_join = ','.join(targets)\n session_url = self._endpoint + '/api/session/' + self._session_id\n serialized_graph = serialize_graph(graph)\n\n resp_json = self._submit_graph(serialized_graph, targets_join, names=name or '', compose=compose)\n graph_key = resp_json['graph_key']\n graph_url = f'{session_url}/graph/{graph_key}'\n\n exec_start_time = time.time()\n time_elapsed = 0\n check_interval = options.check_interval\n while timeout <= 0 or time_elapsed < timeout:\n timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval\n try:\n if self._check_response_finished(graph_url, timeout_val):\n break\n except KeyboardInterrupt:\n resp = self._req_session.delete(graph_url)\n if resp.status_code >= 400:\n raise ExecutionNotStopped(\n f'Failed to stop graph execution. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n finally:\n time_elapsed = time.time() - exec_start_time\n\n if 0 < timeout < time.time() - exec_start_time:\n raise TimeoutError\n\n for t in tileables:\n self._set_tileable_graph_key(t, graph_key)\n\n if not fetch:\n return\n else:\n return self.fetch(*tileables)\n\n def _is_executed(self, tileable):\n # if tileble.key in executed tileables\n # or it's a fetch already\n return tileable.key in self._executed_tileables or \\\n isinstance(tileable.op, Fetch)\n\n def fetch(self, *tileables, **kw):\n from ..tensor.indexing import TensorIndex\n from ..dataframe.indexing.iloc import DataFrameIlocGetItem, SeriesIlocGetItem\n\n timeout = kw.pop('timeout', None)\n if kw:\n raise TypeError(f'fetch got unexpected key arguments {kw!r}')\n\n results = list()\n for tileable in tileables:\n if tileable.key not in self._executed_tileables and \\\n isinstance(tileable.op, (TensorIndex, DataFrameIlocGetItem, SeriesIlocGetItem)):\n to_fetch_tileable = tileable.inputs[0]\n indexes = tileable.op.indexes\n if not all(isinstance(ind, (slice, Integral)) for ind in indexes):\n raise ValueError('Only support fetch data slices')\n else:\n to_fetch_tileable = tileable\n indexes = []\n\n if not self._is_executed(to_fetch_tileable):\n raise ValueError('Cannot fetch the unexecuted tileable')\n\n key = to_fetch_tileable.key\n indexes_str = base64.b64encode(\n serialize_serializable(Indexes(indexes=indexes))).decode('ascii')\n\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n compression_str = ','.join(v.value for v in dataserializer.get_supported_compressions())\n params = dict(compressions=compression_str, slices=indexes_str,\n serial_type=self._serial_type.value, pickle_protocol=self._pickle_protocol)\n data_url = f'{session_url}/graph/{self._get_tileable_graph_key(key)}/data/{key}'\n resp = self._req_session.get(data_url, params=params, timeout=timeout)\n if resp.status_code >= 400:\n raise ValueError(f'Failed to fetch data from server. Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n result_data = dataserializer.loads(resp.content)\n results.append(sort_dataframe_result(tileable, result_data))\n return results\n\n @classmethod\n def _process_int_or_dict_argument(cls, argument, name, params):\n if argument is None:\n return\n if not isinstance(argument, dict):\n params[name] = argument\n else:\n params[name] = ','.join(f'{k}={v}' for k, v in argument.items())\n\n def fetch_tileable_op_logs(self, tileable_op_key, offsets=None, sizes=None):\n url = f'{self._endpoint}/api/session/{self._session_id}/op/{tileable_op_key}/log'\n params = dict()\n self._process_int_or_dict_argument(offsets, 'offsets', params)\n self._process_int_or_dict_argument(sizes, 'sizes', params)\n resp = self._req_session.get(url, params=params)\n if resp.status_code >= 400:\n raise ValueError(f'Failed to fetch log from server. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n return json.loads(resp.content)\n\n def fetch_log(self, tileables, offsets=None, sizes=None):\n from ..custom_log import fetch\n\n return fetch(tileables, self, offsets=offsets, sizes=sizes)\n\n def get_named_tileable_infos(self, name):\n from ..context import TileableInfos\n\n url = f'{self._endpoint}/api/session/{self._session_id}'\n params = dict(name=name)\n resp = self._req_session.get(url, params=params)\n if resp.status_code >= 400: # pragma: no cover\n raise ValueError(f'Failed to get tileable key from server. Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n tileable_key = self._handle_json_response(resp)['tileable_key']\n nsplits, extra_meta = self._get_tileable_meta(tileable_key)\n shape = tuple(sum(s) for s in nsplits)\n return TileableInfos(tileable_key, shape, extra_meta)\n\n def create_mutable_tensor(self, name, shape, dtype, fill_value=None, chunk_size=None, *_, **__):\n from ..tensor.utils import create_mutable_tensor\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{name}?action=create'\n if not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n # avoid built-in scalar dtypes are made into one-field record type.\n if dtype.fields:\n dtype_descr = dtype.descr\n else:\n dtype_descr = str(dtype)\n tensor_json = {\n 'shape': shape,\n 'dtype': dtype_descr,\n 'fill_value': fill_value,\n 'chunk_size': chunk_size,\n }\n resp = self._req_session.post(tensor_url, json=tensor_json)\n shape, dtype, chunk_size, chunk_keys, chunk_eps = self._handle_json_response(resp)\n return create_mutable_tensor(name, chunk_size, shape, numpy_dtype_from_descr_json(dtype),\n chunk_keys, chunk_eps)\n\n def get_mutable_tensor(self, name):\n from ..tensor.utils import create_mutable_tensor\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{name}'\n resp = self._req_session.get(tensor_url)\n shape, dtype, chunk_size, chunk_keys, chunk_eps = self._handle_json_response(resp)\n return create_mutable_tensor(name, chunk_size, shape, numpy_dtype_from_descr_json(dtype),\n chunk_keys, chunk_eps)\n\n def write_mutable_tensor(self, tensor, index, value):\n \"\"\"\n How to serialize index and value:\n\n 1. process_index and serialize it as json\n 2. 
the payload of POST request:\n\n * a int64 value indicate the size of index json\n * ascii-encoded bytes of index json\n * pyarrow serialized bytes of `value`\n \"\"\"\n from ..tensor.core import Indexes\n from ..serialize import dataserializer\n\n index = Indexes(indexes=index)\n index_bytes = base64.b64encode(serialize_serializable(index))\n bio = BytesIO()\n bio.write(np.int64(len(index_bytes)).tobytes())\n bio.write(index_bytes)\n dataserializer.dump(value, bio)\n\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{tensor.name}'\n resp = self._req_session.put(tensor_url, data=bio.getvalue(),\n headers={'Content-Type': 'application/octet-stream'})\n self._handle_json_response(resp)\n\n def seal(self, tensor):\n from ..tensor.utils import create_fetch_tensor\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{tensor.name}?action=seal'\n resp = self._req_session.post(tensor_url)\n graph_key_hex, tileable_key, tensor_id, tensor_meta = self._handle_json_response(resp)\n self._executed_tileables[tileable_key] = uuid.UUID(graph_key_hex), {tensor_id}\n\n # # Construct Tensor on the fly.\n shape, dtype, chunk_size, chunk_keys, _ = tensor_meta\n return create_fetch_tensor(chunk_size, shape, numpy_dtype_from_descr_json(dtype),\n tensor_key=tileable_key, chunk_keys=chunk_keys)\n\n def _get_tileable_nsplits(self, tileable_key):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n graph_key = self._get_tileable_graph_key(tileable_key)\n url = f'{session_url}/graph/{graph_key}/data/{tileable_key}?type=nsplits'\n resp = self._req_session.get(url)\n new_nsplits = self._handle_json_response(resp)\n return new_nsplits\n\n def _get_tileable_meta(self, tileable_key):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n graph_key = self._get_tileable_graph_key(tileable_key)\n url = f'{session_url}/graph/{graph_key}/data/{tileable_key}?type=meta'\n resp = self._req_session.get(url)\n meta = self._handle_json_response(resp)\n return pickle.loads(base64.b64decode(meta)) # nosec\n\n def _update_tileable_shape(self, tileable):\n tileable_key = tileable.key\n new_nsplits = self._get_tileable_nsplits(tileable_key)\n tileable._update_shape(tuple(sum(nsplit) for nsplit in new_nsplits))\n tileable.nsplits = new_nsplits\n\n def decref(self, *keys):\n for tileable_key, tileable_id in keys:\n if tileable_key not in self._executed_tileables:\n continue\n graph_key, ids = self._executed_tileables[tileable_key]\n\n if tileable_id in ids:\n ids.remove(tileable_id)\n # for those same key tileables, do decref only when all those tileables are garbage collected\n if len(ids) != 0:\n continue\n self.delete_data(tileable_key)\n\n def delete_data(self, tileable_key, wait=False):\n if tileable_key not in self._executed_tileables:\n return\n graph_key, _ids = self._executed_tileables[tileable_key]\n data_url = f'{self._endpoint}/api/session/{self._session_id}/graph/{graph_key}' \\\n f'/data/{tileable_key}?wait={1 if wait else 0}'\n self._req_session.delete(data_url)\n self._executed_tileables.pop(tileable_key, None)\n\n def stop(self, graph_key):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n graph_url = session_url + '/graph/' + graph_key\n resp = self._req_session.delete(graph_url)\n if resp.status_code >= 400:\n raise SystemError(f'Failed to stop graph execution. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n\n def _submit_graph(self, serialized_graph, targets, names=None, compose=True):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n resp = self._req_session.post(session_url + '/graph', dict(\n graph=base64.b64encode(serialized_graph).decode('ascii'),\n target=targets,\n names=names,\n compose='1' if compose else '0'\n ))\n return self._handle_json_response(resp)\n\n def get_graph_states(self):\n resp = self._req_session.get(f'{self._endpoint}/api/session/{self._session_id}/graph')\n return self._handle_json_response(resp)\n\n def close(self):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n for key in list(self._executed_tileables.keys()):\n self.delete_data(key, wait=True)\n resp = self._req_session.delete(session_url)\n if resp.status_code >= 400:\n raise SystemError('Failed to close mars session.')\n\n def check_service_ready(self, timeout=1):\n import requests\n try:\n resp = self._req_session.get(self._endpoint + '/api', timeout=timeout)\n except (requests.ConnectionError, requests.Timeout):\n return False\n if resp.status_code >= 400:\n return False\n return True\n\n def count_workers(self):\n resp = self._req_session.get(self._endpoint + '/api/worker?action=count', timeout=1)\n return self._handle_json_response(resp)\n\n def get_cpu_count(self):\n resp = self._req_session.get(self._endpoint + '/api/worker?action=count_cpu', timeout=1)\n return self._handle_json_response(resp)\n\n def rescale_workers(self, new_scale, min_workers=None, wait=True, timeout=None):\n data = json.dumps(dict(new_scale=new_scale, min_workers=min_workers))\n wait_req = 1 if wait else 0\n resp = self._req_session.patch(f'{self._endpoint}/api/worker?action=count&wait={wait_req}',\n data, timeout=timeout)\n return self._handle_json_response(resp)\n\n def get_workers_meta(self):\n resp = self._req_session.get(self._endpoint + '/api/worker', timeout=1)\n return self._handle_json_response(resp)\n\n def get_task_count(self):\n resp = self._req_session.get(f'{self._endpoint}/api/session/{self._session_id}/graph')\n return len(self._handle_json_response(resp))\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n"
] | [
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
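Editorial note (not part of the dataset row above): Session.run in the mars/web/session.py code field submits a serialized graph and then polls the graph endpoint until the reported state is 'succeeded'. The sketch below strips that poll loop down to plain requests; the URL shown is a placeholder, not a value taken from the row.

import time
import requests

def wait_for_graph(graph_url: str, timeout: float = 60.0, interval: float = 1.0) -> bool:
    """Poll graph_url until the server reports 'succeeded'; return False on timeout."""
    start = time.time()
    while time.time() - start < timeout:
        resp = requests.get(graph_url, params={"wait_timeout": interval})
        if resp.status_code < 400:
            state = resp.json().get("state")
            if state == "succeeded":
                return True
            if state in ("failed", "cancelled"):
                raise RuntimeError(f"graph execution ended in state {state!r}")
        time.sleep(interval)
    return False

# Hypothetical endpoint; the real client assembles it from the session id and graph key.
# wait_for_graph("http://localhost:8080/api/session/<sid>/graph/<gkey>")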
jaryP/ContinualAI | [
"7d9b7614066d219ebd72049692da23ad6ec132b0",
"7d9b7614066d219ebd72049692da23ad6ec132b0",
"7d9b7614066d219ebd72049692da23ad6ec132b0",
"7d9b7614066d219ebd72049692da23ad6ec132b0",
"7d9b7614066d219ebd72049692da23ad6ec132b0"
] | [
"continual_learning/scenarios/classification/new_classes/nc_scenarios.py",
"continual_learning/scenarios/utils.py",
"continual_learning/methods/task_incremental/multi_task/gg/pruning/utils.py",
"continual_learning/datasets/svhn.py",
"continual_learning/backbone_networks/resnet/resnet.py"
] | [
"from collections import defaultdict\nfrom typing import Union, List, Any, Callable, Dict\n\nimport numpy as np\n\nfrom continual_learning.datasets.base import AbstractDataset, DatasetSplitsContainer\n\nfrom continual_learning.scenarios.base import TasksGenerator\nfrom continual_learning.scenarios.classification.new_classes import \\\n NCTransformingScenario\nfrom continual_learning.scenarios.classification.utils import \\\n get_dataset_subset_using_labels\nfrom continual_learning.scenarios.tasks import TransformerTask, Task\nfrom continual_learning.scenarios.utils import ImageRotation, PixelsPermutation\n\n\nclass NCScenario(TasksGenerator):\n def __init__(self,\n *,\n tasks_n: int,\n dataset: DatasetSplitsContainer,\n # transform_factory: Callable[[Any], Callable],\n # transformation_parameters: Union[List[any],\n # Callable[[Any], Any]],\n random_state: Union[np.random.RandomState, int] = None,\n lazy_initialization: bool = True,\n labels_per_tasks: Dict[int, int] = None,\n labels_task_mapping: Dict[int, int] = None,\n shuffle_labels: bool = False,\n remap_labels_across_task: bool = False,\n remap_labels_in_task: bool = False,\n **kwargs):\n\n super().__init__(dataset,\n random_state=random_state,\n **kwargs)\n\n dataset_labels = np.asarray(dataset.classes)\n assigned_labels = []\n\n if labels_task_mapping is None:\n labels_task_mapping = {}\n\n if labels_per_tasks is None:\n # if len(labels_task_mapping) == 0:\n if len(dataset_labels) % tasks_n != 0:\n raise ValueError(\n f'Attempted to create labels_per_tasks dictionary, '\n f'but the number of labels ({len(dataset_labels)}) '\n f'cannot be distributed equally between the tasks '\n f'({tasks_n}), '\n f'because len(dataset_labels) % tasks_n != 0.')\n\n labels_per_tasks = {task: len(dataset_labels) // tasks_n\n for task in range(tasks_n)}\n\n else:\n remaining_tasks = tasks_n - len(labels_per_tasks)\n\n if remaining_tasks > 0:\n assigned_tasks = sum(labels_per_tasks.values())\n remaining_labels = len(dataset_labels) - assigned_tasks\n labels_per_remaining_task = remaining_labels // remaining_tasks\n\n tasks_map = {i: labels_per_remaining_task\n for i in range(tasks_n)\n if i not in labels_per_tasks}\n\n labels_per_tasks.update(tasks_map)\n\n if any([v == 1 for v in labels_per_tasks.values()]):\n raise ValueError('Due to the lack of tasks '\n 'in labels_per_tasks, '\n 'the dictionary has been populated, '\n 'but some task has only '\n 'one labels associated ot it. '\n 'If intended, '\n 'please force this behaviour by setting '\n f'labels_per_tasks = {labels_per_tasks}')\n\n if remap_labels_in_task and remap_labels_across_task:\n raise ValueError('Both remap_labels_in_task and '\n 'remap_labels_across_task are set to True '\n 'but are mutually exclusive. '\n 'Please set at least one to False.')\n\n if max(labels_per_tasks.keys()) >= tasks_n or min(\n labels_per_tasks.keys()) < 0:\n raise ValueError('Invalid key value in labels_per_tasks. '\n f'The keys must be in [0, {tasks_n - 1}] '\n f'({labels_per_tasks.keys()})')\n\n if min(labels_per_tasks.values()) < 0:\n raise ValueError('Invalid value in labels_per_tasks. 
'\n f'The values must be > 0'\n f'({labels_per_tasks.keys()})')\n\n sm = sum(labels_per_tasks.values())\n if sm > len(dataset_labels):\n raise ValueError(f'The total number of classes in labels_per_tasks '\n f'({sm}) exceeds the number of labels '\n f'in the dataset ({len(dataset_labels)}).')\n\n if not all(label in dataset_labels\n for label, task in labels_task_mapping.items()):\n raise ValueError(f'Some labels in labels_task_mapping are not '\n f'present in the dataset. '\n f'Dataset labels: {dataset_labels}, '\n f'given labels: {labels_task_mapping}')\n\n if len(labels_task_mapping) > 0:\n if max(labels_task_mapping.keys()) > len(dataset_labels) - 1 \\\n or min(labels_task_mapping.keys()) < 0:\n raise ValueError('Invalid key value in labels_task_mapping. '\n f'The keys must be in '\n f'[0, {len(dataset_labels) - 1}] '\n f'({labels_task_mapping.keys()})')\n\n if max(labels_task_mapping.values()) >= tasks_n \\\n or min(labels_per_tasks.values()) < 0:\n raise ValueError('Invalid value in labels_task_mapping. '\n f'The values must be in [0, {tasks_n - 1}] '\n f'({labels_task_mapping.values()})')\n\n task_labels = {k: [] for k in range(tasks_n)}\n\n for label, task in labels_task_mapping.items():\n task_labels[task].append(label)\n assigned_labels.append(label)\n\n if any([len(v) > labels_per_tasks[t]\n for t, v in task_labels.items()]):\n s = {t: len(v) for t, v in task_labels.items()}\n raise ValueError(f'After populating the tasks '\n f'using labels_task_mapping, some task has more '\n f'assigned labels ({s}) than the limit '\n f'imposed by labels_per_tasks '\n f'({labels_per_tasks}).')\n\n if shuffle_labels:\n self.random_state.shuffle(dataset_labels)\n\n for label in [l for l in dataset_labels\n if l not in assigned_labels]:\n eligible_tasks = [t for t, v in task_labels.items()\n if len(v) < labels_per_tasks[t]]\n\n selected_task = eligible_tasks[0]\n task_labels[selected_task].append(label)\n\n labels_mapping = {}\n indexes = iter(range(len(dataset_labels)))\n\n for t, vals in task_labels.items():\n if remap_labels_across_task:\n map_dict = {v: next(indexes) for v in vals}\n elif remap_labels_in_task:\n map_dict = {v: i for i, v in enumerate(vals)}\n else:\n map_dict = {v: v for v in vals}\n\n labels_mapping[t] = map_dict\n\n self.tasks_n = tasks_n\n self.labels_mapping = labels_mapping\n self.lazy_initialization = lazy_initialization\n self.task_labels = task_labels\n\n self._tasks_generated = []\n\n # self.task_wise_labels = task_wise_labels\n # self.shuffle_labels = shuffle_labels\n #\n # self.parameters = transformation_parameters\n # self.task_n = tasks_n\n # self.transform_function = transform_factory\n #\n # self._t_counter = 0\n # self._current_task = 0\n #\n # self._transform_functions = []\n # self._tasks_generated = []\n\n if not lazy_initialization:\n for _ in range(tasks_n):\n self.generate_task()\n # # self._tasks_generated.append(t)\n\n def __len__(self):\n return self.tasks_n\n\n def __getitem__(self, i: int):\n if i >= len(self._tasks_generated):\n raise ValueError(f'Attempting to get a non generated task from '\n f'the lazy created stream of tasks (index: {i})'\n f'. 
Generate the task or set '\n f'lazy_initialization=False when '\n f'instantiating this class.')\n\n return self._tasks_generated[i]\n\n def generate_task(self, **kwargs) -> Union[Task, None]:\n\n counter = len(self._tasks_generated)\n\n # if self.infinite_stream and callable(self.parameters):\n # t_parameters = self.parameters(task=counter,\n # random_state=self.random_state)\n # else:\n if counter == len(self):\n return None\n\n # t_parameters = self.parameters[counter]\n #\n # t = self.transform_function(t_parameters)\n\n labels = self.task_labels[counter]\n labels_map = self.labels_mapping[counter]\n\n dataset = get_dataset_subset_using_labels(self.dataset, labels=labels)\n task = Task(base_dataset=dataset,\n labels_mapping=labels_map,\n task_index=counter)\n\n # task = TransformerTask(base_dataset=self.dataset, transformer=t,\n # index=counter)\n\n self._tasks_generated.append(task)\n\n return task\n\n # def __next__(self):\n # self._current_task = 0\n # return self\n\n def __iter__(self):\n for i in range(self.tasks_n):\n if len(self._tasks_generated) > i:\n t = self._tasks_generated[i]\n else:\n t = self.generate_task()\n if t is None:\n return\n\n yield t\n\n\nclass ImageRotationScenario(NCTransformingScenario):\n def __init__(self,\n dataset: DatasetSplitsContainer,\n tasks_n: int,\n transformation_parameters: Union[List[any],\n Callable[[Any], Any]],\n # infinite_stream: bool = False,\n random_state: Union[np.random.RandomState, int] = None,\n lazy_initialization: bool = True,\n labels_task_mapping: Dict[int, Union[int, list]] = None,\n remap_labels_across_task: bool = False,\n\n **kwargs):\n\n transform_function = self.get_rotation\n\n super().__init__(dataset=dataset,\n tasks_n=tasks_n,\n transform_factory=transform_function,\n transformation_parameters=transformation_parameters,\n # infinite_stream=infinite_stream,\n random_state=random_state,\n lazy_initialization=lazy_initialization,\n labels_task_mapping=labels_task_mapping,\n remap_labels_across_task=remap_labels_across_task,\n **kwargs)\n\n def get_rotation(self, degree, **kwargs):\n return ImageRotation(degree)\n\n\nclass PixelsPermutationScenario(NCTransformingScenario):\n def __init__(self,\n dataset: DatasetSplitsContainer,\n tasks_n: int,\n transformation_parameters: Union[List[any],\n Callable[[Any], Any]],\n # infinite_stream: bool = False,\n random_state: Union[np.random.RandomState, int] = None,\n lazy_initialization: bool = True,\n labels_task_mapping: Dict[int, Union[int, list]] = None,\n remap_labels_across_task: bool = False,\n\n **kwargs):\n\n transform_factory = self.get_permutation\n\n super().__init__(dataset=dataset,\n tasks_n=tasks_n,\n transform_factory=transform_factory,\n transformation_parameters=transformation_parameters,\n # infinite_stream=infinite_stream,\n random_state=random_state,\n lazy_initialization=lazy_initialization,\n labels_task_mapping=labels_task_mapping,\n remap_labels_across_task=remap_labels_across_task,\n **kwargs)\n\n def get_permutation(self, permutation, **kwargs):\n return PixelsPermutation(permutation)\n\n",
"from typing import Sequence, Union\n\nimport numpy as np\nfrom scipy.ndimage.interpolation import rotate as np_rotate\nfrom PIL.Image import Image\nfrom torch import Tensor, tensor\nfrom torchvision.transforms.functional import rotate\n\n\nclass ImageRotation(object):\n def __init__(self, degree):\n self.degree = degree\n\n def __call__(self, img: Union[Image, Tensor, np.ndarray]):\n if isinstance(img, np.ndarray):\n img = np_rotate(img, angle=self.degree, reshape=False)\n elif isinstance(img, Image):\n img = img.rotate(self.degree)\n elif isinstance(img, Tensor):\n img = rotate(img, angle=self.degree)\n else:\n raise ValueError(f'Accepted types are: '\n f'[ndarray, PIL Image, Tensor] {type(img)}')\n return img\n\n\nclass PixelsPermutation(object):\n def __init__(self, index_permutation: Sequence[int]):\n self.permutation = index_permutation\n\n def __call__(self, img: Union[Image, Tensor, np.ndarray]):\n if isinstance(img, np.ndarray):\n img = img.reshape(-1)[self.permutation].reshape(*img.shape)\n elif isinstance(img, Image):\n img = img.getdata()\n img = img.reshape(-1)[self.permutation].reshape(*img.shape)\n img = Image.fromarray(img)\n elif isinstance(img, Tensor):\n img = img.numpy()\n img = img.reshape(-1)[self.permutation].reshape(*img.shape)\n img = tensor(img)\n else:\n raise ValueError(f'Accepted types are: '\n f'[ndarray, PIL Image, Tensor] {type(img)}')\n\n return img\n\n\n",
"from typing import Union\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom continual_learning.scenarios.tasks import Task\n\n\ndef get_accuracy(encoder: torch.nn.Module, solver: torch.nn.Module,\n task: Task, batch_size=64, device='cpu'):\n with torch.no_grad():\n encoder.eval()\n solver.eval()\n\n true_labels = []\n predicted_labels = []\n\n for j, x, y in task.get_iterator(batch_size):\n x = x.to(device)\n true_labels.extend(y.tolist())\n emb = encoder(x)\n a = solver(emb, task=task.index).cpu()\n predicted_labels.extend(a.max(dim=1)[1].tolist())\n true_labels, predicted_labels = np.asarray(true_labels), np.asarray(predicted_labels)\n eq = predicted_labels == true_labels\n accuracy = eq.sum() / len(eq)\n\n return accuracy\n\n\nclass PrunedLayer(nn.Module):\n def __init__(self, layer: Union[nn.Linear, nn.Conv2d]):\n\n super().__init__()\n\n self._use_mask = True\n self._eval_mask = None\n\n self._mask = None\n self.steps = 0\n\n self.last_mask = None\n self.layer = layer\n\n self.is_conv = isinstance(layer, nn.Conv2d)\n\n @property\n def weight(self):\n return self.layer.weight\n\n @property\n def mask(self):\n return self._mask\n\n @mask.setter\n def mask(self, m):\n self._mask = m\n\n def forward(self, x):\n\n mask = self.mask\n\n w = self.layer.weight\n if mask is not None:\n w = w * mask\n\n if self.is_conv:\n o = nn.functional.conv2d(x, w, None, stride=self.layer.stride, padding=self.layer.padding,\n dilation=self.layer.dilation, groups=self.layer.groups)\n else:\n o = nn.functional.linear(x, w, None)\n\n return o\n\n\nclass ForwardHook:\n def __init__(self, module: nn.Module, mask: torch.Tensor):\n mask = mask.unsqueeze(0)\n self.mask = mask\n self.hook = module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, module_in, module_out):\n\n return module_out * self.mask\n\n def remove(self):\n self.hook.remove()\n\n def update_mask(self, mask):\n self.mask = mask\n\n\n",
"from os.path import join, exists\nfrom typing import Callable, Tuple\nfrom urllib.request import urlretrieve\nimport numpy as np\nfrom scipy import io\n\nfrom continual_learning.datasets.base import DownloadableDataset\n\n__all__ = ['SVHN']\n\nclass SVHN(DownloadableDataset):\n\n url = {'train': {'images': \"http://ufldl.stanford.edu/housenumbers/train_32x32.mat\"},\n 'test': {'images': \"http://ufldl.stanford.edu/housenumbers/test_32x32.mat\"}}\n\n def __init__(self, download_if_missing: bool = True, data_folder: str = None,\n transform: Callable = None, test_transform: Callable = None, target_transform: Callable = None):\n\n self.file_names = [url.rpartition('/')[2] for url in self.url]\n\n super().__init__(name='SVHN', download_if_missing=download_if_missing, data_folder=data_folder,\n transform=transform,\n target_transform=target_transform,\n test_transform=test_transform)\n\n def download_dataset(self):\n for _, type in self.url.items():\n for _, url in type.items():\n urlretrieve(url, join(self.data_folder, url.rpartition('/')[2]))\n\n def load_dataset(self) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[list, list, list]]:\n\n x, y = [], []\n train, dev, test = [], [], []\n\n for split in 'train', 'test':\n v = self.url[split]\n loaded_mat = io.loadmat(join(self.data_folder, v['images'].rpartition('/')[2]))\n\n x.append(loaded_mat['X'])\n y.append(loaded_mat['y'].astype(np.int64).squeeze())\n\n if split == 'train':\n train = list(range(x[-1].shape[-1]))\n else:\n test = list(range(len(train), len(train) + x[-1].shape[-1]))\n\n x = np.concatenate(x, -1)\n x = np.transpose(x, (3, 0, 1, 2))\n y = np.concatenate(y)\n\n return (x, y), (train, test, dev)\n\n def _check_exists(self) -> bool:\n for split in 'train', 'test':\n v = self.url[split]\n if not exists(join(self.data_folder, v['images'].rpartition('/')[2])):\n return False\n return True\n",
"import torch.nn.functional as F\nimport torch.nn.init as init\n\n__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']\n\nfrom torch import nn\n\n'''\nProperly implemented ResNet-s for CIFAR10 as described in paper [1].\nThe implementation and structure of this file is hugely influenced by [2]\nwhich is implemented for ImageNet and doesn't have option A for identity.\nMoreover, most of the implementations on the web is copy-paste from\ntorchvision's resnet and has wrong number of params.\nProper ResNet-s for CIFAR10 (for fair comparision and etc.) has following\nnumber of layers and parameters:\nname | layers | params\nResNet20 | 20 | 0.27M\nResNet32 | 32 | 0.46M\nResNet44 | 44 | 0.66M\nResNet56 | 56 | 0.85M\nResNet110 | 110 | 1.7M\nResNet1202| 1202 | 19.4m\nwhich this implementation indeed has.\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\nIf you use this implementation in you work, please don't forget to mention the\nauthor, Yerlan Idelbayev.\n'''\n\n\ndef _weights_init(m):\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight)\n\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, option='A', hidden_planes=None):\n super(BasicBlock, self).__init__()\n if hidden_planes is None:\n hidden_planes = planes\n self.conv1 = nn.Conv2d(in_planes, hidden_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(hidden_planes)\n self.conv2 = nn.Conv2d(hidden_planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.stride = stride\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n if option == 'A':\n \"\"\"\n For CIFAR10 ResNet paper uses option A.\n \"\"\"\n self.shortcut = LambdaLayer(lambda x:\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), \"constant\",\n 0))\n elif option == 'B':\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass NoBNBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, option='A', hidden_planes=None):\n super(NoBNBlock, self).__init__()\n if hidden_planes is None:\n hidden_planes = planes\n self.conv1 = nn.Conv2d(in_planes, hidden_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n # self.bn1 = nn.BatchNorm2d(hidden_planes)\n self.conv2 = nn.Conv2d(hidden_planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n # self.bn2 = nn.BatchNorm2d(planes)\n\n self.stride = stride\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n if option == 'A':\n \"\"\"\n For CIFAR10 ResNet paper uses option A.\n \"\"\"\n self.shortcut = LambdaLayer(lambda x:\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), \"constant\",\n 0))\n elif option == 'B':\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, 
kernel_size=1, stride=stride, bias=False),\n # nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.conv1(x))\n out = self.conv2(out)\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks):\n super(ResNet, self).__init__()\n self.in_planes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n\n self.apply(_weights_init)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, out.size()[3])\n return out\n\n\ndef resnet20(use_bn=True):\n if use_bn:\n block = NoBNBlock\n else:\n block = BasicBlock\n return ResNet(block, [3, 3, 3])\n\n\ndef resnet32():\n return ResNet(BasicBlock, [5, 5, 5])\n\n\ndef resnet44():\n return ResNet(BasicBlock, [7, 7, 7])\n\n\ndef resnet56():\n return ResNet(BasicBlock, [9, 9, 9])\n\n\ndef resnet110():\n return ResNet(BasicBlock, [18, 18, 18])\n\n\ndef resnet1202():\n return ResNet(BasicBlock, [200, 200, 200])\n"
] | [
[
"numpy.asarray"
],
[
"scipy.ndimage.interpolation.rotate",
"torch.tensor"
],
[
"numpy.asarray",
"torch.nn.functional.conv2d",
"torch.no_grad",
"torch.nn.functional.linear"
],
[
"numpy.concatenate",
"numpy.transpose"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d",
"torch.nn.functional.pad",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
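Editorial note (not part of the dataset row above): NCScenario in nc_scenarios.py distributes the dataset's class labels evenly across tasks and raises when the label count does not divide cleanly by tasks_n. The standalone sketch below mirrors that even split in a few lines; the function name and optional shuffle seed are local to this illustration.

import numpy as np

def split_labels_across_tasks(labels, tasks_n, shuffle=False, seed=0):
    labels = np.asarray(list(labels))
    if len(labels) % tasks_n != 0:
        raise ValueError("labels cannot be distributed equally between the tasks")
    if shuffle:
        np.random.RandomState(seed).shuffle(labels)
    per_task = len(labels) // tasks_n
    # Fill task 0 first, then task 1, and so on, matching the scenario's greedy assignment.
    return {t: labels[t * per_task:(t + 1) * per_task].tolist() for t in range(tasks_n)}

print(split_labels_across_tasks(range(10), tasks_n=5))
# {0: [0, 1], 1: [2, 3], 2: [4, 5], 3: [6, 7], 4: [8, 9]}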
Duplums/SMLvsDL | [
"b285717bd8d8e832b4bc9e2b42d18bd96b628def"
] | [
"dl_training/main.py"
] | [
"import argparse\nfrom dl_training.training import BaseTrainer\nfrom dl_training.testing import OpenBHBTester\nimport torch\nimport logging\n\nif __name__==\"__main__\":\n\n logger = logging.getLogger(\"SMLvsDL\")\n\n parser = argparse.ArgumentParser()\n\n # Data location + saving paths\n parser.add_argument(\"--root\", type=str, required=True, help=\"Path to data root directory\")\n parser.add_argument(\"--preproc\", type=str, default='vbm', choices=['vbm', 'quasi_raw'])\n parser.add_argument(\"--checkpoint_dir\", type=str)\n parser.add_argument(\"--exp_name\", type=str, required=True)\n parser.add_argument(\"--outfile_name\", type=str, help=\"The output file name used to save the results in testing mode.\")\n\n parser.add_argument(\"--N_train_max\", type=int, default=None, help=\"Maximum number of training samples \"\n \"to be used per fold\")\n parser.add_argument(\"--pb\", type=str, choices=[\"age\", \"sex\", \"scz\", \"bipolar\", \"asd\", \"self_supervised\"])\n parser.add_argument(\"--folds\", nargs='+', type=int, help=\"Fold indexes to run during the training\")\n parser.add_argument(\"--nb_folds\", type=int, default=5)\n\n # Important: what model do we use\n parser.add_argument(\"--net\", type=str, help=\"Network to use\")\n\n # Depends on available CPU/GPU memory\n parser.add_argument(\"-b\", \"--batch_size\", type=int, required=True)\n\n\n parser.add_argument(\"--nb_epochs_per_saving\", type=int, default=5)\n parser.add_argument(\"--manual_seed\", type=int, help=\"The manual seed to give to pytorch.\")\n\n # Optimizer hyper-parameters\n parser.add_argument(\"--lr\", type=float, required=True, help=\"Initial learning rate\")\n parser.add_argument(\"--gamma_scheduler\", type=float, required=True)\n parser.add_argument(\"--nb_epochs\", type=int, default=300)\n parser.add_argument(\"--step_size_scheduler\", type=int, default=10)\n\n # Dataloader: set them\n parser.add_argument(\"--num_cpu_workers\", type=int, default=3, help=\"Number of workers assigned to do the \"\n \"preprocessing step (used by DataLoader of Pytorch)\")\n parser.add_argument(\"--sampler\", choices=[\"random\", \"weighted_random\", \"sequential\"], required=True)\n\n parser.add_argument(\"--residualize\", type=str, choices=[\"linear\", \"combat\"])\n\n # Self-sypervised learning\n parser.add_argument(\"--sigma\", type=float, help=\"Hyper-parameter for RBF kernel in self-supervised loss.\", default=5)\n\n # Transfer Learning\n parser.add_argument(\"--pretrained_path\", type=str)\n parser.add_argument(\"--load_optimizer\", action=\"store_true\", help=\"If <pretrained_path> is set, loads also the \"\n \"optimizer's weigth\")\n\n # This code can be executed on CPU or GPU\n parser.add_argument(\"--cuda\", type=bool, default=True, help=\"If True, executes the code on GPU\")\n\n # Kind of tests\n parser.add_argument(\"--train\", action=\"store_true\")\n parser.add_argument(\"--test\", action=\"store_true\")\n\n args = parser.parse_args()\n\n if not torch.cuda.is_available():\n args.cuda = False\n logger.warning(\"cuda is not available and has been disabled.\")\n\n if args.manual_seed:\n torch.manual_seed(args.manual_seed)\n\n if not args.train and not args.test:\n args.train = True\n logger.info(\"No mode specify: training mode is set automatically\")\n\n if args.train:\n trainer = BaseTrainer(args)\n trainer.run()\n # do not consider the pretrained path anymore since it will be eventually computed automatically\n args.pretrained_path = None\n\n if args.test:\n tester = OpenBHBTester(args)\n tester.run()\n\n\n\n\n\n"
] | [
[
"torch.manual_seed",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
inacioMattos/DeepLearning-Cachorros-e-Gatos | [
"a1eb42308f820809b7239cca6e81c4e880f5f540"
] | [
"src/backend/model.py"
] | [
"import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nfrom tensorflow.keras.callbacks import TensorBoard\nimport pickle, os, time\n\n\nDATADIR=\"data/\"\nNAME=\"cachorros-gatos-cnn-128-128-128-{}\".format(int(time.time()))\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(NAME))\n\n\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n\ndef getData():\n\tX = pickle.load(open(DATADIR + \"X.pickle\", \"rb\"))\n\ty = pickle.load(open(DATADIR + \"y.pickle\", \"rb\"))\n\n\treturn X, y\n\n\ndef normalizeData(X):\n\treturn X/255.0\t# já que numa imagem o valor máximo é 255 para cada pixels, é só dividir por 255.\n\n\ndef saveModel(model):\n\tmodel.save(\"128-128-128-CNN-noDense.model\")\n\n\ndef trainModel(model, training_set):\n\tX, y = training_set\n\n\tmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\tmodel.fit(X, y, batch_size=32, validation_split=0.1, epochs=7, callbacks=[tensorboard])\n\treturn model\n\n\ndef createModel(X):\n\tmodel = Sequential()\n\n\tmodel.add(Conv2D(128, (3,3), input_shape=X.shape[1:]))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Conv2D(128, (4,4)))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Conv2D(128, (3,3)))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Flatten())\n\n\tmodel.add(Dense(1))\n\tmodel.add(Activation(\"sigmoid\"))\n\n\treturn model\n\n\ndef main():\n\tX, y = getData()\n\tX = normalizeData(X)\n\tmodel = createModel(X)\n\tmodel = trainModel(model, (X, y))\n\t#saveModel(model)\n\n\n\nmain()"
] | [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
vivivibo/pipeline | [
"2a24660ca4b53b51bde3daedde80d8489bdeb37c"
] | [
"af/analysis/analysis/analysis.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"ooni-pipeline: * -> Analysis\n\nConfigured with /etc/analysis.conf\n\nRuns as a system daemon but can also be used from command line in devel mode\n\nCreates and updates unlogged tables.\nShows confirmed correlated by country, ASN, input URL over time.\n\nInputs: Database tables:\n countries\n\nOutputs:\n Files in /var/lib/analysis\n Dedicated unlogged database tables and charts\n tables:\n\n\n\"\"\"\n\n# Compatible with Python3.7 - linted with Black\n\n# TODO:\n# Enable unused code\n# Switch print() to logging\n# Overall datapoints count per country per day\n# Add ASN to confirmed_stats and use one table only if performance is\n# acceptable.\n# Move slicing and manipulation entirely in Pandas and drop complex SQL queries\n# Support feeder.py for continuous ingestion\n# Implement a crude precision metric based on msm_count and time window\n\nfrom argparse import ArgumentParser, Namespace\nfrom configparser import ConfigParser\nfrom contextlib import contextmanager\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom urllib.parse import urlencode\nimport os\nimport time\nimport logging\nimport sys\n\nfrom analysis import backup_to_s3\n\ntry:\n from systemd.journal import JournalHandler # debdeps: python3-systemd\n import sdnotify # debdeps: python3-sdnotify\n\n has_systemd = True\nexcept ImportError:\n # this will be the case on macOS for example\n has_systemd = False\n\nfrom bottle import template # debdeps: python3-bottle\nfrom sqlalchemy import create_engine # debdeps: python3-sqlalchemy-ext\n\n# TODO: move pandas / seaborn related stuff in a dedicated script\n#import pandas as pd # debdeps: python3-pandas python3-jinja2\n#import prometheus_client as prom # debdeps: python3-prometheus-client\nimport psycopg2 # debdeps: python3-psycopg2\nfrom psycopg2.extras import RealDictCursor\n\nimport matplotlib # debdeps: python3-matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n#import seaborn as sns # debdeps: python3-seaborn\n\nfrom analysis.metrics import setup_metrics # debdeps: python3-statsd\n\nfrom analysis.citizenlab_test_lists_updater import update_citizenlab_test_lists\n\nfrom analysis.counters_table_updater import (\n update_all_counters_tables,\n update_tables_daily,\n)\n\n\n# Global conf\nconf = Namespace()\n\n# Global db connectors\ndbengine = None\nconn = None\n\nlog = logging.getLogger(\"analysis\")\nmetrics = setup_metrics(name=\"analysis\")\n\n\ndef setup_database_connection(c):\n return psycopg2.connect(\n dbname=c[\"dbname\"],\n user=c[\"dbuser\"],\n host=c[\"dbhost\"],\n password=c[\"dbpassword\"],\n port=c.get(\"dbport\", 5432),\n )\n\n\n@contextmanager\ndef database_connection(c):\n conn = setup_database_connection(c)\n try:\n yield conn\n finally:\n conn.close()\n\n\ndef setup_database_connections(c):\n conn = setup_database_connection(c)\n dbengine = create_engine(\"postgresql+psycopg2://\", creator=lambda: conn)\n return conn, dbengine\n\n\ndef gen_table(name, df, cmap=\"RdYlGn\"):\n \"\"\"Render dataframe into an HTML table and save it to file.\n Create a timestamped file <name>.<ts>.html and a symlink to it.\n \"\"\"\n if cmap is None:\n tb = df.style\n else:\n tb = df.style.background_gradient(cmap=cmap)\n # df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d'])\n html = tb.render()\n ts = datetime.utcnow().strftime(\"%Y%m%d-%H%M\")\n outf = conf.output_directory / f\"{name}.{ts}.html\"\n log.info(f\"Rendering to {outf}\")\n with outf.open(\"w\") as 
f:\n f.write(html)\n\n symlink = conf.output_directory / f\"{name}.html\"\n try:\n symlink.unlink() # Atomic symlinking not supported\n except:\n pass\n symlink.symlink_to(f\"{name}.{ts}.html\") # (Absolute path not supported)\n\n\ndef save(name, plt):\n fn = os.path.join(conf.output_directory, name + \".png\")\n log.info(f\"Rendering to {fn}\")\n plt.get_figure().savefig(fn)\n\n\ndef gen_plot(name, df, *a, **kw):\n plt = df.plot(*a, **kw)\n save(name, plt)\n\n\ndef heatmap(name, *a, **kw):\n fn = os.path.join(conf.output_directory, name + \".png\")\n log.info(f\"Rendering to {fn}\")\n h = sns.heatmap(*a, **kw)\n h.get_figure().savefig(fn)\n\n\ndef insert_into(tablename, q):\n assert tablename in (\"confirmed_stats\", \"confirmed_stats_asn\")\n # TODO: autoreconnect\n with metrics.timer(\"insert_into.\" + tablename):\n dbengine.execute(q)\n\n\ndef query(q):\n # TODO: add a label to generate metrics\n log.info(\" \".join(q.replace(\"\\n\", \" \").split())[:300], \"...\")\n # TODO: autoreconnect\n with metrics.timer(\"query.unnamed\"):\n r = pd.read_sql_query(q, conn)\n return r\n\n\[email protected](\"populate_countries\")\ndef populate_countries():\n ## Used only once to create a persistent list of countries\n dbengine.execute(\n \"\"\"\n CREATE UNLOGGED TABLE countries (\n probe_cc CHARACTER(2) NOT NULL,\n msm_count BIGINT NOT NULL\n );\n CREATE INDEX ON countries (msm_count);\n \"\"\"\n )\n insert_into(\n \"countries\",\n \"\"\"\n INSERT INTO countries\n SELECT\n probe_cc as country,\n COUNT(*) as msm_count\n FROM measurement\n JOIN report ON report.report_no = measurement.report_no\n WHERE measurement_start_time >= current_date - interval '5 day'\n AND measurement_start_time < current_date - interval '1 day'\n GROUP BY\n country\n \"\"\",\n )\n\n\[email protected](\"append_confirmed_stats\")\ndef append_confirmed_stats():\n ## Append confirmed_stats daily\n log.info(\"Updating confirmed_stats\")\n dbengine.execute(\n \"\"\"\n CREATE UNLOGGED TABLE IF NOT EXISTS confirmed_stats (\n day TIMESTAMP NOT NULL,\n probe_cc CHARACTER(2) NOT NULL,\n target TEXT,\n msm_count BIGINT NOT NULL,\n confirmed_count BIGINT NOT NULL,\n CONSTRAINT confirmed_stats_day_cc_target_u UNIQUE (day, probe_cc, target)\n ) ;\n CREATE INDEX ON confirmed_stats (day);\n \"\"\"\n )\n insert_into(\n \"confirmed_stats\",\n \"\"\"\n INSERT INTO confirmed_stats\n SELECT\n date_trunc('day', measurement_start_time) as day,\n probe_cc,\n concat(test_name, '::', input) as target,\n COUNT(*) as msm_count,\n COALESCE(SUM(CASE WHEN confirmed = TRUE THEN 1 ELSE 0 END), 0) as confirmed_count\n FROM measurement\n JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n JOIN autoclaved ON autoclaved.autoclaved_no = report.autoclaved_no\n WHERE measurement_start_time < current_date - interval '1 day'\n AND measurement_start_time >= current_date - interval '2 day'\n GROUP BY\n day,\n probe_cc,\n target\n ON CONFLICT DO NOTHING\n \"\"\",\n )\n\n\[email protected](\"append_confirmed_stats_asn\")\ndef append_confirmed_stats_asn():\n ## Append confirmed_stats_asn daily\n log.info(\"Updating confirmed_stats_asn\")\n dbengine.execute(\n \"\"\"\n CREATE UNLOGGED TABLE IF NOT EXISTS confirmed_stats_asn (\n day TIMESTAMP NOT NULL,\n probe_cc CHARACTER(2) NOT NULL,\n probe_asn INTEGER NOT NULL,\n target TEXT,\n msm_count BIGINT NOT NULL,\n confirmed_count BIGINT NOT NULL,\n CONSTRAINT confirmed_stats_asn_day_cc_asn_target_u UNIQUE (day, probe_cc, probe_asn, target)\n ) ;\n CREATE INDEX ON 
confirmed_stats (day);\n \"\"\"\n )\n insert_into(\n \"confirmed_stats_asn\",\n \"\"\"\n INSERT INTO confirmed_stats_asn\n SELECT\n date_trunc('day', measurement_start_time) as day,\n probe_cc,\n probe_asn,\n concat(test_name, '::', input) as target,\n COUNT(*) as msm_count,\n COALESCE(SUM(CASE WHEN confirmed = TRUE THEN 1 ELSE 0 END), 0) as confirmed_count\n FROM measurement\n JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n JOIN autoclaved ON autoclaved.autoclaved_no = report.autoclaved_no\n WHERE measurement_start_time < current_date - interval '1 day'\n AND measurement_start_time >= current_date - interval '2 day'\n GROUP BY\n day,\n probe_cc,\n probe_asn,\n target\n ON CONFLICT DO NOTHING\n \"\"\",\n )\n\n\ndef blocked_sites_per_country_per_week_heatmap():\n # Ratio of blocked sites per country per week\n q = query(\n \"\"\"\n SELECT\n date_trunc('week', test_day) as week,\n probe_cc as country,\n SUM(confirmed_count)::decimal / SUM(msm_count) as ratio\n FROM ooexpl_wc_confirmed\n WHERE test_day > current_date - interval '1 day' - interval '10 week'\n AND test_day < current_date - interval '1 day'\n GROUP BY\n probe_cc, week\n ;\n \"\"\"\n )\n x = q.pivot_table(index=\"week\", columns=\"country\", values=\"ratio\")\n plt.figure(figsize=(26, 6))\n heatmap(\"block_ratio\", x, cmap=\"Blues\")\n\n\ndef input_per_day_per_country_density_heatmap():\n # Measure input-per-day-per-country datapoint density\n q = query(\n \"\"\"\n SELECT\n date_trunc('week', test_day) as week,\n probe_cc as country,\n SUM(msm_count) as count\n FROM ooexpl_wc_confirmed\n WHERE test_day > current_date - interval '1 day' - interval '10 week'\n AND test_day < current_date - interval '1 day'\n GROUP BY\n week, country\n ;\n \"\"\"\n )\n p = q.pivot_table(index=\"week\", columns=\"country\", values=\"count\")\n heatmap(\"input_per_day_per_country_density\", p)\n\n\ndef msm_count_per_week_high_countries_gentable():\n pop = query(\n \"\"\"\n SELECT\n probe_cc as country,\n date_trunc('week', test_day) as week,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '6 week'\n AND test_day < current_date - interval '1 day'\n AND msm_count > 2000\n GROUP BY\n week,\n probe_cc\n ;\n \"\"\"\n )\n p2 = pop.pivot_table(index=\"country\", columns=\"week\", values=\"cnt\").fillna(0)\n gen_table(\"msm_count_per_week_high_countries\", p2)\n\n\ndef msm_count_per_week_high_countries_gentable2():\n # Number of datapoints per week in popular countries\n q = query(\n \"\"\"\n SELECT\n probe_cc as country,\n date_trunc('week', test_day) as week,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '6 week'\n AND test_day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT probe_cc\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '3 weeks'\n AND test_day < current_date - interval '1 day'\n GROUP BY probe_cc\n ORDER BY SUM(msm_count) DESC\n LIMIT 20\n )\n GROUP BY\n week,\n country\n ;\n \"\"\"\n )\n p = q.pivot_table(index=\"country\", columns=\"week\", values=\"cnt\").fillna(0)\n\n gen_table(\"msm_count_per_week_high_countries\", p)\n\n\ndef msm_count_per_month_high_countries():\n # Number of datapoints per day in popular countries\n q = query(\n \"\"\"\n SELECT\n probe_cc as country,\n test_day,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= 
current_date - interval \\'1 day\\' - interval \\'3 week\\'\n AND test_day < current_date - interval \\'1 day\\'\n AND probe_cc IN (\n SELECT probe_cc\n FROM countries\n WHERE msm_count > 2000\n ORDER BY msm_count DESC\n )\n GROUP BY\n test_day,\n country\n ;\n \"\"\"\n )\n p = q.pivot_table(index=\"country\", columns=\"test_day\", values=\"cnt\").fillna(0)\n\n gen_table(\"msm_count_per_month_high_countries\", p)\n\n\ndef msm_count_per_month_low_countries():\n # Number of datapoints over the last month in countries with few probes\n q = query(\n \"\"\"\n SELECT probe_cc as country,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval \\'1 day\\' - interval \\'1 months\\'\n AND test_day < current_date - interval \\'1 day\\'\n GROUP BY probe_cc\n ORDER BY cnt\n LIMIT 80\n \"\"\"\n )\n p = q.pivot_table(index=\"country\", values=\"cnt\").fillna(0)\n\n gen_table(\"msm_count_per_month_low_countries\", p)\n\n\ndef coverage_variance():\n ## Variance of number of datapoints over countries: high values mean unequal coverage\n q = query(\n \"\"\"\n SELECT\n probe_cc as country,\n test_day,\n msm_count as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '6 week'\n AND test_day < current_date - interval '1 day'\n ;\n \"\"\"\n )\n\n # pivot and fill NaN before calculating variance\n p = q.pivot_table(index=\"country\", columns=\"test_day\", values=\"cnt\")\n p = p.fillna(0)\n relvar = p.std() / p.mean()\n\n plt.figure(1)\n plt.subplot(311)\n plt.plot(p.sum())\n plt.subplot(312)\n plt.plot(relvar)\n plt.subplot(313)\n plt.plot(p.var())\n fig = plt.gcf()\n fig.savefig(\"output/msm_count_and_variance_over_countries.png\")\n\n ## Total number of datapoints and variance across countries per day\n\n\n# @metrics.timer(\"summarize_core_density\")\n# def summarize_core_density_UNUSED():\n# ## Core density\n# ## Measure coverage of citizenlab inputs on well-monitored countries\n# core = query(\n# \"\"\"\n# SELECT\n# date_trunc('day', measurement_start_time) as day,\n# probe_cc,\n# concat(test_name, '::', input) as target,\n# COUNT(*) as msm_count\n# FROM measurement\n# JOIN report ON report.report_no = measurement.report_no\n# JOIN input ON input.input_no = measurement.input_no\n# WHERE measurement_start_time >= current_date - interval '2 days'\n# AND measurement_start_time < current_date - interval '1 days'\n# AND (test_name, input) IN (\n# SELECT\n# test_name,\n# input\n# FROM interesting_inputs\n# )\n# AND probe_cc IN (\n# SELECT\n# probe_cc\n# FROM\n# countries\n# WHERE\n# msm_count > 2000\n# )\n# GROUP BY\n# probe_cc,\n# day,\n# target\n# \"\"\"\n# )\n#\n# day_slice = core.pivot_table(\n# index=\"probe_cc\", columns=\"target\", values=\"msm_count\", fill_value=0\n# )\n#\n# log.info(\"Countries: \", day_slice.shape[0], \"Targets:\", day_slice.shape[1])\n# metrics.gauge(\"countries_with_high_msm_count_1_day\", day_slice.shape[0])\n# metrics.gauge(\"targets_high_msm_countries_1_day\", day_slice.shape[1])\n#\n# area = day_slice.shape[0] * day_slice.shape[1]\n# log.info(\"Slice area:\", area)\n#\n# c1 = core[\"target\"].count() / area\n# log.info(\"Coverage-1: cells with at least one datapoint\", c1)\n# metrics.gauge(\"coverage_1_day_1dp\", c1)\n#\n# c5 = core[core[\"msm_count\"] > 5][\"target\"].count() / area\n# log.info(\"Coverage-5: cells with at least 5 datapoints\", c5)\n# metrics.gauge(\"coverage_1_day_5dp\", c1)\n\n\[email protected](\"plot_msmt_count_per_platform_over_time\")\ndef 
plot_msmt_count_per_platform_over_time(conn):\n log.info(\"COV: plot_msmt_count_per_platform_over_time\")\n sql = \"\"\"\n SELECT date_trunc('day', measurement_start_time) AS day, platform, COUNT(*) AS msm_count\n FROM fastpath\n WHERE measurement_start_time >= CURRENT_DATE - interval '60 days'\n AND measurement_start_time < CURRENT_DATE\n GROUP BY day, platform\n ORDER BY day, platform;\n \"\"\"\n q = pd.read_sql_query(sql, conn)\n p = q.pivot_table(index=\"day\", columns=\"platform\", values=\"msm_count\", fill_value=0)\n gen_plot(\"msmt_count_per_platform_over_time\", p)\n\n\[email protected](\"plot_coverage_per_platform\")\ndef plot_coverage_per_platform(conn):\n \"\"\"Measure how much each platform contributes to measurements\"\"\"\n log.info(\"COV: plot_coverage_per_platform\")\n # Consider only inputs that are listed on citizenlab\n sql = \"SELECT UPPER(cc), COUNT(*) from citizenlab GROUP BY cc\"\n with conn.cursor() as cur:\n cur.execute(sql)\n baseline = dict(cur.fetchall()) # CC -> count\n zz_cnt = baseline.pop(\"ZZ\")\n for cc in baseline:\n baseline[cc] += zz_cnt\n\n baseline[\"ZZ\"] = zz_cnt # put back the initial value\n\n # The inner query returns *one* line for each (platform, probe_cc, input)\n # that has 1 or more msmt. If an input is tested more than once in the time\n # period in a given CC we treat it as 1.\n sql = \"\"\"\n SELECT platform, probe_cc, count(*)\n FROM (\n SELECT platform, probe_cc\n FROM fastpath\n WHERE (\n (probe_cc, input) IN (SELECT UPPER(cc), url FROM citizenlab)\n OR\n input IN (SELECT url FROM citizenlab WHERE cc = 'ZZ')\n )\n AND measurement_start_time > NOW() - interval '1 days'\n AND measurement_start_time < NOW()\n AND test_name = 'web_connectivity'\n AND input IS NOT null\n GROUP BY probe_cc, input, platform\n ORDER BY probe_cc, platform) sq\n GROUP BY sq.platform, sq.probe_cc\n ORDER BY sq.probe_cc, sq.platform;\n \"\"\"\n with conn.cursor() as cur:\n cur.execute(sql)\n x = []\n for platform, probe_cc, count in cur:\n if probe_cc not in baseline:\n continue\n x.append((platform, probe_cc, count / baseline[probe_cc]))\n\n cov = pd.DataFrame(x, columns=[\"platform\", \"probe_cc\", \"ratio\"])\n cov = cov.pivot_table(\n index=\"probe_cc\", columns=\"platform\", values=\"ratio\", fill_value=0\n )\n pd.set_option(\"display.precision\", 1)\n gen_table(\"coverage_per_platform\", cov)\n\n\ndef coverage_generator(conf):\n \"\"\"Generate statistics on coverage\"\"\"\n log.info(\"COV: Started monitor_measurement_creation thread\")\n while True:\n try:\n conn, dbengine = setup_database_connections(conf.standby)\n except Exception as e:\n log.error(e, exc_info=True)\n time.sleep(30)\n continue\n\n try:\n plot_coverage_per_platform(conn)\n plot_msmt_count_per_platform_over_time(conn)\n log.info(\"COV: done. 
Sleeping\")\n\n except Exception as e:\n log.error(e, exc_info=True)\n\n finally:\n conn.close()\n\n time.sleep(3600 * 24)\n\n\ndef summarize_total_density_UNUSED():\n ## Total density\n ## Measure coverage of interesting_inputs on well-monitored countries\n core = query(\n \"\"\"\n SELECT\n day,\n probe_cc,\n target,\n msm_count\n FROM msm_count_core\n WHERE day >= current_date - interval \\'3 days\\'\n AND day < current_date - interval \\'2 days\\'\n ;\n \"\"\"\n )\n\n day_slice = core.pivot_table(\n index=\"probe_cc\", columns=\"target\", values=\"msm_count\", fill_value=0\n )\n log.info(\"Countries: \", day_slice.shape[0], \"Targets:\", day_slice.shape[1])\n metrics.gauge(\"\")\n metrics.gauge(\"\")\n area = day_slice.shape[0] * day_slice.shape[1]\n log.info(\"Slice area:\", area)\n metrics.gauge(\"\")\n c1 = core[\"target\"].count() / area\n log.info(\"Coverage-1: cells with at least one datapoint\", c1)\n metrics.gauge(\"\")\n c5 = core[core[\"msm_count\"] > 5][\"target\"].count() / area\n log.info(\"Coverage-5: cells with at least 5 datapoints\", c5)\n metrics.gauge(\"\")\n\n ## Another attempt at visualizing confirmed_states\n q = query(\n \"\"\"\n SELECT probe_cc, target, msm_count, confirmed_count FROM confirmed_stats\n WHERE day >= current_date - interval '8 day'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n ;\n \"\"\"\n )\n\n msm = q.pivot_table(\n index=\"target\", columns=\"probe_cc\", values=\"msm_count\", fill_value=0\n )\n # sort targets\n msm.sort_values(ascending=False, inplace=True, by=\"RU\")\n\n # sort countries\n msm.sort_values(\n ascending=False,\n inplace=True,\n by=\"web_connectivity::https://www.ndi.org/\",\n axis=1,\n )\n\n heatmap(\n \"core_density\",\n msm,\n cbar=False,\n annot=False,\n cmap=\"RdYlGn\",\n xticklabels=True,\n yticklabels=False,\n vmax=10.0,\n )\n\n\[email protected](\"measure_blocking_globally\")\ndef measure_blocking_globally():\n ## Extract per-country blacking over time\n q = query(\n \"\"\"\n SELECT\n date_trunc('week', day),\n probe_cc,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count,\n SUM(confirmed_count) / SUM(msm_count) as block_ratio\n FROM confirmed_stats\n WHERE day >= current_date - interval '1 day' - interval '6 week'\n AND day < current_date - interval '1 day'\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n day,\n probe_cc\n \"\"\"\n )\n obt = q[q[\"block_ratio\"] > 0.000001]\n oc = obt.pivot_table(\n index=\"date_trunc\", columns=\"probe_cc\", values=\"block_ratio\", fill_value=0\n )\n gen_plot(\"blocked_vs_nonblocked_by_country\", oc)\n\n\ndef create_currently_blocked_table_if_needed():\n q = \"\"\"\n CREATE UNLOGGED TABLE IF NOT EXISTS currently_blocked (\n analysis_date timestamp without time zone NOT NULL,\n probe_cc CHARACTER(2) NOT NULL,\n probe_asn integer,\n target TEXT NOT NULL,\n description TEXT NOT NULL\n ) ;\n \"\"\"\n dbengine.execute(q)\n\n\[email protected](\"detect_blocking_granularity_cc_target\")\ndef detect_blocking_granularity_cc_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n):\n q = query(\n \"\"\"\n SELECT\n probe_cc,\n target,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count,\n SUM(confirmed_count) / 
SUM(msm_count) as block_ratio\n FROM confirmed_stats\n WHERE day >= current_date - interval '1 day' - interval '{}'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n probe_cc,\n target\n \"\"\".format(\n interval\n )\n )\n r = q[\n (q[\"msm_count\"] > msm_count_threshold)\n & (q[\"block_ratio\"] > block_ratio_threshold)\n ]\n r[\"description\"] = \"by_cc_t\"\n return r\n\n\[email protected](\"detect_blocking_granularity_cc_asn_target\")\ndef detect_blocking_granularity_cc_asn_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n):\n q = query(\n \"\"\"\n SELECT\n probe_cc,\n probe_asn,\n target,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count_with_asn,\n SUM(confirmed_count) / SUM(msm_count) as block_ratio\n FROM confirmed_stats_asn\n WHERE day >= current_date - interval '1 day' - interval '{}'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n probe_cc,\n probe_asn,\n target\n \"\"\".format(\n interval\n )\n )\n r = q[\n (q[\"msm_count\"] > msm_count_threshold)\n & (q[\"block_ratio\"] > block_ratio_threshold)\n ]\n r[\"description\"] = \"by_cc_asn_t\"\n return r\n\n\[email protected](\"detect_blocking_granularity_cc\")\ndef detect_blocking_granularity_cc(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n):\n ## Overall, per-country blocking ratio\n ## Useful to detect if a country suddenly starts blocking many targets\n q = query(\n \"\"\"\n SELECT\n probe_cc,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count_with_asn,\n SUM(confirmed_count) / SUM(msm_count) as block_ratio\n FROM confirmed_stats_asn\n WHERE day >= current_date - interval '1 day' - interval '{}'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n probe_cc\n \"\"\".format(\n interval\n )\n )\n r = q[\n (q[\"msm_count\"] > msm_count_threshold)\n & (q[\"block_ratio\"] > block_ratio_threshold)\n ]\n r[\"description\"] = \"by_cc\"\n return r\n\n\[email protected](\"detect_blocking\")\ndef detect_blocking():\n ## Detect blocking by slicing the target/CC/ASN/time cubes.\n ## Slicing is done multiple times with decreasing granularity:\n ## - target + CC + ASN\n ## - target + CC\n ## - CC\n ## Also the slicing is done over different time ranges:\n ## Short time: detect blocking quickly in countries with high msm_count\n ## Long time: detect blocking in countries with low msm_count\n\n ## Extract country-target time cylinders with enough datapoints to do reliable detection\n ## The thresold is controlled by the time interval and the total msm_count\n ## This allows adaptive detection over different sampling frequencies using multiple time windows\n\n # TODO:\n # - avoid caching tables, do everything in Pandas\n # - implement optional continuous run to prevent recreating the Cube\n\n # config params\n msm_count_threshold = 8\n block_ratio_threshold = 0.3\n # 
TODO: use different thresholds for different granularities\n # TODO: add tunable filtering by country weight and interesting_inputs\n\n # Detect by CC, ASN and target\n cc_asn_t_1d = detect_blocking_granularity_cc_asn_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n )\n metrics.gauge(\"cc_asn_t_1d_count\", len(cc_asn_t_1d.index))\n\n # Detect by CC and target\n cc_t_1d = detect_blocking_granularity_cc_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n )\n metrics.gauge(\"cc_t_1d_count\", len(cc_t_1d.index))\n cc_t_2w = detect_blocking_granularity_cc_target(\n msm_count_threshold, block_ratio_threshold, interval=\"2 weeks\"\n )\n metrics.gauge(\"cc_t_2w_count\", len(cc_t_2w.index))\n\n # Detect by CC only. Very low granularity but allows spotting very large\n # blocking events in low-coverage countries\n cc_1d = detect_blocking_granularity_cc(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n )\n metrics.gauge(\"cc_1d_count\", len(cc_1d.index))\n\n # Create df of blocking events\n blocked = pd.concat((cc_asn_t_1d, cc_t_1d, cc_t_2w, cc_1d), sort=False)\n cols = [\"probe_cc\", \"probe_asn\", \"target\", \"description\"]\n blocked = blocked[cols]\n metrics.gauge(\"currently_blocked\", len(blocked.index))\n\n log.info(\"currently_blocked\", len(blocked.index))\n with metrics.timer(\"write_blocked_now\"):\n blocked.to_sql(\"blocked\", con=dbengine, if_exists=\"replace\")\n\n\ndef parse_args():\n ap = ArgumentParser(\"Analysis script \" + __doc__)\n ap.add_argument(\n \"--update-counters\", action=\"store_true\", help=\"Update counters table\"\n )\n ap.add_argument(\n \"--update-citizenlab\", action=\"store_true\", help=\"Update citizenlab test lists\"\n )\n ap.add_argument(\n \"--update-tables-daily\", action=\"store_true\", help=\"Run daily update\"\n )\n ap.add_argument(\n \"--dry-run\", action=\"store_true\", help=\"Dry run, supported only by some commands\"\n )\n ap.add_argument(\n \"--backup-db\", action=\"store_true\", help=\"Backup DB to S3\"\n )\n # ap.add_argument(\"--\", action=\"store_true\", help=\"\")\n ap.add_argument(\"--devel\", action=\"store_true\", help=\"Devel mode\")\n ap.add_argument(\"--stdout\", action=\"store_true\", help=\"Log to stdout\")\n return ap.parse_args()\n\n\ndef to_html(c):\n return f\"\"\"<html>\n <head>\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.css\">\n </head>\n <body>\n {c}\n </body>\n </html>\n \"\"\"\n\n\ndef to_table(colnames, rowdicts) -> str:\n tpl = \"\"\"\n <table>\n <tr>\n % for c in colnames:\n <th>{{c}}</th>\n % end\n </tr>\n % for d in rowdicts:\n <tr>\n % for c in colnames:\n <td>{{!d[c]}}</td>\n % end\n </tr>\n % end\n </table>\n \"\"\"\n return template(tpl, colnames=colnames, rowdicts=rowdicts)\n\n\ndef gen_prometheus_url(expr, range_input=\"12h\"):\n \"\"\"Generate URL to point to a metric in Prometheus\"\"\"\n baseurl = \"https://mon.ooni.nu/prometheus/graph?\"\n return baseurl + urlencode(\n {\"g0.range_input\": \"12h\", \"g0.expr\": expr, \"g0.tab\": \"0\"}\n )\n\n\ndef html_anchor(url, text):\n return f\"\"\"<a href=\"{url}\">{text}</a>\"\"\"\n\n\[email protected](\"generate_slow_query_summary\")\ndef generate_slow_query_summary(conf):\n \"\"\"Generate HTML pages with a summary of heavy queries\n for active and standby database a bit like the \"top\" command.\n Send metrics to node exporter / Prometheus\n Show links to related charts\n \"\"\"\n sql = \"\"\"\n SELECT\n calls,\n mean_time / 1000 AS mean_s,\n 
round(total_time / 1000) AS total_seconds,\n queryid,\n query\n FROM\n pg_stat_statements\n ORDER BY\n total_time DESC\n LIMIT 16;\n \"\"\"\n prom_reg = prom.CollectorRegistry()\n total_query_time_g = prom.Gauge(\n \"db_total_query_time\",\n \"DB cumulative query time\",\n labelnames=[\"db_role\", \"queryid\"],\n registry=prom_reg,\n )\n calls_cnt = prom.Gauge(\n \"db_total_query_count\",\n \"DB cumulative query count\",\n labelnames=[\"db_role\", \"queryid\"],\n registry=prom_reg,\n )\n # Monitoring URL creation\n expr_tpl = \"\"\"delta(%s{db_role=\"%s\",queryid=\"%s\"}[1h])\"\"\"\n\n for role in (\"active\", \"standby\"):\n log.info(\"Main: Connecting\")\n conn, dbengine = setup_database_connections(getattr(conf, role))\n log.debug(\"Main: Connected, running query\")\n rows = dbengine.execute(sql)\n rows = [dict(r) for r in rows]\n for r in rows:\n queryid = r[\"queryid\"]\n total_query_time_g.labels(role, queryid).set(r[\"total_seconds\"])\n calls_cnt.labels(role, queryid).set(r[\"calls\"])\n expr = expr_tpl % (\"db_total_query_time\", role, queryid)\n url = gen_prometheus_url(expr)\n r[\"total_seconds\"] = html_anchor(url, r[\"total_seconds\"])\n\n expr = expr_tpl % (\"db_total_query_count\", role, queryid)\n url = gen_prometheus_url(expr)\n r[\"calls\"] = html_anchor(url, r[\"calls\"])\n r[\"mean_s\"] = \"%.3f\" % r[\"mean_s\"]\n\n colnames = [\"queryid\", \"calls\", \"mean_s\", \"total_seconds\", \"query\"]\n tbl = to_table(colnames, rows)\n html = to_html(tbl)\n\n fi = conf.output_directory / f\"db_slow_queries_{role}.html\"\n log.info(\"Main: Writing %s\", fi)\n fi.write_text(html)\n conn.close()\n\n log.info(\"Main: Writing metrics to node exporter\")\n prom.write_to_textfile(node_exporter_path, prom_reg)\n\n\ndef _generate_stat_activity_gauge(stat_activity_gauge, conn, db_role: str) -> None:\n \"\"\"Gather pg_stat_activity counts\"\"\"\n stat_activity_sql = \"\"\"SELECT state, usename, count(*)\n FROM pg_stat_activity GROUP BY state, usename\"\"\"\n with conn.cursor() as cur:\n # columns: state, usename, count\n cur.execute(stat_activity_sql)\n for r in cur:\n m = stat_activity_gauge.labels(db_role=db_role, state=r[0], usename=r[1])\n m.set(r[2])\n\n\[email protected](\"monitor_measurement_creation\")\ndef monitor_measurement_creation(conf):\n \"\"\"Monitors measurements created by fastpath and traditional pipeline\n to detect and alert on inconsistency.\n Queries the fastpath and measurements DB tables and compare their rows\n across different time ranges and generates metrics for Prometheus.\n\n Runs in a dedicated thread and writes in its own .prom file\n\n This is the most important function, therefore it pings the SystemD watchdog\n \"\"\"\n log.info(\"MMC: Started monitor_measurement_creation thread\")\n # TODO: switch to OOID\n\n INTERVAL = 60 * 5\n if has_systemd:\n watchdog = sdnotify.SystemdNotifier()\n\n prom_reg = prom.CollectorRegistry()\n gauge_family = prom.Gauge(\n \"measurements_flow\",\n \"Measurements being created\",\n labelnames=[\"type\"],\n registry=prom_reg,\n )\n replication_deltas_gauge = prom.Gauge(\n \"replication_deltas\",\n \"Deltas between xlog values\",\n labelnames=[\"type\"],\n registry=prom_reg,\n )\n stat_activity_gauge = prom.Gauge(\n \"stat_activity_count\",\n \"Active queries counts\",\n labelnames=[\"db_role\", \"state\", \"usename\"],\n registry=prom_reg,\n )\n\n queries = dict(\n fastpath_count=\"\"\"SELECT COUNT(*)\n FROM fastpath\n WHERE measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n 
pipeline_count=\"\"\"SELECT COUNT(*)\n FROM measurement\n WHERE measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n pipeline_not_fastpath_count=\"\"\"SELECT COUNT(*)\n FROM measurement\n LEFT OUTER JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n WHERE NOT EXISTS (\n SELECT\n FROM fastpath fp\n WHERE measurement_start_time > %(since_ext)s\n AND measurement_start_time <= %(until_ext)s\n AND fp.report_id = report.report_id\n AND fp.test_name = report.test_name\n AND COALESCE(fp.input, '') = COALESCE(input.input, '')\n )\n AND measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n fastpath_not_pipeline_count=\"\"\"SELECT COUNT(*)\n FROM fastpath fp\n WHERE NOT EXISTS (\n SELECT\n FROM measurement\n LEFT OUTER JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n WHERE measurement_start_time > %(since_ext)s\n AND measurement_start_time <= %(until_ext)s\n AND fp.report_id = report.report_id\n AND fp.test_name = report.test_name\n AND COALESCE(fp.input, '') = COALESCE(input.input, '')\n )\n AND measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n )\n sql_replication_delay = \"SELECT now() - pg_last_xact_replay_timestamp()\"\n\n # test connection and notify systemd\n conn, _ = setup_database_connections(conf.standby)\n with conn.cursor() as cur:\n cur.execute(\"SELECT 1\")\n conn.close()\n if has_systemd:\n watchdog.notify(\"READY=1\")\n\n cycle_seconds = 0\n\n while True:\n if has_systemd:\n watchdog.notify(\"WATCHDOG=1\")\n watchdog.notify(\"STATUS=Running\")\n\n try:\n # Clear gauges\n stat_activity_gauge._metrics.clear()\n\n log.info(\"MMC: Gathering fastpath count\")\n conn, dbengine = setup_database_connections(conf.standby)\n delta = timedelta(minutes=5)\n now = datetime.utcnow()\n since = now - delta\n with conn.cursor() as cur:\n sql = queries[\"fastpath_count\"]\n cur.execute(sql, dict(since=since, until=now))\n new_fp_msmt_count = cur.fetchone()[0]\n\n gauge_family.labels(\"fastpath_new_5m\").set(new_fp_msmt_count)\n\n log.info(\"MMC: Gathering database replica status\")\n with conn.cursor() as cur:\n cur.execute(sql_replication_delay)\n delay = cur.fetchone()[0].total_seconds()\n\n log.info(\"MMC: Summarizing pg_stat_activity on standby\")\n _generate_stat_activity_gauge(stat_activity_gauge, conn, \"standby\")\n\n log.info(\"MMC: Comparing active and standby xlog location\")\n with database_connection(conf.active) as active_conn:\n # This whole block runs against the active DB\n # Replication deltas\n log.info(\"MMC: Generating replication_deltas\")\n with active_conn.cursor(cursor_factory=RealDictCursor) as cur:\n # Thanks to\n # https://blog.dataegret.com/2017/04/deep-dive-into-postgres-stats.html\n sql = \"\"\"SELECT\n (pg_xlog_location_diff(pg_current_xlog_location(),sent_location) / 1024)::bigint as pending,\n (pg_xlog_location_diff(sent_location,write_location) / 1024)::bigint as write,\n (pg_xlog_location_diff(write_location,flush_location) / 1024)::bigint as flush,\n (pg_xlog_location_diff(flush_location,replay_location) / 1024)::bigint as replay\n FROM pg_stat_replication\"\"\"\n cur.execute(sql)\n d = cur.fetchone()\n assert d\n for k, v in d.items():\n replication_deltas_gauge.labels(k).set(v)\n # End of replication deltas\n\n log.info(\"MMC: Summarizing pg_stat_activity on active\")\n _generate_stat_activity_gauge(\n stat_activity_gauge, 
active_conn, \"active\"\n )\n\n # Extract active_xlog_location to compare active VS standby\n with active_conn.cursor() as cur:\n cur.execute(\"SELECT pg_current_xlog_location()\")\n active_xlog_location = cur.fetchone()[0]\n\n with conn.cursor() as cur:\n cur.execute(\"SELECT pg_last_xlog_receive_location()\")\n standby_xlog_location = cur.fetchone()[0]\n\n gauge_family.labels(\"raw_replication_delay\").set(delay)\n\n if active_xlog_location == standby_xlog_location:\n gauge_family.labels(\"replication_delay\").set(0)\n else:\n gauge_family.labels(\"replication_delay\").set(delay)\n\n # prom.write_to_textfile(nodeexp_path, prom_reg)\n\n # The following queries are heavier\n if cycle_seconds == 0:\n log.info(\"MMC: Running extended DB metrics gathering\")\n today = datetime.utcnow().date()\n with conn.cursor() as cur:\n # Compare different days in the past: pipeline and fastpath\n # might be catching up on older data and we want to monitor\n # that.\n for age_in_days in range(3):\n d1 = timedelta(days=1)\n end = today - timedelta(days=age_in_days) + d1\n times = dict(\n until_ext=end + d1 + d1 + d1,\n until=end,\n since=end - d1,\n since_ext=end - d1 - d1 - d1 - d1,\n )\n for query_name, sql in queries.items():\n cur.execute(sql, times)\n val = cur.fetchone()[0]\n log.info(\n \"MMC: %s %s %s %d\",\n times[\"since\"],\n times[\"until\"],\n query_name,\n val,\n )\n gauge_family.labels(\n f\"{query_name}_{age_in_days}_days_ago\"\n ).set(val)\n\n # prom.write_to_textfile(nodeexp_path, prom_reg)\n\n cycle_seconds = (cycle_seconds + INTERVAL) % 3600\n\n except Exception as e:\n log.error(e, exc_info=True)\n\n finally:\n conn.close()\n log.debug(\"MMC: Done\")\n if has_systemd:\n watchdog.notify(\"STATUS=MMC Sleeping\")\n\n endtime = time.time() + INTERVAL\n while time.time() < endtime:\n if has_systemd:\n watchdog.notify(\"WATCHDOG=1\")\n time.sleep(10)\n\n\ndef domain_input_update_runner():\n \"\"\"Runs domain_input_updater\"\"\"\n conf = Namespace(dry_run=False, db_uri=None)\n with metrics.timer(\"domain_input_updater_runtime\"):\n log.info(\"domain_input_updater: starting\")\n try:\n domain_input_updater.run(conf)\n metrics.gauge(\"domain_input_updater_success\", 1)\n log.info(\"domain_input_updater: success\")\n except Exception as e:\n metrics.gauge(\"domain_input_updater_success\", 0)\n log.error(\"domain_input_updater: failure %r\", e)\n\n\ndef main():\n global conf\n log.info(\"Analysis starting\")\n cp = ConfigParser()\n with open(\"/etc/ooni/analysis.conf\") as f:\n cp.read_file(f)\n\n conf = parse_args()\n if conf.devel or conf.stdout or not has_systemd:\n format = \"%(relativeCreated)d %(process)d %(levelname)s %(name)s %(message)s\"\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=format)\n\n else:\n log.addHandler(JournalHandler(SYSLOG_IDENTIFIER=\"analysis\"))\n log.setLevel(logging.DEBUG)\n\n for role in (\"active\", \"standby\"):\n setattr(conf, role, dict(cp[role]))\n\n log.info(\"Logging started\")\n conf.output_directory = (\n Path(\"./var/lib/analysis\") if conf.devel else Path(\"/var/lib/analysis\")\n )\n os.makedirs(conf.output_directory, exist_ok=True)\n\n # monitor_measurement_creation(conf)\n\n if conf.backup_db:\n backup_to_s3.log = log\n backup_to_s3.run_backup(conf, cp)\n return\n\n try:\n if conf.update_counters:\n update_all_counters_tables(conf)\n\n if conf.update_citizenlab:\n update_citizenlab_test_lists(conf)\n\n if conf.update_tables_daily:\n update_tables_daily(conf)\n\n except Exception as e:\n log.error(str(e), exc_info=e)\n\n 
log.info(\"done\")\n # coverage_generator(conf)\n\n # generate_slow_query_summary(conf)\n\n # # Update confirmed_stats table. The update is idempotent. The table is used\n # # in the next steps.\n # if conf.no_update_confirmed_stats == False:\n # append_confirmed_stats()\n # append_confirmed_stats_asn()\n\n # measure_blocking_globally()\n # detect_blocking()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hadrianl/qlib | [
"fa8f1cba06ba511744a0625afdf2cc3ac05302d0"
] | [
"setup.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nimport io\nimport os\nimport numpy\n\nfrom setuptools import find_packages, setup, Extension\n\n# Package meta-data.\nNAME = \"pyqlib\"\nDESCRIPTION = \"A Quantitative-research Platform\"\nREQUIRES_PYTHON = \">=3.5.0\"\n\nVERSION = \"0.6.3.99\"\n\n# Detect Cython\ntry:\n import Cython\n\n ver = Cython.__version__\n _CYTHON_INSTALLED = ver >= \"0.28\"\nexcept ImportError:\n _CYTHON_INSTALLED = False\n\nif not _CYTHON_INSTALLED:\n print(\"Required Cython version >= 0.28 is not detected!\")\n print('Please run \"pip install --upgrade cython\" first.')\n exit(-1)\n\n# What packages are required for this module to be executed?\n# `estimator` may depend on other packages. In order to reduce dependencies, it is not written here.\nREQUIRED = [\n \"numpy>=1.12.0\",\n \"pandas>=0.25.1\",\n \"scipy>=1.0.0\",\n \"requests>=2.18.0\",\n \"sacred>=0.7.4\",\n \"python-socketio==3.1.2\",\n \"redis>=3.0.1\",\n \"python-redis-lock>=3.3.1\",\n \"schedule>=0.6.0\",\n \"cvxpy==1.0.21\",\n \"hyperopt==0.1.1\",\n \"fire>=0.3.1\",\n \"statsmodels\",\n \"xlrd>=1.0.0\",\n \"plotly==4.12.0\",\n \"matplotlib==3.1.3\",\n \"tables>=3.6.1\",\n \"pyyaml>=5.3.1\",\n \"mlflow>=1.12.1\",\n \"tqdm\",\n \"loguru\",\n \"lightgbm\",\n \"tornado\",\n \"joblib>=0.17.0\",\n \"ruamel.yaml>=0.16.12\",\n]\n\n# Numpy include\nNUMPY_INCLUDE = numpy.get_include()\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n\n# Cython Extensions\nextensions = [\n Extension(\n \"qlib.data._libs.rolling\",\n [\"qlib/data/_libs/rolling.pyx\"],\n language=\"c++\",\n include_dirs=[NUMPY_INCLUDE],\n ),\n Extension(\n \"qlib.data._libs.expanding\",\n [\"qlib/data/_libs/expanding.pyx\"],\n language=\"c++\",\n include_dirs=[NUMPY_INCLUDE],\n ),\n]\n\n# Where the magic happens:\nsetup(\n name=NAME,\n version=VERSION,\n license=\"MIT Licence\",\n url=\"https://github.com/microsoft/qlib\",\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n packages=find_packages(exclude=(\"tests\",)),\n # if your package is a single module, use this instead of 'packages':\n # py_modules=['qlib'],\n entry_points={\n # 'console_scripts': ['mycli=mymodule:cli'],\n \"console_scripts\": [\n \"qrun=qlib.workflow.cli:run\",\n ],\n },\n ext_modules=extensions,\n install_requires=REQUIRED,\n include_package_data=True,\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n # 'License :: OSI Approved :: MIT License',\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PauloBernardo/InteligenciaComputacional | [
"f5edcc01c68b83fc4435e6669e3ebd0a32d7d8b7"
] | [
"linearRegression/main.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\n\ndef simple_example():\n X = [10, 20, 30]\n Y = [15, 19, 45]\n plt.scatter(X, Y,)\n plt.show()\n\n A = np.array([10, 1, 20, 1, 30, 1]).reshape(3, 2)\n B = np.array(Y).reshape(3, 1)\n\n a = np.linspace(10, 30)\n arr = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), Y)\n arr.tolist()\n beta, alpha = arr\n Yi = alpha + beta * a\n\n plt.scatter(X, Y)\n plt.plot(a, Yi)\n plt.show()\n\n\ndef linear_least_squares(examples):\n m, n = examples.shape\n cx = examples[0].reshape(n, 1)\n c2 = np.ones(len(cx)).reshape(n, 1)\n A = np.hstack((cx, c2))\n\n return np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), examples[1])\n\n\ndef plot_figure(x, y, alpha, beta, title, x_label, y_label):\n min_y = alpha + beta * min(x)\n max_y = alpha + beta * max(x)\n\n plt.plot([min(x), max(x)], [min_y, max_y])\n plt.scatter(x, y, color='orange')\n plt.xlabel(x_label)\n plt.title(title)\n plt.grid(True)\n plt.ylabel(y_label)\n plt.show()\n\n\ndef plot_linear_regression(examples, title='Linear Least Squares Regression Example', x_label='X', y_label='Y'):\n min_x = min(examples[0])\n max_x = max(examples[0])\n theta = linear_least_squares(examples)\n theta.tolist()\n beta, alpha = theta\n\n min_y = alpha + beta * min_x\n max_y = alpha + beta * max_x\n\n plt.plot([min(examples[0]), max(examples[0])], [min_y, max_y])\n plt.scatter(examples[0], examples[1], color='orange')\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.title(title)\n plt.grid(True)\n plt.show()\n\n\ndef simple_linear_least_squares_example():\n plot_linear_regression(np.array([[1.5, 1.6, 1.3, 1.4, 1.5, 1.7, 1.8, 1.7, 1.1, 1.2], [10, 12, 16, 13, 15, 11, 8, 10, 18, 13]]), x_label='Prices', y_label='Sales')\n\n\ndef statistic_linear_regression(x, y):\n number_of_elements = len(x)\n if number_of_elements != len(y):\n raise Exception(\"Size of x and y must be equal!\")\n mean_x, mean_y = sum(x)/number_of_elements, sum(y)/number_of_elements\n sum_x_vezes_y = sum([i * j for i, j in zip(x, y)])\n sum_x_pow_2 = sum([i ** 2 for i in x])\n sxy = sum_x_vezes_y - number_of_elements * mean_x * mean_y\n sxx = sum_x_pow_2 - number_of_elements * mean_x * mean_x\n beta = sxy / sxx\n alpha = mean_y - beta * mean_x\n return alpha, beta\n\n\ndef plot_statistic_linear_regression(x, y, title='Statistic Linear Regression Example', x_label='X', y_label='Y'):\n alpha, beta = statistic_linear_regression(x, y)\n plot_figure(x, y, alpha, beta, title, x_label, y_label)\n\n\ndef simple_statistic_linear_regression_example():\n plot_statistic_linear_regression([1.5, 1.6, 1.3, 1.4, 1.5, 1.7, 1.8, 1.7, 1.1, 1.2], [10, 12, 16, 13, 15, 11, 8, 10, 18, 13], x_label='Prices', y_label='Sales')\n\n\ndef sklearn_linear_regression(x, y):\n reg = LinearRegression().fit(x.reshape(-1, 1), y.reshape(-1, 1))\n return reg.intercept_[0], reg.coef_[0][0]\n\n\ndef plot_sklearn_linear_regression(x, y, title='Sklearn Linear Regression Example', x_label='X', y_label='Y'):\n alpha, beta = sklearn_linear_regression(x, y)\n plot_figure(x, y, alpha, beta, title, x_label, y_label)\n\n\ndef simple_sklearn_linear_regression_example():\n prices = np.array([1.5, 1.6, 1.3, 1.4, 1.5, 1.7, 1.8, 1.7, 1.1, 1.2])\n sales = np.array([10, 12, 16, 13, 15, 11, 8, 10, 18, 13])\n plot_sklearn_linear_regression(prices, sales, x_label='Prices', y_label='Sales')\n\n\nif __name__ == '__main__':\n simple_linear_least_squares_example()\n simple_statistic_linear_regression_example()\n 
simple_sklearn_linear_regression_example()\n"
] | [
[
"numpy.hstack",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.matmul",
"matplotlib.pyplot.plot",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xkf15/faas-profiler | [
"77681daa9f5776e58d1e7dc2d38b61735d54b014"
] | [
"workload_analyzer/PerfMonAnalyzer.py"
] | [
"# Copyright (c) 2019 Princeton University\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom datetime import datetime, timedelta\nimport json\nimport os.path\nimport pandas as pd\nimport sys\n\nsys.path = ['./', '../'] + sys.path\n\n# Local\nfrom GenConfigs import *\nfrom Logger import ScriptLogger\n\nlogger = ScriptLogger(loggername='workload_analyzer/perf_mon_analyzer',\n logfile='WA.log')\n\n\ndef ReadPQOSMSRMon(pqos_msr_mon_file):\n \"\"\"\n This function parses the output of the pqos-msr-mon.\n \"\"\"\n with open(pqos_msr_mon_file) as f:\n lines = f.readlines()\n\n records = {'timestamp': [], 'Core': [], 'IPC': [],\n 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}\n tmp_records = {'timestamp': [], 'Core': [], 'IPC': [],\n 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}\n prev_timestamp, index = None, -1\n\n for line_index in range(len(lines)):\n line = lines[line_index]\n if 'TIME' in line:\n index += 1\n timestamp = datetime.strptime(line[5:-1], '%Y-%m-%d %H:%M:%S')\n if (timestamp != prev_timestamp):\n for key, value in tmp_records.items():\n if key == 'timestamp':\n for i in value:\n records[key] += [prev_timestamp +\n timedelta(seconds=1.0*i/index)]\n else:\n records[key] += value\n tmp_records = {'timestamp': [], 'Core': [], 'IPC': [\n ], 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}\n index = 0\n\n prev_timestamp = timestamp\n elif 'CORE' in line:\n pass\n else:\n tmp_records['timestamp'].append(index)\n separated = line.split(' ')\n separated = [v for v in separated if v != '']\n tmp_records['Core'].append(int(separated[0]))\n tmp_records['IPC'].append(float(separated[1]))\n tmp_records['LLC Misses'].append(int(separated[2][:-1])*1000)\n tmp_records['LLC Util (KB)'].append(float(separated[3]))\n tmp_records['MBL (MB/s)'].append(float(separated[4]))\n\n for key, value in tmp_records.items():\n if key == 'timestamp':\n for i in value:\n records[key] += [prev_timestamp +\n timedelta(seconds=1.0*i/index)]\n else:\n records[key] += value\n\n # return the records as Pandas dataframe\n records_df = pd.DataFrame(records)\n return records_df\n\n\ndef ReadPerfMon(perf_mon_file):\n \"\"\"\n This function parses the output of the Linux Perf tool.\n \"\"\"\n with open(perf_mon_file) as f:\n lines = f.readlines()\n\n records = {'timestamp': []} # more fields are added dynamically\n\n for line in lines:\n separated = line.split(' ')\n separated = [v for v in separated if v != '']\n\n try:\n if 'counted' in separated[2]:\n del separated[2]\n except:\n pass\n\n if (len(separated) < 3) or (len(separated) > 4):\n continue\n time = float(separated[0])\n field = separated[2]\n try:\n val = int(separated[1].replace(',', ''))\n except:\n val = None\n try:\n records[field].append(val)\n except:\n records[field] = [val] # first element of the list\n try:\n if records['timestamp'][-1] != time:\n records['timestamp'].append(time)\n except:\n records['timestamp'].append(time) # first append\n\n # return the records as Pandas dataframe\n return pd.DataFrame(records)\n\n\ndef AnalyzePerfMonRecords(config_file):\n \"\"\"\n This function is used to analyze the performance monitoring data after conducting the test.\n \"\"\"\n logger.info(\"Started to analyze the performance monitoring records.\")\n\n try:\n with open(FAAS_ROOT + '/' + config_file) as f:\n workload = json.load(f)\n except:\n return False\n\n records = {}\n\n # Perf Tool\n perf_mon_file = FAAS_ROOT + '/perf-mon.out'\n 
pqos_msr_mon_file = FAAS_ROOT + '/pqos-msr-mon.out'\n\n if not os.path.isfile(perf_mon_file):\n logger.error(\"The perf output file missing!\")\n else:\n records['perf_records'] = ReadPerfMon(perf_mon_file)\n\n # PQOS Mon\n if not os.path.isfile(pqos_msr_mon_file):\n logger.error(\"The PQOS output file is missing!\")\n else:\n records['pqos_records'] = ReadPQOSMSRMon(pqos_msr_mon_file)\n\n return records\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
eonu/inf4-hons | [
"4b7372272860f19c0f5ea2910f122a62531d7d2e",
"4b7372272860f19c0f5ea2910f122a62531d7d2e"
] | [
"sequentia/lib/sequentia/internals/validator.py",
"sequentia/lib/sequentia/classifiers/hmm/topologies/left_right.py"
] | [
"import numpy as np\nfrom copy import copy\n\nclass _Validator:\n \"\"\"Performs internal validations on various input types.\"\"\"\n\n def observation_sequences(self, X, allow_single=False):\n \"\"\"Validates observation sequence(s).\n\n Parameters\n ----------\n X: numpy.ndarray or List[numpy.ndarray]\n An individual observation sequence or a list of multiple observation sequences.\n\n allow_single: bool\n Whether to allow an individual observation sequence.\n\n Returns\n -------\n X: numpy.ndarray or List[numpy.ndarray]\n The original input observation sequence(s) if valid.\n \"\"\"\n X = copy(X)\n if isinstance(X, (list, np.ndarray) if allow_single else list):\n if isinstance(X, list):\n for i, x in enumerate(X):\n if not isinstance(x, np.ndarray):\n raise TypeError('Each observation sequence must be a numpy.ndarray')\n if not x.ndim <= 2:\n raise ValueError('Each observation sequence must be at most two-dimensional')\n x = X[i] = (x if x.ndim == 2 else np.atleast_2d(x).T).astype(float)\n if not x.shape[1] == X[0].shape[1]:\n raise ValueError('Each observation sequence must have the same dimensionality')\n elif isinstance(X, np.ndarray):\n if not X.ndim <= 2:\n raise ValueError('Observation sequence must be at most two-dimensional')\n X = (X if X.ndim == 2 else np.atleast_2d(X).T).astype(float)\n else:\n if allow_single:\n raise TypeError('Expected an individual observation sequence or a list of multiple observation sequences, each of type numpy.ndarray')\n else:\n raise TypeError('Expected a list of observation sequences, each of type numpy.ndarray')\n return X\n\n def observation_sequences_and_labels(self, X, y):\n \"\"\"Validates observation sequences and corresponding labels.\n\n Parameters\n ----------\n X: List[numpy.ndarray]\n A list of multiple observation sequences.\n\n y: List[str]\n A list of labels for the observation sequences.\n\n Returns\n -------\n X: List[numpy.ndarray]\n The original input observation sequences if valid.\n\n y: List[str]\n The original input labels if valid.\n \"\"\"\n self.observation_sequences(X, allow_single=False)\n self.list_of_strings(y, desc='labels')\n if not len(X) == len(y):\n raise ValueError('Expected the same number of observation sequences and labels')\n return X, y\n\n def integer(self, item, desc):\n \"\"\"Validates an integer.\n\n Parameters\n ----------\n item: int\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: int\n The original input item if valid.\n \"\"\"\n if not isinstance(item, int):\n raise TypeError(\"Expected {} to be an integer\".format(desc))\n return item\n\n def string(self, item, desc):\n \"\"\"Validates a string.\n\n Parameters\n ----------\n item: str\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: str\n The original input item if valid.\n \"\"\"\n if not isinstance(item, str):\n raise TypeError(\"Expected {} to be a string\".format(desc))\n return item\n\n def boolean(self, item, desc):\n \"\"\"Validates a boolean.\n\n Parameters\n ----------\n item: bool\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: bool\n The original input item if valid.\n \"\"\"\n if not isinstance(item, bool):\n raise TypeError(\"Expected {} to be a boolean\".format(desc))\n return item\n\n def one_of(self, item, items, desc):\n \"\"\"Validates that an item is one of some permitted values.\n\n Parameters\n ----------\n item: Any\n The item to 
validate.\n\n items: List[Any]\n The list of permitted values to check against.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: Any\n The original input item if valid.\n \"\"\"\n if not item in items:\n raise ValueError('Expected {} to be one of {}'.format(desc, items))\n return item\n\n def restricted_integer(self, item, condition, desc, expected):\n \"\"\"Validates an integer and checks that it satisfies some condition.\n\n Parameters\n ----------\n item: int\n The item to validate.\n\n condition: lambda\n A condition to check the item against.\n\n desc: str\n A description of the item being validated.\n\n expected: str\n A description of the condition, or expected value.\n\n Returns\n -------\n item: int\n The original input item if valid.\n \"\"\"\n if isinstance(item, int):\n if not condition(item):\n raise ValueError('Expected {} to be {}'.format(desc, expected))\n else:\n raise TypeError(\"Expected {} to be an integer\".format(desc))\n return item\n\n def restricted_float(self, item, condition, desc, expected):\n \"\"\"Validates a float and checks that it satisfies some condition.\n\n Parameters\n ----------\n item: float\n The item to validate.\n\n condition: lambda\n A condition to check the item against.\n\n desc: str\n A description of the item being validated.\n\n expected: str\n A description of the condition, or expected value.\n\n Returns\n -------\n item: float\n The original input item if valid.\n \"\"\"\n if isinstance(item, float):\n if not condition(item):\n raise ValueError('Expected {} to be {}'.format(desc, expected))\n else:\n raise TypeError(\"Expected {} to be a float\".format(desc))\n return item\n\n def list_of_strings(self, items, desc):\n \"\"\"Validates a list and checks that it consists entirely of strings.\n\n Parameters\n ----------\n items: List[str]\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n items: List[str]\n The original input items if valid.\n \"\"\"\n if isinstance(items, list):\n if not all(isinstance(item, str) for item in items):\n raise ValueError('Expected all {} to be strings'.format(desc))\n else:\n raise TypeError('Expected {} to be a list of strings'.format(desc))\n return items\n\n def random_state(self, state):\n \"\"\"Validates a random state object or seed.\n\n Parameters\n ----------\n state: None, int, numpy.random.RandomState\n A random state object or seed.\n\n Returns\n -------\n state: numpy.random.RandomState\n A random state object.\n \"\"\"\n if state is None:\n return np.random.RandomState(seed=0)\n elif isinstance(state, int):\n return np.random.RandomState(seed=state)\n elif isinstance(state, np.random.RandomState):\n return state\n else:\n raise TypeError('Expected random state to be of type: None, int, or numpy.random.RandomState')",
"import numpy as np\nfrom .topology import _Topology\n\nclass _LeftRightTopology(_Topology):\n \"\"\"Represents the topology for a left-right HMM, imposing an upper-triangular transition matrix.\n\n Parameters\n ----------\n n_states: int\n Number of states in the HMM.\n\n random_state: numpy.random.RandomState\n A random state object for reproducible randomness.\n \"\"\"\n\n def __init__(self, n_states: int, random_state: np.random.RandomState):\n super().__init__(n_states, random_state)\n\n def uniform_transitions(self) -> np.ndarray:\n \"\"\"Sets the transition matrix as uniform (equal probability of transitioning\n to all other possible states from each state) corresponding to the topology.\n\n Returns\n -------\n transitions: numpy.ndarray\n The uniform transition matrix of shape `(n_states, n_states)`.\n \"\"\"\n upper_ones = np.triu(np.ones((self._n_states, self._n_states)))\n upper_divisors = np.triu(np.tile(np.arange(self._n_states, 0, -1), (self._n_states, 1)).T)\n lower_ones = np.tril(np.ones(self._n_states), k=-1) # One-pad lower triangle to prevent zero division\n return upper_ones / (upper_divisors + lower_ones)\n\n def random_transitions(self) -> np.ndarray:\n \"\"\"Sets the transition matrix as random (random probability of transitioning\n to all other possible states from each state) by sampling probabilities\n from a Dirichlet distribution - according to the topology.\n\n Parameters\n ----------\n transitions: numpy.ndarray\n The random transition matrix of shape `(n_states, n_states)`.\n \"\"\"\n transitions = self._random_state.dirichlet(np.ones(self._n_states), size=self._n_states)\n lower_sums = np.sum(np.tril(transitions, k=-1), axis=1) # Amount to be redistributed per row\n quantities = np.arange(self._n_states, 0, -1) # Number of elements per row to redistribute evenly to\n upper_ones = np.triu(np.ones((self._n_states, self._n_states)))\n redist = (lower_sums / quantities).reshape(-1, 1) * upper_ones\n return np.triu(transitions) + redist\n\n def validate_transitions(self, transitions: np.ndarray) -> None:\n \"\"\"Validates a transition matrix according to the topology's restrictions.\n\n Parameters\n ----------\n transitions: numpy.ndarray\n The transition matrix to validate.\n \"\"\"\n super().validate_transitions(transitions)\n if not np.allclose(transitions, np.triu(transitions)):\n raise ValueError('Left-right transition matrix must be upper-triangular')"
] | [
[
"numpy.atleast_2d",
"numpy.random.RandomState"
],
[
"numpy.arange",
"numpy.triu",
"numpy.tril",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JianGoForIt/tensorflow | [
"12e78f07a30e5ec8d1a9baf7cd87f4f45d29b657"
] | [
"tensorflow/python/ops/array_ops.py"
] | [
"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"## Casting\n\nTensorFlow provides several operations that you can use to cast tensor data\ntypes in your graph.\n\n@@string_to_number\n@@to_double\n@@to_float\n@@to_bfloat16\n@@to_int32\n@@to_int64\n@@cast\n@@saturate_cast\n\n## Shapes and Shaping\n\nTensorFlow provides several operations that you can use to determine the shape\nof a tensor and change the shape of a tensor.\n\n@@shape\n@@size\n@@rank\n@@reshape\n@@squeeze\n@@expand_dims\n\n## Slicing and Joining\n\nTensorFlow provides several operations to slice or extract parts of a tensor,\nor join multiple tensors together.\n\n@@slice\n@@split\n@@tile\n@@pad\n@@concat\n@@pack\n@@unpack\n@@reverse_sequence\n@@reverse\n@@transpose\n@@space_to_depth\n@@depth_to_space\n@@gather\n@@dynamic_partition\n@@dynamic_stitch\n@@boolean_mask\n@@one_hot\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import common_shapes\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import logging_ops\n# 'Constant' gets imported in the module 'array_ops'.\nfrom tensorflow.python.ops.constant_op import constant\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_array_ops import *\n# pylint: enable=wildcard-import\n\n\n# We override the 'slice' for the \"slice\" op, so we keep python's\n# existing 'slice' for later use in this module.\n_baseslice = slice\n\n\n# Aliases for some automatically-generated names.\nlistdiff = gen_array_ops.list_diff\n\n\n# DEPRECATED use init_ops.zeros_initializer\n# TODO(irving) Move it to init_ops.py\ndef zeros_initializer(shape, dtype=dtypes.float32):\n \"\"\"An adaptor for zeros() to match the Initializer spec.\"\"\"\n return zeros(shape, dtype)\n\n\n# pylint: disable=undefined-variable,protected-access\ndef _SliceHelper(tensor, slice_spec):\n \"\"\"Overload for Tensor.__getitem__.\n\n Currently the size of the slice must be statically known in each dimension,\n i.e. 
the \"stop\" of the slice must not be omitted.\n\n TODO(mrry): Support slices where the sizes are not specified.\n TODO(mrry): Support negative indices in slices with numpy/Python semantics.\n\n Args:\n tensor: An ops.Tensor object.\n slice_spec: The arguments to Tensor.__getitem__.\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, or Ellipsis.\n \"\"\"\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n indices = []\n sizes = []\n squeeze_dims = []\n for dim, s in enumerate(slice_spec):\n if isinstance(s, int):\n if s < 0:\n raise NotImplementedError(\"Negative indices are currently unsupported\")\n indices.append(s)\n sizes.append(1)\n squeeze_dims.append(dim)\n elif isinstance(s, _baseslice):\n if s.step not in (None, 1):\n raise NotImplementedError(\n \"Steps other than 1 are not currently supported\")\n start = s.start if s.start is not None else 0\n if start < 0:\n raise NotImplementedError(\n \"Negative start indices are not currently supported\")\n indices.append(start)\n if s.stop is not None and s.stop < 0:\n raise NotImplementedError(\n \"Negative stop indices are not currently supported\")\n # NOTE(mrry): If the stop is not specified, Python substitutes\n # sys.maxsize, which is typically (2 ** 63) - 1. Since Slice currently\n # supports signed DT_INT32 arguments, we use -1 to specify that all\n # elements should be captured.\n if s.stop is None or s.stop == sys.maxsize:\n sizes.append(-1)\n else:\n if start > s.stop:\n raise ValueError(\"Stop must be at least start\")\n sizes.append(s.stop - start)\n elif s is Ellipsis:\n raise NotImplementedError(\"Ellipsis is not currently supported\")\n else:\n raise TypeError(\"Bad slice index %s of type %s\" % (s, type(s)))\n sliced = slice(tensor, indices, sizes)\n if squeeze_dims:\n return squeeze(sliced, squeeze_dims=squeeze_dims)\n else:\n return sliced\n\n\ndef slice(input_, begin, size, name=None):\n \"\"\"Extracts a slice from a tensor.\n\n This operation extracts a slice of size `size` from a tensor `input` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input`. In other\n words, `begin[i]` is the offset into the 'i'th dimension of `input` that you\n want to slice from.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. 
In other words, this is equivalent to setting:\n\n `size[i] = input.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```\n # 'input' is [[[1, 1, 1], [2, 2, 2]],\n # [[3, 3, 3], [4, 4, 4]],\n # [[5, 5, 5], [6, 6, 6]]]\n tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]\n tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],\n [4, 4, 4]]]\n tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],\n [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n return gen_array_ops._slice(input_, begin, size, name=name)\n\n\nops.Tensor._override_operator(\"__getitem__\", _SliceHelper)\n\n\ndef pack(values, name=\"pack\"):\n \"\"\"Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n Packs tensors in `values` into a tensor with rank one higher than each tensor\n in `values` and shape `[len(values)] + values[0].shape`. The output satisfies\n `output[i, ...] = values[i][...]`.\n\n This is the opposite of unpack. The numpy equivalent is\n\n tf.pack([x, y, z]) = np.asarray([x, y, z])\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A packed `Tensor` with the same type as `values`.\n \"\"\"\n return gen_array_ops._pack(values, name=name)\n\n\ndef unpack(value, num=None, name=\"unpack\"):\n \"\"\"Unpacks the outer dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks `num` tensors from `value` along the first dimension.\n If `num` is not specified (the default), it is inferred from `value`'s shape.\n If `value.shape[0]` is not known, `ValueError` is raised.\n\n The ith tensor in `output` is the slice `value[i, ...]`. Each tensor in\n `output` has shape `value.shape[1:]`.\n\n This is the opposite of pack. The numpy equivalent is\n\n tf.unpack(x, n) = list(x)\n\n Args:\n value: A rank `R > 0` `Tensor` to be unpacked.\n num: An `int`. The first dimension of value. Automatically inferred if\n `None` (the default).\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unpacked from `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n \"\"\"\n if num is None:\n value = ops.convert_to_tensor(value)\n shape = value.get_shape()\n num = shape[0].value\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % shape)\n return gen_array_ops._unpack(value, num=num, name=name)\n\n\ndef concat(concat_dim, values, name=\"concat\"):\n \"\"\"Concatenates tensors along one dimension.\n\n Concatenates the list of tensors `values` along dimension `concat_dim`. If\n `values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... 
Rconcat_dim, ...Dn]\n\n where\n\n Rconcat_dim = sum(Dconcat_dim(i))\n\n That is, the data from the input tensors is joined along the `concat_dim`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `concat_dim` must be equal.\n\n For example:\n\n ```python\n t1 = [[1, 2, 3], [4, 5, 6]]\n t2 = [[7, 8, 9], [10, 11, 12]]\n tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]\n\n # tensor t3 with shape [2, 3]\n # tensor t4 with shape [2, 3]\n tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]\n tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]\n ```\n\n Args:\n concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.\n values: A list of `Tensor` objects or a single `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n \"\"\"\n if not isinstance(values, (list, tuple)):\n values = [values]\n # TODO(mrry): Change to return values?\n if len(values) == 1: # Degenerate case of one tensor.\n return identity(values[0], name=name)\n return gen_array_ops._concat(concat_dim=concat_dim,\n values=values,\n name=name)\n\n\[email protected](\"Pack\")\ndef _PackShape(op):\n input_shape = op.inputs[0].get_shape()\n for inp in op.inputs[1:]:\n input_shape = input_shape.merge_with(inp.get_shape())\n return [tensor_shape.TensorShape([len(op.inputs)]).concatenate(input_shape)]\n\n\[email protected](\"Unpack\")\ndef _UnpackShape(op):\n input_shape = op.inputs[0].get_shape()\n return [input_shape[1:]] * op.get_attr(\"num\")\n\n\[email protected](\"Concat\")\ndef _ConcatShape(op):\n concat_dim = tensor_util.constant_value(op.inputs[0])\n if concat_dim is None:\n # Return an unknown shape with the same rank as the inputs, or an\n # unknown rank if no input's rank is known.\n rank = None\n for value in op.inputs[1:]:\n if rank is not None:\n value.get_shape().assert_has_rank(rank)\n else:\n rank = value.get_shape().ndims\n if rank == 0:\n raise ValueError(\"Can't concatenate scalars (use tf.pack instead)\")\n return [tensor_shape.unknown_shape(ndims=rank)]\n\n else:\n # Merge all the non-concat dims, and sum the concat dim to make an\n # output shape.\n concat_dim = int(concat_dim)\n output_shape = op.inputs[1].get_shape()\n for value in op.inputs[2:]:\n value_shape = value.get_shape()\n if value_shape.ndims is not None and concat_dim >= value_shape.ndims:\n raise ValueError(\"concat_dim is out of range (values rank = %d)\" %\n value_shape.ndims)\n before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])\n at = output_shape[concat_dim] + value_shape[concat_dim]\n after = output_shape[\n concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])\n output_shape = before.concatenate(at).concatenate(after)\n return [output_shape]\n\n\[email protected](\"ConcatOffset\")\ndef _ConcatOffsetShape(op):\n return [x.get_shape() for x in op.inputs[1:]]\n\n\ndef boolean_mask(tensor, mask, name=\"boolean_mask\"):\n \"\"\"Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.\n\n ```python\n # 1-D example\n tensor = [0, 1, 2, 3]\n mask = [True, False, True, False]\n boolean_mask(tensor, mask) ==> [0, 2]\n ```\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. 
We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n\n Args:\n tensor: N-D tensor. First K dimensions can be None, which allows e.g.\n undefined batch size. Trailing dimensions must be specified.\n mask: K-D boolean tensor, K <= N.\n name: A name for this operation (optional).\n\n Returns:\n Tensor populated by entries in `tensor` corresponding to `True` values in\n `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n a = [[1, 2], [3, 4], [5, 6]]\n mask = [True, False, True]\n boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]\n ```\n \"\"\"\n def _apply_mask_1d(reshaped_tensor, mask):\n \"\"\"Mask tensor along dimension 0 with a 1-D mask.\"\"\"\n indices = squeeze(where(mask), squeeze_dims=[1])\n return gather(reshaped_tensor, indices)\n\n with ops.op_scope([tensor, mask], name):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n mask = ops.convert_to_tensor(mask, name=\"mask\")\n\n shape_mask = mask.get_shape()\n ndims_mask = shape_mask.ndims\n shape_tensor = tensor.get_shape()\n if ndims_mask == 0:\n raise ValueError(\"mask cannot be scalar.\")\n if ndims_mask is None:\n raise ValueError(\n \"mask dimensions must be specified, even if some dimensions are None\"\n \". E.g. shape=[None] is ok, but shape=None is not.\")\n shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)\n\n tensor = reshape(tensor, [-1] + shape_tensor.as_list()[ndims_mask:])\n mask = reshape(mask, [-1])\n return _apply_mask_1d(tensor, mask)\n\n\ndef sparse_mask(a, mask_indices, name=None):\n \"\"\"Masks elements of `IndexedSlices`.\n\n Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that\n contains a subset of the slices of `a`. Only the slices at indices specified\n in `mask_indices` are returned.\n\n This is useful when you need to extract a subset of slices in an\n `IndexedSlices` object.\n\n For example:\n\n ```python\n # `a` contains slices at indices [12, 26, 37, 45] from a large tensor\n # with shape [1000, 10]\n a.indices => [12, 26, 37, 45]\n tf.shape(a.values) => [4, 10]\n\n # `b` will be the subset of `a` slices at its second and third indices, so\n # we want to mask of its first and last indices (which are at absolute\n # indices 12, 45)\n b = tf.sparse_mask(a, [12, 45])\n\n b.indices => [26, 37]\n tf.shape(b.values) => [2, 10]\n\n ```\n\n Args:\n * `a`: An `IndexedSlices` instance.\n * `mask_indices`: Indices of elements to mask.\n * `name`: A name for the operation (optional).\n\n Returns:\n The masked `IndexedSlices` instance.\n \"\"\"\n with ops.op_scope([a, mask_indices], name, \"sparse_mask\") as name:\n indices = a.indices\n out_indices, to_gather = listdiff(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return ops.IndexedSlices(out_values, out_indices, a.dense_shape)\n\n\ndef split(split_dim, num_split, value, name=\"split\"):\n \"\"\"Splits a tensor into `num_split` tensors along one dimension.\n\n Splits `value` along dimension `split_dim` into `num_split` smaller tensors.\n Requires that `num_split` evenly divide `value.shape[split_dim]`.\n\n For example:\n\n ```python\n # 'value' is a tensor with shape [5, 30]\n # Split 'value' into 3 tensors along dimension 1\n split0, split1, split2 = tf.split(1, 3, value)\n tf.shape(split0) ==> [5, 10]\n ```\n\n Args:\n split_dim: A 0-D `int32` `Tensor`. 
The dimension along which to split.\n Must be in the range `[0, rank(value))`.\n num_split: A Python integer. The number of ways to split.\n value: The `Tensor` to split.\n name: A name for the operation (optional).\n\n Returns:\n `num_split` `Tensor` objects resulting from splitting `value`.\n \"\"\"\n return gen_array_ops._split(split_dim=split_dim,\n num_split=num_split,\n value=value,\n name=name)\n\n\[email protected](\"Reverse\")\ndef _ReverseShape(op):\n dims_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(dims_shape[0])\n if input_shape.ndims is not None and input_shape.ndims > 8:\n raise ValueError(\n \"tf.reverse() does not work on tensors with more than 8 dimensions\")\n return [input_shape]\n\n\ndef transpose(a, perm=None, name=\"transpose\"):\n \"\"\"Transposes `a`. Permutes the dimensions according to `perm`.\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors.\n\n For example:\n\n ```python\n # 'x' is [[1 2 3]\n # [4 5 6]]\n tf.transpose(x) ==> [[1 4]\n [2 5]\n [3 6]]\n\n # Equivalently\n tf.transpose(x, perm=[1, 0]) ==> [[1 4]\n [2 5]\n [3 6]]\n\n # 'perm' is more useful for n-dimensional tensors, for n > 2\n # 'x' is [[[1 2 3]\n # [4 5 6]]\n # [[7 8 9]\n # [10 11 12]]]\n # Take the transpose of the matrices in dimension-0\n tf.transpose(b, perm=[0, 2, 1]) ==> [[[1 4]\n [2 5]\n [3 6]]\n\n [[7 10]\n [8 11]\n [9 12]]]\n ```\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`.\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n with ops.op_scope([a], name, \"transpose\") as name:\n if perm is None:\n rank = gen_array_ops.rank(a)\n perm = (rank - 1) - gen_math_ops._range(0, rank, 1)\n ret = gen_array_ops.transpose(a, perm, name=name)\n # NOTE(mrry): Setting the shape explicitly because\n # reverse is not handled by the shape function.\n input_shape = ret.op.inputs[0].get_shape().dims\n if input_shape is not None:\n ret.set_shape(input_shape[::-1])\n else:\n ret = gen_array_ops.transpose(a, perm, name=name)\n return ret\n\n\ndef zeros(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n For example:\n\n ```python\n tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ```\n\n Args:\n shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.op_scope([shape], name, \"zeros\") as name:\n if isinstance(shape, list):\n output = constant(0, shape=shape, dtype=dtype, name=name)\n else:\n shape = ops.convert_to_tensor(shape, name=\"shape\")\n output = fill(shape, constant(0, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype\n return output\n\n\ndef zeros_like(tensor, dtype=None, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to zero. 
Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n ```python\n # 'tensor' is [[1, 2, 3], [4, 5, 6]]\n tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.op_scope([tensor], name, \"zeros_like\") as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n if dtype is not None and tensor.dtype != dtype:\n ret = zeros(shape(tensor), dtype, name=name)\n ret.set_shape(tensor.get_shape())\n return ret\n else:\n return gen_array_ops._zeros_like(tensor, name=name)\n\n\ndef ones_like(tensor, dtype=None, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to 1. Optionally, you can\n specify a new type (`dtype`) for the returned tensor.\n\n For example:\n\n ```python\n # 'tensor' is [[1, 2, 3], [4, 5, 6]]\n tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.op_scope([tensor], name, \"ones_like\") as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n ones_shape = shape(tensor)\n if dtype is None:\n dtype = tensor.dtype\n ret = ones(ones_shape, dtype=dtype, name=name)\n ret.set_shape(tensor.get_shape())\n return ret\n\n\ndef ones(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n This operation returns a tensor of type `dtype` with shape `shape` and all\n elements set to 1.\n\n For example:\n\n ```python\n tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.op_scope([shape], name, \"ones\") as name:\n if isinstance(shape, list):\n output = constant(1, shape=shape, dtype=dtype, name=name)\n else:\n shape = ops.convert_to_tensor(shape, name=\"shape\")\n output = fill(shape, constant(1, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype\n return output\n\n\ndef placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a tensor that will be always fed.\n\n **Important**: This tensor will produce an error if evaluated. Its value must\n be fed using the `feed_dict` optional argument to `Session.run()`,\n `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.placeholder(tf.float32, shape=(1024, 1024))\n y = tf.matmul(x, x)\n\n with tf.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n rand_array = np.random.rand(1024, 1024)\n print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n ```\n\n Args:\n dtype: The type of elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a tensor of any shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n \"\"\"\n shape = tensor_shape.as_shape(shape)\n if shape.is_fully_defined():\n dim_list = shape.as_list()\n else:\n dim_list = []\n ret = gen_array_ops._placeholder(\n dtype=dtype,\n shape=dim_list,\n name=name)\n ret.set_shape(shape)\n return ret\n\n\ndef pad(tensor, paddings, mode=\"CONSTANT\", name=None): # pylint: disable=invalid-name\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n # 't' is [[1, 2, 3], [4, 5, 6]].\n # 'paddings' is [[1, 1,], [2, 2]].\n # rank of 't' is 2.\n pad(t, paddings, \"CONSTANT\") ==> [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 2, 3, 0, 0],\n [0, 0, 4, 5, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n pad(t, paddings, \"REFLECT\") ==> [[6, 5, 4, 5, 6, 5, 4],\n [3, 2, 1, 2, 3, 2, 1],\n [6, 5, 4, 5, 6, 5, 4],\n [3, 2, 1, 2, 3, 2, 1]]\n\n pad(t, paddings, \"SYMMETRIC\") ==> [[2, 1, 1, 2, 3, 3, 2],\n [2, 1, 1, 2, 3, 3, 2],\n [5, 4, 4, 5, 6, 6, 5],\n [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n\n if mode == \"CONSTANT\":\n return gen_array_ops._pad(tensor, paddings, name=name)\n if mode == \"REFLECT\":\n return gen_array_ops._mirror_pad(tensor,\n paddings,\n mode=\"REFLECT\",\n name=name)\n if mode == \"SYMMETRIC\":\n return gen_array_ops._mirror_pad(tensor,\n paddings,\n mode=\"SYMMETRIC\",\n name=name)\n raise ValueError(\"Unknown padding mode: %s\" % mode)\n\n\[email protected](\"Placeholder\")\ndef _PlaceholderShape(op):\n given_shape = tensor_util.TensorShapeProtoToList(op.get_attr(\"shape\"))\n if given_shape:\n return [tensor_shape.TensorShape(given_shape)]\n else:\n return [tensor_shape.unknown_shape()]\n\n\[email protected](\"CheckNumerics\")\[email protected](\"Identity\")\[email protected](\"RefIdentity\")\[email protected](\"StopGradient\")\ndef _UnchangedShape(op):\n return [op.inputs[0].get_shape()]\n\n\[email protected](\"Rank\")\[email protected](\"Size\")\ndef _ScalarShape(unused_op):\n return [tensor_shape.scalar()]\n\n\[email protected](\"Slice\")\ndef _SliceShape(op):\n \"\"\"Shape function for array_ops.slice.\"\"\"\n input_shape = op.inputs[0].get_shape()\n begin_shape = op.inputs[1].get_shape().with_rank(1)\n sizes_shape = op.inputs[2].get_shape().with_rank(1)\n ndims = begin_shape.merge_with(sizes_shape)[0].value\n if ndims is not None:\n input_shape.assert_has_rank(ndims)\n begin_value = tensor_util.constant_value(op.inputs[1])\n sizes_value = tensor_util.constant_value(op.inputs[2])\n if sizes_value is not None:\n returned_dims = []\n for i, slice_size in enumerate(sizes_value.ravel()):\n if slice_size != -1:\n returned_dims.append(slice_size)\n elif begin_value is not None:\n returned_dims.append(input_shape[i] - begin_value[i])\n else:\n returned_dims.append(None)\n return [tensor_shape.TensorShape(returned_dims)]\n else:\n if input_shape.ndims is not None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n elif ndims is not None:\n return [tensor_shape.unknown_shape(ndims=ndims)]\n else:\n return [tensor_shape.unknown_shape()]\n\n\[email protected](\"Gather\")\ndef _GatherShape(op):\n \"\"\"Shape function for array_ops.gather.\"\"\"\n params_shape = op.inputs[0].get_shape()\n indices_shape = op.inputs[1].get_shape()\n return [indices_shape.concatenate(params_shape[1:])]\n\n\[email protected](\"Unique\")\ndef _UniqueShape(op):\n \"\"\"Shape function for array_ops.Unique.\"\"\"\n # The output is a vector with data-dependent length.\n input_shape = op.inputs[0].get_shape()\n input_shape.assert_has_rank(1)\n return [tensor_shape.vector(None), input_shape]\n\n\[email protected](\"UniqueWithCounts\")\ndef _UniqueWithCountsShape(op):\n \"\"\"Shape function for array_ops.Unique.\"\"\"\n # The output is a vector with data-dependent length.\n input_shape = op.inputs[0].get_shape()\n input_shape.assert_has_rank(1)\n return [tensor_shape.vector(None), input_shape, tensor_shape.vector(None)]\n\n\[email protected](\"Diag\")\ndef _DiagShape(op):\n \"\"\"Shape function for array_ops.diag.\n\n This op has one input (of rank k <= 3), and one output (of rank 2k),\n where the shape of the output is the concatenation of the input\n shape with itself.\n\n Args:\n op: A Diag Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_most(3)\n return [input_shape.concatenate(input_shape)]\n\[email protected](\"DiagPart\")\ndef 
_DiagPartShape(op):\n \"\"\"Shape function for array_ops.diag_part.\n\n This op has one input (of rank k = 2, 4, or 6), and one output (of rank k/2),\n where the shape of the output is the diagonal of the input shape.\n\n Args:\n op: A DiagPart Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If input has odd rank or greater than 6\n\n \"\"\"\n shape = op.inputs[0].get_shape()\n rank = len(shape)\n mid = rank // 2\n if rank % 2 or rank > 6:\n raise ValueError(\"Input must have even rank <= 6, input rank is \" +\n str(rank) + \".\" )\n if shape[:mid] != shape[mid:]:\n raise ValueError(\"Invalid shape, shape[:mid] \" + str(shape[:mid]) +\n \" and shape[mid:] \" + str(shape[mid:]) +\n \" do not match \")\n input_shape = shape.with_rank_at_most(6)\n return [input_shape[:len(input_shape) // 2]]\n\[email protected](\"ExpandDims\")\ndef _ExpandDimsShape(op):\n \"\"\"Determine shape for expand op's output tensor.\n\n Args:\n op: Operation for which to determine shape.\n op.inputs[0] is the input tensor.\n op.inputs[1] is the dimension in which to expand.\n Returns:\n Shape of op's output tensor.\n Raises:\n ValueError: If dim is outside of [-rank - 1, rank], where rank is the number\n of dimensions in the input tensor.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.dims is None:\n return [tensor_shape.unknown_shape()]\n dim = tensor_util.constant_value(op.inputs[1])\n input_ndims = input_shape.ndims\n if dim < -input_ndims - 1 or dim > input_ndims:\n raise ValueError(\n \"dim %d not in [%d, %d].\" % (dim, -input_ndims, input_ndims))\n if dim < 0:\n dim += (input_ndims + 1)\n result_shape = list(input_shape.dims)\n result_shape.insert(dim, 1)\n return [tensor_shape.TensorShape(result_shape)]\n\n\[email protected](\"Squeeze\")\ndef _SqueezeShape(op):\n \"\"\"Determine shape for squeeze op's output tensor.\n\n Args:\n op: Operation for which to determine shape.\n Returns:\n Shape of op's output tensor.\n Raises:\n ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),\n where rank is the number of dimensions in the input tensor. 
Or, if\n squeeze_dims includes a dimension for which input shape has a value\n not equal to 1.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.dims is None:\n return [tensor_shape.unknown_shape()]\n\n squeeze_dims = op.get_attr(\"squeeze_dims\") or []\n wrapped_squeeze_dims = []\n input_ndims = input_shape.ndims\n for i, squeeze_dim in enumerate(squeeze_dims):\n if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:\n raise ValueError(\n \"squeeze_dims[%d]=%d not in [%d, %d).\" % (\n i, squeeze_dim, -input_ndims, input_ndims))\n if squeeze_dim < 0:\n squeeze_dim += input_ndims\n wrapped_squeeze_dims.append(squeeze_dim)\n\n result_shape = []\n for i, dim in enumerate([d.value for d in input_shape.dims]):\n is_explicit_match = i in wrapped_squeeze_dims\n if dim is None:\n if is_explicit_match:\n # Assume that the squeezed dimension will be 1 at runtime.\n continue\n if not wrapped_squeeze_dims:\n # If squeezing all 1 dimensions and we see a None, give up.\n return [tensor_shape.unknown_shape()]\n elif dim == 1:\n if is_explicit_match or not wrapped_squeeze_dims:\n continue\n elif is_explicit_match:\n raise ValueError(\n \"Can not squeeze dim[%d], expected a dimension of 1, got %d.\" % (\n i, dim))\n result_shape.append(dim)\n return [tensor_shape.TensorShape(result_shape)]\n\n\[email protected](\"Bitcast\")\ndef _BitcastShape(op):\n \"\"\"Shape function for Bitcast op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n input_type = op.inputs[0].dtype\n size_of_input = input_type.size\n output = dtypes.as_dtype(op.get_attr(\"type\"))\n size_of_output = output.size\n if size_of_input == size_of_output:\n return [tensor_shape.TensorShape(input_shape)]\n else:\n if size_of_output > size_of_input:\n new_shape = input_shape.as_list()\n last_val = new_shape[-1]\n if last_val == (size_of_output // size_of_input):\n new_shape = new_shape[:-1]\n else:\n raise ValueError(\n \"Cannot bitcast due to shape. 
%d is not evenly divisible by %d.\" %\n (new_shape[-1], size_of_input // size_of_output))\n else:\n new_shape = input_shape\n new_shape = new_shape.concatenate([size_of_input // size_of_output])\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"Reshape\")\ndef _ReshapeShape(op):\n \"\"\"Shape function for Reshape op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.ndims is not None:\n num_elements = tensor_shape.Dimension(1)\n for dim in input_shape.dims:\n num_elements *= dim\n else:\n num_elements = tensor_shape.Dimension(None)\n new_shape_shape = op.inputs[1].get_shape().with_rank(1)\n new_shape = tensor_util.constant_value(op.inputs[1])\n if new_shape is None:\n # Attempt to infer the rank of the output from the length of\n # new_shape.\n return [tensor_shape.unknown_shape(ndims=new_shape_shape[0].value)]\n new_shape = np.reshape(new_shape, -1).tolist()\n if -1 not in new_shape:\n # The new shape is fully defined.\n if (num_elements.value is not None\n and num_elements.value != np.prod(new_shape)):\n raise ValueError(\n \"Cannot reshape a tensor with %d elements to shape %s (%d elements)\"\n % (num_elements.value, new_shape, np.prod(new_shape)))\n return [tensor_shape.TensorShape(new_shape)]\n elif num_elements.value is not None:\n # We know the number of elements, so we can calculate the missing\n # dimension in the new_shape.\n known_elements = 1\n unknown_index = None\n for i, dim in enumerate(new_shape):\n if dim == -1:\n unknown_index = i\n else:\n known_elements *= dim\n if known_elements == 0:\n raise ValueError(\"cannot infer the missing input size for \"\n \"an empty tensor unless all specified \"\n \"input sizes are non-zero\")\n if num_elements % known_elements != 0:\n raise ValueError(\"input has %s elements, which isn't divisible by %d\" %\n (num_elements, known_elements))\n new_shape[unknown_index] = num_elements // known_elements\n return [tensor_shape.TensorShape(new_shape)]\n else:\n # We don't know the input shape, but we know n-1 of the dimensions\n # in the new shape.\n new_shape[new_shape.index(-1)] = None\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"BroadcastGradientArgs\")\ndef _BroadcastGradientArgsShape(op):\n \"\"\"Shape function for the BroadcastGradientArgs op.\"\"\"\n # TODO(mrry): Implement constant_value for BroadcastGradientArgs?\n op.inputs[0].get_shape().assert_has_rank(1)\n op.inputs[1].get_shape().assert_has_rank(1)\n return [tensor_shape.vector(None), tensor_shape.vector(None)]\n\n\[email protected](\"Fill\")\ndef _FillShape(op):\n \"\"\"Shape function for the Fill op.\n\n This op takes a vector of dimensions and a scalar, and produces a\n tensor with the given dimensions.\n\n Args:\n op: A Fill Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n dimensions_shape = op.inputs[0].get_shape().with_rank(1)\n op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())\n fill_dims = tensor_util.constant_value(op.inputs[0])\n if fill_dims is None:\n # Attempt to infer the rank of the output from the length of\n # dimensions.\n return [tensor_shape.unknown_shape(ndims=dimensions_shape[0].value)]\n else:\n return [tensor_shape.TensorShape(fill_dims.tolist())]\n\n\[email protected](\"InvertPermutation\")\ndef _InvertPermutationShape(op):\n \"\"\"Shape function for the InvertPermutation op.\"\"\"\n return [op.inputs[0].get_shape().with_rank(1)]\n\n\[email protected](\"ListDiff\")\ndef _ListDiffShape(op):\n \"\"\"Shape function for the 
ListDiff op.\"\"\"\n op.inputs[0].get_shape().assert_has_rank(1)\n op.inputs[1].get_shape().assert_has_rank(1)\n # TODO(mrry): Indicate that the length falls within an interval?\n return [tensor_shape.vector(None)] * 2\n\n\[email protected](\"Pad\")\[email protected](\"MirrorPad\")\ndef _PadShape(op):\n \"\"\"Shape function for the Pad op.\n\n This op has two inputs:\n\n * input: A rank-N tensor.\n * paddings: An N-by-2 matrix, in which the i^th row contains the\n number of padding elements to add before and after `input` in the\n i^th dimension.\n\n It has one output, which has the same rank as input, and additional\n elements according to the values in paddings.\n\n Args:\n op: A Pad Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the input shapes are incompatible.\n \"\"\"\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n input_shape = op.inputs[0].get_shape()\n input_shape = input_shape.with_rank(paddings_shape[0].value)\n paddings_shape = paddings_shape.merge_with(\n tensor_shape.matrix(input_shape.ndims, 2))\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n if paddings[i, 0] < 0 or paddings[i, 1] < 0:\n raise ValueError(\"paddings must be non-negative\")\n output_dims.append(dim + paddings[i, 0] + paddings[i, 1])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"MirrorPadGrad\")\ndef _MirrorPadGradShape(op):\n \"\"\"Shape function for the MirrorPadGrad op.\"\"\"\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)\n paddings_shape = paddings_shape.merge_with(tensor_shape.matrix(\n input_shape.ndims, 2))\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n if paddings[i, 0] < 0 or paddings[i, 1] < 0:\n raise ValueError(\"Paddings must be non-negative.\")\n if dim <= paddings[i, 0] + paddings[i, 1]:\n raise ValueError(\"Output dimension is not positive.\")\n output_dims.append(dim - paddings[i, 0] - paddings[i, 1])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"ReverseSequence\")\ndef _ReverseSequenceShape(op):\n \"\"\"Shape function for the ReverseSequence op.\n\n This op has two inputs:\n\n * input: A rank-N tensor with size B in the 0th dimension.\n * seq_lens: A vector of length B.\n\n It has one output, with the same size as input.\n\n Args:\n op: A ReverseSequence Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the input shapes are incompatible or seq_dim == batch_dim.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n seq_lens_shape = op.inputs[1].get_shape().with_rank(1)\n seq_dim = op.get_attr(\"seq_dim\")\n batch_dim = op.get_attr(\"batch_dim\")\n if batch_dim >= input_shape.ndims:\n raise ValueError(\"batch_dim must be < input.dims() (%d vs %d)\" %\n (batch_dim, input_shape.ndims))\n if seq_dim >= input_shape.ndims:\n raise ValueError(\"seq_dim must be < input.dims() (%d vs %d)\" %\n (seq_dim, input_shape.ndims))\n batch_size = input_shape[batch_dim].merge_with(seq_lens_shape[0])\n input_shape = tensor_shape.TensorShape([\n value if ix != batch_dim else batch_size\n for ix, value in 
enumerate(input_shape)])\n return [input_shape]\n\n\[email protected](\"Shape\")\[email protected](\"ShapeN\")\ndef _ShapeNShape(op):\n \"\"\"Shape function for the Shape/ShapeN op.\"\"\"\n return [tensor_shape.vector(x.get_shape().ndims) for x in op.inputs]\n\n\[email protected](\"Transpose\")\ndef _TransposeShape(op):\n \"\"\"Shape function for the Transpose op.\n\n This op takes two inputs:\n\n * input: a rank-N tensor of arbitrary shape.\n * shuffle: a length-N vector.\n\n Its output is the rank-N tensor computed by permuting the dimensions\n of input according to shuffle.\n\n Args:\n op: A Transpose op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input and shuffle are incompatible.\n IndexError: If shuffle contains an index that is >= the rank of input.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(\n input_shape.ndims))\n transpose_vec = tensor_util.constant_value(op.inputs[1])\n if transpose_vec is None:\n return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]\n else:\n return [tensor_shape.TensorShape([input_shape[i]\n for i in transpose_vec.tolist()])]\n\n\[email protected](\"Split\")\ndef _SplitShape(op):\n \"\"\"Shape function for the Split op.\"\"\"\n split_dim = tensor_util.constant_value(op.inputs[0])\n num_split = len(op.outputs)\n input_shape = op.inputs[1].get_shape()\n if split_dim is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split\n else:\n split_dim = int(split_dim)\n input_shape = input_shape.with_rank_at_least(split_dim + 1)\n if not (input_shape[split_dim] % num_split).is_compatible_with(0):\n raise ValueError(\n \"Number of ways to split should evenly divide the split \"\n \"dimension but got split_dim %d (size = %d) and num_split %d\" %\n (split_dim, input_shape[split_dim].value, num_split))\n prefix = input_shape[:split_dim]\n size_in_split_dim = input_shape[split_dim] // num_split\n suffix = input_shape[split_dim + 1:]\n output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)\n return [output_shape] * num_split\n\n\[email protected](\"Tile\")\ndef _TileShape(op):\n \"\"\"Shape function for the Tile op.\n\n This op has two inputs:\n\n * input: A rank-N tensor.\n * multiples: A length-N vector, in which the i^th element contains\n the factor by which `input` will be tiled in the i^th dimension.\n\n It has one output, which has the same rank as input, and additional\n elements according to the values in multiples\n\n Args:\n op: A Tile Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)\n multiples = tensor_util.constant_value(op.inputs[1])\n if multiples is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n multiples = multiples.ravel()\n for i, dim in enumerate(input_shape.dims):\n output_dims.append(dim * multiples[i])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"TileGrad\")\ndef _TileGradShape(op):\n \"\"\"Shape function for the TileGrad op.\"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n multiples = tensor_util.constant_value(op.inputs[1])\n if multiples is None:\n return 
[tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n output_dims.append(dim // multiples[i])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"Where\")\ndef _WhereShape(op):\n \"\"\"Shape function for the Where op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n return [tensor_shape.matrix(None, input_shape.ndims)]\n\n\[email protected](\"ZerosLike\")\ndef _ZerosLikeShape(op):\n \"\"\"Shape function for the ZerosLike op.\"\"\"\n return [op.inputs[0].get_shape()]\n\n\ndef edit_distance(hypothesis, truth, normalize=True, name=\"edit_distance\"):\n \"\"\"Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by length of `truth` by setting\n `normalize` to true.\n\n For example, given the following input:\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"]\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n ```\n\n This operation would return the following:\n\n ```python\n # 'output' is a tensor of shape `[2, 2]` with edit distances normalized\n # by 'truth' lengths.\n output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. If `True`, normalizes the Levenshtein distance by\n length of `truth.`\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.\n \"\"\"\n if not isinstance(hypothesis, ops.SparseTensor):\n raise TypeError(\"Hypothesis must be a SparseTensor\")\n if not isinstance(truth, ops.SparseTensor):\n raise TypeError(\"Truth must be a SparseTensor\")\n\n return gen_array_ops._edit_distance(hypothesis.indices,\n hypothesis.values,\n hypothesis.shape,\n truth.indices,\n truth.values,\n truth.shape,\n normalize=normalize,\n name=name)\n\n\[email protected](\"EditDistance\")\ndef _EditDistanceShape(op):\n \"\"\"Shape function for the EditDistance op.\"\"\"\n hypothesis_shape = tensor_util.constant_value(op.inputs[2])\n truth_shape = tensor_util.constant_value(op.inputs[5])\n if hypothesis_shape is not None and truth_shape is not None:\n if len(hypothesis_shape) != len(truth_shape):\n raise ValueError(\n \"Inconsistent ranks in hypothesis and truth. 
Saw shapes: %s and %s\" %\n (str(hypothesis_shape), str(truth_shape)))\n return [tensor_shape.TensorShape(\n [max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]\n\n return [tensor_shape.unknown_shape()]\n\n\n# The remaining ops do not change the shape of their inputs.\[email protected](\"Quantize\")\[email protected](\"Dequantize\")\ndef _QuantizeDequantizeShape(op):\n unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())\n unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())\n return common_shapes.unchanged_shape(op)\n\n\[email protected](\"SpaceToDepth\")\ndef _SpaceToDepthShape(op):\n \"\"\"Shape function for the SpaceToDepth op.\n\n This op takes two inputs:\n\n * input: a tensor of shape like that [B, H, W, D]\n * block_size: an int.\n\n Its output is the same-rank tensor but with changed\n dimensions like that: [B, H/block_size, W/block_size, D*block_size*block_size]\n\n Args:\n op: A SpaceToDepth op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input are not as expected.\n IndexError: If block_size does not divide W or H.\n \"\"\"\n # Check that the input tensor is of 4 dimensions.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.space_to_depth() requires tensors with exactly 4 dimensions.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_height = input_shape[1]\n input_width = input_shape[2]\n\n if (input_width % block_size > 0) or (input_height % block_size > 0):\n raise IndexError(\n \"block_size needs to divide both width and height.\")\n\n width = input_width // block_size\n height = input_height // block_size\n new_depth = input_shape[3] * block_size * block_size\n\n return [tensor_shape.TensorShape(\n [input_shape[0], height, width, new_depth])]\n\n\[email protected](\"DepthToSpace\")\ndef _DepthToSpaceShape(op):\n \"\"\"Shape function for the DepthToSpace op.\n\n This op takes two inputs:\n\n * input: a tensor of shape like that [B, H, W, D]\n * block_size: an int.\n\n Its output is the same-rank tensor but with changed\n dimensions like that:\n [B, H*block_size, W*block_size, D/(block_size*block_size)]\n\n Args:\n op: A DepthToSpace op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input are not as expected.\n IndexError: If block_size*block_size does not divide D.\n \"\"\"\n # Check that the input tensor is of 4 dimensions.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.depth_to_space() requires tensors with exactly 4 dimensions.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_height = input_shape[1]\n input_width = input_shape[2]\n input_depth = input_shape[3]\n\n width = input_width * block_size\n height = input_height * block_size\n\n if input_depth % (block_size * block_size) > 0:\n raise IndexError(\n \"block_size*block_size needs to divide the input depth.\")\n\n new_depth = input_depth // (block_size * block_size)\n return [tensor_shape.TensorShape(\n [input_shape[0], height, width, new_depth])]\n\n\[email protected](\"OneHot\")\ndef _OneHotShape(op):\n \"\"\"Shape function for the OneHot op.\n\n It closely follows the code in the .cc implementation.\n\n Args:\n 
op: A OneHot Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: if axis < -1.\n \"\"\"\n indices_shape = op.inputs[0].get_shape()\n indices_dims = indices_shape.ndims\n depth = tensor_util.constant_value(op.inputs[1])\n axis = op.get_attr(\"axis\")\n\n if axis < -1:\n raise ValueError(\"axis must be >= -1\")\n\n new_shape = None\n if indices_dims is not None:\n new_shape = indices_shape.as_list()\n new_shape.insert(axis % (indices_dims + 1), depth)\n\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"PlaceholderWithDefault\")\ndef _PlaceholderWithDefaultShape(op):\n \"\"\"Shape function for the PlaceholderWithDefault op.\n\n This op acts as an identity when it is not fed (passing through a\n default value), but allows the user to feed it with tensors of a\n possibly less precise shape than its default value.\n\n Args:\n op: A PlaceholderWithDefault `Operation`.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n output_shape = tensor_shape.TensorShape(op.get_attr(\"shape\"))\n # NOTE(mrry): We don't merge these shapes, because `output_shape`\n # may be *less* precise than `input_shape`.\n input_shape.assert_is_compatible_with(output_shape)\n return [output_shape]\n"
] | [
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.ops.gen_math_ops._range",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.gen_array_ops._mirror_pad",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.framework.tensor_shape.matrix",
"tensorflow.python.ops.gen_array_ops._pad",
"tensorflow.python.ops.gen_array_ops._placeholder",
"numpy.reshape",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.gen_array_ops.rank",
"tensorflow.python.ops.gen_array_ops._zeros_like",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.gen_array_ops._split",
"tensorflow.python.ops.gen_array_ops._concat",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.ops.gen_array_ops._slice",
"tensorflow.python.ops.gen_array_ops._pack",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.Tensor._override_operator",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.ops.gen_array_ops.transpose",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.ops.gen_array_ops._edit_distance",
"tensorflow.python.framework.tensor_shape.vector",
"tensorflow.python.ops.gen_array_ops._unpack",
"tensorflow.python.ops.constant_op.constant",
"tensorflow.python.ops.common_shapes.unchanged_shape",
"numpy.prod",
"tensorflow.python.framework.tensor_shape.as_shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
breaks-software/OSCAAR | [
"254acfccbd907b89485b9d78cff2681892a40309"
] | [
"oscaar/photometry.py"
] | [
"'''oscaar v2.0 \n Module for differential photometry\n Developed by Brett Morris, 2011-2013'''\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\n\ndef phot(image, xCentroid, yCentroid, apertureRadius, plottingThings, annulusOuterRadiusFactor=2.8, annulusInnerRadiusFactor=1.40, ccdGain=1, plots=False):\n '''\n Method for aperture photometry. \n \n Parameters\n ----------\n image : numpy.ndarray\n FITS image opened with PyFITS\n \n xCentroid : float\n Stellar centroid along the x-axis (determined by trackSmooth or equivalent)\n \n yCentroid : float\n Stellar centroid along the y-axis (determined by trackSmooth or equivalent)\n \n apertureRadius : float\n Radius in pixels from centroid to use for source aperture\n \n annulusInnerRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus from a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n annulusOuterRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n ccdGain : float\n Gain of your detector, used to calculate the photon noise\n \n plots : bool\n If `plots`=True, display plots showing the aperture radius and \n annulus radii overplotted on the image of the star\n \n Returns\n -------\n rawFlux : float\n The background-subtracted flux measured within the aperture\n \n rawError : float\n The photon noise (limiting statistical) Poisson uncertainty on the measurement of `rawFlux`\n \n errorFlag : bool\n Boolean corresponding to whether or not any error occured when running oscaar.phot(). If an error occured, the flag is\n True; otherwise False.\n \n Core developer: Brett Morris (NASA-GSFC)\n '''\n if plots:\n [fig,subplotsDimensions,photSubplotsOffset] = plottingThings\n if photSubplotsOffset == 0: plt.clf()\n annulusRadiusInner = annulusInnerRadiusFactor*apertureRadius \n annulusRadiusOuter = annulusOuterRadiusFactor*apertureRadius\n\n ## From the full image, cut out just the bit around the star that we're interested in\n imageCrop = image[xCentroid-annulusRadiusOuter+1:xCentroid+annulusRadiusOuter+2,yCentroid-annulusRadiusOuter+1:yCentroid+annulusRadiusOuter+2]\n [dimy,dimx] = imageCrop.shape\n XX, YY = np.meshgrid(np.arange(dimx),np.arange(dimy)) \n x = (XX - annulusRadiusOuter)**2\n y = (YY - annulusRadiusOuter)**2\n ## Assemble arrays marking the pixels marked as either source or background pixels\n sourceIndices = x + y <= apertureRadius**2\n skyIndices = (x + y <= annulusRadiusOuter**2)*(x + y >= annulusRadiusInner**2)\n \n rawFlux = np.sum(imageCrop[sourceIndices] - np.median(imageCrop[skyIndices]))*ccdGain\n rawError = np.sqrt(np.sum(imageCrop[sourceIndices]*ccdGain) + np.median(ccdGain*imageCrop[skyIndices])) ## Poisson-uncertainty\n\n if plots:\n def format_coord(x, y):\n ''' Function to also give data value on mouse over with imshow. 
'''\n col = int(x+0.5)\n row = int(y+0.5)\n try:\n return 'x=%i, y=%i, Flux=%1.1f' % (x, y, imageCrop[row,col])\n except:\n return 'x=%i, y=%i' % (x, y)\n \n med = np.median(imageCrop)\n dsig = np.std(imageCrop)\n \n ax = fig.add_subplot(subplotsDimensions+photSubplotsOffset+1)\n ax.imshow(imageCrop, cmap=cm.gray, interpolation=\"nearest\",vmin = med-0.5*dsig, vmax =med+2*dsig)\n \n theta = np.arange(0,360)*(np.pi/180)\n rcos = lambda r, theta: annulusRadiusOuter + r*np.cos(theta)\n rsin = lambda r, theta: annulusRadiusOuter + r*np.sin(theta)\n ax.plot(rcos(apertureRadius,theta),rsin(apertureRadius,theta),'m',linewidth=4)\n ax.plot(rcos(annulusRadiusInner,theta),rsin(annulusRadiusInner,theta),'r',linewidth=4)\n ax.plot(rcos(annulusRadiusOuter,theta),rsin(annulusRadiusOuter,theta),'r',linewidth=4)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('Aperture')\n ax.set_xlim([-.5,dimx-.5])\n ax.set_ylim([-.5,dimy-.5])\n ax.format_coord = format_coord \n plt.draw()\n return [rawFlux, rawError, False]\n \ndef multirad(image, xCentroid, yCentroid, apertureRadii, plottingThings, annulusOuterRadiusFactor=2.8, annulusInnerRadiusFactor=1.40, ccdGain=1, plots=False):\n '''\n Method for aperture photometry. \n \n Parameters\n ----------\n image : numpy.ndarray\n FITS image opened with PyFITS\n \n xCentroid : float\n Stellar centroid along the x-axis (determined by trackSmooth or equivalent)\n \n yCentroid : float\n Stellar centroid along the y-axis (determined by trackSmooth or equivalent)\n \n apertureRadii : list\n List of aperture radii (floats) to feed to phot().\n \n annulusInnerRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus from a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n annulusOuterRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n ccdGain : float\n Gain of your detector, used to calculate the photon noise\n \n plots : bool\n If `plots`=True, display plots showing the aperture radius and \n annulus radii overplotted on the image of the star\n \n Returns\n -------\n rawFlux : float\n The background-subtracted flux measured within the aperture\n \n rawError : float\n The photon noise (limiting statistical) Poisson uncertainty on the measurement of `rawFlux`\n \n errorFlag : bool\n Boolean corresponding to whether or not any error occured when running oscaar.phot(). 
If an error occured, the flag is\n True; otherwise False.\n \n Core developer: Brett Morris (NASA-GSFC)\n '''\n\n #[apertureRadiusMin, apertureRadiusMax, apertureRadiusStep] = apertureRadiusSettings\n #apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax, apertureRadiusStep)\n\n fluxes = []\n errors = []\n photFlags = []\n for apertureRadius in apertureRadii:\n flux, error, photFlag = phot(image, xCentroid, yCentroid, apertureRadius, plottingThings, annulusOuterRadiusFactor=annulusOuterRadiusFactor, annulusInnerRadiusFactor=annulusInnerRadiusFactor, ccdGain=ccdGain, plots=False)\n fluxes.append(flux)\n errors.append(error)\n photFlags.append(photFlag)\n annulusRadiusOuter = annulusOuterRadiusFactor*np.max(apertureRadii)\n imageCrop = image[xCentroid-annulusRadiusOuter+1:xCentroid+annulusRadiusOuter+2,yCentroid-annulusRadiusOuter+1:yCentroid+annulusRadiusOuter+2]\n [dimy,dimx] = imageCrop.shape\n\n if plots:\n [fig,subplotsDimensions,photSubplotsOffset] = plottingThings\n if photSubplotsOffset == 0: plt.clf()\n def format_coord(x, y):\n ''' Function to also give data value on mouse over with imshow. '''\n col = int(x+0.5)\n row = int(y+0.5)\n try:\n return 'x=%i, y=%i, Flux=%1.1f' % (x, y, imageCrop[row,col])\n except:\n return 'x=%i, y=%i' % (x, y)\n \n med = np.median(imageCrop)\n dsig = np.std(imageCrop)\n \n ax = fig.add_subplot(subplotsDimensions+photSubplotsOffset+1)\n ax.imshow(imageCrop, cmap=cm.gray, interpolation=\"nearest\",vmin = med-0.5*dsig, vmax =med+2*dsig)\n \n theta = np.arange(0,360)*(np.pi/180)\n rcos = lambda r, theta: annulusRadiusOuter + r*np.cos(theta)\n rsin = lambda r, theta: annulusRadiusOuter + r*np.sin(theta)\n for apertureRadius in apertureRadii:\n ax.plot(rcos(apertureRadius,theta),rsin(apertureRadius,theta),linewidth=4)\n #ax.plot(rcos(annulusRadiusInner,theta),rsin(annulusRadiusInner,theta),'r',linewidth=4)\n #ax.plot(rcos(annulusRadiusOuter,theta),rsin(annulusRadiusOuter,theta),'r',linewidth=4)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('Aperture')\n ax.set_xlim([-.5,dimx-.5])\n ax.set_ylim([-.5,dimy-.5])\n ax.format_coord = format_coord \n plt.draw() \n return fluxes, errors, photFlags\n"
] | [
[
"numpy.arange",
"numpy.median",
"numpy.cos",
"matplotlib.pyplot.draw",
"numpy.sin",
"numpy.max",
"numpy.std",
"matplotlib.pyplot.clf",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dreamflasher/pybo-python3 | [
"803873df98640c94580ccd93b6661197acf8fc5e"
] | [
"pybo/utils.py"
] | [
"\"\"\"\nVarious utility functions.\n\"\"\"\n\n\n\n\n\nimport numpy as np\nimport re\nimport subprocess\n\n__all__ = ['rstate', 'SubprocessQuery', 'InteractiveQuery']\n\n\ndef rstate(rng=None):\n \"\"\"\n Return a RandomState object. This is just a simple wrapper such that if rng\n is already an instance of RandomState it will be passed through, otherwise\n it will create a RandomState object using rng as its seed.\n \"\"\"\n if not isinstance(rng, np.random.RandomState):\n rng = np.random.RandomState(rng)\n return rng\n\n\nclass SubprocessQuery(object):\n \"\"\"\n Class for black-boxes that should be run from the shell. Simply pass the\n shell command with variables replaced with `{}` with python string\n formatting specs inside, then call the object with inputs to replace the `{}`\n in the same order as in the provided string.\n \"\"\"\n def __init__(self, command):\n self.command = command\n\n def __call__(self, x):\n out = subprocess.check_output(self.command.format(*x), shell=True)\n out = out.splitlines()[-1] # keep last line\n out = re.compile(r'\\x1b[^m]*m').sub('', out) # strip color codes\n out = out.split('=')[-1] # strip left hand side\n return np.float(out)\n\n\nclass InteractiveQuery(object):\n \"\"\"\n Wrapper for queries which interactively query the user.\n \"\"\"\n def __init__(self, prompt='Enter value at design x = {}\\ny = '):\n self.prompt = prompt\n\n def __call__(self, x):\n y = eval(input(self.prompt.format(x)))\n if not isinstance(y, (np.int, np.long, np.float)):\n # FIXME: this should probably just re-query the user rather than\n # raising an exception.\n raise ValueError('output must be a number')\n return y\n"
] | [
[
"numpy.random.RandomState",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
o-Ian/X-Minerei | [
"d9f86d802787381afa4e955d7532cf9788b94962"
] | [
"Catch.Manipulation_data.py"
] | [
"import requests\nimport csv\nimport json\nimport pandas as pd\nimport os\nimport numpy as np\nimport datetime\nimport os\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\n\n\ndef baixar_arquivo(url, nome_arquivo):\n resultado = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '\n 'AppleWebKit/537.36 (HTML, like Gecko) '\n 'Chrome/39.0.2171.95 Safari/537.36'})\n with open(nome_arquivo, 'wb') as novo_arquivo:\n novo_arquivo.write(resultado.content)\n novo_arquivo = pd.read_csv(nome_arquivo, usecols=['Date(UTC)', 'Value'])\n novo_arquivo['Date(UTC)'] = novo_arquivo['Date(UTC)'].astype('datetime64')\n novo_arquivo['Value'] = novo_arquivo['Value'].astype('float64')\n novo_arquivo.to_csv(f'{nome_arquivo}')\n return novo_arquivo\n\n\ndef conversorjsontocsv(nome_arquivojson):\n with open(nome_arquivojson) as file:\n data = json.load(file)\n fname = 'Mineration_DATA.ETH/IPCA.csv'\n with open(fname, 'wt') as file:\n csv_file = csv.writer(file, lineterminator='\\n')\n csv_file.writerow(['Year', '%IPCA'])\n for item in data:\n csv_file.writerow([item['p'].replace('dezembro', ''), item['v']])\n os.remove(nome_arquivojson)\n file = pd.read_csv(fname)\n file['%IPCA'] = file['%IPCA']/100\n file.to_csv('Mineration_DATA.ETH/IPCA.csv')\n\n return file\n\n\ndef baixar_arquivo2(url, nome_arquivo):\n resultado = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '\n 'AppleWebKit/537.36 (HTML, like Gecko) '\n 'Chrome/39.0.2171.95 Safari/537.36'})\n with open(nome_arquivo, 'wb') as novo_arquivo:\n novo_arquivo.write(resultado.content)\n return novo_arquivo\n\n\ndef calculateProfit(hashrate_proprio, difficulty, blockreward_dia, fees=1):\n return (((hashrate_proprio * 1000000) * (1 - (fees / 100))) / (difficulty * 1000000000000)) * blockreward_dia*3600\n\n\n# Downloading and renaming files\nETHPerDay = baixar_arquivo('https://etherscan.io/chart/blockreward?output=csv',\n 'Mineration_DATA.ETH/ETHPerDay.csv')\nETHPerDay = ETHPerDay.rename(columns={'Value': 'ETHPerDay'})\n\nNetworkDifficulty = baixar_arquivo('https://etherscan.io/chart/difficulty?output=csv',\n 'Mineration_DATA.ETH/NetworkDifficulty_TH_s.csv')\nNetworkDifficulty = NetworkDifficulty.rename(columns={'Value': 'Difficulty[TH/s]'})\n\nBlockCountDay = baixar_arquivo('https://etherscan.io/chart/blocks?output=csv',\n 'Mineration_DATA.ETH/BlockCount_Day.csv')\nBlockCountDay = BlockCountDay.rename(columns={'Value': 'Total_Blocks'})\n\nETHPriceUSD = baixar_arquivo('https://etherscan.io/chart/etherprice?output=csv',\n 'Mineration_DATA.ETH/ETHPrice_USD.csv')\nETHPriceUSD = ETHPriceUSD.rename(columns={'Value': 'ETHPrice_USD'})\n\nbaixar_arquivo2('https://servicodados.ibge.gov.br/api/v1/conjunturais?&d=s&user=ibge&t=1737&v=69&p=199512,'\n '199612,199712,199812,199912,200012,200112,200212,200312,200412,200512,200612,200712,200812,'\n '200912,201012, 201112,201212,201312,201412,201512,201612,201712,201812,201912,202012,202112,'\n '202212,202312,202412,202512,202612,202712,202812,202912,203012&ng=1(1)&c=',\n 'Mineration_DATA.ETH/IPCA.json')\nIPCA = conversorjsontocsv('Mineration_DATA.ETH/IPCA.json')\n\nDate = ETHPerDay['Date(UTC)']\n\n# Creating dataset that group all filesa\nAllData = pd.DataFrame(ETHPerDay)\n\nAllData['NetworkDifficulty[TH/s]'] = 
NetworkDifficulty['Difficulty[TH/s]']\n\nAllData['ETHPriceUSD'] = ETHPriceUSD['ETHPrice_USD']\n\nAllData['TotalBlocks'] = BlockCountDay['Total_Blocks']\n\n# Input data from user\nHashUsuario = float(input('Qual o seu hashrate [Mh/s]?: '))\nPower = int(input('Qual a potência [W]?: '))\nSuffixMult = 0.001\nPowerCoast = float(input('Qual o tarida de energia [USD]?: '))\nRS_GPUPrice = float(input('Qual o preço da placa de vídeo?: '))\n\n# Recalculating PowerCoast\nchoicelist = []\nc = datetime.date.today().year - 1\ncont = len(IPCA)-1\nAllData['PowerCoast'] = PowerCoast\nfor i in range(len(IPCA)):\n if c >= 2015:\n valor = float(IPCA.loc[i + cont, '%IPCA'])\n if c == 2020:\n novo_valor = 1 - valor\n choicelist.append(novo_valor)\n else:\n novo_valor = novo_valor - valor\n choicelist.append(novo_valor)\n cont -= 2\n c -= 1\n\n# Conditional structure for readjustment\ncondicionlist = [AllData['Date(UTC)'].dt.year == 2020,\n AllData['Date(UTC)'].dt.year == 2019,\n AllData['Date(UTC)'].dt.year == 2018,\n AllData['Date(UTC)'].dt.year == 2017,\n AllData['Date(UTC)'].dt.year == 2016,\n AllData['Date(UTC)'].dt.year == 2015]\n\nAllData['Inflação'] = np.select(condicionlist, choicelist, default=1)\nAllData['PowerCoast'] = PowerCoast * AllData['Inflação']\n\n# Putting together AllData and GPUPrice\nAllData['Date(UTC)'] = AllData['Date(UTC)'].astype('datetime64')\n\nGPUPrice = pd.read_csv('Mineration_DATA.ETH/GPUPrice.csv', index_col=0)\nGPUPrice['Date(UTC)'] = GPUPrice['Date(UTC)'].astype('datetime64')\ndel GPUPrice['R$_GPUPrice']\n\n# Activate/Deactivate when the relations between the variables must be done\nAllData = pd.merge(AllData, GPUPrice, on='Date(UTC)', how='outer')\n\n# Catching last date from AllData dataframe\nlast_date = AllData.loc[len(AllData)-1]\nlast_date = last_date['Date(UTC)']\nlast_date = str(last_date)\n\nlast_date = datetime.datetime.strptime(last_date[:10], '%Y-%m-%d').date()\nlast_date = last_date.strftime('%m-%d-%Y')\n\n# Downloading dollarPrice.csv\nbaixar_arquivo2(f\"https://olinda.bcb.gov.br/olinda/servico/PTAX/versao/v1/odata/CotacaoDolarPeriodo(dataInicial=@dataInicial,dataFinalCotacao=@dataFinalCotacao)?@dataInicial='07-30-2015'&@dataFinalCotacao='{last_date}'&$top=999999999&$format=text/csv\", r'Mineration_DATA.ETH/DollarPrice.csv')\ndollarPrice2 = pd.read_csv('Mineration_DATA.ETH/DollarPrice.csv')\ndollarPrice = pd.DataFrame(columns=['R$_DollarPrice', 'Date(UTC)'])\n\n# Converting dollarPrice columns\ndollarPrice['R$_DollarPrice'] = dollarPrice2['cotacaoCompra'].astype('string')\ndollarPrice['R$_DollarPrice'] = dollarPrice['R$_DollarPrice'].str.replace(',', '.').astype('float64')\ndollarPrice['Date(UTC)'] = dollarPrice2['dataHoraCotacao'].astype('string')\n\n# Taking out hours from Date(UTC) column\ndates = dollarPrice['Date(UTC)']\nlist_data = []\nfor date in dates:\n list_data.append(date[:10])\n\ndollarPrice['Date(UTC)'] = list_data\ndollarPrice['Date(UTC)'] = pd.to_datetime(dollarPrice['Date(UTC)'])\n\n# Putting dollarPrice with AllData dataframe\nAllData = pd.merge(AllData, dollarPrice, on='Date(UTC)', how='outer')\n\n# Replacing NaN values to last value from dollarPrice column\nAllData['R$_DollarPrice'].fillna(method='ffill', inplace=True)\n\n# Putting Price and Date(UTC) column from AllData on dollarPrice dataframe\ndollarPrice['R$_DollarPrice'] = AllData['R$_DollarPrice']\ndollarPrice['Date(UTC)'] = AllData['Date(UTC)']\nAllData['ETHPriceBRL'] = AllData['ETHPriceUSD'] * AllData['R$_DollarPrice']\n\n# Putting GPU Price from user on csv 
file\nAllData._set_value(len(AllData)-1, 'R$_GPUPrice', RS_GPUPrice)\n\n# Calculating profit\nAllData['ETH/dia'] = (calculateProfit(HashUsuario, AllData['NetworkDifficulty[TH/s]'], (AllData['ETHPerDay']/AllData['TotalBlocks']))) * 24\nAllData['USD_Revenue'] = AllData['ETHPriceBRL'] * AllData['ETH/dia']\nAllData['USD_Coast'] = Power * SuffixMult * AllData['PowerCoast'] * 24\nAllData['USD_Profit/day'] = AllData['USD_Revenue'] - AllData['USD_Coast']\nAllData['USD_Profit/month'] = AllData['USD_Profit/day'] * 30\n\nAllData['Indicador'] = AllData['USD_Coast']/AllData['USD_Revenue']\n\n# Creating new column (relation between Network Difficulty with the last Network Difficulty)\nLast_Difficulty = AllData['NetworkDifficulty[TH/s]'].iloc[-1]\nAllData['Multiple_Difficulty/LastDifficulty'] = AllData['NetworkDifficulty[TH/s]']/Last_Difficulty\n\n# Making the prevision of GPU Price\nAllData['R$_GPUPrice'] = RS_GPUPrice * AllData['Multiple_GPUPrice']\n\nfill_values = {'R$_GPUPrice': RS_GPUPrice * AllData['Multiple_Difficulty/LastDifficulty']}\nAllData.fillna(fill_values, inplace=True)\n\nAllData['R$_GPUPrice'] = RS_GPUPrice * AllData['Multiple_Difficulty/LastDifficulty']\n\n# Conditional structure to use inflation as a multiplicator when date is equal or less than 2017-09-12\ncondicionlist = [AllData['Date(UTC)'] <= '2017-09-12'\n ]\nchoicelist = [AllData['R$_GPUPrice'].loc[775] * AllData['Inflação']]\n\nAllData['R$_GPUPrice'] = np.select(condicionlist, choicelist, default=AllData['R$_GPUPrice'])\n\n# GPU Price conversion (real to dollar)\nAllData['USD_GPUPrice'] = AllData['R$_GPUPrice'] / AllData['R$_DollarPrice']\n\n# Column that calcule how much months do you need to pay your investment\nAllData['Pays_itself/months'] = AllData['R$_GPUPrice']/AllData['USD_Profit/month']\n\nAllData['Date'] = AllData['Date(UTC)']\n# Last step\ndollarPrice.to_csv('Mineration_DATA.ETH/DollarPrice.csv')\nAllData.to_csv('Mineration_DATA.ETH/AllData.csv')\n"
] | [
[
"pandas.merge",
"pandas.to_datetime",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
shangz-ai/gluon-nlp | [
"75b3c121ac02c1bdef25a785fda2238e256246f9"
] | [
"scripts/word_embeddings/evaluation.py"
] | [
"# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Evaluation\n=============\n\nFunctions to perform evaluation of TokenEmbeddings on the datasets included in\nthe GluonNLP toolkit.\n\n\"\"\"\nimport itertools\nimport sys\nimport logging\nimport json\n\nimport mxnet as mx\nimport numpy as np\nfrom scipy import stats\n\nimport gluonnlp as nlp\n\n\ndef add_parameters(parser):\n \"\"\"Add evaluation specific parameters to parser.\"\"\"\n group = parser.add_argument_group('Evaluation arguments')\n\n group.add_argument('--eval-batch-size', type=int, default=512)\n\n # Datasets\n group.add_argument(\n '--similarity-datasets', type=str,\n default=nlp.data.word_embedding_evaluation.word_similarity_datasets,\n nargs='*',\n help='Word similarity datasets to use for intrinsic evaluation.')\n group.add_argument(\n '--similarity-functions', type=str,\n default=nlp.embedding.evaluation.list_evaluation_functions(\n 'similarity'), nargs='+',\n help='Word similarity functions to use for intrinsic evaluation.')\n group.add_argument(\n '--analogy-datasets', type=str, default=['GoogleAnalogyTestSet'],\n nargs='*',\n help='Word similarity datasets to use for intrinsic evaluation.')\n group.add_argument(\n '--analogy-functions', type=str,\n default=nlp.embedding.evaluation.list_evaluation_functions('analogy'),\n nargs='+',\n help='Word analogy functions to use for intrinsic evaluation. 
')\n\n ## Analogy evaluation specific arguments\n group.add_argument(\n '--analogy-dont-exclude-question-words', action='store_true',\n help=('Exclude input words from valid output analogies.'\n 'The performance of word embeddings on the analogy task '\n 'is around 0% accuracy if input words are not excluded.'))\n\n\ndef validate_args(args):\n \"\"\"Validate provided arguments and act on --help.\"\"\"\n # Check correctness of similarity dataset names\n for dataset_name in args.similarity_datasets:\n if dataset_name and dataset_name.lower() not in map(\n str.lower,\n nlp.data.word_embedding_evaluation.word_similarity_datasets):\n print('{} is not a supported dataset.'.format(dataset_name))\n sys.exit(1)\n\n # Check correctness of analogy dataset names\n for dataset_name in args.analogy_datasets:\n if dataset_name and dataset_name.lower() not in map(\n str.lower,\n nlp.data.word_embedding_evaluation.word_analogy_datasets):\n print('{} is not a supported dataset.'.format(dataset_name))\n sys.exit(1)\n\n\ndef iterate_similarity_datasets(args):\n \"\"\"Generator over all similarity evaluation datasets.\n\n Iterates over dataset names, keyword arguments for their creation and the\n created dataset.\n\n \"\"\"\n for dataset_name in args.similarity_datasets:\n if not dataset_name:\n continue\n parameters = nlp.data.list_datasets(dataset_name)\n for key_values in itertools.product(*parameters.values()):\n kwargs = dict(zip(parameters.keys(), key_values))\n yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)\n\n\ndef iterate_analogy_datasets(args):\n \"\"\"Generator over all analogy evaluation datasets.\n\n Iterates over dataset names, keyword arguments for their creation and the\n created dataset.\n\n \"\"\"\n for dataset_name in args.analogy_datasets:\n if not dataset_name:\n continue\n parameters = nlp.data.list_datasets(dataset_name)\n for key_values in itertools.product(*parameters.values()):\n kwargs = dict(zip(parameters.keys(), key_values))\n yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)\n\n\ndef get_similarity_task_tokens(args):\n \"\"\"Returns a set of all tokens occurring the evaluation datasets.\"\"\"\n tokens = set()\n for _, _, dataset in iterate_similarity_datasets(args):\n tokens.update(\n itertools.chain.from_iterable((d[0], d[1]) for d in dataset))\n return tokens\n\n\ndef get_analogy_task_tokens(args):\n \"\"\"Returns a set of all tokens occuring the evaluation datasets.\"\"\"\n tokens = set()\n for _, _, dataset in iterate_analogy_datasets(args):\n tokens.update(\n itertools.chain.from_iterable(\n (d[0], d[1], d[2], d[3]) for d in dataset))\n return tokens\n\n\ndef get_tokens_in_evaluation_datasets(args):\n tokens = get_similarity_task_tokens(args)\n tokens.update(get_analogy_task_tokens(args))\n return tokens\n\n\ndef evaluate_similarity(args, token_embedding, ctx, logfile=None,\n global_step=0):\n \"\"\"Evaluate on specified similarity datasets.\"\"\"\n\n results = []\n for similarity_function in args.similarity_functions:\n evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity(\n idx_to_vec=token_embedding.idx_to_vec,\n similarity_function=similarity_function)\n evaluator.initialize(ctx=ctx)\n if not args.no_hybridize:\n evaluator.hybridize()\n\n # Evaluate all datasets\n for (dataset_name, dataset_kwargs,\n dataset) in iterate_similarity_datasets(args):\n initial_length = len(dataset)\n dataset_coded = [[\n token_embedding.token_to_idx[d[0]],\n token_embedding.token_to_idx[d[1]], d[2]\n ] for d in dataset if d[0] in 
token_embedding.token_to_idx\n and d[1] in token_embedding.token_to_idx]\n num_dropped = initial_length - len(dataset_coded)\n\n # All words are unknown\n if not len(dataset_coded):\n correlation = 0\n else:\n words1, words2, scores = zip(*dataset_coded)\n pred_similarity = evaluator(\n mx.nd.array(words1, ctx=ctx), mx.nd.array(words2, ctx=ctx))\n sr = stats.spearmanr(pred_similarity.asnumpy(),\n np.array(scores))\n correlation = sr.correlation\n\n logging.info(\n 'Spearman rank correlation on %s (%s pairs) %s with %s:\\t%s',\n dataset.__class__.__name__, len(dataset_coded),\n str(dataset_kwargs), similarity_function, correlation)\n\n result = dict(\n task='similarity',\n dataset_name=dataset_name,\n dataset_kwargs=dataset_kwargs,\n similarity_function=similarity_function,\n spearmanr=correlation,\n num_dropped=num_dropped,\n global_step=global_step,\n )\n log_similarity_result(logfile, result)\n results.append(result)\n\n return results\n\n\ndef evaluate_analogy(args, token_embedding, ctx, logfile=None, global_step=0):\n \"\"\"Evaluate on specified analogy datasets.\n\n The analogy task is an open vocabulary task, make sure to pass a\n token_embedding with a sufficiently large number of supported tokens.\n\n \"\"\"\n results = []\n exclude_question_words = not args.analogy_dont_exclude_question_words\n for analogy_function in args.analogy_functions:\n evaluator = nlp.embedding.evaluation.WordEmbeddingAnalogy(\n idx_to_vec=token_embedding.idx_to_vec,\n exclude_question_words=exclude_question_words,\n analogy_function=analogy_function)\n evaluator.initialize(ctx=ctx)\n if not args.no_hybridize:\n evaluator.hybridize()\n\n for (dataset_name, dataset_kwargs,\n dataset) in iterate_analogy_datasets(args):\n initial_length = len(dataset)\n dataset_coded = [[\n token_embedding.token_to_idx[d[0]],\n token_embedding.token_to_idx[d[1]],\n token_embedding.token_to_idx[d[2]],\n token_embedding.token_to_idx[d[3]]\n ] for d in dataset if d[0] in token_embedding.token_to_idx\n and d[1] in token_embedding.token_to_idx\n and d[2] in token_embedding.token_to_idx\n and d[3] in token_embedding.token_to_idx]\n num_dropped = initial_length - len(dataset_coded)\n\n dataset_coded_batched = mx.gluon.data.DataLoader(\n dataset_coded, batch_size=args.eval_batch_size)\n\n acc = mx.metric.Accuracy()\n for batch in dataset_coded_batched:\n batch = batch.as_in_context(ctx)\n words1, words2, words3, words4 = (batch[:, 0], batch[:, 1],\n batch[:, 2], batch[:, 3])\n pred_idxs = evaluator(words1, words2, words3)\n acc.update(pred_idxs[:, 0], words4.astype(np.float32))\n\n logging.info('Accuracy on %s (%s quadruples) %s with %s:\\t%s',\n dataset.__class__.__name__, len(dataset_coded),\n str(dataset_kwargs), analogy_function,\n acc.get()[1])\n\n result = dict(\n task='analogy',\n dataset_name=dataset_name,\n dataset_kwargs=dataset_kwargs,\n analogy_function=analogy_function,\n accuracy=acc.get()[1],\n num_dropped=num_dropped,\n global_step=global_step,\n )\n log_analogy_result(logfile, result)\n results.append(result)\n return results\n\n\ndef log_similarity_result(logfile, result):\n \"\"\"Log a similarity evaluation result dictionary as TSV to logfile.\"\"\"\n assert result['task'] == 'similarity'\n\n if not logfile:\n return\n\n with open(logfile, 'a') as f:\n f.write('\\t'.join([\n str(result['global_step']),\n result['task'],\n result['dataset_name'],\n json.dumps(result['dataset_kwargs']),\n result['similarity_function'],\n str(result['spearmanr']),\n str(result['num_dropped']),\n ]))\n\n f.write('\\n')\n\n\ndef 
log_analogy_result(logfile, result):\n    \"\"\"Log an analogy evaluation result dictionary as TSV to logfile.\"\"\"\n    assert result['task'] == 'analogy'\n\n    if not logfile:\n        return\n\n    with open(logfile, 'a') as f:\n        f.write('\\t'.join([\n            str(result['global_step']),\n            result['task'],\n            result['dataset_name'],\n            json.dumps(result['dataset_kwargs']),\n            result['analogy_function'],\n            str(result['accuracy']),\n            str(result['num_dropped']),\n        ]))\n        f.write('\\n')\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pedrogbmendes/TrimTuner | [
"a9dec1f1ac610e6ec6d54cfaf5f9c93bc4f80f97",
"a9dec1f1ac610e6ec6d54cfaf5f9c93bc4f80f97"
] | [
"trimtuner/trimtuner/trimtuner.py",
"trimtuner/models/trimtuner_dt.py"
] | [
"import time\nimport sys\nimport math\nimport random\nimport george\nimport numpy as np\nimport os\n\n#acq function\nfrom trimtuner.acquisition_functions.constrained_entropy_search import Constrained_EntropySearch\nfrom trimtuner.acquisition_functions.marginalization import MarginalizationGPMCMC, MarginalizationDT\nfrom robo.acquisition_functions.ei import *\n\n#heuristics to filter\nfrom trimtuner.maximizers.random_sampling import RandomSampling\nfrom trimtuner.maximizers.cea import CEA\n#from trimtuner.maximizers.direct import Direct\n#from trimtuner.maximizers.cmaes import CMAES\n\n\n#models\nfrom trimtuner.models.trimtuner_dt import EnsembleDTs\nfrom trimtuner.models.trimtuner_gp import EnsembleGPs\nfrom robo.priors.env_priors import EnvPrior\n\n\n#bootstrapping\nfrom trimtuner.trimtuner.initial_sampling import initial_sampling_trimtuner\n#incumbent estimation\nfrom trimtuner.trimtuner.incumbent_estimation import incumbent_estimation_cea, incumbent_estimation\n\n\n\n\ndef transform(s, s_min, s_max):\n s_transform = (np.log2(s) - np.log2(s_min)) / (np.log2(s_max) - np.log2(s_min))\n return s_transform\n\n\n\ndef retransform(s_transform, s_min, s_max):\n s = np.rint(2 ** (s_transform * (np.log2(s_max) - np.log2(s_min)) + np.log2(s_min)))\n return int(s)\n\n\n\nclass Logs():\n #class to print log files\n def __init__(self, seed, initSamples, model, heuristic):\n dir = os.path.abspath(os.getcwd())\n path = dir + \"/runLogs\"\n\n self.initSamples = initSamples\n self.seed = seed\n\n if not os.path.isdir(path):\n try:\n os.mkdir(path) #create runLogs folder\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n\n filename_orig = path + \"/trimtuner_logs_seed\" + str(seed) + \"_initSamples\" + str(initSamples) + \"_model_\" + model + \"_heuristic_\" + heuristic \n \n filename = filename_orig + \".txt\"\n counter = 1\n while os.path.isfile(filename):\n\n filename = filename_orig + \"_\" + str(counter) + \".txt\"\n counter += 1\n if counter >= 10000:\n print(\"ERROR createing the log files!!! 
Check folder \" + path)\n sys.stdout.flush() \n sys.exit(0)\n\n #filename += \".txt\" \n\n self.file_logs = open(filename, \"w\")\n self.file_logs.write(\"runID;initSamples;explorationNumber;incumbent;incTime;incAcc;incCost;configTested;Time;Acc;Cost;Overhead;CumulativeCost;\\n\")\n\n\n def printLogs(self, it, inc, incTime, incAcc, incCost, conf, confTime, confAcc, confCost, overhead, CumulativeCost):\n \n strWrite = str(self.seed) + \";\" + str(self.initSamples) + \";\" + str(it) + \";\" + str(inc) + \";\" + str(incTime) + \";\" + str(incAcc) + \";\" + str(incCost) + \";\" + str(conf) + \";\" + str(confTime) + \";\" + str(confAcc) + \";\" + str(confCost) + \";\" + str(overhead) + \";\" + str(CumulativeCost) + \"\\n\"\n self.file_logs.write(strWrite)\n \n\n def close(self):\n self.file_logs.close()\n\n\n\n\n##################################################################################\n# TrimTuner: \n# Efficient Optimization of Machine Learning Jobs in the Cloud via Sub-Sampling\n#\n##################################################################################\n\ndef trimtuner(objective_function, all_configs, constraints, seed, filterHeuristic, model,\n lower, upper, s_min, s_max, n_init=30, num_iterations=100, subsets=[60, 10, 4, 2]):\n\n # internal paramaters\n burnin=100\n chain_length=100\n n_hypers=12\n\n #percentage of unexplored configs to test in the acquisition function\n per = 0.1 \n\n np.random.seed(seed)\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n #assert n_init * len(\n assert n_init <= num_iterations, \"Number of initial points (n_init) has to be smaller than the number of iterations\" \n assert lower.shape[0] == upper.shape[0], \"Dimension miss match between upper and lower bound\"\n assert model == \"gp\" or model == \"dt\", \"ERROR: wrong model techniques. Chose 'gp' for Gaussian Processes or 'dt' for an ensemble decision tress\"\n assert filterHeuristic == \"cea\" or filterHeuristic == \"random\" or filterHeuristic == \"nofilter\", \"ERROR: wrong filtering heuristic. 
Chose 'cea', 'random', or 'nofilter'!\"\n\n costCumulative = 0\n\n n_dims = lower.shape[0]\n\n # Bookkeeping logs\n logs = Logs(seed, n_init, model, filterHeuristic)\n\n unexplored_Set = all_configs # list with all possible configurations\n training_Set = [] # traning set\n\n X = []\n y = []\n c = []\n\n if model == \"dt\":\n #ensemble of descision trees\n number_trees = 10\n model_objective = EnsembleDTs(number_trees, seed)\n model_cost = EnsembleDTs(number_trees, seed)\n\n elif model == \"gp\":\n #Gaussian Processes\n\n #kernels functions based on FABOLAS\n\n # Define model for the objective function\n cov_amp = 1 # Covariance amplitude\n kernel = cov_amp\n\n for d in range(n_dims):\n kernel *= george.kernels.Matern52Kernel(np.ones([1])*0.01, ndim=n_dims+1, axes=d)\n\n # Kernel for the environmental variable\n # We use (1-s)**2 as basis function for the Bayesian linear kernel\n env_kernel = george.kernels.BayesianLinearRegressionKernel(log_a=0.1,log_b=0.1,ndim=n_dims + 1,axes=n_dims)\n kernel *= env_kernel\n\n # Take 3 times more samples than we have hyperparameters\n if n_hypers < 2 * len(kernel):\n n_hypers = 3 * len(kernel)\n if n_hypers % 2 == 1:\n n_hypers += 1\n\n\n prior = EnvPrior(len(kernel)+1, n_ls=n_dims, n_lr=2, rng=rng)\n\n quadratic_bf = lambda x: (1 - x) ** 2\n linear_bf = lambda x: x\n\n #model for accuracy\n model_objective = EnsembleGPs(kernel,\n prior=prior,\n burnin_steps=burnin,\n chain_length=chain_length,\n n_hypers=n_hypers,\n normalize_output=False,\n basis_func=quadratic_bf,\n lower=lower,\n upper=upper,\n rng=rng)\n\n # Define model for the cost function\n cost_cov_amp = 1\n cost_kernel = cost_cov_amp\n\n for d in range(n_dims):\n cost_kernel *= george.kernels.Matern52Kernel(np.ones([1])*0.01, ndim=n_dims+1, axes=d)\n\n cost_env_kernel = george.kernels.BayesianLinearRegressionKernel(log_a=0.1,log_b=0.1,ndim=n_dims+1,axes=n_dims)\n cost_kernel *= cost_env_kernel\n\n cost_prior = EnvPrior(len(cost_kernel)+1, n_ls=n_dims, n_lr=2, rng=rng)\n\n #model for cost\n model_cost = EnsembleGPs(cost_kernel,\n prior=cost_prior,\n burnin_steps=burnin,\n chain_length=chain_length,\n n_hypers=n_hypers,\n basis_func=linear_bf,\n normalize_output=False,\n lower=lower,\n upper=upper,\n rng=rng)\n\n\n # Extend input space by task variable\n extend_lower = np.append(lower, 0)\n extend_upper = np.append(upper, 1)\n is_env = np.zeros(extend_lower.shape[0])\n is_env[-1] = 1\n\n\n acq_func = Constrained_EntropySearch(model_objective,\n model_cost,\n constraints,\n extend_lower,\n extend_upper,\n sampling_acquisition=EI,\n is_env_variable=is_env,\n n_representer=50)\n\n #if model == 'gp':\n #gps marginalization\n acquisition_func = MarginalizationGPMCMC(acq_func)\n #else:\n # acquisition_func = MarginalizationDT(acq_func)\n\n\n if filterHeuristic == 'random':\n maximizer = RandomSampling(acquisition_func, extend_lower, extend_upper, seed, per)\n\n if filterHeuristic == 'nofilter':\n maximizer = RandomSampling(acquisition_func, extend_lower, extend_upper, seed, 1)\n \n elif filterHeuristic == 'cea':\n maximizer = CEA(acquisition_func, extend_lower, extend_upper, per, constraints)\n\n # elif filterHeuristic == 'direct':\n # #CMAES\n # maximizer = Direct(acquisition_func, extend_lower, extend_upper, n_func_evals=144, n_iters=300)\n\n # elif filterHeuristic == 'cmaes':\n # #CMAES\n # maximizer = CMAES(acquisition_func, seed, extend_lower, extend_upper, n_func_evals=144) \n \n\n # Initial Design\n print(\"Initial Design\")\n sys.stdout.flush()\n counter_it = 1\n\n real_n_init = int(n_init / 
len(subsets)) \n x_init = initial_sampling_trimtuner(seed, unexplored_Set, real_n_init, s_max)\n\n for it in range(real_n_init):\n\n for subset in subsets:\n start_time_overhead = time.time()\n s = int(s_max / float(subset)) ##real_size\n\n x = x_init[it]\n print(\"Evaluate %s on subset size %d\" % (x, s))\n sys.stdout.flush()\n\n #time to select a config to test\n overhead_init = time.time() - start_time_overhead\n\n func_val, cost, runTime = objective_function(x, s)\n costCumulative += cost\n\n print(\"Configuration has an accuracy of %f with cost %f and took %f seconds\" % (1-func_val,cost,runTime))\n sys.stdout.flush()\n\n start_time_overhead = time.time()\n\n #add config tested to the training set and remove from the untested configs\n tested_config = np.copy(x)\n tested_config[-1] = s\n training_Set.append(tested_config)\n count = 0\n while count != len(unexplored_Set):\n if np.array_equal(unexplored_Set[count], tested_config):\n unexplored_Set.pop(count)\n break\n count += 1\n\n # Bookkeeping\n config = np.append(x, transform(s, s_min, s_max))\n X.append(config)\n y.append(np.log(func_val)) # Model the target function on a logarithmic scale\n c.append(np.log(cost)) # Model the cost on a logarithmic scale\n\n #time to update the training and the unexplored set\n overhead_updateSet = time.time() - start_time_overhead\n\n overhead_time = overhead_updateSet + overhead_init\n\n #write logs in the files\n logs.printLogs(counter_it, x, runTime, 1-func_val, cost, x, runTime, 1-func_val, cost, overhead_time, costCumulative)\n\n counter_it +=1\n\n #end initial sampling\n\n X = np.array(X)\n y = np.array(y)\n c = np.array(c)\n\n # Train models\n model_objective.train(X, y, do_optimize=True) #model of accuracy\n model_cost.train(X, c, do_optimize=True) #model of cost\n\n\n #start optimization\n for it in range(X.shape[0]+1, num_iterations+1):\n print(\"Start iteration %d ... 
\" % (it))\n sys.stdout.flush()\n\n start_time = time.time()\n\n acquisition_func.update(model_objective, model_cost, X, y, c)\n new_x = maximizer.maximize(X, y, c, unexplored_Set) #maximize the acquisition function\n\n s = retransform(new_x[-1], s_min, s_max) # Map s from log space to original linear space\n\n #time to compute the acquisition function\n overhead_time_acqFunc = time.time() - start_time \n\n # Evaluate the chosen configuration\n print(\"Evaluate candidate \" + str(new_x[:-1]) + \" on subset size \" + str(int(s)))\n sys.stdout.flush()\n\n new_y, new_c, new_t = objective_function(new_x[:-1], int(s))\n\n costCumulative += new_c \n\n #add config tested to the training set and remove from the untested configs\n tested_config = np.copy(new_x)\n tested_config[-1] = s\n training_Set.append(tested_config)\n count = 0\n while count != len(unexplored_Set):\n if np.array_equal(unexplored_Set[count], tested_config):\n unexplored_Set.pop(count)\n break\n count += 1\n\n print(\"Configuration has an accuracy of %.3f with cost %.3f and took %.3f seconds\" % (1-new_y,new_c,new_t))\n sys.stdout.flush()\n\n start_time = time.time() #overhead\n\n # Add new observation to the data\n X = np.concatenate((X, new_x[None, :]), axis=0)\n y = np.concatenate((y, np.log(np.array([new_y]))), axis=0) # Model the target function on a logarithmic scale\n c = np.concatenate((c, np.log(np.array([new_c]))), axis=0) # Model the cost function on a logarithmic scale\n\n # Train models\n model_objective.train(X, y, do_optimize=True) #model of accuracy\n model_cost.train(X, c, do_optimize=True) #model of cost\n\n # determine the incumbent\n inc, inc_acc, inc_cost = incumbent_estimation_cea(model_objective, model_cost, X[:, :-1], constraints)\n inc[-1] = retransform(inc[-1], s_min, s_max)\n\n print(\"Current incumbent \" + str(inc) + \" with estimated accuracy of \" + str(inc_acc) + \"%\")\n\n #time to train the models\n overhead_time_trainModels = time.time() - start_time\n\n #overhead - training models and compute the acq. func.\n total_overhead = overhead_time_trainModels + overhead_time_acqFunc\n\n print(\"Optimization overhead was %.3f seconds\" % (total_overhead))\n sys.stdout.flush()\n\n #write logs in the files\n logs.printLogs(it, inc, 0, inc_acc, inc_cost, tested_config, new_t, 1-new_y, new_c, total_overhead, costCumulative)\n\n\n logs.close()\n\n results = \"\\n The optimal configuration is \" + str(inc) + \" with estimated accuracy of \" + str(inc_acc) + \"\\0025 and a cost of \" + str(inc_cost) + \"\\n\"\n print(results)\n\n return inc",
"import numpy as np \nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.tree import DecisionTreeRegressor, ExtraTreeRegressor\n\nfrom robo.models.base_model import BaseModel\n\n\n###############################################################\n# Decision tree\n###############################################################\nclass DecisionTreeRegressor_(DecisionTreeRegressor):\n def __init__(self):\n self.original_X = None\n self.models = None\n super(DecisionTreeRegressor_, self).__init__()\n self.bagging = None\n\n def UpdateBagging(self, bagging):\n self.bagging = bagging\n\n\n def train(self, X, y, do_optimize=True):\n #train is done in the ensemble using function fit\n self.original_X = X\n return\n\n\n def predict(self, X_test, full_cov=False, **kwargs):\n #predictiond done by the entire ensemble\n m, v = self.bagging.predict(X_test, full_cov)\n return m, v\n\n\n def predict_mean(self, X_test, **kwargs):\n #prection of each tree\n m = super(DecisionTreeRegressor_, self).predict(X_test)\n return m\n\n\n def get_incumbent(self): \n projection = np.ones([self.original_X.shape[0], 1]) \n X_projected = np.concatenate((self.original_X[:, :-1], projection), axis=1)\n m, _ = self.predict(X_projected)\n\n best = np.argmin(m)\n incumbent = X_projected[best]\n incumbent_value = m[best]\n\n return incumbent, incumbent_value\n\n\n def get_noise(self):\n return 1e-3\n\n def nll(self, theta):\n return 0 \n \n def grad_nll(self, theta):\n return 0\n\n def optimize(self):\n return 0\n \n def sample_functions(self, X_test, n_funcs=1):\n return 0 \n\n def predict_variance(self, x1, X2):\n x_ = np.concatenate((x1, X2))\n _, var = self.bagging.predict(x_, full_cov=True)\n\n var = var[-1, :-1, np.newaxis]\n return var\n\n\n\n###############################################################\n# Extra Decision tree\n###############################################################\nclass ExtraTreeRegressor_(ExtraTreeRegressor):\n def __init__(self):\n self.original_X = None\n self.models = None\n super(ExtraTreeRegressor_, self).__init__()\n self.bagging = None\n\n def UpdateBagging(self, bagging):\n self.bagging = bagging\n\n\n def predict_variance(self, x1, X2):\n x_ = np.concatenate((x1, X2))\n _, var = self.bagging.predict(x_, full_cov=True)\n\n var = var[-1, :-1, np.newaxis]\n return var\n\n\n def train(self, X, y, do_optimize=True):\n self.original_X = X\n return\n\n\n def predict(self, X_test, full_cov=False, **kwargs):\n m, v = self.bagging.predict(X_test, full_cov)\n return m, v\n\n\n def predict_mean(self, X_test, **kwargs):\n m = super(ExtraTreeRegressor_, self).predict(X_test)\n return m\n\n\n def get_incumbent(self): \n projection = np.ones([self.original_X.shape[0], 1]) * 1\n\n X_projected = np.concatenate((self.original_X[:, :-1], projection), axis=1)\n m, _ = self.predict(X_projected)\n\n best = np.argmin(m)\n incumbent = X_projected[best]\n incumbent_value = m[best]\n\n return incumbent, incumbent_value\n\n\n def get_noise(self):\n return 1e-3\n\n def nll(self, theta):\n return 0 \n \n def grad_nll(self, theta):\n return 0\n\n def optimize(self):\n return 0\n\n def sample_functions(self, X_test, n_funcs=1):\n return 0\n\n\n\n###############################################################\n# Bagging ensemble of decision trees\n###############################################################\n\nclass BaggingRegressor_(BaggingRegressor):\n def __init__(self, base_estimator, n_estimators, random_state):\n super(BaggingRegressor_,self).__init__(base_estimator=base_estimator, \n 
n_estimators=n_estimators, \n random_state=random_state)\n self.models = None \n self.original_X = None\n self.n_estimators = n_estimators\n\n\n def train(self, X, y, do_optimize=True):\n self.original_X = X\n self.models = self.fit(X, y)\n \n for tree in self.estimators_:\n tree.train(X,y) \n return self.models\n \n\n def predict(self, X_test, full_cov=False, **kwargs):\n \n mu = np.zeros([self.n_estimators, X_test.shape[0]])\n counter = 0\n \n # predicted mean value of each tree\n for tree in self.models.estimators_:\n mu[counter,:] = tree.predict_mean(X_test)\n counter +=1\n\n #mean and standar deviation in the ensemble\n m = np.mean(mu, axis=0)\n v_ = np.std(mu, axis=0)\n \n for i in range(len(v_)):\n if not np.isfinite(v_[i]) or v_[i] < 0:\n v_[i] = 1e-1\n\n if full_cov:\n v = np.identity(v_.shape[0]) * v_\n else:\n v = v_\n\n return m, v \n \n \n def get_incumbent(self): \n projection = np.ones([self.original_X.shape[0], 1]) * 1\n\n X_projected = np.concatenate((self.original_X[:, :-1], projection), axis=1)\n m, _ = self.predict(X_projected)\n\n best = np.argmin(m)\n incumbent = X_projected[best]\n incumbent_value = m[best]\n\n return incumbent, incumbent_value\n \n \n\n###############################################################\n# Bagging ensemble of decision trees\n###############################################################\n\nclass EnsembleDTs(BaseModel):\n\n def __init__(self, number_trees, seed):\n self.no_ensenble = number_trees\n self.X = None\n self.y = None\n self.seed = seed\n self.is_trained = False\n \n # select the tree -> trimtuner uses extra trees\n #self.tree = DecisionTreeRegressor_()\n self.tree = ExtraTreeRegressor_()\n\n self.forest = BaggingRegressor_(base_estimator=self.tree, n_estimators=number_trees, random_state=self.seed)\n\n a = np.zeros(1)\n self.models = self.forest.train(a.reshape(-1,1),a)\n \n \n def train(self, X, y, **kwargs):\n self.models = self.forest.train(X, y)\n\n for tree in self.forest.estimators_:\n tree.UpdateBagging(self.forest) \n\n self.X = X\n self.y = y\n self.is_trained = True\n\n\n def predict(self, X_test, **kwargs):\n m, v = self.forest.predict(X_test) \n return m, v\n\n\n def get_incumbent(self):\n inc, inc_value = self.forest.get_incumbent() \n return inc, inc_value"
] | [
[
"numpy.log",
"numpy.log2",
"numpy.random.seed",
"numpy.array_equal",
"numpy.ones",
"numpy.concatenate",
"numpy.append",
"numpy.copy",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.isfinite",
"numpy.ones",
"numpy.concatenate",
"numpy.std",
"numpy.argmin",
"numpy.mean",
"numpy.identity",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kurbansitterley/WaterTAP3 | [
"8f4493182a39e3ba180019aba02249916dbae500"
] | [
"watertap3/watertap3/utils/financials.py"
] | [
"##############################################################################\n# Institute for the Design of Advanced Energy Systems Process Systems\n# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2020, by the\n# software owners: The Regents of the University of California, through\n# Lawrence Berkeley National Laboratory, National Technology & Engineering\n# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia\n# University Research Corporation, et al. All rights reserved.\n##############################################################################\n\nimport pandas as pd\nfrom pyomo.environ import (Block, Expression, Param, Var, NonNegativeReals, units as pyunits)\n\nfrom .ml_regression import get_linear_regression\n\n__all__ = ['SystemSpecs', 'get_complete_costing', 'get_ind_table', 'get_system_specs',\n 'get_system_costing', 'global_costing_parameters']\n\nlast_year_for_cost_indicies = 2050\n\n\nclass SystemSpecs():\n\n def __init__(self, train=None):\n basis_data = pd.read_csv('data/case_study_TEA_basis.csv', index_col='case_study')\n elec_cost = pd.read_csv('data/industrial_electricity_costs_2020.csv', index_col='location')\n elec_cost.index = elec_cost.index.str.lower()\n case_study = train['case_study']\n scenario = train['scenario']\n # print(str(case_study).replace('_', ' ').swapcase() + ':', str(scenario).replace('_', ' ').swapcase())\n self.location = basis_data[basis_data['variable'] == 'location_basis'].loc[case_study].value\n self.elec_price = float(elec_cost.loc[self.location])\n self.land_cost_percent_FCI = float(basis_data[basis_data['variable'] == 'land_cost_percent'].loc[case_study].value)\n self.working_cap_percent_FCI = float(basis_data[basis_data['variable'] == 'working_capital_percent'].loc[case_study].value)\n self.salaries_percent_FCI = float(basis_data[basis_data['variable'] == 'base_salary_per_fci'].loc[case_study].value)\n self.maintenance_costs_percent_FCI = float(basis_data[basis_data['variable'] == 'maintenance_cost_percent'].loc[case_study].value)\n self.lab_fees_percent_FCI = float(basis_data[basis_data['variable'] == 'laboratory_fees_percent'].loc[case_study].value)\n self.insurance_taxes_percent_FCI = float(basis_data[basis_data['variable'] == 'insurance_and_taxes_percent'].loc[case_study].value)\n self.benefit_percent_of_salary = float(basis_data[basis_data['variable'] == 'employee_benefits_percent'].loc[case_study].value)\n self.plant_lifetime_yrs = int(basis_data[basis_data['variable'] == 'plant_life_yrs'].loc[case_study].value)\n self.analysis_yr_cost_indices = int(basis_data[basis_data['variable'] == 'analysis_year'].loc[case_study].value)\n self.debt_interest_rate = float(basis_data[basis_data['variable'] == 'debt_interest_rate'].loc[case_study].value)\n self.plant_cap_utilization = float(basis_data[basis_data['variable'] == 'plant_cap_utilization'].loc[case_study].value)\n\n\ndef create_costing_block(unit, basis_year, tpec_or_tic):\n '''\n Function to create costing block and establish basis year and TPEC/TIC factor for each\n WaterTAP3 unit.\n\n :param unit: WaterTAP3 unit\n :type unit: str\n :param basis_year: Basis year for adjusting cost calculations\n :type basis_year: str\n :param tpec_or_tic: either 'TPEC' or 'TIC'; determines which factor to use for FCI adjustment\n (if necessary)\n :type tpec_or_tic: str\n :return:\n '''\n unit.costing = costing = Block()\n costing.basis_year = basis_year\n sys_cost_params = unit.parent_block().costing_param\n if tpec_or_tic == 'TPEC':\n costing.tpec_tic = 
unit.tpec_tic = sys_cost_params.tpec\n else:\n costing.tpec_tic = unit.tpec_tic = sys_cost_params.tic\n\n\ndef get_complete_costing(costing):\n '''\n Function to build costing block for each WaterTAP3 unit.\n\n :param costing: Costing block object from WaterTAP3 unit model.\n :type costing: object\n :return:\n '''\n unit = costing.parent_block()\n time = unit.flowsheet().config.time\n t = time.first()\n flow_in_m3yr = pyunits.convert(costing.parent_block().flow_vol_in[t], to_units=pyunits.m ** 3 / pyunits.year)\n\n costing.tci_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for TCI')\n\n costing.tci_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for TCI')\n\n costing.fci_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for FCI')\n\n costing.fci_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for FCI')\n\n costing.fixed_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Fixed O&M')\n\n costing.fixed_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Fixed O&M')\n\n costing.annual_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Annual O&M')\n\n costing.annual_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Annual O&M')\n\n costing.total_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Total O&M')\n\n costing.total_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Total O&M')\n\n costing.catchem_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Catalysts/Chemicals')\n\n costing.catchem_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Catalysts/Chemicals')\n\n costing.elect_intens_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Electricity Intensity')\n\n costing.elect_intens_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Electricity Intensity')\n\n costing.elect_cost_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Electricity Intensity')\n\n costing.elect_cost_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Electricity Intensity')\n\n costing.other_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Other capital')\n\n costing.other_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Other capital')\n\n costing.tci_reduction.fix(0)\n costing.tci_uncertainty.fix(1)\n\n costing.fci_reduction.fix(0)\n costing.fci_uncertainty.fix(1)\n\n costing.fixed_op_reduction.fix(0)\n costing.fixed_op_uncertainty.fix(1)\n\n costing.annual_op_reduction.fix(0)\n costing.annual_op_uncertainty.fix(1)\n\n costing.total_op_reduction.fix(0)\n costing.total_op_uncertainty.fix(1)\n\n costing.catchem_reduction.fix(0)\n costing.catchem_uncertainty.fix(1)\n\n costing.elect_intens_reduction.fix(0)\n costing.elect_intens_uncertainty.fix(1)\n\n costing.elect_cost_reduction.fix(0)\n costing.elect_cost_uncertainty.fix(1)\n\n costing.other_reduction.fix(0)\n costing.other_uncertainty.fix(1)\n\n basis_year = costing.basis_year\n sys_specs = 
unit.parent_block().costing_param\n\n chem_dict = unit.chem_dict\n electricity = unit.electricity\n\n\n ## COSTING INDICES\n df = get_ind_table(sys_specs.analysis_yr_cost_indices)\n costing.cap_replacement_parts = df.loc[basis_year].Capital_Factor\n costing.catalysts_chemicals = df.loc[basis_year].CatChem_Factor\n costing.labor_and_other_fixed = df.loc[basis_year].Labor_Factor\n costing.consumer_price_index = df.loc[basis_year].CPI_Factor\n\n costing.fixed_cap_inv = ((costing.fixed_cap_inv_unadjusted * costing.cap_replacement_parts) * (1 - costing.fci_reduction[t])) * costing.fci_uncertainty[t]\n if unit.parent_block().train['case_study'] == 'cherokee' and unit.unit_name == 'evaporation_pond':\n costing.land_cost = costing.fixed_cap_inv * 0\n else:\n costing.land_cost = costing.fixed_cap_inv * sys_specs.land_cost_percent_FCI\n costing.working_cap = costing.fixed_cap_inv * sys_specs.working_cap_percent_FCI\n costing.contingency = costing.fixed_cap_inv * sys_specs.contingency_cost_percent_FCI\n costing.component_replacement = costing.fixed_cap_inv * sys_specs.component_replace_percent_FCI\n costing.base_employee_salary_cost = costing.fixed_cap_inv_unadjusted * sys_specs.salaries_percent_FCI\n costing.salaries = costing.labor_and_other_fixed * costing.base_employee_salary_cost\n costing.benefits = costing.salaries * sys_specs.benefit_percent_of_salary\n costing.maintenance = costing.fixed_cap_inv * sys_specs.maintenance_costs_percent_FCI\n costing.lab = costing.fixed_cap_inv * sys_specs.lab_fees_percent_FCI\n costing.insurance_taxes = costing.fixed_cap_inv * sys_specs.insurance_taxes_percent_FCI\n\n cat_chem_df = pd.read_csv('data/chemical_costs.csv', index_col='Material')\n chem_cost_sum = 0\n for key in chem_dict.keys():\n if key == 'unit_cost':\n chem_cost_sum = chem_dict[key] * costing.fixed_cap_inv * 1E6\n else:\n chem_cost = cat_chem_df.loc[key].Price\n chem_cost_sum += costing.catalysts_chemicals * flow_in_m3yr * chem_cost * chem_dict[key] * sys_specs.plant_cap_utilization\n\n costing.cat_and_chem_cost = ((chem_cost_sum * 1E-6) * (1 - costing.catchem_reduction[t])) * costing.catchem_uncertainty[t]\n\n # if not hasattr(costing, 'electricity_cost'):\n costing.electricity_intensity = (unit.electricity * (1 - costing.elect_intens_reduction[t])) * costing.elect_intens_uncertainty[t]\n costing.electricity_cost = ((costing.electricity_intensity * flow_in_m3yr * sys_specs.electricity_price * 1E-6) * sys_specs.plant_cap_utilization) * (1 - costing.elect_cost_reduction[t]) * costing.elect_cost_uncertainty[t]\n\n if not hasattr(costing, 'other_var_cost'):\n costing.other_var_cost = 0\n\n else:\n costing.other_var_cost = (costing.other_var_cost * (1 - costing.other_reduction[t])) * costing.other_uncertainty[t]\n\n costing.total_cap_investment = (costing.fixed_cap_inv + costing.land_cost + costing.working_cap) * (1 - costing.tci_reduction[t]) * costing.tci_uncertainty[t]\n # costing.salaries = Expression(expr=costing.labor_and_other_fixed * costing.base_employee_salary_cost, doc='Salaries')\n costing.total_fixed_op_cost = ((costing.salaries + costing.benefits + costing.maintenance + costing.lab + costing.insurance_taxes) * (1 - costing.fixed_op_reduction[t])) * costing.fixed_op_uncertainty[t]\n costing.annual_op_main_cost = ((costing.cat_and_chem_cost + costing.electricity_cost + costing.other_var_cost + costing.total_fixed_op_cost) * (1 - costing.annual_op_reduction[t])) * costing.annual_op_uncertainty[t]\n costing.total_operating_cost = ((costing.total_fixed_op_cost + costing.cat_and_chem_cost + 
costing.electricity_cost + costing.other_var_cost) * (1 - costing.total_op_reduction[t])) * costing.total_op_uncertainty[t]\n\n\ndef get_ind_table(analysis_yr_cost_indices):\n '''\n Function to get costing indicies for WaterTAP3 model.\n\n :param analysis_yr_cost_indices: Year to get costing indices for.\n :type analysis_yr_cost_indices: int\n :return: Indicies DataFrame\n '''\n df = pd.read_csv('data/plant_cost_indices.csv')\n\n df1 = pd.DataFrame()\n for name in df.columns[1:]:\n a, b = get_linear_regression(list(df.Year), list(df[('%s' % name)]), name)\n new_list = []\n yr_list = []\n for yr in range(df.Year.max() + 1, last_year_for_cost_indicies + 1):\n new_list.append(a * yr + b)\n yr_list.append(yr)\n df1[name] = new_list\n df1['Year'] = yr_list\n df = pd.concat([df, df1], axis=0)\n\n new_cost_variables = ['Capital', 'CatChem', 'Labor', 'CPI']\n for variable in new_cost_variables:\n ind_name = '%s_Index' % variable\n fac_name = '%s_Factor' % variable\n df[fac_name] = (df[df.Year == analysis_yr_cost_indices][ind_name].max() / df[ind_name])\n df = df.set_index(df.Year)\n df = df.replace(1.0, 1.00000000001)\n\n return df\n\n\ndef get_system_specs(m_fs):\n '''\n Function to set costing parameters for WaterTAP3 model.\n\n\n '''\n m_fs.costing_param = Block()\n b = m_fs.costing_param\n\n b.electricity_price = Var(initialize=0.07,\n doc='Electricity cost [$/kWh]')\n b.maintenance_costs_percent_FCI = Var(initialize=0.07,\n doc='Maintenance/contingency cost as % FCI')\n b.salaries_percent_FCI = Var(initialize=0.07,\n doc='Salaries cost as % FCI')\n b.benefit_percent_of_salary = Var(initialize=0.07,\n doc='Benefits cost as % FCI')\n b.insurance_taxes_percent_FCI = Var(initialize=0.07,\n doc='Insurance/taxes cost as % FCI')\n b.lab_fees_percent_FCI = Var(initialize=0.07,\n doc='Lab cost as % FCI')\n b.land_cost_percent_FCI = Var(initialize=0.07,\n doc='Land cost as % FCI')\n b.plant_lifetime_yrs = Var(initialize=30,\n doc='Plant lifetime [years')\n b.plant_cap_utilization = Var(initialize=1,\n doc='Plant capacity utilization [%]')\n b.working_cap_percent_FCI = Var(initialize=0.008,\n doc='Working capital as % FCI')\n b.wacc = Var(initialize=0.05,\n doc='Weighted Average Cost of Capital (WACC)')\n b.contingency_cost_percent_FCI = Var(initialize=0,\n doc='Contingency costs as % FCI')\n b.component_replace_percent_FCI = Var(initialize=0,\n doc='Component replacement costs as % FCI')\n\n system_specs = SystemSpecs(m_fs.train)\n\n b.electricity_price.fix(system_specs.elec_price)\n b.salaries_percent_FCI.fix(system_specs.salaries_percent_FCI)\n b.land_cost_percent_FCI.fix(system_specs.land_cost_percent_FCI)\n b.maintenance_costs_percent_FCI.fix(system_specs.maintenance_costs_percent_FCI)\n b.lab_fees_percent_FCI.fix(system_specs.lab_fees_percent_FCI)\n b.insurance_taxes_percent_FCI.fix(system_specs.insurance_taxes_percent_FCI)\n b.plant_lifetime_yrs.fix(system_specs.plant_lifetime_yrs)\n\n b.benefit_percent_of_salary.fix(system_specs.benefit_percent_of_salary)\n b.working_cap_percent_FCI.fix(system_specs.working_cap_percent_FCI)\n b.plant_cap_utilization.fix(system_specs.plant_cap_utilization) # 1.0\n b.wacc.fix(system_specs.debt_interest_rate)\n b.contingency_cost_percent_FCI.fix(0)\n b.component_replace_percent_FCI.fix(0)\n\n b.analysis_yr_cost_indices = system_specs.analysis_yr_cost_indices\n b.location = system_specs.location\n\n b.tpec = 3.4\n b.tic = 1.65\n\n\ndef get_system_costing(m_fs):\n '''\n Function to aggregate unit model results for calculation of system costing for WaterTAP3 
model.\n\n '''\n if not hasattr(m_fs, 'costing'):\n m_fs.costing = Block()\n b = m_fs.costing\n time = m_fs.config.time\n t = time.first()\n sys_specs = m_fs.costing_param\n\n total_capital_investment_var_lst = []\n cat_and_chem_cost_lst = []\n electricity_cost_lst = []\n other_var_cost_lst = []\n total_fixed_op_cost_lst = []\n electricity_intensity_lst = []\n\n wacc = sys_specs.wacc\n\n # b.wacc = Var(initialize=sys_specs.wacc,\n # doc='Weighted average cost of capital (WACC)')\n #\n # b.wacc.fix(sys_specs.wacc)\n\n b.capital_recovery_factor = (wacc * (1 + wacc) ** sys_specs.plant_lifetime_yrs) / (\n ((1 + wacc) ** sys_specs.plant_lifetime_yrs) - 1)\n\n for b_unit in m_fs.component_objects(Block, descend_into=True):\n if hasattr(b_unit, 'costing'):\n total_capital_investment_var_lst.append(b_unit.costing.total_cap_investment)\n cat_and_chem_cost_lst.append(b_unit.costing.cat_and_chem_cost)\n electricity_cost_lst.append(b_unit.costing.electricity_cost)\n other_var_cost_lst.append(b_unit.costing.other_var_cost)\n total_fixed_op_cost_lst.append(b_unit.costing.total_fixed_op_cost)\n\n b.sys_tci_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System TCI reduction factor')\n\n b.sys_catchem_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System catalyst/chemical cost reduction factor')\n\n b.sys_elect_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System electricity cost reduction factor')\n\n b.sys_other_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System other cost reduction factor')\n\n b.sys_fixed_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System fixed O&M reduction factor')\n\n b.sys_total_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System total O&M reduction factor')\n\n b.sys_tci_reduction.fix(0)\n b.sys_catchem_reduction.fix(0)\n b.sys_elect_reduction.fix(0)\n b.sys_other_reduction.fix(0)\n b.sys_fixed_op_reduction.fix(0)\n b.sys_total_op_reduction.fix(0)\n\n b.sys_tci_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System TCI uncertainty factor')\n\n b.sys_catchem_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System catalyst/chemical cost uncertainty factor')\n\n b.sys_elect_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System electricity cost uncertainty factor')\n\n b.sys_other_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System other cost uncertainty factor')\n\n b.sys_fixed_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System fixed O&M uncertainty factor')\n\n b.sys_total_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System total O&M uncertainty factor')\n\n b.sys_tci_uncertainty.fix(1)\n b.sys_catchem_uncertainty.fix(1)\n b.sys_elect_uncertainty.fix(1)\n b.sys_other_uncertainty.fix(1)\n b.sys_fixed_op_uncertainty.fix(1)\n b.sys_total_op_uncertainty.fix(1)\n\n b.cat_and_chem_cost_annual = Expression(expr=(sum(cat_and_chem_cost_lst) * (1 - b.sys_catchem_reduction[t])) * b.sys_catchem_uncertainty[t])\n b.electricity_cost_annual = Expression(expr=(sum(electricity_cost_lst) * (1 - b.sys_elect_reduction[t])) * b.sys_elect_uncertainty[t])\n b.other_var_cost_annual = Expression(expr=(sum(other_var_cost_lst) * (1 - b.sys_other_reduction[t])) * b.sys_other_uncertainty[t])\n b.fixed_op_cost_annual = Expression(expr=(sum(total_fixed_op_cost_lst) * (1 - 
b.sys_fixed_op_reduction[t])) * b.sys_fixed_op_uncertainty[t])\n b.operating_cost_annual = Expression(expr=(b.fixed_op_cost_annual + b.cat_and_chem_cost_annual + b.electricity_cost_annual + b.other_var_cost_annual))\n #\n b.capital_investment_total = Expression(expr=(sum(total_capital_investment_var_lst) * (1 - b.sys_tci_reduction[t])) * b.sys_tci_uncertainty[t])\n b.cat_and_chem_cost_total = Expression(expr=b.cat_and_chem_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.electricity_cost_total = Expression(expr=b.electricity_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.other_var_cost_total = Expression(expr=b.other_var_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.fixed_op_cost_total = Expression(expr=b.fixed_op_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.operating_cost_total = Expression(expr=((b.fixed_op_cost_total + b.cat_and_chem_cost_total + b.electricity_cost_total + b.other_var_cost_total) * (1 - b.sys_total_op_reduction[t])) * b.sys_total_op_uncertainty[t])\n\n\n\n\n recovered_water_flow = 0\n wastewater_list = []\n\n time = m_fs.config.time.first()\n\n for b_unit in m_fs.component_objects(Block, descend_into=False):\n if hasattr(b_unit, 'outlet'):\n if len(getattr(b_unit, 'outlet').arcs()) == 0:\n if hasattr(b_unit.parent_block(), 'pfd_dict'):\n if b_unit.parent_block().pfd_dict[str(b_unit)[3:]]['Type'] == 'use':\n recovered_water_flow = recovered_water_flow + b_unit.flow_vol_out[time]\n else:\n if 'reverse_osmosis' in str(b_unit):\n recovered_water_flow = recovered_water_flow + b_unit.flow_vol_out[time]\n if 'cooling_tower' in str(b_unit):\n recovered_water_flow = recovered_water_flow + b_unit.flow_vol_out[time]\n\n b.treated_water = recovered_water_flow\n\n b.sum_of_inflow = sum_of_inflow = 0\n for key in b.parent_block().flow_in_dict.keys():\n sum_of_inflow += getattr(m_fs, key).flow_vol_in[time]\n\n b.system_recovery = b.treated_water / sum_of_inflow\n\n # LCOW for each unit\n for b_unit in m_fs.component_objects(Block, descend_into=True):\n if hasattr(b_unit, 'costing'):\n setattr(b_unit, 'LCOW', Expression(\n expr=1E6 * (b_unit.costing.total_cap_investment * b.capital_recovery_factor + b_unit.costing.annual_op_main_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_TCI', Expression(\n expr=1E6 * (b_unit.costing.total_cap_investment * b.capital_recovery_factor) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit TCI Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_elec', Expression(\n expr=1E6 * (b_unit.costing.electricity_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Electricity Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_fixed_op', Expression(\n expr=1E6 * (b_unit.costing.total_fixed_op_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Fixed Operating Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_chem', Expression(\n expr=1E6 * (b_unit.costing.cat_and_chem_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Chemical Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_other', Expression(\n expr=1E6 * (b_unit.costing.other_var_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Other O&M Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_total_op', Expression(\n expr=1E6 * 
(b_unit.costing.total_operating_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Total Operating Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'elec_int_treated', Expression(\n expr=(b_unit.costing.electricity_cost * 1E6 / sys_specs.electricity_price) /\n (b.treated_water * 3600 * 24 * 365),\n doc='Unit Electricity Intensity [kWh/m3]'))\n\n # LCOW by cost category\n b.LCOW_TCI = Expression(expr=1E6 * (b.capital_investment_total * b.capital_recovery_factor) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_elec = Expression(expr=1E6 * (b.electricity_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_fixed_op = Expression(expr=1E6 * (b.fixed_op_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_chem = Expression(expr=1E6 * (b.cat_and_chem_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_other_onm = Expression(expr=1E6 * (b.other_var_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_total_op = Expression(expr=1E6 * (b.operating_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n ## GET TOTAL ELECTRICITY CONSUMPTION IN kwh/m3 of treated water\n b.electricity_intensity = Expression(\n expr=(b.electricity_cost_annual * 1E6 / sys_specs.electricity_price) /\n (b.treated_water * 3600 * 24 * 365),\n doc='Electricity Intensity [kWh/m3]')\n\n b.LCOW = Expression(\n expr=1E6 * (b.capital_investment_total * b.capital_recovery_factor + b.operating_cost_annual) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Levelized Cost of Water [$/m3]')\n\n b.LCOW_inflow = Expression(\n expr=1E6 * (b.capital_investment_total * b.capital_recovery_factor + b.operating_cost_annual) /\n (sum_of_inflow * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Levelized Cost of Water by influent flow [$/m3]')\n\n b.elec_frac_LCOW = Expression(\n expr=((1E6 * (b.electricity_cost_annual) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))) / b.LCOW,\n doc='Electricity cost as fraction of LCOW')\n\n\ndef global_costing_parameters(self, year=None):\n if year is None:\n year = '2018'\n ce_index_dic = {\n '2019': 680,\n '2018': 671.1,\n '2017': 567.5,\n '2016': 541.7,\n '2015': 556.8,\n '2014': 576.1,\n '2013': 567.3,\n '2012': 584.6,\n '2011': 585.7,\n '2010': 550.8\n }\n\n self.CE_index = Param(mutable=True, initialize=ce_index_dic[year],\n doc='Chemical Engineering Plant Cost Index $ year')"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
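The WaterTAP3 costing record above rolls annualized capital and O&M into a levelized cost of water using a capital recovery factor, CRF = wacc*(1+wacc)^n / ((1+wacc)^n - 1), and divides by the treated-water volume with a 1E6 scaling because costs are carried in $MM. A minimal plain-Python sketch of that arithmetic (outside Pyomo) is given below as a cross-check; the function names and the example numbers are illustrative assumptions, not part of the WaterTAP3 API.

# Minimal sketch of the cost roll-up used in the record above (plain Python, not Pyomo).
# Names and numbers are illustrative assumptions, not WaterTAP3 API.
def capital_recovery_factor(wacc: float, lifetime_yrs: float) -> float:
    """CRF = wacc*(1+wacc)^n / ((1+wacc)^n - 1)."""
    growth = (1.0 + wacc) ** lifetime_yrs
    return wacc * growth / (growth - 1.0)

def lcow(total_capital_mm: float, annual_op_mm: float, flow_m3_per_s: float,
         wacc: float = 0.05, lifetime_yrs: float = 30.0, utilization: float = 1.0) -> float:
    """Levelized cost of water [$/m3], mirroring the 1E6 * (TCI*CRF + annual O&M) / volume form."""
    crf = capital_recovery_factor(wacc, lifetime_yrs)
    annual_volume_m3 = flow_m3_per_s * 3600 * 24 * 365 * utilization
    return 1e6 * (total_capital_mm * crf + annual_op_mm) / annual_volume_m3

# Example (hypothetical plant): $120MM capital, $8MM/yr O&M, 0.5 m3/s treated water -> ~1.00 $/m3.
print(round(lcow(120.0, 8.0, 0.5), 3))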
vgaurav3011/100-Days-of-ML | [
"ec302b03fd492c459cff2592b3a4f5e38f9c9d72"
] | [
"Day 47/classifiers/neural_net.py"
] | [
"from __future__ import print_function\n\nfrom builtins import range\nfrom builtins import object\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom past.builtins import xrange\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network. The net has an input dimension of\n N, a hidden layer dimension of H, and performs classification over C classes.\n We train the network with a softmax loss function and L2 regularization on the\n weight matrices. The network uses a ReLU nonlinearity after the first fully\n connected layer.\n\n In other words, the network has the following architecture:\n\n input - fully connected layer - ReLU - fully connected layer - softmax\n\n The outputs of the second fully-connected layer are the scores for each class.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n \"\"\"\n Initialize the model. Weights are initialized to small random values and\n biases are initialized to zero. Weights and biases are stored in the\n variable self.params, which is a dictionary with the following keys:\n\n W1: First layer weights; has shape (D, H)\n b1: First layer biases; has shape (H,)\n W2: Second layer weights; has shape (H, C)\n b2: Second layer biases; has shape (C,)\n\n Inputs:\n - input_size: The dimension D of the input data.\n - hidden_size: The number of neurons H in the hidden layer.\n - output_size: The number of classes C.\n \"\"\"\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)\n\n def loss(self, X, y=None, reg=0.0):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural\n network.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\n the score for class c on input X[i].\n\n If y is not None, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function; has the same keys as self.params.\n \"\"\"\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n fully_connected1 = X.dot(W1) + b1\n X2 = np.maximum(0, fully_connected1)\n scores = X2.dot(W2) + b2\n pass\n \n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. 
This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. #\n #############################################################################\n scores -= np.max(scores, axis=1, keepdims=True)\n scores_exp = np.exp(scores)\n softmax_classify = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)\n loss = np.sum(-np.log(softmax_classify[np.arange(N), y]))\n loss /= N\n loss += reg * (np.sum(W2 * W2) + np.sum( W1 * W1 ))\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n softmax_classify[np.arange(N), y] -= 1\n softmax_classify /= N\n \n dW = X2.T.dot(softmax_classify)\n db2 = softmax_classify.sum(axis=0)\n # W1 gradient\n dW1 = softmax_matrix.dot(W2.T) # [NxC] * [CxH] = [NxH]\n dfc1 = dW1 * (fc1>0) # [NxH] . [NxH] = [NxH]\n dW1 = X.T.dot(dfc1) # [DxN] * [NxH] = [DxH]\n\n # b1 gradient\n db1 = dfc1.sum(axis=0)\n\n # regularization gradient\n dW1 += reg * 2 * W1\n dW2 += reg * 2 * W2\n\n grads = {'W1':dW1, 'b1':db1, 'W2':dW2, 'b2':db2}\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, grads\n\n def train(self, X, y, X_val, y_val,\n learning_rate=1e-3, learning_rate_decay=0.95,\n reg=5e-6, num_iters=100,\n batch_size=200, verbose=False):\n \"\"\"\n Train this neural network using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving training data.\n - y: A numpy array f shape (N,) giving training labels; y[i] = c means that\n X[i] has label c, where 0 <= c < C.\n - X_val: A numpy array of shape (N_val, D) giving validation data.\n - y_val: A numpy array of shape (N_val,) giving validation labels.\n - learning_rate: Scalar giving learning rate for optimization.\n - learning_rate_decay: Scalar giving factor used to decay the learning rate\n after each epoch.\n - reg: Scalar giving regularization strength.\n - num_iters: Number of steps to take when optimizing.\n - batch_size: Number of training examples to use per step.\n - verbose: boolean; if true print progress during optimization.\n \"\"\"\n num_train = X.shape[0]\n iterations_per_epoch = max(num_train / batch_size, 1)\n\n # Use SGD to optimize the parameters in self.model\n loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n for it in range(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: Create a random minibatch of training data and labels, storing #\n # them in X_batch and y_batch respectively. 
#\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n batch_indices = np.random.choice(num_train, batch_size)\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Compute loss and gradients using the current minibatch\n loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\n loss_history.append(loss)\n\n #########################################################################\n # TODO: Use the gradients in the grads dictionary to update the #\n # parameters of the network (stored in the dictionary self.params) #\n # using stochastic gradient descent. You'll need to use the gradients #\n # stored in the grads dictionary defined above. #\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n for key in self.params:\n self.params[key] -= learning_rate * grads[key]\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n # Every epoch, check train and val accuracy and decay learning rate.\n if it % iterations_per_epoch == 0:\n # Check accuracy\n train_acc = (self.predict(X_batch) == y_batch).mean()\n val_acc = (self.predict(X_val) == y_val).mean()\n train_acc_history.append(train_acc)\n val_acc_history.append(val_acc)\n\n # Decay learning rate\n learning_rate *= learning_rate_decay\n\n return {\n 'loss_history': loss_history,\n 'train_acc_history': train_acc_history,\n 'val_acc_history': val_acc_history,\n }\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this two-layer network to predict labels for\n data points. For each data point we predict scores for each of the C\n classes, and assign each data point to the class with the highest score.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving N D-dimensional data points to\n classify.\n\n Returns:\n - y_pred: A numpy array of shape (N,) giving predicted labels for each of\n the elements of X. For all i, y_pred[i] = c means that X[i] is predicted\n to have class c, where 0 <= c < C.\n \"\"\"\n y_pred = None\n\n ###########################################################################\n # TODO: Implement this function; it should be VERY simple! #\n ###########################################################################\n y_pred = np.argmax( self.loss(X), axis=1)\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return y_pred\n"
] | [
[
"numpy.maximum",
"numpy.random.choice",
"numpy.arange",
"numpy.max",
"numpy.random.randn",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
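The two-layer-network record above leaves its backward pass referring to names that never appear in the snippet (`softmax_matrix`, `fc1`, and a `dW` later consumed as `dW2`), so the gradient code as recorded would raise a NameError. Below is a self-contained sketch of the same softmax + ReLU backward pass with consistently named intermediates; the toy shapes and data are assumptions for illustration, not part of the original assignment file.

import numpy as np

# Minimal forward/backward pass for the input -> ReLU hidden -> softmax net
# described in the record above, with consistently named intermediates.
def two_layer_grads(X, y, W1, b1, W2, b2, reg=0.0):
    N = X.shape[0]
    fully_connected1 = X.dot(W1) + b1              # (N, H) pre-activation
    X2 = np.maximum(0, fully_connected1)           # ReLU hidden activations
    scores = X2.dot(W2) + b2                       # (N, C) class scores
    scores -= np.max(scores, axis=1, keepdims=True)
    probs = np.exp(scores) / np.sum(np.exp(scores), axis=1, keepdims=True)
    loss = np.sum(-np.log(probs[np.arange(N), y])) / N
    loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))

    dscores = probs.copy()
    dscores[np.arange(N), y] -= 1
    dscores /= N                                   # dLoss/dScores for softmax cross-entropy
    dW2 = X2.T.dot(dscores) + 2 * reg * W2
    db2 = dscores.sum(axis=0)
    dhidden = dscores.dot(W2.T) * (fully_connected1 > 0)   # backprop through ReLU gate
    dW1 = X.T.dot(dhidden) + 2 * reg * W1
    db1 = dhidden.sum(axis=0)
    return loss, {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2}

# Toy check (assumed shapes): 5 samples, 4 features, 10 hidden units, 3 classes.
rng = np.random.default_rng(0)
X = rng.standard_normal((5, 4))
y = rng.integers(0, 3, 5)
params = [1e-1 * rng.standard_normal(s) for s in [(4, 10), (10,), (10, 3), (3,)]]
loss, grads = two_layer_grads(X, y, *params, reg=0.05)
print(loss, {k: v.shape for k, v in grads.items()})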
jsa4000/OpenGL-Python | [
"62055ba0c16f54507b7ba709d6691b2e9c7bc152"
] | [
"scripts/tests/test_opengl.py"
] | [
"import os\nimport math\nimport ctypes\nimport numpy as np\nimport pandas as pd\nimport OpenGL.GL as GL\nimport OpenGL.GL.shaders\nimport pygame\nfrom PIL import Image\nfrom pyrr import Quaternion, matrix44, Matrix44, Vector3\n\n\n# https://github.com/adamlwgriffiths/Pyrr/tree/master/pyrr\n# https://www.opengl.org/discussion_boards/showthread.php/199031-How-to-sort-draw-and-shader-calls-for-multiple-models\n# https://www.khronos.org/opengl/wiki/Vertex_Specification_Best_Practices\n\n# Great useful resource to learn OpenGL and all the concepts needed for understanding\n# ligths, materials, shaders, transformations, etc..\n# URL: https://learnopengl.com/, https://open.gl/drawing\n# https://codereview.stackexchange.com/questions/92769/managing-shaders-in-opengl-a-shader-class\n# https://www.packtpub.com/books/content/tips-and-tricks-getting-started-opengl-and-glsl-40\n# http://dominium.maksw.com/articles/physically-based-rendering-pbr/pbr-part-one/ \n# http://www.opengl-tutorial.org/beginners-tutorials/tutorial-8-basic-shading/\n# https://www.tomdalling.com/blog/modern-opengl/08-even-more-lighting-directional-lights-spotlights-multiple-lights/\n\n\"\"\"\n\nhttps://gamedev.stackexchange.com/questions/92832/in-opengl-whats-quicker-lots-of-smaller-vaos-or-one-large-one-updated-each-fr\nhttps://www.opengl.org/discussion_boards/showthread.php/197893-View-and-Perspective-matrices\nhttps://www.gamedev.net/topic/609159-would-like-help-with-glulookat-and-python-code/\n\n\nhttp://pyopengl.sourceforge.net/documentation/manual-3.0/gluLookAt.html\nhttp://stackoverflow.com/questions/3380100/how-do-i-use-glulookat-properly\nhttp://stackoverflow.com/questions/15006905/how-do-i-modify-the-perspective-of-an-opengl-using-glulookat\nhttp://stackoverflow.com/questions/26949617/pyopengl-glulookat-behaviour\nhttp://stackoverflow.com/questions/19078620/rotate-cube-to-look-at-mouse-from-python-in-opengl\n\"\"\"\n\ndef Rectangle3D():\n vertices = [\n -0.5, -0.5, 0.0, 1.0,\n 0.5, -0.5, 0.0, 1.0,\n 0.5, 0.5, 0.0, 1.0,\n -0.5, 0.5, 0.0, 1.0\n ]\n indices = [\n 0, 1, 2, \n 2, 3, 0\n ]\n return (np.asarray(vertices, dtype=np.float32),np.asarray(indices, dtype=np.uint32))\n\ndef cube3D(origin = [0.0,0.0,0.0], transform = None):\n \"\"\"\n This function will return a cube using normalized units in\n worl space. You can transform the oject by performing a\n transformation later.\n Also the position of he object by default will be the origin\n of the scene, in this case [0,0,0]\n In general the position will be defined in 4D position, since the\n transformation matrix need to be in 4-dimension to allow the trans-\n lation too. The fourth value will be always 1.0.\n \"\"\"\n # In order to create a cube or any other 3D geoemtry it's needed\n # to store all the information previousl in a buffer. This buffer\n # will be created and managed by opengl so at the end will be used\n # to represent the content of the buffer into the scene. \n # I addition to this it's needed to create Shaders (vertex and\n # fragment) so the graphics card can use to know what it's needed\n # prior to represent the data.\n # At the end, we are just defining attributes. But in this \n # particular case the first attribute that will be defined it's the\n # position. After the position we can define: vertex color, pscale,\n # normals, etc...\n # A cube will have a total of 8 vertices\n # \n # 2 3\n # 1 0\n #\n vertices = [\n # First we represent the vertices on the bottom y = -1\n -0.5, -0.5, 0.5, 1.0, # right, bottom, back vertex. 
(0)\n 0.5, -0.5, 0.5, 1.0, # left, bottom, back vertex. (1)\n 0.5, 0.5, 0.5, 1.0, # left, bottom, ack vertex. (2)\n -0.5, 0.5, 0.5, 1.0, # right, bottom, back vertex. (3) \n # The same vertex positions but on the top of the cube Y= 1\n -0.5, -0.5, -0.5, 1.0, # left, bottom, front vertex. (4)\n 0.5, -0.5, -0.5, 1.0, # right, bottom, front vertex. (5)\n 0.5, 0.5, -0.5, 1.0, # right, bottom, front vertex. (6)\n -0.5, 0.5, -0.5, 1.0 # left, bottom, front vertex. (7)\n ]\n #Conver the array into a numpy array (copy vs reference)\n nvertices = np.asarray(vertices, dtype=np.float32)\n # Defne the elements, in opengl it's needed to define triangles.\n # For each triangle we need to use 3 points or three vertices.\n # In this case we are going to define the indices, that corresponds\n # with the indexes of the vertices in the previos array. \n # A cube has a total of 6 faces: 4 for the sides + top + bottom.\n # However, we have to define triangles, so each face will be divided\n # by two. At the end we need 6 * 2 = 12 triangles in total\n # The trianglulation will be made in clockwise way. This is important\n # to know where the faces will be facing for shading them (normals).\n indices = [\n 0, 1, 2, 2, 3, 0, # Bottom face\n 4, 5, 6, 6, 7, 4, # Front face\n 4, 5, 1, 1, 0, 4, # left side\n 6, 7, 3, 3, 2, 6, # back face\n 5, 6, 2, 2, 1, 5, # Right Side\n 7, 4, 0, 0, 3, 7 # Top face\n ]\n #Conver the array into a numpy array (copy vs reference)\n nindices = np.asarray(indices, dtype=np.uint32)\n # The vertices are not repeated. You can have the vertices repeated if\n # you need different attrbiutes for them, like the normals, This will\n # be used to shade the elements in different ways. In some programs\n # This is called vertex normals. An it's used to crease or decrease\n # the weight for the transition between face to face. It's like define\n # smooth areas between the hard-surface areas.\n \n # It will return a tuple with the vertices and indices.\n #return (nvertices,nindices)\n return (vertices,indices)\n\ndef empty(value):\n \"\"\"\n Ths function will return is some list or variable is empty.\n For list, dict or any other collection will check there is \n more that one element. For other variables the condition\n will check if the object is None.\n \"\"\" \n if isinstance(value, (list, dict, np.ndarray, tuple, set)):\n if len(value) > 0:\n return False\n else:\n if value is not None:\n return False\n return True\n\ndef typeGL(dtype):\n \"\"\"\n This function will convert the types supported by OpenGL from\n numpy types. 
\n If dtype is not founded into the GLtypes the function will\n return GL.GL_FLOAT as default Open GL type\n \"\"\"\n # Check for some posibilities with the input, np.int32, 'int32','np.int32'\n if isinstance(dtype, (np.dtype)):\n dtype = dtype.name\n elif not isinstance(dtype, (str)):\n dtype = dtype.__name__\n # get the second part in case it can be splitted\n if len(dtype.split(\".\")) > 1:\n dtype = dtype.split(\".\")[-1]\n #Check the type of data has to be converted\n datatypes = {\n \"int8\" : GL.GL_BYTE, \t\t\t\n \"uint8\" : GL.GL_UNSIGNED_BYTE,\t\n\t \"int16\" : GL.GL_SHORT,\t\t\t\n\t \"uint16\" : GL.GL_UNSIGNED_SHORT,\t\n\t \"int32\" : GL.GL_INT,\t\t\t\t\n\t \"uint32\" : GL.GL_UNSIGNED_INT,\t\t\n\t \"float16\" : GL.GL_HALF_FLOAT,\t\t\n\t \"float32\" : GL.GL_FLOAT,\t\t\t\n\t \"float64\" : GL.GL_DOUBLE,\n \"fixed\" : GL.GL_FIXED # More compatibility for OS (float32)\n }\t\n # Check if the current datatype exists\n if dtype in datatypes:\t\t\n return datatypes[dtype]\n # if the data type does't exit returns default GL type\n return datatypes[np.float32]\n\ndef isfile(filename):\n \"\"\"\n Check if file exists\n \"\"\"\n if os.path.isfile(filename):\n return True\n return False\n\ndef readfile(filename):\n \"\"\"\n Read the current file entirely and return a \n string variable with all the content with\n special characters like new_line, etc..\n \"\"\"\n result = None\n if isfile(filename):\n with open(filename,'r') as file:\n result = file.read()\n return result\n\n\"\"\"\n For my OpenGL I will need the following classes or objects.\n\n [DONE] Display: window that manage the 3D view and Input Events \n fom the user. Also this class will be the one that\n implement the main loop for all the render.\n \n Shader: This class will be enable the creation of shaders\n programs that will be added to the main shader program\n that will be used.\n We can create Vertex, Fragment or Geoemtry shaders. These\n will be inked and use every time we want to render the\n geometry. \n Geometry: The class will be the main container for storing\n vertices, indices (element), vertex colors, normals and\n other attributes. Also the geometry will manage the uvs\n attributes and the materials that will be used for this\n particular geoemtry.\n - Vertices/Points\n - Indices (Faces)\n - Attributes (list with the Attrbiutes)\n Default attributes like Cd, N, P Uv could be \n created automatically for each object since they\n are used by default in all the 3D applications.\n\n Material: Each geoemtry obejct could have more that one material.\n In this case we have to decide if we are going to use\n different shaders or only one for the entire geometry.\n Camera: This class will allow the creation of different cameras\n to swtich indide the progrm. The camera will configure\n the View and Projection matrix.\n\n Light: Every scene have a light to lit the objects. 
These\n lights will be passed to the shaders to the objects\n would be shaded accordingly to these lights.\n \n There are several types of lights:\n Directional lights, Aerial lights, Spot lights,\n Ambient lights, Point lights.\n \n Also there are another indirect light that will be computed\n in real-time or render time that will depend on the environment.\n Ths light will be specular lights or bouncing lights.\n\n Finally, effects like Fresnel, bump, dissplacement, sub-surface\n scattering, reflection, refraction, translucency, layers, etc..\n are a cmobination between Materials and lights\n\n Volumes (VDB): The type of geoemtry is different from the way\n that polygons are created. This type of geometry\n requires additional manipulation and pipelone.\n\n Particles/Instances: This is used to represent millions of \n GEoemtry that will be packed into points. So the \n vertices, and indices will be instances.\n\n Sprites: Sprites is used for 2D and 3D. The idea is sprites\n will alway be facing to the camera. So there is no\n distorsion or perspective transformation that affect\n to this objects.\n \n Image: Image class will be used to create Interface controls,\n dashboard elements, etc.. \n \n\n\"\"\"\n\n\"\"\"\n The new pipeline used for OpenGL is that all operations, transformations,\n etc.. will be performed in the GPU. In order to do this these operations\n must be implemented into the shaders programs instead, so the GPU will be\n able to compile those shaders and execute them in Parallel.\n\n OpenGL works using states, so for eac state we configure the buffers, arrays,\n shaders, etc.. and finally draw. We perform the sema operation for all the \n geoemtry we have. Since the geometry could have different configuration \n and attributes, and shaders we need to operate with them separately.\n\n When the entire scene is complete, and all the geoemtry all correctly renderer\n it's time to flip the buffers to start the next frame.\n\n\n\"\"\"\n\nclass DisplayMode:\n fullscreen = pygame.FULLSCREEN\t# window is fullscreen\n resizable = pygame.RESIZABLE # window is resizeable\n noframe = pygame.NOFRAME\t# window has no border or controls\n doublebuf = pygame.DOUBLEBUF\t# use double buffer - recommended for HWSURFACE or OPENGL\n hwaccel = pygame.HWSURFACE # window is hardware accelerated, only possible in combination with FULLSCREEN\n opengl = pygame.OPENGL # window is renderable by OpenGL\n\nclass Display:\n \"\"\"\n This Class will manager the Display to interact with\n openGL. It will use OpenGL and a double buffer so\n it can sweep between the buffers per frame.\n\n Also the display is going to manage the interaction\n with the user regarding the events, mouse buttons and\n keypress done.\n \"\"\"\n \n # Default Display Mode that will be used when crating the window\n # Open GL and Double Buffer are neccesary to display OpenGL\n defaultmode = DisplayMode.opengl|DisplayMode.doublebuf\n\n # Default Background Color\n defaulBGColor = [0.0, 0.0, 0.0, 1.0]\n\n def __init__(self, title, width=800, height=600, bpp=16, displaymode = DisplayMode.resizable):\n # Initialize all the variables\n self.title = title\n self.width = width\n self.height = height\n self.bpp = bpp # RGBA 8*8*8*8 = 32 bits per pixel\n self.displaymode = displaymode\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the variables and Memory\n self._dispose()\n\n def _dispose(self):\n try:\n #Finalize pygame\n pygame.quit()\n # SEt is closed to true\n self.isClosed = True\n except:\n print(\"ERROR: Error disposing the display.\")\n\n def _initialize(self):\n # dispose and close all the windows prior to initialize\n self._dispose()\n # Initialize and open the display window\n try:\n # Initialize pygame\n pygame.init()\n # Set title bar caption\n pygame.display.set_caption(self.title)\n # Initialize the display\n screen = pygame.display.set_mode((self.width, self.height), \n Display.defaultmode|self.displaymode,\n self.bpp)\n # Enable Depth test to avoid overlaped areas\n GL.glEnable(GL.GL_DEPTH_TEST)\n # Clear the image\n self.clear()\n # Set isclosed to false\n self.isClosed = False\n except:\n print(\"ERROR: Error creating the display.\")\n \n def close(self):\n # Set close to true\n self.isClosed = True\n\n def clear(self, color = defaulBGColor):\n # Clear will clean the windows color.\n GL.glClearColor(*color)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n def update(self):\n # With depth buffer flip is the way to update screen\n pygame.display.flip()\n # Check to close the window after update the window\n if self.isClosed:\n self._dispose()\n\nclass DrawMode:\n triangles = GL.GL_TRIANGLES\t\n points = GL.GL_POINTS\n lines = GL.GL_LINES \n quads = GL.GL_QUADS\n tfan = GL.GL_TRIANGLE_FAN\n lstrip = GL.GL_LINE_STRIP\n tstrip = GL.GL_TRIANGLE_STRIP\n\nclass UsageMode:\n stream_draw = GL.GL_STREAM_DRAW\n stream_read = GL.GL_STREAM_READ\n stream_copy = GL.GL_STREAM_COPY\n static_draw = GL.GL_STATIC_DRAW\n static_read = GL.GL_STATIC_READ\n static_copy = GL.GL_STATIC_COPY\n dynamic_draw = GL.GL_DYNAMIC_DRAW\n dynamic_read = GL.GL_DYNAMIC_READ\n dynamic_copy = GL.GL_DYNAMIC_COPY \n\nclass Geometry:\n \"\"\"\n This element will create and store all the elements needed\n to Render a Geometrt\n \"\"\"\n\n # Declare the subindex that will be used for multiple (vector) attribites\n index_cols = [\"x\",\"y\",\"z\",\"w\"]\n #Defaule type that will be used for indexing using OpenGL elements array buffer\n index_type = np.uint32\n\n def __init__(self, name=None, shader=None, mode=DrawMode.triangles, usage=UsageMode.static_draw):\n # Initialize all the variables\n self.name = name\n self.shader = shader\n self.mode = mode\n self.usage = usage\n # Create new properties\n self.transform = Transform()\n # Attributes dictionary to store the columns for each component\n self._pointAttribCols = {}\n self._primAttribCols = {}\n # Point Attributes and elements Data frames\n self._dfPoints = pd.DataFrame()\n self._dfPrims = pd.DataFrame()\n # Vertex Array Object for all the Attributtes, elements, etc.\n self._VAO = None\n # Vertex Arrays Buffers for all the Attributes\n self._VAB = {}\n # Element Array Buffers for all the Attrbiutes\n self._EAB = None\n\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n # Dispose all the objects and memory allocated\n GL.glDeleteVertexArrays(1,self._VAO)\n\n def _has_indices(self):\n if \"Id\" in self._primAttribCols:\n return True\n return False\n\n def _create_vertex_buffer_array(self, name, attribute_name = None):\n \"\"\"\n This function only make sense to do when working with\n points (vertex) attributes.S\n The function will return the bind attribute attached\n to the shader. This could be stored into a list to \n detach later when copy all the buffers and after unbind\n VAO object.\n \"\"\"\n # Check if not attribute name has been mapped for the bidinng\n if attribute_name is None:\n attribute_name = name\n # Get the current vertices (flatten is not needed)\n vertices = self._dfPoints[self._pointAttribCols[name]].values\n # Create the vertex array buffer and send the positions into the GPU buffers\n self._VAB[name] = GL.glGenBuffers(1)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._VAB[name] )\n GL.glBufferData(GL.GL_ARRAY_BUFFER, vertices.nbytes, vertices, self.usage)\n\n # Bind Attribute to the current shader. \n return self.shader.bind(attribute_name, len(vertices[0]), vertices.dtype)\n\n def _copy_to_buffer(self):\n # Bind the shaders attributes for the current geometry\n if self.shader is None:\n print(\"ERROR: No shader specified\")\n \n # Create a list with the attributes created and binded\n shader_attributes = []\n\n # Create a new VAO (Vertex Array Object). Only (1) VAO.\n # Note. Using bpp > 16bits doesn't work. This depend on the Graphic Card.\n self._VAO = GL.glGenVertexArrays(1)\n # Every time we want to use VAO we just have to bind it\n GL.glBindVertexArray(self._VAO)\n\n # Create the first attribute \"position\" (location = 0) (Mandatory)\n shader_attributes.append(self._create_vertex_buffer_array(\"P\",\"position\"))\n \n # Check wether the geometry has indexes\n if self._has_indices():\n # Get the current indices (flatten)\n indices = self._dfPrims[self._primAttribCols[\"Id\"]].values\n # Create the element array buffer and send the positions into the GPU buffers\n self._EAB = GL.glGenBuffers(1)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self._EAB);\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, self.usage);\n \n # Create and bind other Attributes\n for attrib in self._pointAttribCols.keys():\n if attrib != \"P\":\n shader_attributes.append(self._create_vertex_buffer_array(attrib))\n\n # Unbind VAO from OpenGL. 
Set to None = 0\n GL.glBindVertexArray(0)\n # Remove and unbind buffers\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n # Unbind all the Attributes \"position\" + Additionals\n for attribute in shader_attributes:\n self.shader.unbind(attribute)\n\n def _initialize(self):\n pass\n \n def update(self):\n # Depenging on the method to update the vertices using GPU or \n # inmediate OpenGL the update will be different.\n self._copy_to_buffer()\n \n def _createAttribute(self, df, name, size=3, values=None, default=None, dtype=None):\n #Check the data type if any\n if dtype is None:\n if empty(values):\n # Assign a default value\n dtype = np.float32\n else:\n # Get the type from the values\n if not isinstance(values,(np.ndarray)):\n # If not numpy then get the numppy array \n values = np.array(values)\n #Finally get the type from the numpy array\n dtype = values.dtype \n # Check any values or default values has been provided\n if empty(values) and empty(default):\n if df.empty:\n # If nothing to add exit the function\n return None\n else:\n # Create a default value (float)\n default = np.zeros((size), dtype=dtype)\n # Check the index value depending on the size\n if size > 1:\n columns = [name + Geometry.index_cols[i] for i in range(size)]\n else:\n columns = [name]\n # Check if values has been already defined\n if (empty(values) and not df.empty):\n # create an array with the same number of rows as the current\n values = np.tile(default,(len(df.index)))\n # Reshape the values [ Maybe should be normalized and flatten]\n values = np.array(np.reshape(values, (-1, size)) ,dtype=dtype)\n # Check if the DataFrame is empty\n if df.empty:\n # Add the current data into the attributes frame\n df = pd.DataFrame(values, columns=columns)\n else:\n # Add the current data into the attributes frame\n dfvalues = pd.DataFrame(values, columns=columns)\n # Append both dataframes\n df = pd.merge(df, dfvalues, how='inner', left_index=True, right_index=True)\n # Set the columns into the the current Point attribute\n return (df, columns)\n\n def getPrimsAttrib(self, name):\n return self._dfPrims[self._primAttribCols[name]]\n\n def delPrimsAttrib(self, name):\n self._dfPrims.drop(self._primAttribCols[name], axis=1, inplace=True)\n\n def addPrimsAttrib(self, name, values=None, size=3, default=None, dtype=None):\n # Get the new attribute and dataframe\n result = self._createAttribute(self._dfPrims,name,size,values,default,dtype)\n if not empty(result):\n # Set the returned dataframe with the new attribute\n self._dfPrims = result[0]\n # Set the columns into the the current Point attribute\n self._primAttribCols[name] = result[1]\n\n def addIndices(self, values, size=3, dtype=np.uint32):\n #Add prims Attributes Elements\n self.addPrimsAttrib(\"Id\", values, size, dtype=dtype)\n \n def getPointAttrib(self, name):\n return self._dfPoints[self._pointAttribCols[name]]\n\n def delPointAttrib(self, name):\n self._dfPoints.drop(self._pointAttribCols[name], axis=1, inplace=True)\n\n def addPointAttrib(self, name, values=None, size=3, default=None, dtype=None):\n # Get the new attribute and dataframe\n result = self._createAttribute(self._dfPoints,name,size,values,default,dtype)\n if not empty(result):\n # Set the returned dataframe with the new attribute\n self._dfPoints = result[0]\n # Set the columns into the the current Point attribute\n self._pointAttribCols[name] = result[1]\n\n def addPoints(self, values, size=3, dtype=np.float32):\n #Add point Attributes Position\n self.addPointAttrib(\"P\", values, size, dtype)\n\n def 
addNormals(self, values, size=3, dtype=np.float32):\n #Add point Attributes Normals\n self.addPointAttrib(\"N\", values, size, dtype)\n\n def render(self):\n # Bind the created Vertex Array Object\n GL.glBindVertexArray(self._VAO)\n # Draw the current geoemtry. Check if indices have been added\n if self._has_indices():\n GL.glDrawElements(self.mode, len(self._dfPrims.index) * 3, \n typeGL(Geometry.index_type), ctypes.c_void_p(0))\n else:\n GL.glDrawArrays(self.mode, 0, len(self._dfPoints.index))\n # Unbind VAO from GPU\n GL.glBindVertexArray(0)\n \n# Shader typas allow and extension for the files to use\nShaderTypes = {\n \"VERTEX_SHADER\" : { \"id\":\"vs\", \"type\":GL.GL_VERTEX_SHADER }, \n \"FRAGMENT_SHADER\" : { \"id\":\"fs\", \"type\":GL.GL_FRAGMENT_SHADER },\n \"GEOMETRY_SHADER\" : { \"id\":\"gs\", \"type\":GL.GL_GEOMETRY_SHADER }\n }\n\n# Transforms types availabile in shader\nTransformTypes = {\n \"WORLD_MATRIX\" : { \"name\":\"world_matrix\", \"size\":16, \"dtype\":np.float32 }, \n \"VIEW_MATRIX\" : { \"name\":\"view_matrix\", \"size\":16, \"dtype\":np.float32 }, \n \"PROJECTION_MATRIX\" : { \"name\":\"projection_matrix\", \"size\":16, \"dtype\":np.float32 }\n }\n\n# Transforms types availabile in shader\nGeometryAttributeTypes = {\n \"TEXTURE_COORDINATES\" : { \"name\":\"v_textcoord\", \"size\":2, \"dtype\":np.float32 }, \n \"NORMAL\" : { \"name\":\"v_normal\", \"size\":3, \"dtype\":np.float32 }, \n \"POSITION\" : { \"name\":\"v_pos\", \"size\":3, \"dtype\":np.float32 }, \n \"COLOR\" : { \"name\":\"v_color\", \"size\":4, \"dtype\":np.float32 }\n }\n\nclass Shader:\n \"\"\"\n This element will create and store all the elements needed\n to create a shader.\n \"\"\"\n\n # Shaders types\n shader_types = ShaderTypes\n\n # These are the default transforms that will be used\n uniform_transforms = TransformTypes\n \n def __init__(self, name=None, filepath=\"./\"):\n # Initialize all the variables\n self.name = name\n self.filepath = filepath\n # Initial variables\n self._shaders = {}\n self._uniforms = {}\n self._program = None\n # variable to tell if the shader has been initialized correctly\n self.initialized = False\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n # Dispose all the object and memory allocated\n for shader in self._shaders:\n GL.glDetachShader(self._program, self._shaders[shader])\n GL.glDeleleShader(self._shaders[shader])\n # Delete Shader Program\n if self._program:\n GL.glDeleteProgram(self._program)\n # Set initialized to false\n self.initialized = False\n\n def _initialize(self):\n # Dispose previous elemens created\n self._dispose()\n # Create the variables needed for the shader program\n self._shaders = {}\n self._uniforms = {}\n\n # Set initialized to false\n self.initialized = False\n\n # Create the main shader program\n self._program = GL.glCreateProgram()\n \n # Arrach the default shaders to the current program\n self._attach_default_shaders()\n\n # Bind and mapping the default attributes variables to the shader\n # - Bind location must be done before the linking process.\n # - Get location must be done after the linking process.\n self._bind_location_attributes()\n\n # Link the current shader program\n GL.glLinkProgram(self._program)\n # Check for link errors \n if self._check_shader_error(self._program, GL.GL_LINK_STATUS,True): \n return\n \n # Validate Program\n GL.glValidateProgram(self._program)\n # Check for link errors \n if self._check_shader_error(self._program, GL.GL_VALIDATE_STATUS,True):\n return\n \n # Get location uniforms variablesfrom the shader\n self._get_location_uniforms()\n\n # if all ok then set initialized to true\n self.initialized = True\n\n def _get_location_uniforms(self):\n for key,value in Shader.uniform_transforms.items():\n # Get the location for the curren shader loaded\n self._uniforms[key] = GL.glGetUniformLocation(self._program, value[\"name\"])\n\n def _bind_location_attributes(self):\n pass\n\n def _attach_default_shaders(self):\n # Generate the main path for the shaders to load\n filename = self.filepath + \"/\" + self.name + \".\"\n # Read all shader type files and link into the progrma\n for key,value in Shader.shader_types.items():\n shader = self._load_shader(filename + value[\"id\"], value[\"type\"])\n # Check the current shader has been loaded correctly\n if shader:\n # Finally attach the current shader into the program\n GL.glAttachShader(self._program, shader)\n # Add current shader\n self._shaders[key] = shader\n\n def _load_shader(self, filename, shader_type):\n # Check if the file exists\n if isfile(filename):\n #Load current shader code-source from file\n shader_source = readfile(filename)\n # Create curent shader\n shader = GL.glCreateShader(shader_type)\n # Set the source for the current sshader\n GL.glShaderSource(shader, shader_source) \n # Compile current shadershader\n GL.glCompileShader(shader)\n # Check for compiler errors \n if self._check_shader_error(shader, GL.GL_COMPILE_STATUS):\n return None\n # Return the current shader\n return shader\n #Return None if no file exists\n return None\n\n def _check_shader_error(self,shader,status,isProgram=False):\n if isProgram:\n # Check for errors in Programs \n if GL.glGetProgramiv(shader,status) != GL.GL_TRUE:\n print('Program load failed: {}'.format(GL.glGetProgramInfoLog(shader)))\n return True\n else:\n # Check for errors in Shaders \n if GL.glGetShaderiv(shader,status) != GL.GL_TRUE:\n print('Shader load failed: {}'.format(GL.glGetShaderInfoLog(shader)))\n return True\n return False \n\n def load(self, name):\n # Set the current file and initialize\n 
self.name = name\n # Call to initialize so it will load again the program and shader\n self._initialize()\n\n def use(self,use=True):\n \"\"\"\n Function to tell Open GL to use this Shader program.\n If the shader won't be used anymore then use use=False.\n \"\"\"\n if self.initialized:\n # Tell Open GL to use/not-use the current program\n if use:\n GL.glUseProgram(self._program)\n else:\n GL.glUseProgram(0)\n return True\n # Not initialized\n return False\n\n def bind(self, attribute_name, size, dtype=np.float32):\n \"\"\"\n This function will allow to bind attributes from the array buffer object\n to the shader. This operation will be done per VAO since it can store this\n binding. Again when a VAO will be opened and binding to OpenGL, this\n will bind again all the bindings previously performed during the creation.\n\n After unbinding the current VAO and after loading all the buffers needed\n it is very convenient to unbind the attribute afterwards.\n\n Parameters:\n attribute_name (str): \n name of the attribute to use into the shader source-code.\n size:\n size of the current attribute. This is the number of elements, not\n the number of bytes etc.. e.g. vector3 will have size = 3\n dtype:\n data-type of the values for the given attribute. If the vector contains\n int, float32, uint32, etc.. This must be given using GL types. Use\n typeGL function to convert numpy types into OpenGL types\n\n \"\"\"\n if self.initialized:\n # Get the location of the 'attribute_name' in parameter of our shader and bind it.\n attribute_id = GL.glGetAttribLocation(self._program, attribute_name)\n # Check if the current attribute is in the Shader\n if attribute_id != -1:\n #Enable current attribute in the shader\n GL.glEnableVertexAttribArray(attribute_id)\n # Describe the attribute data layout in the buffer\n GL.glVertexAttribPointer(attribute_id, size, typeGL(dtype),\n False, 0, ctypes.c_void_p(0))\n # Return the attribute id\n return attribute_id\n else:\n # Attribute has been discarded by the compiler or doesn't exist.\n print (\"Warning: Current attribute {} is not in the shader\".format(attribute_name))\n # Return False if not initialized\n return False\n\n def unbind(self, attribute_id):\n \"\"\"\n This operation will be performed after unbinding the VAO object. The parameter\n needed will be the result of the previous result that the bind function call\n returns with the attribute id.\n \"\"\"\n if self.initialized:\n # Unbind Attribute\n GL.glDisableVertexAttribArray(attribute_id)\n \n def update(self, name, value):\n # Depending on the uniform name to update we have to select the proper operator.\n GL.glUniformMatrix4fv(self._uniforms[name], 1, GL.GL_FALSE, value)\n\ndef load_image(filename, bpp=8):\n #Load the image using the path configured\n image = Image.open(filename).transpose(Image.FLIP_TOP_BOTTOM)\n if (bpp == 32):\n dtype = np.uint32\n elif (bpp == 16):\n dtype = np.uint16\n else:\n dtype = np.uint8\n # Convert the image to a numpy array. Convert to uint8 image.\n image_data = np.array(image.getdata(), dtype)\n return [image_data, image.size]\n\nclass Texture:\n \"\"\"\n This class will create and store all the elements needed\n to create the texture.\n The module needed to load the images is Pillow\n from PIL import Image\n \"\"\"\n # Maximum number of textures\n max_textures = 32\n\n def __init__(self, filename):\n # Initialize all the variables\n self.filename = filename\n # Create a texture variable with the pointer to the buffer\n self._texture = None\n # Initialize variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. Use with 'with' expressions\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memory stored\n self._dispose()\n\n def _dispose(self):\n pass\n\n def _initialize(self):\n # Create the texture and copy into OpenGL\n self._texture = self._load_Texture(self.filename)\n\n def _load_Texture(self,filename):\n # Check if the file exists\n if isfile(filename):\n # Load the image using the path configured\n img_data, size = load_image(filename)\n width, height = size\n # Generate texture buffer to load into GPU\n texture = GL.glGenTextures(1)\n # Set initial parameters needed prior to sending the image to OpenGL\n GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)\n # Bind current texture buffer to load the data\n GL.glBindTexture(GL.GL_TEXTURE_2D, texture)\n # Set parameters to tell OpenGL how to draw the image\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)\n GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB, width, height, 0,\n GL.GL_RGB, typeGL(img_data.dtype), img_data)\n # Create different Mipmaps for the current texture\n GL.glGenerateMipmap(GL.GL_TEXTURE_2D)\n return texture\n # If it does not exist return None\n return None\n \n def bind(self, count):\n \"\"\"\n This method will bind the current texture to be used to the graphic card\n Parameter:\n count: this is used to assign a free slot to the texture into OpenGL\n [ Some graphic cards could have a limitation in the number of ]\n [ textures that can store, depending on the memory. ]\n \"\"\"\n if self._texture and (count > 0 and count < Texture.max_textures + 1 ):\n # Following we will activate the texture in a slot \n GL.glActiveTexture(GL.GL_TEXTURE0 + count)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self._texture)\n\nclass Transform:\n \"\"\"\n This class will manage the basic transformation that can be\n performed to a geometry.\n\n This class uses pyrr module that it's a package with many\n operations that can be used directly with OpenGL. In this class\n the selected approach will be Object Oriented because of its features.\n Documentation can be found in the following link:\n \n https://github.com/adamlwgriffiths/Pyrr\n\n Parameters:\n default position, rotation and scale can be set initially.\n \n To-Do:\n Pivot implementation, so it can rotate based on a point.\n Advanced transformations such as shear, bend, twist, etc..\n\n \"\"\"\n def __init__(self, position=None, rotation=None, scale=None):\n # Create private members for the setters (properties)\n self.__position = self._get_Vector3(position)\n self.__rotation = self._get_Vector3(rotation)\n self.__scale = self._get_Vector3(scale)\n # Initialize variables and Window\n self._initialize()\n\n def _get_Vector3(self, value):\n if empty(value):\n return None\n # Check if it's already a Vector3 instance\n if isinstance(value,(Vector3)):\n return value\n else:\n return Vector3(value)\n\n def _initialize(self):\n # Create default transformations: position, rotation and scale\n if self.position is None:\n self.position = Vector3([0.0,0.0,0.0])\n if self.rotation is None:\n self.rotation = Vector3([0.0,0.0,0.0])\n if self.scale is None:\n self.scale = Vector3([1.0,1.0,1.0])\n\n @property\n def position(self):\n return self.__position\n\n @position.setter\n def position(self, value):\n self.__position = self._get_Vector3(value)\n\n @property\n def rotation(self):\n return self.__rotation\n\n @rotation.setter\n def rotation(self, value):\n self.__rotation = self._get_Vector3(value)\n\n @property\n def scale(self):\n return self.__scale\n\n @scale.setter\n def scale(self, value):\n self.__scale = self._get_Vector3(value)\n\n @property\n def model(self):\n \"\"\"\n This property will perform the current transformation and\n return a 4x4 matrix with the transformation matrix. This\n matrix could be sent to the shader so it can perform the\n model-view transformation for any geometry\n \"\"\"\n # Create scale matrix transformation\n scale = Matrix44.from_scale(self.scale)\n\n #Convert the current degrees vector into radians\n rotation = np.radians(self.rotation)\n rotationY = Quaternion.from_x_rotation(rotation.x)\n rotationX = Quaternion.from_y_rotation(rotation.y)\n rotationZ = Quaternion.from_z_rotation(rotation.z)\n # compute all rotations.\n rotation = rotationX * rotationY * rotationZ\n\n # Create translation matrix transformation\n translation = Matrix44.from_translation(self.position)\n\n # Compute transformation matrix. convert to float32\n return np.array(scale * rotation * translation,dtype=np.float32)\n\n def transform(self, point):\n \"\"\"\n This function will apply the current transformation to\n the following point. \n \"\"\"\n # Get the current transformation matrix\n matrix = self.model\n # transform our point by the matrix to model-view\n return matrix * self._get_Vector3(point)\n\n def __enter__(self):\n # Enter will always return the object itself. Use with 'with' expressions\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memory stored\n self._dispose()\n\n def _dispose(self):\n pass\n\ndef convert(value, dtype=np.float32):\n return np.array(value,dtype)\n\nclass Camera:\n \"\"\"\n This class will manage the basic functionality that can be\n performed with a camera.\n\n This class uses pyrr module that it's a package with many\n operations that can be used directly with OpenGL. In this class\n the selected approach will be Object Oriented because of its features.\n Documentation can be found in the following link:\n \n https://github.com/adamlwgriffiths/Pyrr\n\n Parameters:\n default perspective, rotation and scale can be set initially.\n \n To-Do:\n Pivot implementation, so it can rotate based on a point.\n Advanced transformations such as shear, bend, twist, etc..\n\n \"\"\"\n def __init__(self, position=[0.0,0.0,-3.0], fov=70.0, aspect=1.33, zNear=0.01, zFar=1000.0):\n # Create private members for the setters (properties)\n # View Matrix\n self.__position = Vector3(convert(position))\n self._forward = convert([0.0,0.0,1.0])\n self._up = convert([0.0,1.0,0.0])\n # Projection Matrix\n self._fov = fov\n self._aspect = aspect\n self._zNear = zNear\n self._zFar = zFar\n \n # Initialize variables and Window\n self._initialize()\n\n def _initialize(self):\n pass\n\n def pan(self, value):\n \"\"\"\n Rotate using up direction\n \"\"\"\n \n\n def tilt(self, value):\n \"\"\"\n Rotate using cross product between up and forward vectors.\n \"\"\"\n pass\n\n def roll(self, value):\n \"\"\"\n Rotate using the forward direction\n \"\"\"\n pass\n\n @property\n def position(self):\n return self.__position\n\n @position.setter\n def position(self, value):\n self.__position = Vector3(convert(value))\n\n def setPerspective(self, fov=70.0, aspect=1.33, zNear=0.01, zFar=1000.0):\n \"\"\"\n Redefine the perspective view of the Camera\n \"\"\"\n self._fov = fov\n self._aspect = aspect\n self._zNear = zNear\n self._zFar = zFar\n \n @property\n def view(self):\n return self.lookAt(self.position,self.position + self._forward, self._up)\n\n def lookAt(self, position, target, up):\n ez = position - target\n ez = ez / np.linalg.norm(ez)\n\n ex = np.cross(up, ez)\n ex = ex / np.linalg.norm(ex)\n\n ey = np.cross(ez, ex)\n ey = ey / np.linalg.norm(ey)\n\n rmat = np.eye(4)\n rmat[0][0] = ex[0]\n rmat[0][1] = ex[1]\n rmat[0][2] = ex[2]\n\n rmat[1][0] = ey[0]\n rmat[1][1] = ey[1]\n rmat[1][2] = ey[2]\n\n rmat[2][0] = ez[0]\n rmat[2][1] = ez[1]\n rmat[2][2] = ez[2]\n\n tmat = np.eye(4)\n tmat[0][3] = -position[0]\n tmat[1][3] = -position[1]\n tmat[2][3] = -position[2]\n\n return np.dot(rmat, tmat).transpose()\n\n\n @property\n def projection(self):\n return matrix44.create_perspective_projection_matrix(self._fov,self._aspect,self._zNear,self._zFar)\n\n def __enter__(self):\n # Enter will always return the object itself. Use with 'with' expressions\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memory stored\n self._dispose()\n\n def _dispose(self):\n pass\n\n\ndef Triangle():\n #Create default vertices 4f\n vertices = [ -0.5, -0.5, 0.0, 1.0,\n 0.0, 0.5, 0.0, 1.0,\n 0.5, -0.5, 0.0, 1.0]\n indices = [ 0, 1, 2 ]\n color = [ 1.0, 0.0, 0.0, 1.0,\n 0.0, 1.0, 0.0, 1.0,\n 0.0, 0.0, 1.0, 1.0]\n uvs = [0.0, 0.0,\n 0.5, 1.0,\n 1.0, 0.0 ]\n return [vertices, indices, color, uvs]\n\n# Testing purposes main function\nif __name__ == \"__main__\":\n # Create the Display with the main window\n with Display(\"Main Window\",800,600) as display:\n \n #georaw = cube3D()\n georaw = Triangle()\n # Create a Camera\n camera = Camera([0.0,0.0,-3.0],70.0,800/600,0.01,1000)\n # Create a texture\n texture = Texture(\"./assets/images/texture.png\")\n # Create the default shader\n shader = Shader(\"default_shader\", \"./assets/shaders\")\n # Create the geometry\n geo = Geometry(\"geo\",shader,mode=DrawMode.triangles)\n #geo.addPoints(georaw[0], 4)\n geo.addPointAttrib(\"P\",georaw[0], 4)\n #geo.addIndices(georaw[1])\n geo.addPointAttrib(\"Cd\",georaw[2], 4)\n geo.addPointAttrib(\"UV\",georaw[3], 2)\n #geo.addPoints(vertices, 4)\n geo.update()\n\n # Create a counter\n counter = 0\n # Start the Main loop for the program\n while not display.isClosed: \n # Manage the event from the gui\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n display.close()\n if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n display.close()\n if event.type == pygame.KEYUP and event.key == pygame.K_UP:\n camera.position.y += 0.1\n if event.type == pygame.KEYUP and event.key == pygame.K_DOWN:\n camera.position.y -= 0.1\n if event.type == pygame.KEYUP and event.key == pygame.K_RIGHT:\n camera.position.x += 0.1\n if event.type == pygame.KEYUP and event.key == pygame.K_LEFT:\n camera.position.x -= 0.1\n\n \"\"\"\n Get the Key-codes used in pygame\n events = [ \"QUIT\",\n \"ACTIVEEVENT\",\n \"KEYDOWN\",\n \"KEYUP\",\n \"MOUSEMOTION\",\n \"MOUSEBUTTONUP\",\n \"MOUSEBUTTONDOWN\",\n \"JOYAXISMOTION\",\n \"JOYBALLMOTION\",\n \"JOYHATMOTION\",\n \"JOYBUTTONUP\",\n \"JOYBUTTONDOWN\",\n \"USEREVENT\"]\n\n with open(\"keycodes.txt\", mode=\"w\") as file:\n for item in sorted(pygame.__dir__()):\n if str(item).startswith(\"K\"):\n line = \"{} = {}\\n\".format( str(item),getattr(pygame,item))\n file.write(line)\n for item in events:\n line = \"{} = {}\\n\".format( str(item),getattr(pygame,item))\n file.write(line)\n\n \"\"\"\n\n \n # Clear the display\n display.clear()\n \n # Render all the elements that share the same shader.\n # Use the current Shader configuration\n shader.use()\n # Use the current texture after the shader\n texture.bind(0)\n \n # # Perform some motion to the object\n # sincount = math.sin(counter)\n # coscount = math.cos(counter)\n\n # geo.transform.position.x = sincount\n # geo.transform.rotation.z = counter*50\n # geo.transform.scale = [coscount,coscount,coscount]\n\n # counter += 0.01;\n\n shader.update(\"WORLD_MATRIX\",geo.transform.model)\n shader.update(\"VIEW_MATRIX\",camera.view)\n shader.update(\"PROJECTION_MATRIX\",camera.projection)\n\n # Render the geometry\n geo.render()\n # End Use the current Shader configuration\n shader.use(False)\n\n # Update the display\n display.update()\n\n # End of the program\n\n"
] | [
[
"numpy.dot",
"pandas.merge",
"numpy.radians",
"numpy.asarray",
"numpy.reshape",
"numpy.eye",
"numpy.linalg.norm",
"pandas.DataFrame",
"numpy.cross",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
fadhilmch/FaceRecognition | [
"03ecddb15db79b23ff612b119c9678f5b2ce1194"
] | [
"demo_video_face_recognition.py"
] | [
"import cv2\nimport time\nimport numpy as np\nfrom detection.FaceDetector import FaceDetector\nfrom recognition.FaceRecognition import FaceRecognition\nfrom classifier.FaceClassifier import FaceClassifier\n\nVIDEO_INPUT_FILE = './media/test_video/Zidane_1.avi'\nVIDEO_OUTPUT_FILE = './media/test_video_output/Zidane_Recognition_1.avi'\nFACE_CLASSIFIER_MODEL = './classifier/trained_classifier_lfw.pkl'\n\nface_detector = FaceDetector()\nface_recognition = FaceRecognition()\nface_classfier = FaceClassifier(FACE_CLASSIFIER_MODEL)\nvideo_capture = cv2.VideoCapture(VIDEO_INPUT_FILE)\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter(VIDEO_OUTPUT_FILE, fourcc, 24.0, (int(video_capture.get(3)),int(video_capture.get(4))))\n\nprint('Start Recognition!')\nprevTime = 0\nwhile video_capture.isOpened():\n ret, frame = video_capture.read()\n\n curTime = time.time() # calc fps\n find_results = []\n\n frame = frame[:, :, 0:3]\n boxes, scores = face_detector.detect(frame)\n face_boxes = boxes[np.argwhere(scores>0.3).reshape(-1)]\n face_scores = scores[np.argwhere(scores>0.3).reshape(-1)]\n print('Detected_FaceNum: %d' % len(face_boxes))\n\n if len(face_boxes) > 0:\n for i in range(len(face_boxes)):\n box = face_boxes[i]\n cropped_face = frame[box[0]:box[2], box[1]:box[3], :]\n cropped_face = cv2.resize(cropped_face, (160, 160), interpolation=cv2.INTER_AREA)\n feature = face_recognition.recognize(cropped_face)\n name = face_classfier.classify(feature)\n\n cv2.rectangle(frame, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n\n # plot result idx under box\n text_x = box[1]\n text_y = box[2] + 20\n cv2.putText(frame, name, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,\n 1, (0, 0, 255), thickness=1, lineType=2)\n else:\n print('Unable to align')\n\n sec = curTime - prevTime\n prevTime = curTime\n fps = 1 / (sec)\n str = 'FPS: %2.3f' % fps\n text_fps_x = len(frame[0]) - 150\n text_fps_y = 20\n cv2.putText(frame, str, (text_fps_x, text_fps_y),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), thickness=1, lineType=2)\n\n out.write(frame)\n\nvideo_capture.release()\nout.release()\ncv2.destroyAllWindows()\n\n"
] | [
[
"numpy.argwhere"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SiChen-cuc/Tiyuntsong | [
"b1797b543f21d8e0cc80b2934aa1f5a70df5ee45"
] | [
"src/multi.py"
] | [
"import sabre as env\nimport math\nfrom network import Zero\nfrom tracepool import tracepool\nimport numpy as np\nfrom rules import rules\nfrom log import log\nimport os\nfrom multiprocessing import cpu_count\nimport multiprocessing as mp\n\nNUM_AGENT = 2\nUSE_CORES = cpu_count()\n\n\ndef agent(agent_id, net_params_queue, exp_queue):\n agent_list = []\n for p in range(NUM_AGENT):\n agent_list.append(Zero(str(p)))\n\n while True:\n net_params, _tracepool = net_params_queue.get()\n for i in range(NUM_AGENT):\n agent_list[i].set_params(net_params[i])\n\n _trace_result = []\n _global_history = []\n for p in range(NUM_AGENT):\n _global_history.append([])\n for _trace in _tracepool:\n agent_result = []\n for _agent in agent_list:\n total_bitrate, total_rebuffer, total_smoothness = env.execute(\n abr=_agent, trace=_trace)\n agent_result.append(\n (total_bitrate, total_rebuffer, total_smoothness))\n agent_reward = []\n for _index in range(len(agent_list[0].quality_history)):\n res = rules([agent_list[0].quality_history[_index],\n agent_list[1].quality_history[_index]])\n agent_reward.append(res)\n agent_reward = np.array(agent_reward)\n for _index, _agent in enumerate(agent_list):\n _history = _agent.get_action()\n reward = agent_reward[:, _index]\n _idx = 0\n s_batch, a_batch, r_batch, g_batch = [], [], [], []\n for (state, action, gan) in _history:\n s_batch.append(state)\n a_batch.append(action)\n r_batch.append(reward[_idx])\n g_batch.append(gan)\n _idx += 1\n _global_history[_index].append(\n (s_batch, a_batch, r_batch, g_batch))\n _trace_result.append(agent_result)\n exp_queue.put([_global_history, _trace_result])\n\n\ndef chunks(arr, m):\n if (len(arr) < m):\n m = len(arr)\n tmp, tmp_index = [], []\n idx = 0\n for i in range(m):\n tmp.append([])\n tmp_index.append([])\n for i in range(len(arr)):\n tmp[idx].append(arr[i])\n tmp_index[idx].append(i)\n idx += 1\n idx %= m\n return tmp, tmp_index\n\n\ndef central(net_params_queues, exp_queues):\n global_agent_list = []\n agent_elo = []\n\n _log = log('zero.txt')\n #log_file = open('zero.txt', 'w')\n elo_file = open('elo.txt', 'w')\n for p in range(NUM_AGENT):\n global_agent_list.append(Zero(str(p)))\n agent_elo.append(1000.0)\n\n _tracepool = tracepool(ratio=0.5)\n _split_pool, _idx_pool = chunks(_tracepool.get_list(), USE_CORES)\n while True:\n # synchronize the network parameters of work agent\n _params = []\n agent_elo = []\n global_trace_pool = []\n for p in range(len(_tracepool.get_list())):\n global_trace_pool.append([])\n\n for p in range(NUM_AGENT):\n agent_elo.append(1000.0)\n _params.append(global_agent_list[p].get_params())\n\n for i in range(USE_CORES):\n net_params_queues[i].put([_params, _split_pool[i]])\n\n _tmp = [0, 0, 0]\n for i in range(USE_CORES):\n _global_history, _trace_result = exp_queues[i].get()\n for p in range(NUM_AGENT):\n _history = _global_history[p]\n global_agent_list[p].set_action(_history)\n for p in range(len(_trace_result)):\n global_trace_pool[_idx_pool[i][p]] = _trace_result[p]\n for _trace_res in global_trace_pool: \n tmp_battle = rules(_trace_res)\n _log.write_log(_trace_res)\n _tmp[np.argmax(tmp_battle)] += 1\n _tmp[-1] += 1\n _rate, agent_elo = _tracepool.battle(agent_elo, global_trace_pool)\n _delta_array = [_tmp[0] / _tmp[-1], _tmp[1] / _tmp[-1]]\n for _agent, _d in zip(global_agent_list, _delta_array):\n _agent.learn(_d)\n _agent.clear()\n\n for p in agent_elo:\n elo_file.write(str(p) + ' ')\n elo_file.write('\\n')\n elo_file.flush()\n print(_rate)\n print(agent_elo)\n print(round(_tmp[0] * 100.0 / _tmp[-1], 2), '%',\n ',', round(_tmp[1] * 100.0 / _tmp[-1], 2), '%')\n\n _log.write_line()\n os.system('python draw.py')\n\n\ndef main():\n net_params_queues = []\n exp_queues = []\n for i in range(USE_CORES):\n net_params_queues.append(mp.Queue(1))\n exp_queues.append(mp.Queue(1))\n\n coordinator = mp.Process(target=central,\n args=(net_params_queues, exp_queues))\n coordinator.start()\n agents = []\n for i in range(USE_CORES):\n agents.append(mp.Process(target=agent,\n args=(i, net_params_queues[i], exp_queues[i])))\n for p in agents:\n p.start()\n\n # wait until training is done\n coordinator.join()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zygn/f1tenth_gym | [
"fbb5b6a2b84bff0bd1e4eeeb62d963c74632f787"
] | [
"playground/random_obs.py"
] | [
"import random\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\nfile_name = 'map.png'\nimg = cv2.imread(file_name, -1)\n\ndst = img.copy()\nh, w = img.shape\nepsilon = 0.9999\n\n# Grayscale and binary scale conversion\nth = cv2.bitwise_not(dst)\ncontours, _ = cv2.findContours(th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n\ncontours.sort(key=len)\nsize_contours = []\nprint(\"i found contour(s):\", end=\"\")\nfor i in contours: size_contours.append(len(i))\nprint(size_contours)\nprint(\"epsilon: \", epsilon)\n\n\n# Contour detection\nin_contour = contours[0]\nout_contour = contours[-1]\n\n# Outer line \ncv2.drawContours(dst, [out_contour], -1, 100, 3)\ncv2.fillPoly(dst, [out_contour], 100)\n# Inner line \ncv2.drawContours(dst, [in_contour], -1, 200, 3)\ncv2.fillPoly(dst, [in_contour], 200)\n\n\n# Place points inside the track\npbar = tqdm(range(w))\nfor i in pbar:\n for j in range(h):\n if np.all(dst[i][j] == np.array(100)) and random.random() >= epsilon:\n cv2.circle(img, (j,i), 2, (0,0,0), -1)\n pbar.set_description(f\"\\u001b[35mAdded Obstacle - [{j},{i}]\\u001b[0m \")\n # print(f\"added obs: [{j},{i}] \\r\", end=\"\")\nprint()\n\ncv2.imwrite(f\"obs_{file_name}\", img)\n# cv2.imshow('cont', dst)\n# cv2.imshow('obs_img', img)\n# cv2.waitKey()\n# cv2.destroyAllWindows()"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pmrowla/dvclive | [
"fe95917c965db210a6a11ff3d6f287c2df298330"
] | [
"tests/test_catalyst.py"
] | [
"import os\n\nimport pytest\nfrom catalyst import dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data import ToTensor\nfrom catalyst.utils.torch import get_available_engine\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\n\nimport dvclive\nfrom dvclive.catalyst import DvcLiveCallback\n\n# pylint: disable=redefined-outer-name, unused-argument\n\n\[email protected]\ndef loaders():\n train_data = MNIST(\n os.getcwd(), train=True, download=True, transform=ToTensor()\n )\n valid_data = MNIST(\n os.getcwd(), train=False, download=True, transform=ToTensor()\n )\n return {\n \"train\": DataLoader(train_data, batch_size=32),\n \"valid\": DataLoader(valid_data, batch_size=32),\n }\n\n\[email protected]\ndef runner():\n return dl.SupervisedRunner(\n engine=get_available_engine(),\n input_key=\"features\",\n output_key=\"logits\",\n target_key=\"targets\",\n loss_key=\"loss\",\n )\n\n\ndef test_catalyst_callback(tmp_dir, runner, loaders):\n dvclive.init(\"dvc_logs\")\n\n model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.02)\n\n runner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n num_epochs=2,\n callbacks=[\n dl.AccuracyCallback(input_key=\"logits\", target_key=\"targets\"),\n DvcLiveCallback(),\n ],\n logdir=\"./logs\",\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n verbose=True,\n load_best_on_end=True,\n )\n\n assert os.path.exists(\"dvc_logs\")\n\n train_path = tmp_dir / \"dvc_logs/train\"\n valid_path = tmp_dir / \"dvc_logs/valid\"\n\n assert train_path.is_dir()\n assert valid_path.is_dir()\n assert (train_path / \"accuracy.tsv\").exists()\n\n\ndef test_catalyst_model_file(tmp_dir, runner, loaders):\n dvclive.init(\"dvc_logs\")\n\n model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.02)\n\n runner.train(\n model=model,\n engine=runner.engine,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n num_epochs=2,\n callbacks=[\n dl.AccuracyCallback(input_key=\"logits\", target_key=\"targets\"),\n DvcLiveCallback(\"model.pth\"),\n ],\n logdir=\"./logs\",\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n verbose=True,\n load_best_on_end=True,\n )\n assert (tmp_dir / \"model.pth\").is_file()\n"
] | [
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.nn.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lynshao/NoisyNN | [
"2c827dbe697f4a8d8f9b2cb8abb2aa43a749fa16"
] | [
"TrainingNoise_CIFAR10/Update.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\n\n\nclass DatasetSplit(Dataset):\n def __init__(self, dataset, idxs):\n self.dataset = dataset\n self.idxs = list(idxs)\n\n def __len__(self):\n return len(self.idxs)\n\n def __getitem__(self, item):\n image, label = self.dataset[self.idxs[item]]\n return image, label\n\n\nclass LocalUpdate(object):\n def __init__(self, args, dataset=None, idxs=None):\n self.args = args\n self.loss_func = nn.CrossEntropyLoss()\n self.selected_clients = []\n self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True, num_workers=4)\n\n\n def train(self, net, history_dict, lrT):\n net.train()\n # train and update\n optimizer = torch.optim.SGD(net.parameters(), lr=lrT, momentum=0.9, weight_decay=5e-4)\n\n epoch_loss = []\n for iter in range(self.args.local_ep):\n batch_loss = []\n for batch_idx, (images, labels) in enumerate(self.ldr_train):\n images, labels = images.to(self.args.device), labels.to(self.args.device)\n net.zero_grad()\n log_probs = net(images)\n loss = self.loss_func(log_probs, labels)\n loss.backward()\n optimizer.step()\n batch_loss.append(loss.item())\n epoch_loss.append(sum(batch_loss)/len(batch_loss))\n \n current_dict = net.state_dict()\n\n for k in current_dict.keys():\n current_dict[k] -= history_dict[k]\n\n return current_dict, sum(epoch_loss) / len(epoch_loss)\n\n"
] | [
[
"torch.nn.CrossEntropyLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
smolsbs/aoc | [
"558cc68b94ead332190e14ad7a9ecd6ca5c4aa5a"
] | [
"2020/day-11/day11.py"
] | [
"#!/usr/bin/env python3\nimport numpy as np\n\ndirs = [(0,1), (1,0), (0, -1), (-1, 0), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\ndef parse_seats(line):\n v = []\n for s in line:\n if s == 'L':\n v.append(0)\n else:\n v.append(-1)\n return v\n\ndef allpos(xx,yy):\n all_p = []\n for x in xx:\n for y in yy:\n all_p.append( (x,y) )\n return all_p\n\ndef part1(data):\n max_y = len(data)\n max_x = len(data[0])\n changed = 1\n new_d = np.copy(data)\n while changed:\n changed = 0\n for y in range(max_y):\n for x in range(max_x):\n if data[y,x] == -1:\n continue\n near = data[max(0,y-1):y+2, max(0, x-1):x+2]\n n_occup = np.count_nonzero(near == 1) - data[y,x]\n if data[y, x] == 0:\n if n_occup == 0:\n new_d[ y, x] = 1\n changed = 1\n elif data[y, x] == 1:\n if n_occup >= 4:\n new_d[y,x] = 0\n changed = 1\n data = np.copy(new_d)\n \n all_occ = np.count_nonzero(data == 1)\n return all_occ\n\n# SPAGHETT CODE\ndef part2(data):\n max_y = len(data)\n max_x = len(data[0])\n all_p = allpos(range(max_y), range(max_x))\n\n changed = 1\n new_d = np.copy(data)\n while changed:\n changed = 0\n for p in all_p:\n c_pos = p\n seats = 0\n for direct in dirs:\n new_p = (c_pos[0]+direct[0], c_pos[1]+direct[1])\n while (new_p[0] >= 0 and new_p[0] < max_y) and (new_p[1] >= 0 and new_p[1] < max_x):\n if data[new_p[0], new_p[1]] == 1:\n seats += 1\n break\n elif data[new_p[0], new_p[1]] == 0:\n break\n new_p = (new_p[0]+direct[0], new_p[1]+direct[1])\n if data[c_pos[0], c_pos[1]] == 0:\n if seats == 0:\n new_d[c_pos[0], c_pos[1]] = 1\n changed = 1\n elif data[c_pos[0], c_pos[1]] == 1:\n if seats >= 5:\n new_d[c_pos[0], c_pos[1]] = 0\n changed = 1\n data = np.copy(new_d)\n\n all_occ = np.count_nonzero(data == 1)\n return all_occ\n \n\ndef main():\n with open('input', 'r') as fp:\n data = np.array([parse_seats(x) for x in fp.read().split('\\n')])\n \n p1 = part1(data)\n print(\"part 1: {}\".format(p1))\n p2 = part2(data)\n print(\"part 2: {}\".format(p2))\n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.copy",
"numpy.count_nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
m-philipps/pyPESTO | [
"4c30abfca56ba714c302141cd44a9dd366bff4bb"
] | [
"pypesto/objective/amici.py"
] | [
"import abc\nimport copy\nimport os\nimport tempfile\nfrom collections import OrderedDict\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport numpy as np\n\nfrom ..C import FVAL, MODE_FUN, MODE_RES, RDATAS\nfrom .amici_calculator import AmiciCalculator\nfrom .amici_util import (\n create_identity_parameter_mapping,\n map_par_opt_to_par_sim,\n)\nfrom .base import ObjectiveBase, ResultDict\n\ntry:\n import amici\n import amici.parameter_mapping\n import amici.petab_objective\n from amici.parameter_mapping import ParameterMapping\nexcept ImportError:\n pass\n\nAmiciModel = Union['amici.Model', 'amici.ModelPtr']\nAmiciSolver = Union['amici.Solver', 'amici.SolverPtr']\n\n\nclass AmiciObjectBuilder(abc.ABC):\n \"\"\"Allows to build AMICI model, solver, and edatas.\n\n This class is useful for pickling an :class:`pypesto.AmiciObjective`,\n which is required in some parallelization schemes. Therefore, this\n class itself must be picklable.\n \"\"\"\n\n @abc.abstractmethod\n def create_model(self) -> AmiciModel:\n \"\"\"Create an AMICI model.\"\"\"\n\n @abc.abstractmethod\n def create_solver(self, model: AmiciModel) -> AmiciSolver:\n \"\"\"Create an AMICI solver.\"\"\"\n\n @abc.abstractmethod\n def create_edatas(self, model: AmiciModel) -> Sequence['amici.ExpData']:\n \"\"\"Create AMICI experimental data.\"\"\"\n\n\nclass AmiciObjective(ObjectiveBase):\n \"\"\"Allows to create an objective directly from an amici model.\"\"\"\n\n def __init__(\n self,\n amici_model: AmiciModel,\n amici_solver: AmiciSolver,\n edatas: Union[Sequence['amici.ExpData'], 'amici.ExpData'],\n max_sensi_order: Optional[int] = None,\n x_ids: Optional[Sequence[str]] = None,\n x_names: Optional[Sequence[str]] = None,\n parameter_mapping: Optional['ParameterMapping'] = None,\n guess_steadystate: Optional[Optional[bool]] = None,\n n_threads: Optional[int] = 1,\n fim_for_hess: Optional[bool] = True,\n amici_object_builder: Optional[AmiciObjectBuilder] = None,\n calculator: Optional[AmiciCalculator] = None,\n amici_reporting: Optional['amici.RDataReporting'] = None,\n ):\n \"\"\"\n Initialize objective.\n\n Parameters\n ----------\n amici_model:\n The amici model.\n amici_solver:\n The solver to use for the numeric integration of the model.\n edatas:\n The experimental data. If a list is passed, its entries correspond\n to multiple experimental conditions.\n max_sensi_order:\n Maximum sensitivity order supported by the model. Defaults to 2 if\n the model was compiled with o2mode, otherwise 1.\n x_ids:\n Ids of optimization parameters. In the simplest case, this will be\n the AMICI model parameters (default).\n x_names:\n Names of optimization parameters.\n parameter_mapping:\n Mapping of optimization parameters to model parameters. Format\n as created by `amici.petab_objective.create_parameter_mapping`.\n The default is just to assume that optimization and simulation\n parameters coincide.\n guess_steadystate:\n Whether to guess steadystates based on previous steadystates and\n respective derivatives. This option may lead to unexpected\n results for models with conservation laws and should accordingly\n be deactivated for those models.\n n_threads:\n Number of threads that are used for parallelization over\n experimental conditions. If amici was not installed with openMP\n support this option will have no effect.\n fim_for_hess:\n Whether to use the FIM whenever the Hessian is requested. 
This only\n applies with forward sensitivities.\n With adjoint sensitivities, the true Hessian will be used,\n if available.\n FIM or Hessian will only be exposed if `max_sensi_order>1`.\n amici_object_builder:\n AMICI object builder. Allows recreating the objective for\n pickling, required in some parallelization schemes.\n calculator:\n Performs the actual calculation of the function values and\n derivatives.\n amici_reporting:\n Determines which quantities will be computed by AMICI,\n see ``amici.Solver.setReturnDataReportingMode``. Set to ``None``\n to compute only the minimum required information.\n \"\"\"\n if amici is None:\n raise ImportError(\n \"This objective requires an installation of amici \"\n \"(https://github.com/icb-dcm/amici). \"\n \"Install via `pip3 install amici`.\"\n )\n\n self.amici_model = amici_model.clone()\n self.amici_solver = amici_solver.clone()\n\n # make sure the edatas are a list of edata objects\n if isinstance(edatas, amici.amici.ExpData):\n edatas = [edatas]\n\n # set the experimental data container\n self.edatas = edatas\n\n # set the maximum sensitivity order\n self.max_sensi_order = max_sensi_order\n\n self.guess_steadystate = guess_steadystate\n\n # optimization parameter ids\n if x_ids is None:\n # use model parameter ids as ids\n x_ids = list(self.amici_model.getParameterIds())\n self.x_ids = x_ids\n\n # mapping of parameters\n if parameter_mapping is None:\n # use identity mapping for each condition\n parameter_mapping = create_identity_parameter_mapping(\n amici_model, len(edatas)\n )\n self.parameter_mapping = parameter_mapping\n\n # If supported, enable `guess_steadystate` by default. If not\n # supported, disable by default. If requested but unsupported, raise.\n if (\n self.guess_steadystate is not False\n and self.amici_model.nx_solver_reinit > 0\n ):\n if self.guess_steadystate:\n raise ValueError(\n 'Steadystate prediction is not supported '\n 'for models with conservation laws!'\n )\n self.guess_steadystate = False\n\n if (\n self.guess_steadystate is not False\n and self.amici_model.getSteadyStateSensitivityMode()\n == amici.SteadyStateSensitivityMode.integrationOnly\n ):\n if self.guess_steadystate:\n raise ValueError(\n 'Steadystate guesses cannot be enabled '\n 'when `integrationOnly` as '\n 'SteadyStateSensitivityMode!'\n )\n self.guess_steadystate = False\n\n if self.guess_steadystate is not False:\n self.guess_steadystate = True\n\n if self.guess_steadystate:\n # preallocate guesses, construct a dict for every edata for which\n # we need to do preequilibration\n self.steadystate_guesses = {\n 'fval': np.inf,\n 'data': {\n iexp: {}\n for iexp, edata in enumerate(self.edatas)\n if len(edata.fixedParametersPreequilibration)\n },\n }\n # optimization parameter names\n if x_names is None:\n # use ids as names\n x_names = x_ids\n\n self.n_threads = n_threads\n self.fim_for_hess = fim_for_hess\n self.amici_object_builder = amici_object_builder\n self.amici_reporting = amici_reporting\n\n if calculator is None:\n calculator = AmiciCalculator()\n self.calculator = calculator\n super().__init__(x_names=x_names)\n\n # Custom (condition-specific) timepoints. 
See the\n # `set_custom_timepoints` method for more information.\n self.custom_timepoints = None\n\n def get_config(self) -> dict:\n \"\"\"Return basic information of the objective configuration.\"\"\"\n info = super().get_config()\n info['x_names'] = self.x_names\n info['model_name'] = self.amici_model.getName()\n info['solver'] = str(type(self.amici_solver))\n info['sensi_order'] = self.max_sensi_order\n\n return info\n\n def initialize(self):\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n super().initialize()\n self.reset_steadystate_guesses()\n self.calculator.initialize()\n\n def __deepcopy__(self, memodict: Dict = None) -> 'AmiciObjective':\n other = self.__class__.__new__(self.__class__)\n\n for key in set(self.__dict__.keys()) - {\n 'amici_model',\n 'amici_solver',\n 'edatas',\n }:\n other.__dict__[key] = copy.deepcopy(self.__dict__[key])\n\n # copy objects that do not have __deepcopy__\n other.amici_model = self.amici_model.clone()\n other.amici_solver = self.amici_solver.clone()\n other.edatas = [amici.ExpData(data) for data in self.edatas]\n\n return other\n\n def __getstate__(self) -> Dict:\n if self.amici_object_builder is None:\n raise NotImplementedError(\n \"AmiciObjective does not support __getstate__ without \"\n \"an `amici_object_builder`.\"\n )\n\n state = {}\n for key in set(self.__dict__.keys()) - {\n 'amici_model',\n 'amici_solver',\n 'edatas',\n }:\n state[key] = self.__dict__[key]\n\n _fd, _file = tempfile.mkstemp()\n try:\n # write amici solver settings to file\n try:\n amici.writeSolverSettingsToHDF5(self.amici_solver, _file)\n except AttributeError as e:\n e.args += (\n \"Pickling the AmiciObjective requires an AMICI \"\n \"installation with HDF5 support.\",\n )\n raise\n # read in byte stream\n with open(_fd, 'rb', closefd=False) as f:\n state['amici_solver_settings'] = f.read()\n finally:\n # close file descriptor and remove temporary file\n os.close(_fd)\n os.remove(_file)\n\n state['AMICI_model_settings'] = amici.get_model_settings(\n self.amici_model\n )\n\n return state\n\n def __setstate__(self, state: Dict) -> None:\n if state['amici_object_builder'] is None:\n raise NotImplementedError(\n \"AmiciObjective does not support __setstate__ without \"\n \"an `amici_object_builder`.\"\n )\n self.__dict__.update(state)\n\n # note: attributes not defined in the builder are lost\n model = self.amici_object_builder.create_model()\n solver = self.amici_object_builder.create_solver(model)\n edatas = self.amici_object_builder.create_edatas(model)\n\n _fd, _file = tempfile.mkstemp()\n try:\n # write solver settings to temporary file\n with open(_fd, 'wb', closefd=False) as f:\n f.write(state['amici_solver_settings'])\n # read in solver settings\n try:\n amici.readSolverSettingsFromHDF5(_file, solver)\n except AttributeError as err:\n if not err.args:\n err.args = ('',)\n err.args += (\n \"Unpickling an AmiciObjective requires an AMICI \"\n \"installation with HDF5 support.\",\n )\n raise\n finally:\n # close file descriptor and remove temporary file\n os.close(_fd)\n os.remove(_file)\n\n self.amici_model = model\n self.amici_solver = solver\n self.edatas = edatas\n\n self.apply_custom_timepoints()\n amici.set_model_settings(\n self.amici_model,\n state['AMICI_model_settings'],\n )\n\n def check_sensi_orders(\n self,\n sensi_orders: Tuple[int, ...],\n mode: str,\n ) -> bool:\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n if not sensi_orders:\n return True\n sensi_order = max(sensi_orders)\n\n # dynamically obtain maximum allowed sensitivity order\n 
max_sensi_order = self.max_sensi_order\n if max_sensi_order is None:\n max_sensi_order = 1\n # check whether it is ok to request 2nd order\n sensi_mthd = self.amici_solver.getSensitivityMethod()\n mthd_fwd = amici.SensitivityMethod_forward\n if mode == MODE_FUN and (\n self.amici_model.o2mode\n or (sensi_mthd == mthd_fwd and self.fim_for_hess)\n ):\n max_sensi_order = 2\n\n # evaluate sensitivity order\n return sensi_order <= max_sensi_order\n\n def check_mode(self, mode: str) -> bool:\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n return mode in [MODE_FUN, MODE_RES]\n\n def __call__(\n self,\n x: np.ndarray,\n sensi_orders: Tuple[int, ...] = (0,),\n mode: str = MODE_FUN,\n return_dict: bool = False,\n **kwargs,\n ) -> Union[float, np.ndarray, Tuple, ResultDict]:\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n # Use AMICI full reporting if amici.ReturnDatas are returned and no\n # other reporting mode was set\n if (\n return_dict\n and self.amici_reporting is None\n and 'amici_reporting' not in kwargs\n ):\n kwargs['amici_reporting'] = amici.RDataReporting.full\n\n return super().__call__(x, sensi_orders, mode, return_dict, **kwargs)\n\n def call_unprocessed(\n self,\n x: np.ndarray,\n sensi_orders: Tuple[int, ...],\n mode: str,\n edatas: Sequence['amici.ExpData'] = None,\n parameter_mapping: 'ParameterMapping' = None,\n amici_reporting: Optional['amici.RDataReporting'] = None,\n ):\n \"\"\"\n Call objective function without pre- or post-processing and formatting.\n\n Returns\n -------\n result:\n A dict containing the results.\n \"\"\"\n x_dct = self.par_arr_to_dct(x)\n\n # only ask amici to compute required quantities\n amici_reporting = (\n self.amici_reporting\n if amici_reporting is None\n else amici_reporting\n )\n if amici_reporting is None:\n amici_reporting = (\n amici.RDataReporting.likelihood\n if mode == MODE_FUN\n else amici.RDataReporting.residuals\n )\n self.amici_solver.setReturnDataReportingMode(amici_reporting)\n\n # update steady state\n if (\n self.guess_steadystate\n and self.steadystate_guesses['fval'] < np.inf\n ):\n for data_ix in range(len(self.edatas)):\n self.apply_steadystate_guess(data_ix, x_dct)\n\n if edatas is None:\n edatas = self.edatas\n if parameter_mapping is None:\n parameter_mapping = self.parameter_mapping\n ret = self.calculator(\n x_dct=x_dct,\n sensi_orders=sensi_orders,\n mode=mode,\n amici_model=self.amici_model,\n amici_solver=self.amici_solver,\n edatas=edatas,\n n_threads=self.n_threads,\n x_ids=self.x_ids,\n parameter_mapping=parameter_mapping,\n fim_for_hess=self.fim_for_hess,\n )\n\n nllh = ret[FVAL]\n rdatas = ret[RDATAS]\n\n # check whether we should update data for preequilibration guesses\n if (\n self.guess_steadystate\n and nllh <= self.steadystate_guesses['fval']\n and nllh < np.inf\n ):\n self.steadystate_guesses['fval'] = nllh\n for data_ix, rdata in enumerate(rdatas):\n self.store_steadystate_guess(data_ix, x_dct, rdata)\n\n return ret\n\n def par_arr_to_dct(self, x: Sequence[float]) -> Dict[str, float]:\n \"\"\"Create dict from parameter vector.\"\"\"\n return OrderedDict(zip(self.x_ids, x))\n\n def apply_steadystate_guess(self, condition_ix: int, x_dct: Dict) -> None:\n \"\"\"\n Apply steady state guess to `edatas[condition_ix].x0`.\n\n Use the stored steadystate as well as the respective sensitivity (\n if available) and parameter value to approximate the steadystate at\n the current parameters using a zeroth or first order taylor\n approximation:\n x_ss(x') = x_ss(x) [+ dx_ss/dx(x)*(x'-x)]\n \"\"\"\n mapping = 
self.parameter_mapping[condition_ix].map_sim_var\n x_sim = map_par_opt_to_par_sim(mapping, x_dct, self.amici_model)\n x_ss_guess = [] # resets initial state by default\n if condition_ix in self.steadystate_guesses['data']:\n guess_data = self.steadystate_guesses['data'][condition_ix]\n if guess_data['x_ss'] is not None:\n x_ss_guess = guess_data['x_ss']\n if guess_data['sx_ss'] is not None:\n linear_update = (\n guess_data['sx_ss']\n .transpose()\n .dot(\n (x_sim - guess_data['x'])[\n np.asarray(self.edatas[condition_ix].plist)\n ]\n )\n )\n # limit linear updates to max 20 % elementwise change\n if (linear_update / (x_ss_guess + np.spacing(1))).max() < 0.2:\n x_ss_guess += linear_update\n\n self.edatas[condition_ix].x0 = tuple(x_ss_guess)\n\n def store_steadystate_guess(\n self,\n condition_ix: int,\n x_dct: Dict,\n rdata: 'amici.ReturnData',\n ) -> None:\n \"\"\"\n Store condition parameter, steadystate and steadystate sensitivity.\n\n Stored in steadystate_guesses if steadystate guesses are enabled for\n this condition.\n \"\"\"\n if condition_ix not in self.steadystate_guesses['data']:\n return\n preeq_guesses = self.steadystate_guesses['data'][condition_ix]\n\n # update parameter\n condition_map_sim_var = self.parameter_mapping[\n condition_ix\n ].map_sim_var\n x_sim = map_par_opt_to_par_sim(\n condition_map_sim_var, x_dct, self.amici_model\n )\n preeq_guesses['x'] = x_sim\n\n # update steadystates\n preeq_guesses['x_ss'] = rdata['x_ss']\n preeq_guesses['sx_ss'] = rdata['sx_ss']\n\n def reset_steadystate_guesses(self) -> None:\n \"\"\"Reset all steadystate guess data.\"\"\"\n if not self.guess_steadystate:\n return\n\n self.steadystate_guesses['fval'] = np.inf\n for condition in self.steadystate_guesses['data']:\n self.steadystate_guesses['data'][condition] = {}\n\n def apply_custom_timepoints(self) -> None:\n \"\"\"Apply custom timepoints, if applicable.\n\n See the `set_custom_timepoints` method for more information.\n \"\"\"\n if self.custom_timepoints is not None:\n for index in range(len(self.edatas)):\n self.edatas[index].setTimepoints(self.custom_timepoints[index])\n\n def set_custom_timepoints(\n self,\n timepoints: Sequence[Sequence[Union[float, int]]] = None,\n timepoints_global: Sequence[Union[float, int]] = None,\n ) -> 'AmiciObjective':\n \"\"\"\n Create a copy of this objective that is evaluated at custom timepoints.\n\n The intended use is to aid in predictions at unmeasured timepoints.\n\n Parameters\n ----------\n timepoints:\n The outer sequence should contain a sequence of timepoints for each\n experimental condition.\n timepoints_global:\n A sequence of timepoints that will be used for all experimental\n conditions.\n\n Returns\n -------\n The customized copy of this objective.\n \"\"\"\n if timepoints is None and timepoints_global is None:\n raise KeyError('Timepoints were not specified.')\n\n amici_objective = copy.deepcopy(self)\n\n if timepoints is not None:\n if len(timepoints) != len(amici_objective.edatas):\n raise ValueError(\n 'The number of condition-specific timepoints `timepoints` '\n 'does not match the number of experimental conditions.\\n'\n f'Number of provided timepoints: {len(timepoints)}. 
'\n 'Number of experimental conditions: '\n f'{len(amici_objective.edatas)}.'\n )\n custom_timepoints = timepoints\n else:\n custom_timepoints = [\n copy.deepcopy(timepoints_global)\n for _ in range(len(amici_objective.edatas))\n ]\n\n amici_objective.custom_timepoints = custom_timepoints\n amici_objective.apply_custom_timepoints()\n return amici_objective\n\n def check_gradients_match_finite_differences(\n self, x: np.ndarray = None, *args, **kwargs\n ) -> bool:\n \"\"\"Check if gradients match finite differences (FDs).\n\n Parameters\n ----------\n x: The parameters for which to evaluate the gradient.\n\n Returns\n -------\n bool\n Indicates whether gradients match (True) FDs or not (False)\n \"\"\"\n if x is None and 'petab_problem' in dir(self.amici_object_builder):\n x = self.amici_object_builder.petab_problem.x_nominal_scaled\n x_free = self.amici_object_builder.petab_problem.x_free_indices\n return super().check_gradients_match_finite_differences(\n x=x, x_free=x_free, *args, **kwargs\n )\n"
] | [
[
"numpy.asarray",
"numpy.spacing"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gottaegbert/penter | [
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d",
"8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d"
] | [
"matplotlib/gallery_python/event_handling/path_editor.py",
"matplotlib/gallery_python/text_labels_and_annotations/fancytextbox_demo.py",
"imagelib/object_detection_RetinaNet_Keras.py",
"matplotlib/gallery_python/statistics/bxp.py",
"matplotlib/gallery_python/mplot3d/wire3d_animation_sgskip.py",
"matplotlib/gallery_python/color/color_by_yvalue.py",
"matplotlib/gallery_python/lines_bars_and_markers/linestyles.py",
"matplotlib/gallery_python/statistics/boxplot_color.py",
"matplotlib/gallery_python/mplot3d/scatter3d.py",
"tensorflow_v2/dragen1860/ch10/cifar100_train.py",
"matplotlib/gallery_python/axisartist/demo_ticklabel_direction.py",
"matplotlib/gallery_python/shapes_and_collections/patch_collection.py"
] | [
"\"\"\"\n===========\nPath Editor\n===========\n\nSharing events across GUIs.\n\nThis example demonstrates a cross-GUI application using Matplotlib event\nhandling to interact with and modify objects on the canvas.\n\"\"\"\nimport numpy as np\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nPath = mpath.Path\n\nfig, ax = plt.subplots()\n\npathdata = [\n (Path.MOVETO, (1.58, -2.57)),\n (Path.CURVE4, (0.35, -1.1)),\n (Path.CURVE4, (-1.75, 2.0)),\n (Path.CURVE4, (0.375, 2.0)),\n (Path.LINETO, (0.85, 1.15)),\n (Path.CURVE4, (2.2, 3.2)),\n (Path.CURVE4, (3, 0.05)),\n (Path.CURVE4, (2.0, -0.5)),\n (Path.CLOSEPOLY, (1.58, -2.57)),\n ]\n\ncodes, verts = zip(*pathdata)\npath = mpath.Path(verts, codes)\npatch = mpatches.PathPatch(path, facecolor='green', edgecolor='yellow', alpha=0.5)\nax.add_patch(patch)\n\n\nclass PathInteractor:\n \"\"\"\n An path editor.\n\n Key-bindings\n\n 't' toggle vertex markers on and off. When vertex markers are on,\n you can move them, delete them\n\n\n \"\"\"\n\n showverts = True\n epsilon = 5 # max pixel distance to count as a vertex hit\n\n def __init__(self, pathpatch):\n\n self.ax = pathpatch.axes\n canvas = self.ax.figure.canvas\n self.pathpatch = pathpatch\n self.pathpatch.set_animated(True)\n\n x, y = zip(*self.pathpatch.get_path().vertices)\n\n self.line, = ax.plot(x, y, marker='o', markerfacecolor='r', animated=True)\n\n self._ind = None # the active vert\n\n canvas.mpl_connect('draw_event', self.draw_callback)\n canvas.mpl_connect('button_press_event', self.button_press_callback)\n canvas.mpl_connect('key_press_event', self.key_press_callback)\n canvas.mpl_connect('button_release_event', self.button_release_callback)\n canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)\n self.canvas = canvas\n\n def draw_callback(self, event):\n self.background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self.pathpatch)\n self.ax.draw_artist(self.line)\n self.canvas.blit(self.ax.bbox)\n\n def pathpatch_changed(self, pathpatch):\n \"\"\"This method is called whenever the pathpatch object is called.\"\"\"\n # only copy the artist props to the line (except visibility)\n vis = self.line.get_visible()\n plt.Artist.update_from(self.line, pathpatch)\n self.line.set_visible(vis) # don't use the pathpatch visibility state\n\n def get_ind_under_point(self, event):\n \"\"\"\n Return the index of the point closest to the event position or *None*\n if no point is within ``self.epsilon`` to the event position.\n \"\"\"\n # display coords\n xy = np.asarray(self.pathpatch.get_path().vertices)\n xyt = self.pathpatch.get_transform().transform(xy)\n xt, yt = xyt[:, 0], xyt[:, 1]\n d = np.sqrt((xt - event.x)**2 + (yt - event.y)**2)\n ind = d.argmin()\n\n if d[ind] >= self.epsilon:\n ind = None\n\n return ind\n\n def button_press_callback(self, event):\n \"\"\"Callback for mouse button presses.\"\"\"\n if not self.showverts:\n return\n if event.inaxes is None:\n return\n if event.button != 1:\n return\n self._ind = self.get_ind_under_point(event)\n\n def button_release_callback(self, event):\n \"\"\"Callback for mouse button releases.\"\"\"\n if not self.showverts:\n return\n if event.button != 1:\n return\n self._ind = None\n\n def key_press_callback(self, event):\n \"\"\"Callback for key presses.\"\"\"\n if not event.inaxes:\n return\n if event.key == 't':\n self.showverts = not self.showverts\n self.line.set_visible(self.showverts)\n if not self.showverts:\n self._ind = None\n\n self.canvas.draw()\n\n def motion_notify_callback(self, event):\n \"\"\"Callback for mouse movements.\"\"\"\n if not self.showverts:\n return\n if self._ind is None:\n return\n if event.inaxes is None:\n return\n if event.button != 1:\n return\n x, y = event.xdata, event.ydata\n\n vertices = self.pathpatch.get_path().vertices\n\n vertices[self._ind] = x, y\n self.line.set_data(zip(*vertices))\n\n self.canvas.restore_region(self.background)\n self.ax.draw_artist(self.pathpatch)\n self.ax.draw_artist(self.line)\n self.canvas.blit(self.ax.bbox)\n\n\ninteractor = PathInteractor(patch)\nax.set_title('drag vertices to update path')\nax.set_xlim(-3, 4)\nax.set_ylim(-3, 4)\n\nplt.show()\n",
"\"\"\"\n=================\nFancytextbox Demo\n=================\n\n\"\"\"\nimport matplotlib.pyplot as plt\n\nplt.text(0.6, 0.7, \"eggs\", size=50, rotation=30.,\n ha=\"center\", va=\"center\",\n bbox=dict(boxstyle=\"round\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n )\n )\n\nplt.text(0.55, 0.6, \"spam\", size=50, rotation=-25.,\n ha=\"right\", va=\"top\",\n bbox=dict(boxstyle=\"square\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n )\n )\n\nplt.show()\n",
"# Keras RetinaNet 目标检测项目实例\n# https://github.com/fizyr/keras-retinanet\n\n# tensorflow https://github.com/fizyr/keras-retinanet/blob/master/examples/ResNet50RetinaNet.py\n\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\nfrom keras_retinanet.utils.colors import label_color\n\n# import miscellaneous modules\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport time\n\n# models can be downloaded here: https://github.com/fizyr/keras-retinanet/releases\nmodel_path = 'resnet50_coco_best_v2.1.0.h5'\n\n# load retinanet model\nmodel = models.load_model(model_path, backbone_name='resnet50')\n\n# if the model is not converted to an inference model, use the line below\n# see: https://github.com/fizyr/keras-retinanet#converting-a-training-model-to-inference-model\n# model = models.convert_model(model)\n\n# print(model.summary())\n\n# load label to names mapping for visualization purposes\nlabels_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train',\n 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter',\n 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant',\n 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie',\n 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite',\n 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket',\n 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',\n 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog',\n 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed',\n 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',\n 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator',\n 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier',\n 79: 'toothbrush'}\n\n# load image\nimage = read_image_bgr('E:/bird.jpg')\n\n# copy to draw on\ndraw = image.copy()\ndraw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n\n# preprocess image for network\nimage = preprocess_image(image)\nimage, scale = resize_image(image)\n\n# process image\nstart = time.time()\nboxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))\nprint(\"processing time: \", time.time() - start)\n\n# correct for image scale\nboxes /= scale\n\n# visualize detections\nfor box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < 0.5:\n break\n\n color = label_color(label)\n\n b = box.astype(int)\n draw_box(draw, b, color=color)\n\n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n\nplt.figure(figsize=(15, 15))\nplt.axis('off')\nplt.imshow(draw)\nplt.show()",
"\"\"\"\n=======================\nBoxplot drawer function\n=======================\n\nThis example demonstrates how to pass pre-computed box plot\nstatistics to the box plot drawer. The first figure demonstrates\nhow to remove and add individual components (note that the\nmean is the only value not shown by default). The second\nfigure demonstrates how the styles of the artists can\nbe customized.\n\nA good general reference on boxplots and their history can be found\nhere: http://vita.had.co.nz/papers/boxplots.pdf\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\n\n# fake data\nnp.random.seed(19680801)\ndata = np.random.lognormal(size=(37, 4), mean=1.5, sigma=1.75)\nlabels = list('ABCD')\n\n# compute the boxplot stats\nstats = cbook.boxplot_stats(data, labels=labels, bootstrap=10000)\n\n###############################################################################\n# After we've computed the stats, we can go through and change anything.\n# Just to prove it, I'll set the median of each set to the median of all\n# the data, and double the means\n\nfor n in range(len(stats)):\n stats[n]['med'] = np.median(data)\n stats[n]['mean'] *= 2\n\nprint(list(stats[0]))\n\nfs = 10 # fontsize\n\n###############################################################################\n# Demonstrate how to toggle the display of different elements:\n\nfig, axs = plt.subplots(nrows=2, ncols=3, figsize=(6, 6), sharey=True)\naxs[0, 0].bxp(stats)\naxs[0, 0].set_title('Default', fontsize=fs)\n\naxs[0, 1].bxp(stats, showmeans=True)\naxs[0, 1].set_title('showmeans=True', fontsize=fs)\n\naxs[0, 2].bxp(stats, showmeans=True, meanline=True)\naxs[0, 2].set_title('showmeans=True,\\nmeanline=True', fontsize=fs)\n\naxs[1, 0].bxp(stats, showbox=False, showcaps=False)\ntufte_title = 'Tufte Style\\n(showbox=False,\\nshowcaps=False)'\naxs[1, 0].set_title(tufte_title, fontsize=fs)\n\naxs[1, 1].bxp(stats, shownotches=True)\naxs[1, 1].set_title('notch=True', fontsize=fs)\n\naxs[1, 2].bxp(stats, showfliers=False)\naxs[1, 2].set_title('showfliers=False', fontsize=fs)\n\nfor ax in axs.flat:\n ax.set_yscale('log')\n ax.set_yticklabels([])\n\nfig.subplots_adjust(hspace=0.4)\nplt.show()\n\n###############################################################################\n# Demonstrate how to customize the display different elements:\n\nboxprops = dict(linestyle='--', linewidth=3, color='darkgoldenrod')\nflierprops = dict(marker='o', markerfacecolor='green', markersize=12,\n linestyle='none')\nmedianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')\nmeanpointprops = dict(marker='D', markeredgecolor='black',\n markerfacecolor='firebrick')\nmeanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')\n\nfig, axs = plt.subplots(nrows=2, ncols=2, figsize=(6, 6), sharey=True)\naxs[0, 0].bxp(stats, boxprops=boxprops)\naxs[0, 0].set_title('Custom boxprops', fontsize=fs)\n\naxs[0, 1].bxp(stats, flierprops=flierprops, medianprops=medianprops)\naxs[0, 1].set_title('Custom medianprops\\nand flierprops', fontsize=fs)\n\naxs[1, 0].bxp(stats, meanprops=meanpointprops, meanline=False,\n showmeans=True)\naxs[1, 0].set_title('Custom mean\\nas point', fontsize=fs)\n\naxs[1, 1].bxp(stats, meanprops=meanlineprops, meanline=True,\n showmeans=True)\naxs[1, 1].set_title('Custom mean\\nas line', fontsize=fs)\n\nfor ax in axs.flat:\n ax.set_yscale('log')\n ax.set_yticklabels([])\n\nfig.suptitle(\"I never said they'd be pretty\")\nfig.subplots_adjust(hspace=0.4)\nplt.show()\n",
"\"\"\"\n==========================\nRotating 3D wireframe plot\n==========================\n\nA very simple 'animation' of a 3D plot. See also rotate_axes3d_demo.\n\n(This example is skipped when building the documentation gallery because it\nintentionally takes a long time to run)\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\n\ndef generate(X, Y, phi):\n '''\n Generates Z data for the points in the X, Y meshgrid and parameter phi.\n '''\n R = 1 - np.sqrt(X**2 + Y**2)\n return np.cos(2 * np.pi * X + phi) * R\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n# Make the X, Y meshgrid.\nxs = np.linspace(-1, 1, 50)\nys = np.linspace(-1, 1, 50)\nX, Y = np.meshgrid(xs, ys)\n\n# Set the z axis limits so they aren't recalculated each frame.\nax.set_zlim(-1, 1)\n\n# Begin plotting.\nwframe = None\ntstart = time.time()\nfor phi in np.linspace(0, 180. / np.pi, 100):\n # If a line collection is already remove it before drawing.\n if wframe:\n ax.collections.remove(wframe)\n\n # Plot the new wireframe and pause briefly before continuing.\n Z = generate(X, Y, phi)\n wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)\n plt.pause(.001)\n\nprint('Average FPS: %f' % (100 / (time.time() - tstart)))\n",
"\"\"\"\n================\nColor by y-value\n================\n\nUse masked arrays to plot a line with different colors by y-value.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.arange(0.0, 2.0, 0.01)\ns = np.sin(2 * np.pi * t)\n\nupper = 0.77\nlower = -0.77\n\nsupper = np.ma.masked_where(s < upper, s)\nslower = np.ma.masked_where(s > lower, s)\nsmiddle = np.ma.masked_where((s < lower) | (s > upper), s)\n\nfig, ax = plt.subplots()\nax.plot(t, smiddle, t, slower, t, supper)\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.plot\nmatplotlib.pyplot.plot\n",
"\"\"\"\n==========\nLinestyles\n==========\n\nSimple linestyles can be defined using the strings \"solid\", \"dotted\", \"dashed\"\nor \"dashdot\". More refined control can be achieved by providing a dash tuple\n``(offset, (on_off_seq))``. For example, ``(0, (3, 10, 1, 15))`` means\n(3pt line, 10pt space, 1pt line, 15pt space) with no offset. See also\n`.Line2D.set_linestyle`.\n\n*Note*: The dash style can also be configured via `.Line2D.set_dashes`\nas shown in :doc:`/gallery/lines_bars_and_markers/line_demo_dash_control`\nand passing a list of dash sequences using the keyword *dashes* to the\ncycler in :doc:`property_cycle </tutorials/intermediate/color_cycle>`.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlinestyle_str = [\n ('solid', 'solid'), # Same as (0, ()) or '-'\n ('dotted', 'dotted'), # Same as (0, (1, 1)) or '.'\n ('dashed', 'dashed'), # Same as '--'\n ('dashdot', 'dashdot')] # Same as '-.'\n\nlinestyle_tuple = [\n ('loosely dotted', (0, (1, 10))),\n ('dotted', (0, (1, 1))),\n ('densely dotted', (0, (1, 1))),\n\n ('loosely dashed', (0, (5, 10))),\n ('dashed', (0, (5, 5))),\n ('densely dashed', (0, (5, 1))),\n\n ('loosely dashdotted', (0, (3, 10, 1, 10))),\n ('dashdotted', (0, (3, 5, 1, 5))),\n ('densely dashdotted', (0, (3, 1, 1, 1))),\n\n ('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),\n ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),\n ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]\n\n\ndef plot_linestyles(ax, linestyles, title):\n X, Y = np.linspace(0, 100, 10), np.zeros(10)\n yticklabels = []\n\n for i, (name, linestyle) in enumerate(linestyles):\n ax.plot(X, Y+i, linestyle=linestyle, linewidth=1.5, color='black')\n yticklabels.append(name)\n\n ax.set_title(title)\n ax.set(ylim=(-0.5, len(linestyles)-0.5),\n yticks=np.arange(len(linestyles)),\n yticklabels=yticklabels)\n ax.tick_params(left=False, bottom=False, labelbottom=False)\n for spine in ax.spines.values():\n spine.set_visible(False)\n\n # For each line style, add a text annotation with a small offset from\n # the reference point (0 in Axes coords, y tick value in Data coords).\n for i, (name, linestyle) in enumerate(linestyles):\n ax.annotate(repr(linestyle),\n xy=(0.0, i), xycoords=ax.get_yaxis_transform(),\n xytext=(-6, -12), textcoords='offset points',\n color=\"blue\", fontsize=8, ha=\"right\", family=\"monospace\")\n\n\nfig, (ax0, ax1) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [1, 3]},\n figsize=(10, 8))\n\nplot_linestyles(ax0, linestyle_str[::-1], title='Named linestyles')\nplot_linestyles(ax1, linestyle_tuple[::-1], title='Parametrized linestyles')\n\nplt.tight_layout()\nplt.show()\n",
"\"\"\"\n=================================\nBox plots with custom fill colors\n=================================\n\nThis plot illustrates how to create two types of box plots\n(rectangular and notched), and how to fill them with custom\ncolors by accessing the properties of the artists of the\nbox plots. Additionally, the ``labels`` parameter is used to\nprovide x-tick labels for each sample.\n\nA good general reference on boxplots and their history can be found\nhere: http://vita.had.co.nz/papers/boxplots.pdf\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Random test data\nnp.random.seed(19680801)\nall_data = [np.random.normal(0, std, size=100) for std in range(1, 4)]\nlabels = ['x1', 'x2', 'x3']\n\nfig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))\n\n# rectangular box plot\nbplot1 = ax1.boxplot(all_data,\n vert=True, # vertical box alignment\n patch_artist=True, # fill with color\n labels=labels) # will be used to label x-ticks\nax1.set_title('Rectangular box plot')\n\n# notch shape box plot\nbplot2 = ax2.boxplot(all_data,\n notch=True, # notch shape\n vert=True, # vertical box alignment\n patch_artist=True, # fill with color\n labels=labels) # will be used to label x-ticks\nax2.set_title('Notched box plot')\n\n# fill with colors\ncolors = ['pink', 'lightblue', 'lightgreen']\nfor bplot in (bplot1, bplot2):\n for patch, color in zip(bplot['boxes'], colors):\n patch.set_facecolor(color)\n\n# adding horizontal grid lines\nfor ax in [ax1, ax2]:\n ax.yaxis.grid(True)\n ax.set_xlabel('Three separate samples')\n ax.set_ylabel('Observed values')\n\nplt.show()\n",
"'''\n==============\n3D scatterplot\n==============\n\nDemonstration of a basic scatterplot in 3D.\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n\ndef randrange(n, vmin, vmax):\n '''\n Helper function to make an array of random numbers having shape (n, )\n with each number distributed Uniform(vmin, vmax).\n '''\n return (vmax - vmin)*np.random.rand(n) + vmin\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nn = 100\n\n# For each set of style and range settings, plot n random points in the box\n# defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].\nfor m, zlow, zhigh in [('o', -50, -25), ('^', -30, -5)]:\n xs = randrange(n, 23, 32)\n ys = randrange(n, 0, 100)\n zs = randrange(n, zlow, zhigh)\n ax.scatter(xs, ys, zs, marker=m)\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\nplt.show()\n",
"import os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers, optimizers, datasets, Sequential\n\ntf.random.set_seed(2345)\n\nconv_layers = [ # 5 units of conv + max pooling\n # unit 1\n layers.Conv2D(64, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.Conv2D(64, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.MaxPool2D(pool_size=[2, 2], strides=2, padding=\"same\"),\n # unit 2\n layers.Conv2D(128, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.Conv2D(128, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.MaxPool2D(pool_size=[2, 2], strides=2, padding=\"same\"),\n # unit 3\n layers.Conv2D(256, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.Conv2D(256, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.MaxPool2D(pool_size=[2, 2], strides=2, padding=\"same\"),\n # unit 4\n layers.Conv2D(512, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.Conv2D(512, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.MaxPool2D(pool_size=[2, 2], strides=2, padding=\"same\"),\n # unit 5\n layers.Conv2D(512, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.Conv2D(512, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu),\n layers.MaxPool2D(pool_size=[2, 2], strides=2, padding=\"same\"),\n]\n\n\ndef preprocess(x, y):\n # [0~1]\n x = tf.cast(x, dtype=tf.float32) / 255.0\n y = tf.cast(y, dtype=tf.int32)\n return x, y\n\n\n(x, y), (x_test, y_test) = datasets.cifar100.load_data()\ny = tf.squeeze(y, axis=1)\ny_test = tf.squeeze(y_test, axis=1)\nprint(x.shape, y.shape, x_test.shape, y_test.shape)\n\ntrain_db = tf.data.Dataset.from_tensor_slices((x, y))\ntrain_db = train_db.shuffle(1000).map(preprocess).batch(128)\n\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))\ntest_db = test_db.map(preprocess).batch(64)\n\nsample = next(iter(train_db))\nprint(\n \"sample:\",\n sample[0].shape,\n sample[1].shape,\n tf.reduce_min(sample[0]),\n tf.reduce_max(sample[0]),\n)\n\n\ndef main():\n # [b, 32, 32, 3] => [b, 1, 1, 512]\n conv_net = Sequential(conv_layers)\n\n fc_net = Sequential(\n [\n layers.Dense(256, activation=tf.nn.relu),\n layers.Dense(128, activation=tf.nn.relu),\n layers.Dense(100, activation=None),\n ]\n )\n\n conv_net.build(input_shape=[None, 32, 32, 3])\n fc_net.build(input_shape=[None, 512])\n conv_net.summary()\n fc_net.summary()\n optimizer = optimizers.Adam(lr=1e-4)\n\n # [1, 2] + [3, 4] => [1, 2, 3, 4]\n variables = conv_net.trainable_variables + fc_net.trainable_variables\n\n for epoch in range(50):\n\n for step, (x, y) in enumerate(train_db):\n\n with tf.GradientTape() as tape:\n # [b, 32, 32, 3] => [b, 1, 1, 512]\n out = conv_net(x)\n # flatten, => [b, 512]\n out = tf.reshape(out, [-1, 512])\n # [b, 512] => [b, 100]\n logits = fc_net(out)\n # [b] => [b, 100]\n y_onehot = tf.one_hot(y, depth=100)\n # compute loss\n loss = tf.losses.categorical_crossentropy(\n y_onehot, logits, from_logits=True\n )\n loss = tf.reduce_mean(loss)\n\n grads = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(grads, variables))\n\n if step % 100 == 0:\n print(epoch, step, \"loss:\", float(loss))\n\n total_num = 0\n total_correct = 0\n for x, y in test_db:\n out = conv_net(x)\n out = tf.reshape(out, [-1, 512])\n logits = fc_net(out)\n prob = tf.nn.softmax(logits, axis=1)\n pred = tf.argmax(prob, axis=1)\n pred = tf.cast(pred, 
dtype=tf.int32)\n\n correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)\n correct = tf.reduce_sum(correct)\n\n total_num += x.shape[0]\n total_correct += int(correct)\n\n acc = total_correct / total_num\n print(epoch, \"acc:\", acc)\n\n\nif __name__ == \"__main__\":\n main()\n",
"\"\"\"\n========================\nDemo Ticklabel Direction\n========================\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.axisartist.axislines as axislines\n\n\ndef setup_axes(fig, rect):\n ax = axislines.Subplot(fig, rect)\n fig.add_subplot(ax)\n\n ax.set_yticks([0.2, 0.8])\n ax.set_xticks([0.2, 0.8])\n\n return ax\n\n\nfig = plt.figure(figsize=(6, 3))\nfig.subplots_adjust(bottom=0.2)\n\nax = setup_axes(fig, 131)\nfor axis in ax.axis.values():\n axis.major_ticks.set_tick_out(True)\n# or you can simply do \"ax.axis[:].major_ticks.set_tick_out(True)\"\n\nax = setup_axes(fig, 132)\nax.axis[\"left\"].set_axis_direction(\"right\")\nax.axis[\"bottom\"].set_axis_direction(\"top\")\nax.axis[\"right\"].set_axis_direction(\"left\")\nax.axis[\"top\"].set_axis_direction(\"bottom\")\n\nax = setup_axes(fig, 133)\nax.axis[\"left\"].set_axis_direction(\"right\")\nax.axis[:].major_ticks.set_tick_out(True)\n\nax.axis[\"left\"].label.set_text(\"Long Label Left\")\nax.axis[\"bottom\"].label.set_text(\"Label Bottom\")\nax.axis[\"right\"].label.set_text(\"Long Label Right\")\nax.axis[\"right\"].label.set_visible(True)\nax.axis[\"left\"].label.set_pad(0)\nax.axis[\"bottom\"].label.set_pad(10)\n\nplt.show()\n",
"\"\"\"\n============================\nCircles, Wedges and Polygons\n============================\n\nThis example demonstrates how to use\n:class:`patch collections <.collections.PatchCollection>`.\n\"\"\"\n\nimport numpy as np\nfrom matplotlib.patches import Circle, Wedge, Polygon\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.pyplot as plt\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n\nfig, ax = plt.subplots()\n\nresolution = 50 # the number of vertices\nN = 3\nx = np.random.rand(N)\ny = np.random.rand(N)\nradii = 0.1*np.random.rand(N)\npatches = []\nfor x1, y1, r in zip(x, y, radii):\n circle = Circle((x1, y1), r)\n patches.append(circle)\n\nx = np.random.rand(N)\ny = np.random.rand(N)\nradii = 0.1*np.random.rand(N)\ntheta1 = 360.0*np.random.rand(N)\ntheta2 = 360.0*np.random.rand(N)\nfor x1, y1, r, t1, t2 in zip(x, y, radii, theta1, theta2):\n wedge = Wedge((x1, y1), r, t1, t2)\n patches.append(wedge)\n\n# Some limiting conditions on Wedge\npatches += [\n Wedge((.3, .7), .1, 0, 360), # Full circle\n Wedge((.7, .8), .2, 0, 360, width=0.05), # Full ring\n Wedge((.8, .3), .2, 0, 45), # Full sector\n Wedge((.8, .3), .2, 45, 90, width=0.10), # Ring sector\n]\n\nfor i in range(N):\n polygon = Polygon(np.random.rand(N, 2), True)\n patches.append(polygon)\n\ncolors = 100*np.random.rand(len(patches))\np = PatchCollection(patches, alpha=0.4)\np.set_array(np.array(colors))\nax.add_collection(p)\nfig.colorbar(p, ax=ax)\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.patches\nmatplotlib.patches.Circle\nmatplotlib.patches.Wedge\nmatplotlib.patches.Polygon\nmatplotlib.collections.PatchCollection\nmatplotlib.collections.Collection.set_array\nmatplotlib.axes.Axes.add_collection\nmatplotlib.figure.Figure.colorbar\n"
] | [
[
"numpy.sqrt",
"matplotlib.path.Path",
"matplotlib.pyplot.Artist.update_from",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.patches.PathPatch"
],
[
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.random.lognormal",
"matplotlib.cbook.boxplot_stats",
"numpy.random.seed",
"numpy.median",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.cos",
"numpy.meshgrid",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
],
[
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.sin",
"numpy.ma.masked_where",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.zeros"
],
[
"numpy.random.normal",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.random.seed"
],
[
"matplotlib.pyplot.show",
"numpy.random.rand",
"numpy.random.seed",
"matplotlib.pyplot.figure"
],
[
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.keras.Sequential",
"tensorflow.equal",
"tensorflow.random.set_seed",
"tensorflow.keras.layers.Conv2D",
"tensorflow.squeeze",
"tensorflow.losses.categorical_crossentropy",
"tensorflow.argmax",
"tensorflow.keras.datasets.cifar100.load_data",
"tensorflow.keras.layers.Dense",
"tensorflow.one_hot",
"tensorflow.GradientTape",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.reshape",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.reduce_min",
"tensorflow.keras.optimizers.Adam"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.collections.PatchCollection",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Circle",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.patches.Wedge"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UoS-SNe/pycoco | [
"bbcb09b6c8fde7e0c4464bfbd574a42e09dbfed2"
] | [
"pycocosn/classes.py"
] | [
"\"\"\"\nWorkhorse classes for interacting/running the CoCo templates.\n\nauthor: Rob Firth; github.com/RobFirth ; University of Southampton SN Group\n 2017\n\"\"\"\n\nfrom __future__ import print_function ## Force python3-like printing\n\nimport os\nimport re\nimport warnings\nfrom collections import OrderedDict\n\nimport astropy.units as u\nimport numpy as np\nfrom astropy.constants import c\nfrom astropy.coordinates import SkyCoord, Distance\nfrom astropy.table import Table, vstack, Row, Column\nfrom astropy.time import Time\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nfrom scipy.integrate import simps, trapz\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.interpolate import interp1d as interp1d\n\n# from .colours import *\n# from .defaults import *\n# from .errors import *\n# from .extinction import *\n# from .models import *\n# from .utils import *\nfrom . import colours\nfrom . import defaults\nfrom . import errors\nfrom . import extinction\nfrom . import models\nfrom . import utils\n# from . import kcorr\n\n__all__ = [\"BaseSpectrumClass\",\n \"BaseLightCurveClass\",\n \"BaseFilterClass\",\n \"BaseLCModelClass\",\n \"PhotometryClass\",\n \"SpectrumClass\",\n \"LCfitClass\",\n \"specfitClass\",\n \"SNClass\",\n \"FilterClass\",\n \"InfoClass\",\n \"find_specphase_spec\"]\n\n# #----------------------------------------------------------------------------# #\n# # TOOLS # #\n# #----------------------------------------------------------------------------# #\n\n# #------------------------------------# #\n# # DUMMY CODE # #\n# #------------------------------------# #\n\n# class CustomValueError(ValueError):\n# \"\"\"\n# Raise when....\n# \"\"\"\n#\n#\n# def __init__(self, *args, **kwargs):\n# ValueError.__init__(self, *args)\n\n\nclass DummyClass():\n \"\"\"\n Quick dummy class.\n\n Contains a test class variable and test class method that prints the\n variable.\n\n RF\n \"\"\"\n\n\n def __init__(self):\n self.dummy_string = 'Hello, World!'\n\n\n def print_dummy_string(self):\n print(self.test_string)\n\n\ndef dummy_function(verbose = True, *args, **kwargs):\n \"\"\"\n Quick dummy function.\n\n Prints supplied **args and **kwargs\n Issues warnings if nothing passed\n\n RF\n \"\"\"\n if verbose: print(__name__)\n warnings.simplefilter('always')\n print(args)\n print(kwargs)\n\n\n # warnings.warn(\"WARNING\")\n\n if not args and not kwargs:\n warnings.warn( \"You didn't pass any *args or **kwargs\", RuntimeWarning)\n\n else:\n if args:\n for i, arg in enumerate(args):\n print('an arg passed via *args: ', repr(arg))\n else:\n warnings.warn( \"You didn't pass any *args\", RuntimeWarning)\n\n if kwargs:\n for key, value in kwargs.items():\n print('a **kwarg: ', repr(key), ' == ' , repr(value))\n else:\n warnings.warn( \"You didn't pass any **kwargs\", RuntimeWarning)\n pass\n\n\n_somevar = 'Foo'\n\n\n# #----------------------------------------------------------------------------# #\n# # CODE # #\n# #----------------------------------------------------------------------------# #\n# #----------------------------------------------------------------------------# #\n# # Classes # #\n# #----------------------------------------------------------------------------# #\n# #------------------------------------# #\n# # Base Classes # #\n# #------------------------------------# #\n\nclass BaseSpectrumClass():\n \"\"\"\n Base class for handling Spectra.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n 
self._default_list_dir_path = os.path.join(defaults._default_coco_dir_path, \"lists/\")\n #\n # ## Initialise using class methods\n self.set_list_directory(self._get_list_directory())\n\n pass\n\n\n def _get_list_directory(self):\n \"\"\"\n Get the default path to the spec lists directory.\n\n Looks for the list file directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default location: '~/Code/CoCo/', with 'lists/' appended.\n \"\"\"\n\n return os.path.join(os.environ.get('COCO_ROOT_DIR', os.path.join(self._default_list_dir_path, os.pardir)), \"lists/\")\n\n\n def set_list_directory(self, list_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(list_dir_path, self._default_list_dir_path)\n if os.path.isdir(os.path.abspath(list_dir_path)):\n self.list_directory = os.path.abspath(list_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(list_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_list_dir_path, UserWarning)\n self.list_directory = self._default_list_dir_path\n\n if not os.path.isdir(self.list_directory):\n if verbose: print(os.path.isdir(self.list_directory))\n raise errors.PathError(\"The default list directory '\" + self.list_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default list directory '\" + self._default_list_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. Have\"\n + \" you messed with _default_list_dir_path?\")\n pass\n\n\n def load(self, filename, directory=False, abspath=False, fmt=\"ascii\",\n wmin=1500 * u.angstrom, wmax=11000 * u.angstrom,\n names=(\"wavelength\", \"flux\"), wavelength_u=u.angstrom,\n flux_u=u.cgs.erg / u.si.cm ** 2 / u.si.s / u.angstrom,\n convert_flux_u=u.cgs.erg / u.si.cm ** 2 / u.si.s / u.angstrom,\n verbose=False, spectrum_name = False):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n errors.StringWarning(filename)\n\n if abspath:\n path = filename\n\n else:\n if not directory:\n ## Differentiate between the two child classes\n if hasattr(self, 'data_directory'):\n path = os.path.join(self.data_directory, filename)\n if verbose: print(\"You didn't supply a directory, so using self.data_directory\")\n\n if hasattr(self, 'recon_directory'):\n path = os.path.join(self.recon_directory, filename)\n if verbose: print(\"You didn't supply a directory, so using self.recon_directory\")\n else:\n errors.StringWarning(directory)\n utils.check_dir_path(directory)\n\n path = os.path.join(directory, filename)\n if verbose: print(path)\n\n if os.path.isfile(path):\n\n ## Some might have three columns, deal with laters - this is untidy\n try:\n if hasattr(self, \"recon_directory\"):\n names = names + (\"flux_err\",)\n spec_table = Table.read(path, format=fmt, names=names)\n\n except:\n if \"flux_err\" not in names:\n names = names + (\"flux_err\",)\n spec_table = Table.read(path, format=fmt, names=names)\n\n if verbose: print(\"Reading \" + path)\n\n spec_table.meta[\"filepath\"] = path\n spec_table.meta[\"filename\"] = path.split(\"/\")[-1]\n\n if spectrum_name:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{\" + spectrum_name.replace(\"_\", \"\\_\") + \"}$\"\n else:\n spec_table.meta[\"plot_label_string\"] = r'$\\rm{' + 
spec_table.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n\n spec_table['wavelength'].unit = wavelength_u\n\n if wavelength_u != u.Angstrom:\n spec_table['wavelength'] = spec_table['wavelength'].to(u.Angstrom)\n\n spec_table['flux'].unit = flux_u\n if \"flux_err\" in spec_table.colnames:\n spec_table[\"flux_err\"].unit = flux_u\n\n # Automatically convert units?\n if flux_u != convert_flux_u:\n spec_table[\"flux\"] = spec_table[\"flux\"].to(convert_flux_u)\n if \"flux_err\" in spec_table.colnames:\n spec_table[\"flux_err\"] = spec_table[\"flux_err\"].to(convert_flux_u)\n\n flux_u = convert_flux_u\n if wmin.unit == spec_table[\"wavelength\"].unit:\n # enforce wmin and wmax\n spec_table = spec_table[np.bitwise_and(spec_table['wavelength'].data > wmin.value, spec_table['wavelength'].data < wmax.value)]\n self.min_wavelength = np.nanmin(spec_table[\"wavelength\"])\n self.max_wavelength = np.nanmax(spec_table[\"wavelength\"])\n\n # assign to class\n self.data = spec_table\n self.wavelength = spec_table[\"wavelength\"]\n self.flux = spec_table[\"flux\"]\n\n # If you got this far...\n self.success = True\n else:\n warnings.warn(path + \" is not a valid file path\")\n if verbose: print(path + ' not found')\n\n\n def load_table(self, spec_table, spectrum_name = False, path = False, trim_wavelength=False, wmin=1500 * u.angstrom,\n wmax=15000 * u.angstrom, verbose=False ):\n \"\"\"Use with care - basically assumes you have all of your ducks in a row\"\"\"\n\n if trim_wavelength:\n spec_table = spec_table[np.bitwise_and(spec_table['wavelength'] > wmin, spec_table['wavelength'] < wmax)]\n\n if path:\n spec_table.meta[\"filepath\"] = path\n spec_table.meta[\"filename\"] = path.split(\"/\")[-1]\n if spectrum_name:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{\" + spectrum_name.replace(\"_\", \"\\_\") + \"}$\"\n else:\n spec_table.meta[\"plot_label_string\"] = r'$\\rm{' + spec_table.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n elif spectrum_name:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{\" + spectrum_name.replace(\"_\", \"\\_\")+ \"}$\"\n else:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{Spectrum from table}$\"\n\n self.min_wavelength = np.nanmin(spec_table[\"wavelength\"])\n self.max_wavelength = np.nanmax(spec_table[\"wavelength\"])\n self.data = spec_table\n self.wavelength = spec_table[\"wavelength\"]\n self.flux = spec_table[\"flux\"]\n pass\n\n def plot(self, xminorticks = 250, legend = True, plot_filters=True,\n verbose = False, compare_red = True,\n return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots spec.\n\n xminorticks : spacing for minor tick marks on x axis\n\n legend : (True) Show legend\n\n verbose : print verbose output\n\n compare_red :\n\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.95,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n\n if verbose: print(self.data.__dict__)\n # plot_label_string = r'$\\rm{' + self.data.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n plot_label_string = self.data.meta[\"plot_label_string\"]\n\n\n # ax1.plot(self.data['wavelength'], self.flux, lw = 2,\n # label = plot_label_string, color = 'C0',\n # *args, **kwargs)\n ax1.plot(self.wavelength, self.flux, lw = 2,\n label = plot_label_string, color = 'C0',\n *args, **kwargs)\n\n maxplotydata = np.nanmax(self.flux)\n minplotydata 
= np.nanmin(self.flux)\n\n if hasattr(self, 'flux_dered') and compare_red:\n ax1.plot(self.data['wavelength'], self.data['flux_dered'], lw = 2,\n label = plot_label_string, color = 'Blue',\n *args, **kwargs)\n maxplotydata = np.nanmax(np.append(maxplotydata, np.nanmax(self.data['flux_dered'])))\n minplotydata = np.nanmin(np.append(minplotydata, np.nanmin(self.data['flux_dered'])))\n if legend:\n\n plot_legend = ax1.legend(loc = 1, scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n\n\n ax1.set_ylim(minplotydata*0.98, maxplotydata*1.02)\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n # yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{cm}^{-2}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n\n if return_figure:\n return fig\n plt.show()\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def set_MJD_obs(self, mjd):\n \"\"\"\n Log MJD of the observation.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n self.mjd_obs = mjd\n\n pass\n\n\n def set_EBV(self, EBV):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n self.EBV = EBV\n\n\n def deredden(self, z, EBV_host, EBV_MW = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(\"Foo\")\n if hasattr(self, \"EBV\") and not EBV_MW:\n EBV_MW = self.EBV\n\n self.flux_dered = extinction.deredden(self.wavelength, self.flux, z, EBV_MW = EBV_MW, EBV_host=EBV_host)\n self.data[\"flux_dered\"] = self.flux_dered\n\n else:\n warnings.warn(\"No extinction value set\")\n pass\n\n\n def use_flux_dered(self):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n self.flux_red = self.flux\n self.flux = self.data['flux_dered']\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def _spec_format_for_save(self):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n save_table = Table()\n\n save_table['wavelength'] = self.wavelength\n save_table['flux'] = self.flux\n\n save_table['wavelength'].format = \"5.5f\"\n save_table['flux'].format = \"5.5e\"\n\n return save_table\n\n\n def save(self, filename, path = False,\n squash = False, verbose = False, *args, **kwargs):\n \"\"\"\n Output the spectrum loaded into the Class via self.load into a format\n and location recognised by CoCo.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(\"has data\")\n if not path:\n if verbose: print(\"No directory specified, assuming \" + self._default_data_dir_path)\n path = self._default_data_dir_path\n else:\n errors.StringWarning(path)\n\n outpath = os.path.join(path, filename)\n\n utils.check_dir_path(path)\n\n if os.path.isfile(outpath):\n if squash:\n print(\"Overwriting \" + outpath)\n self._spec_format_for_save().write(outpath, format = \"ascii.fast_commented_header\", overwrite=True)\n else:\n warnings.warn(\"Found existing file matching \" + os.path.join(path,\n filename) + \". 
Run with squash = True to overwrite\")\n else:\n print(\"Writing \" + outpath)\n self._spec_format_for_save().write(outpath, format = \"ascii.fast_commented_header\")\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def _add_to_overlapping_filters(self, filter_name):\n if hasattr(self, \"_overlapping_filter_list\"):\n self._overlapping_filter_list = np.append(self._overlapping_filter_list, filter_name)\n self._n_overlapping_filters = self._n_overlapping_filters + 1\n else:\n self._overlapping_filter_list = np.array(filter_name)\n self._n_overlapping_filters = 1\n pass\n\n\n def set_infile(self, filename):\n self.infile=filename\n pass\n\n\n def get_specphot(self, filter_objects, correct_for_area=True, verbose = False):\n \"\"\"\n TODO - Some duplication between this and SNClass.get_specphot()\n :param spectrum:\n :param verbose:\n :return:\n \"\"\"\n if verbose: print(type(filter_objects), filter_objects)\n\n if not hasattr(self, \"_overlapping_filter_list\"):\n self.check_overlaps(filter_objects=filter_objects, verbose=verbose)\n\n if verbose: print(type(self._overlapping_filter_list), self._overlapping_filter_list)\n\n if self._n_overlapping_filters == 1:\n if verbose: print(\"only one overlapping filter\")\n\n if isinstance(filter_objects, FilterClass):\n if verbose: print(\"FilterClass passed\")\n ## if only one filter is given\n iterator = (filter_objects.filter_name,)\n else:\n # iterator = [self._overlapping_filter_list,]\n # iterator = [i.filter_name for i in filter_objects]\n iterator = filter_objects\n\n else:\n if isinstance(filter_objects, FilterClass):\n if verbose: print(\"FilterClass passed\")\n ## if only one filter is given\n iterator = [filter_objects.filter_name,]\n if verbose: print(type(iterator), iterator)\n\n else:\n # iterator = self._overlapping_filter_list\n iterator = filter_objects\n\n if isinstance(filter_objects, FilterClass):\n if verbose: print(\"FilterClass passed\")\n ## if only one filter is given\n filter_objects = (filter_objects,)\n\n if verbose: print(type(iterator), iterator)\n\n for j, filter_name in enumerate(iterator):\n if verbose: print(j, filter_name)\n\n # if filter_name in filter_objects:\n # if filter_name in [i.filter_name for i in filter_objects]:\n # if verbose: print(\"filter_name in filter_objects\")\n\n if isinstance(filter_name, FilterClass):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[[i.filter_name for i in filter_objects].index(filter_name)]\n\n # flux = kcorr.calc_spectrum_filter_flux(filter_object=filter_obj,\n # spectrum_object=self)\n\n if not np.array_equal(filter_obj.wavelength, self.wavelength):\n filter_obj.resample_response(new_wavelength=self.wavelength)\n\n transmitted_spec = filter_obj.throughput * self.flux\n integrated_flux = simps(transmitted_spec, self.wavelength)\n\n if correct_for_area:\n\n if not hasattr(filter_obj, \"_effective_area\"):\n filter_obj.calculate_filter_area()\n\n filter_area = filter_obj._effective_area\n flux = integrated_flux / filter_area\n\n else:\n flux = integrated_flux\n\n if verbose: print(\"flux in filter\", filter_name, \" is \", flux)\n if j == 0:\n self.specphot = Table(names=(\"lambda_effective\", \"flux\", \"filter\"), dtype=('f4', 'f4', 'S'))\n\n self.specphot.add_row((filter_obj.lambda_effective, flux, filter_name))\n\n # else:\n # warnings.warn(\"no overlapping filters - filter_name not in filter_objects\")\n\n pass\n\n\n def 
check_overlaps(self, filter_objects, verbose = False):\n \"\"\"\n TODO - based on SNClass.check_overlaps()\n\n Checks the filters that the spectrum overlaps with.\n originally used functions.filter_within_spec\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if isinstance(FilterClass, type(filter_objects)):\n ## if only one filter is given\n filter_objects = [filter_objects, ]\n\n\n for i, filter_name in enumerate(filter_objects):\n if isinstance(FilterClass, type(filter_name)):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[i]\n\n if verbose:print(i, filter_obj)\n\n if hasattr(filter_obj, \"_lower_edge\") and \\\n hasattr(filter_obj, \"_upper_edge\") and \\\n hasattr(self, \"data\"):\n blue_bool = filter_obj._lower_edge > self.min_wavelength\n red_bool = filter_obj._upper_edge < self.max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n if verbose: print(within)\n if within:\n self._add_to_overlapping_filters(filter_name)\n else:\n warnings.warn(\"SpectrumClass.check_overlaps - something went wrong... no overlaps or data?\")\n if self._n_overlapping_filters == 1:\n self._overlapping_filter_list = [self._overlapping_filter_list,] ## added to fix issue #27\n pass\n\n\nclass BaseLightCurveClass():\n \"\"\"\n Base class for handling Lightcurves.\n \"\"\"\n def __init__(self, verbose = False):\n \"\"\"\n\n \"\"\"\n ## Initialise the class variables\n\n ## Initialise using class methods\n\n pass\n\n\n def _get_filter_directory(self):\n \"\"\"\n Get the default path to the filter directory.\n\n Looks for the filter data directory set as environment variable\n $PYCOCO_FILTER_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $PYCOCO_FILTER_DIR, or\n default datalocation, i.e.: '/Users/berto/Code/CoCo/data/filters/'.\n \"\"\"\n return os.path.abspath(os.environ.get('PYCOCO_FILTER_DIR', self._default_filter_dir_path))\n\n\n def set_filter_directory(self, filter_dir_path='', verbose=False):\n \"\"\"\n Set a new filter directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if os.path.isdir(os.path.abspath(filter_dir_path)):\n self.filter_directory = os.path.abspath(filter_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(filter_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_filter_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.filter_directory):\n if verbose: print(os.path.isdir(self.filter_directory))\n raise errors.PathError(\"The default data directory '\" + self.filter_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default filter directory '\" + self._default_filter_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_filter_dir_path?\")\n pass\n\n\n def _sort_phot(self, verbose=False):\n \"\"\"\n resorts the photometry according to effective wavelength of the filter.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if hasattr(self, \"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def unpack(self, filter_file_type=\".dat\", verbose=False):\n \"\"\"\n If loading from preformatted file, then unpack the table into self.data\n OrderedDict and load FilterClass objects into self.data_filters OrderedDict\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n if hasattr(self, \"phot\"):\n filter_names = np.unique(self.phot[\"filter\"])\n\n self.phot.add_index('filter', unique = True)\n\n\n for filter_name in filter_names:\n\n phot_table = self.phot.loc[\"filter\", filter_name]\n filter_filename = filter_name + filter_file_type\n if verbose: print(filter_filename)\n if verbose: print(phot_table)\n if verbose: print(type(filter_name), type(filter_file_type))\n\n # phot_table.meta = {\"filter_filename\": filter_filename}\n phot_table.meta[\"filter_filename\"] = filter_filename\n if not isinstance(phot_table, Row):\n # if len(np.unique(self.phot.loc[\"filter\", filter_name][\"MJD\"])) > 1:\n indices = phot_table.argsort(\"MJD\")\n # for column_name in phot_table.colnames:\n # phot_table[column_name] = phot_table[column_name][indices]\n sorted_phot_table = Table([phot_table[column_name][indices] for column_name in phot_table.colnames])\n else:\n sorted_phot_table = phot_table\n\n filter_key = np.unique(phot_table[\"filter\"])[0]\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != filter_name:\n raise errors.FilterMismatchError(\"There is a more than one filterdata in here! 
or there is a mismatch with filename\")\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n\n # def load_filter(path, cmap = False, verbose = False):\n #\n if utils.check_file_path(os.path.abspath(path_to_filter)):\n filter_object = FilterClass()\n filter_object.read_filter_file(os.path.abspath(path_to_filter), verbose = verbose)\n filter_object.calculate_AB_zp()\n else:\n warnings.warn(\"Couldn't load the filter\")\n\n self.data_filters[filter_key] = filter_object\n\n self.data[filter_name] = sorted_phot_table\n\n self.filter_names = filter_names\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n\n pass\n\n\n def load_table(self, phot_table, verbose=False):\n \"\"\"\n Loads a single photometry table.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n # errors.StringWarning(path)\n try:\n self.phot = phot_table\n self.unpack(verbose=verbose)\n\n ## Sort the OrderedDict\n self._sort_phot()\n except:\n raise Exception\n\n\n def load_phot_dict(self, data_dict):\n \"\"\"\n\n \"\"\"\n self.data = data_dict\n pass\n\n\n def _combine_phot(self, verbose = True):\n \"\"\"\n\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(self.data.keys())\n\n for i, phot_filter in enumerate(self.data.keys()):\n\n if verbose: print(i, phot_filter)\n\n if i == 0:\n\n full_phot = self.data[phot_filter]\n\n else:\n\n full_phot = vstack([full_phot, self.data[phot_filter]])\n\n pass\n\n self.data['full'] = full_phot\n\n else:\n warnings.warn(\"Cant find self.data\")\n\n pass\n\n\n # def _phot_format_for_save(self, filters = False, verbose = False):\n # \"\"\"\n # This is hacky - clear it up!\n #\n # Parameters\n # ----------\n # Returns\n # -------\n # \"\"\"\n #\n # if not filters:\n # ## if none specified, use all filters\n # filters = self.data.keys()\n #\n # w = np.array([])\n # for i, f in enumerate(filters):\n # w = np.append(w, np.where(self.phot[\"filter\"] == f))\n # if verbose: print(w)\n #\n # save_table = self.phot[\"MJD\", \"flux\", \"flux_err\", \"filter\"][w.astype(int)]\n # save_table['MJD'].format = \"5.5f\"\n # save_table['flux'].format = \"5.5e\"\n # save_table['flux_err'].format = \"5.5e\"\n # # save_table[save_table.argsort(\"MJD\")]\n # return save_table\n\n\n def _phot_format_for_save(self, names = ('MJD', 'flux', 'flux_err', 'filter'), formats = ('.3f','.5g', '.5g', ''),\n filters = False, verbose = False, sort=False):\n \"\"\"\n This is hacky - clear it up!\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n\n if sort:\n save_table = self.phot[names]\n save_table = save_table[save_table.argsort()]\n else:\n save_table = self.phot[names]\n\n for z in zip(names, formats):\n save_table[z[0]].format = z[1]\n\n if filters:\n save_table = save_table[np.in1d(save_table[\"filter\"], filters)]\n\n if verbose:\n print(save_table)\n return save_table\n\n\n def save(self, filename, filters = False, path = False,\n names = ('MJD', 'flux', 'flux_err', 'filter'), formats = ('.3f','.5g', '.5g', ''),\n squash = False, verbose = True, sort = False, *args, **kwargs):\n \"\"\"\n Output the photometry loaded into the SNClass via self.load_phot* into a format\n and location recognised by CoCo.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(\"has data\")\n if not path:\n if verbose: print(\"No directory specified, assuming \" + self._default_data_dir_path)\n path = self._default_data_dir_path\n else:\n errors.StringWarning(path)\n\n 
utils.check_dir_path(path)\n\n outpath = os.path.join(path, filename)\n\n if verbose: print(outpath)\n if not filters:\n ## if none specified, use all filters\n filters = list(self.data.keys())\n if verbose: print(filters)\n\n\n if os.path.isfile(outpath):\n if squash:\n print(\"Overwriting \" + outpath)\n self._phot_format_for_save(filters = filters, names = names, formats = formats,\n verbose = verbose, sort=sort).write(outpath, format = \"ascii.fast_commented_header\", overwrite = True, names=names)\n else:\n warnings.warn(\"Found existing file matching \" + outpath + \". Run with squash = True to overwrite\")\n\n else:\n print(\"Writing \" + outpath)\n self._phot_format_for_save(filters = filters).write(outpath, format = \"ascii.fast_commented_header\", names=names)\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def set_infile(self, filename):\n self.infile=filename\n pass\n\n\n def nightaverage(self, filters=False, verbose=False):\n \"\"\"\n\n :param verbose:\n :return:\n \"\"\"\n if hasattr(self, \"phot\") and hasattr(self, \"data\"):\n\n if not filters:\n filters = self.data_filters\n if type(filters) == str:\n filters = [filters]\n\n for i, filter_key in enumerate(filters):\n\n if verbose: print(i, self.data[filter_key].__dict__)\n\n dt = self.data[filter_key]\n\n dt[\"Night\"] = list(map(lambda x: np.round(x), dt[\"MJD\"]))\n\n dt.add_index(\"Night\")\n dt_grouped = dt.group_by(\"Night\")\n dt_grouped[\"weights\"] = 1.0 / ((dt_grouped[\"flux_err\"]) * (dt_grouped[\"flux_err\"]))\n\n if i == 0:\n na_table = Table(names=(\"MJD\", \"flux\", \"flux_err\", \"filter\"),\n dtype=(dt[\"MJD\"].dtype, dt[\"flux\"].dtype, dt[\"flux_err\"].dtype, dt[\"filter\"].dtype))\n for j, col in enumerate(na_table.columns):\n if verbose: print(j, col)\n na_table[col].unit = dt[col].unit\n for group in dt_grouped.groups:\n wmean = np.average(group[\"flux\"], weights=group[\"weights\"])\n wmean_err = np.sqrt(1. / np.sum(1. 
/ (group[\"flux_err\"] * group[\"flux_err\"])))\n if verbose: print(np.mean(group[\"MJD\"]), wmean, wmean_err)\n na_table.add_row((np.mean(group[\"MJD\"]), wmean, wmean_err, filter_key))\n\n if verbose: print(\"loading into phot object...\")\n\n self.load_table(phot_table=na_table)\n\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.phot)\")\n pass\n\n\nclass BaseLCModelClass():\n \"\"\"\n\n \"\"\"\n\n def __init__(self, model_name):\n if model_name in _defined_models:\n\n if model_name == \"bazin09\":\n self.function = bazin09_listarg\n self.nparams = 4\n self._paramnames = [\"a\", \"t_0\", \"t_rise\", \"t_fall\"]\n\n elif model_name == \"karpenka12\":\n self.function = karpenka12_listarg\n self.nparams = 6\n self._paramnames = [\"a\", \"b\", \"t_0\", \"t_1\", \"t_rise\", \"t_fall\"]\n\n elif model_name == \"firth17\":\n self.function = firth17_listarg\n self.nparams = 8\n self._paramnames = [\"a\", \"b\", \"t_0\", \"t_1\", \"t_2\", \"t_x\", \"t_rise\", \"t_fall\"]\n\n else:\n warnings.warn(\"Model Not Recognised.\")\n # self.fit_params = OrderedDict\n pass\n\n def load_bestfitparams(self, param_array):\n \"\"\"\n pass array where the keys are the param names.\n \"\"\"\n\n self.params = OrderedDict()\n\n for i, element in enumerate(param_array):\n self.params[self._paramnames[i]] = element\n pass\n\n def evaluate(self, t):\n\n self.fit = self.function(t, [self.params[p] for p in self.params])\n pass\n\n\nclass BaseFilterClass():\n \"\"\"\n\n \"\"\"\n\n def __init__(self, verbose = True):\n \"\"\"\n\n :param verbose:\n \"\"\"\n self._wavelength_units = u.Angstrom\n self._wavelength_units._format['latex'] = r'\\rm{\\AA}'\n self._frequency_units = u.Hertz\n # self.calculate_frequency()\n # self.calculate_effective_frequency()\n pass\n\n\n def calculate_filter_area(self, verbose=False):\n \"\"\"\n\n :return:\n \"\"\"\n if hasattr(self, \"throughput\"):\n area = simps(self.throughput, self.wavelength)\n if np.isnan(area): ## See Issue #26 on GitHub\n area = trapz(self.throughput, self.wavelength)\n self._effective_area = area\n\n\n\n def calculate_AB_zp(self, ABpath = os.path.join(defaults._default_kcorr_data_path, \"AB_pseudospectrum.dat\"), wmin = 1500 * u.angstrom, wmax=25000 * u.angstrom):\n \"\"\"\n \"\"\"\n\n\n AB = SpectrumClass()\n AB.load(ABpath, wmin=wmin, wmax=wmax)\n\n if not hasattr(self, \"lambda_effective\"):\n self.calculate_effective_wavelength()\n\n self.resample_response(new_wavelength=AB.wavelength)\n\n transmitted_spec = self.throughput * AB.flux\n integrated_flux = simps(transmitted_spec, AB.wavelength)\n\n if not hasattr(self, \"_effective_area\"):\n self.calculate_filter_area()\n\n area_corr_integrated_flux = integrated_flux / self._effective_area\n\n self.zp_AB = -2.5 * np.log10(area_corr_integrated_flux)\n pass\n\n\n def calculate_effective_wavelength(self):\n \"\"\"\n Well, what are you expecting something called `calculate_effective_wavelength`\n to do?\n \"\"\"\n\n spline_rev = interp1d((np.cumsum(self.wavelength*self.throughput)/np.sum(self.wavelength*self.throughput)), self.wavelength)\n lambda_eff = spline_rev(0.5)\n\n self.lambda_effective = lambda_eff * self._wavelength_units\n pass\n\n\n def calculate_frequency(self):\n nu = c/self.wavelength_u\n self.frequency_u = nu.to(self._frequency_units)\n self.frequency = self.frequency_u.value\n\n\n def calculate_effective_frequency(self):\n \"\"\"\n\n \"\"\"\n\n if hasattr(self, \"frequency\"):\n spline_rev = 
interp1d((np.cumsum(self.frequency*self.throughput)/np.sum(self.frequency*self.throughput)), self.frequency)\n nu_eff = spline_rev(0.5)\n\n self.nu_effective = nu_eff * self._frequency_units\n pass\n\n\n def plot(self, xminorticks = 250, yminorticks = 0.1,\n show_lims = False, small = False, cumulative = False, return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots filter throughput, so you can double check it.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n ## Check if there is something in the class to plot\n if hasattr(self, \"wavelength\") and hasattr(self, \"throughput\"):\n\n utils.setup_plot_defaults()\n if hasattr(self._wavelength_units, \"format\"):\n if \"latex\" in self._wavelength_units.format:\n xaxis_label_string = r'$\\textnormal{Wavelength, ' + self._wavelength_units.name + ' (}' + self._wavelength_units._format['latex'] +')$'\n else:\n xaxis_label_string = r'$\\textnormal{Wavelength, ' + self._wavelength_units.name + '}$'\n\n plot_label_string = r'$\\textnormal{' + self.filter_name.replace('_', '\\\\_') + '}$'\n\n yminorLocator = MultipleLocator(yminorticks)\n xminorLocator = MultipleLocator(xminorticks)\n\n if not small:\n fig = plt.figure(figsize=[8, 4])\n else:\n fig = plt.figure(figsize=[4, 2])\n plt.rcParams['font.size'] = 10\n\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n if cumulative:\n throughput = np.cumsum(self.throughput)/np.sum(self.throughput)\n yaxis_label_string = r'$\\textnormal{Cumulative Throughput}$'\n\n else:\n throughput = self.throughput\n yaxis_label_string = r'$\\textnormal{Fractional Throughput}$'\n\n\n if hasattr(self, \"_plot_colour\"):\n ax1.plot(self.wavelength, throughput, color = self._plot_colour,\n lw = 2, label = plot_label_string)\n else:\n ax1.plot(self.wavelength, throughput, lw = 2, label = plot_label_string)\n\n if show_lims:\n try:\n ax1.plot([self._upper_edge, self._upper_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = colours.hex['batman'], zorder = 0, )\n ax1.plot([self._lower_edge, self._lower_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = colours.hex['batman'], zorder = 0, )\n except:\n print(\"Failed\")\n\n ax1.spines['top'].set_visible(True)\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.yaxis.set_minor_locator(yminorLocator)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n ax1.legend(loc = 0)\n\n if return_figure:\n return fig\n\n plt.show()\n pass\n else:\n warnings.warn(\"Doesn't look like you have loaded a filter into the object\")\n\n\n def resample_response(self, new_wavelength = False, k = 1, verbose=False,\n *args, **kwargs):\n \"\"\"\n Bit dodgy - spline has weird results for poorly sampled filters.\n Now the order is by default 1, seems to be less likely to introduce artifacts\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"wavelength\") and hasattr(self, \"throughput\"):\n\n if verbose: print(\"resampling response\")\n\n self._wavelength_orig = self.wavelength\n self._throughput_orig = self.throughput\n\n self.wavelength = np.concatenate(([0,1], self._wavelength_orig, [24999,25000]))\n self.throughput = np.concatenate(([0,0], self._throughput_orig, [0,0]))\n\n interp_func = InterpolatedUnivariateSpline(self.wavelength, self.throughput, k = k,\n *args, **kwargs)\n self.throughput = interp_func(new_wavelength)\n self.wavelength = new_wavelength\n # self.wavelength.name = \"wavelength\"\n\n 
self.throughput[np.where(self.throughput < 0.0)] = 0.0\n else:\n warning.warn(\"Doesn't look like you have loaded a filter into the object\")\n\n\n def load(self, path, directory = False, fmt = \"ascii.commented_header\",\n names = (\"wavelength\", \"throughput\"), wavelength_u = u.angstrom,\n verbose = False, name = False):\n \"\"\"\n Assumes Response function is fractional rather than %.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n\n if utils.check_file_path(os.path.abspath(path), verbose = verbose):\n\n self.data = Table.read(path, format = fmt, names = names)\n\n self.wavelength = self.data[\"wavelength\"]*wavelength_u\n self.wavelength = self.wavelength.to(u.angstrom)\n self.data[\"wavelength\"] = self.wavelength\n self.throughput = self.data[\"throughput\"]\n\n self.wavelength_u = self.wavelength * wavelength_u\n self._wavelength_units = wavelength_u\n\n self._filter_file_path = path\n\n if name:\n self.filter_name = name\n\n filename = path.split('/')[-1]\n filename_no_extension = filename.split('.')[0]\n\n else:\n warnings.warn(\"Foo\")\n\n\n def load_table(self, table, name, directory = False, wavelength_u = u.angstrom,\n verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n \"\"\"\n Assumes Response function is fractional rather than %.\n \"\"\"\n\n self.filter_name = name\n\n self.data = table\n\n if not hasattr(table[\"wavelength\"], \"unit\"):\n self.wavelength = self.data[\"wavelength\"]*wavelength_u\n else:\n self.wavelength = self.data[\"wavelength\"]\n\n self.wavelength = self.wavelength.to(u.angstrom)\n self.data[\"wavelength\"] = self.wavelength\n self.throughput = self.data[\"throughput\"]\n\n self.wavelength_u = self.wavelength.to(wavelength_u)\n self._wavelength_units = wavelength_u\n\n\n def save(self, filename, path = False,\n squash = False, verbose = True, *args, **kwargs):\n \"\"\"\n Output the filter loaded into the Class into a format\n and location recognised by CoCo.\n\n based on BaseSpectrumClass.save\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"wavelength\") and hasattr(self, \"throughput\"): ## enables resampling and wavelength conversion to be easily saved\n if verbose: print(\"has data\")\n if not path:\n if verbose: print(\"No directory specified, assuming \" + defaults._default_filter_dir_path)\n path = defaults._default_filter_dir_path\n else:\n errors.StringWarning(path)\n\n outpath = os.path.join(path, filename)\n\n utils.check_dir_path(path)\n\n if os.path.isfile(outpath):\n warnings.warn(\"Found existing file matching \" + path + \". Run with squash = True to overwrite\")\n if squash:\n print(\"Overwriting \" + outpath)\n outtable = Table([self.wavelength, self.throughput], names = [\"wavelength\", \"throughput\"])\n outtable.write(outpath, format = \"ascii.fast_commented_header\", overwrite = True)\n self._format_for_save = outtable\n\n\n else:\n print(\"Writing \" + outpath)\n outtable = Table([self.wavelength, self.throughput], names = [\"wavelength\", \"throughput\"])\n\n outtable.write(outpath, format = \"ascii.fast_commented_header\")\n self._format_for_save = outtable\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n# #------------------------------------# #\n# # Inheriting Classes # #\n# #------------------------------------# #\n\n\nclass PhotometryClass(BaseLightCurveClass):\n \"\"\"\n Inherits from BaseLightCurveClass\n\n Probably also overkill - but should be easier to store metadata etc. 
Hopefully\n flexible enough to just be a wrapper for AP tables of phot.\n\n Photometry stored in PhotometryClass.data should have a FilterClass method\n describing the observations stored in PhotometryClass.data_filters.\n\n ## NOTE should I use properties instead of get/set? http://www.python-course.eu/python3_properties.php\n looks like only python3?\n \"\"\"\n\n def __init__(self, verbose = False):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n self._default_data_dir_path = os.path.join(defaults._default_data_dir_path, 'lc/')\n self._default_filter_dir_path = defaults._default_filter_dir_path\n self.data = OrderedDict()\n self.data_filters = OrderedDict()\n\n ## Initialise using class methods\n self.set_data_directory(self._default_data_dir_path)\n self.set_filter_directory(self._get_filter_directory())\n\n\n def _get_data_directory(self):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the data data directory set as environment variable\n $PYCOCO_DATA_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $PYCOCO_DATA_DIR, or\n default datalocation: '../testdata/', with '/lc/' appended.\n \"\"\"\n\n return self.data_directory\n\n\n def set_data_directory(self, data_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(data_dir_path, self._default_data_dir_path)\n if os.path.isdir(os.path.abspath(data_dir_path)):\n self.data_directory = os.path.abspath(data_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(data_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_data_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.data_directory):\n if verbose: print(os.path.isdir(self.data_directory))\n raise errors.PathError(\"The default data directory '\" + self.data_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_data_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_data_dir_path?\")\n pass\n\n\n def load(self, path, names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii.commented_header', verbose = False):\n \"\"\"\n Loads a single photometry file.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n errors.StringWarning(path)\n try:\n phot_table = self.load_formatted_phot(path, names = names, format = format, verbose = verbose)\n self.phot = phot_table\n self.unpack()\n\n ## Sort the OrderedDict\n self._sort_phot()\n except:\n raise Exception\n\n\n def load_formatted_phot(self, path, format = \"ascii\", names = False,\n verbose = False):\n \"\"\"\n Loads a single photometry file.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n errors.StringWarning(path)\n\n if names:\n phot_table = Table.read(path, format = format, names = names)\n else:\n phot_table = Table.read(path, format = format)\n\n phot_table.meta[\"filename\"] = path\n\n phot_table[\"MJD\"].unit = u.day\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = phot_table[\"flux\"].unit\n\n return phot_table\n\n\n def load_phot_from_file(self, path, names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', verbose = False):\n \"\"\"\n For single filter data\n \"\"\"\n errors.StringWarning(path)\n try:\n # phot_table = functions.load_phot(path, names = names, format = format, verbose = verbose)\n # phot_table = ap.table.Table.read(path, format = format, names = names)\n phot_table = Table.read(path, format = format, names = names)\n\n phot_table.replace_column(\"MJD\", Time(phot_table[\"MJD\"], format = 'mjd'))\n\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = phot_table[\"flux\"].unit\n\n self.data[np.unique(phot_table[\"filter\"])[0]] = phot_table\n\n ## Sort the OrderedDict\n self._sort_phot()\n except:\n raise Exception\n\n pass\n\n\n def load_phot_from_files(self, path = False, snname = False, prefix = 'SN',\n file_type = '.dat', names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', filter_file_type = '.dat', verbose = False):\n \"\"\"\n Finds and loads in data (from file) into phot objects.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n if snname:\n if not path:\n path = self._default_data_dir_path\n ## Find matching photometry\n phot_list = find_filter_phot(path = path, snname = snname, prefix = prefix,\n file_type = file_type, verbose = verbose)\n\n full_phot_table = Table()\n\n ## Loop over files (shouldn't be that many really)\n if len(phot_list) > 0:\n\n for phot_file in phot_list:\n\n if verbose: print(phot_file)\n phot_table = Table.read(phot_file, names = names, format = format)\n\n ## NOTE astropy vstack does not support mixin columns http://docs.astropy.org/en/stable/table/mixin_columns.html\n # This means I might have problems joining the tables together if I don't add together as I go along.\n\n full_phot_table = vstack([full_phot_table, phot_table])\n\n filter_string = functions.get_filter_from_filename(phot_file, snname, file_type)\n phot_table.meta = {\"filename\" : phot_file,\n \"filter\" : filter_string,\n \"filter_filename\": filter_string + filter_file_type}\n\n ## Sort out units\n phot_table.sort(\"MJD\")\n phot_table[\"t\"] = Time(phot_table[\"MJD\"], format = 'mjd')\n\n phot_table[\"MJD\"].unit = u.day\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = 
phot_table[\"flux\"].unit\n\n ## Put in dictionary - use filter from the file\n filter_key = np.unique(phot_table[\"filter\"])[0]\n if verbose: print(len(np.unique(phot_table[\"filter\"])) , phot_table.meta[\"filter\"], filter_key)\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != phot_table.meta[\"filter\"]:\n raise errors.FilterMismatchError(\"There is a mismatch between the filter filename and that in the \"\n + \"photometry file\")\n\n self.data[filter_key] = phot_table\n\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n self.data_filters[filter_key] = functions.load_filter(path_to_filter)\n\n\n ## NOTE doing it this way because vstack doesn't like mixin columns (see above comment)\n full_phot_table.sort(\"MJD\")\n # full_phot_table[\"t\"] = Time(full_phot_table[\"MJD\"], format = 'mjd')\n full_phot_table[\"MJD\"].unit = u.day\n\n full_phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n full_phot_table[\"flux_err\"].unit = full_phot_table[\"flux\"].unit\n\n self.phot = full_phot_table\n\n ## Sort the OrderedDict\n self._sort_phot()\n else:\n warning.warn(\"Couldn't find any photometry\")\n else:\n warnings.warn(\"Provide a SN name\")\n\n pass\n\n\n def _combine_phot(self, verbose = False):\n \"\"\"\n\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(self.data.keys())\n\n for i, phot_filter in enumerate(self.data.keys()):\n\n if verbose: print(i, phot_filter)\n\n if i == 0:\n\n full_phot = self.data[phot_filter]\n\n else:\n\n\n full_phot = vstack([full_phot, self.data[phot_filter]])\n\n pass\n\n self.data['full'] = full_phot\n\n else:\n warnings.warn(\"Cant find self.data\")\n\n pass\n\n\n def plot(self, filters=False, legend=True, xminorticks=5, enforce_zero = True,\n verbose=False, xlim=False, yaxis_max_factor=1.02, return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots phot.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n if not filters:\n filters = self.data_filters\n if type(filters) == str:\n filters = [filters]\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.93,\n right = 0.96, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n for i, filter_key in enumerate(filters):\n if verbose: print(i, self.data[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n if filter_key in colours.hex.keys():\n self.data_filters[filter_key]._plot_colour = colours.hex[filter_key]\n else:\n warnings.warn(\"Cannot find filter in the pycocosn colours registry\")\n self.data_filters[filter_key]._plot_colour = \"C0\"\n\n ax1.errorbar(self.data[filter_key]['MJD'], self.data[filter_key]['flux'],\n yerr = self.data[filter_key]['flux_err'],\n capsize = 0, fmt = 'o', color = self.data_filters[filter_key]._plot_colour,\n label = plot_label_string, ecolor = colours.hex['batman'], mec = colours.hex[\"batman\"],\n *args, **kwargs)\n\n if legend:\n\n # plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n # numpoints = 1, frameon = False, fontsize = 12)\n plot_legend = ax1.legend(loc = 1, scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n ## Use ap table groups instead? 
- can't; no support for mixin columns.\n if enforce_zero:\n ax1.set_ylim(0., yaxis_max_factor * np.nanmax(self.phot['flux']))\n else:\n ax1.set_ylim(np.nanmin(self.phot['flux']), yaxis_max_factor * np.nanmax(self.phot['flux']))\n\n if xlim:\n ax1.set_xlim(xlim)\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n # yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.spines['top'].set_visible(True)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n return fig\n plt.show()\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_filters(self, xminorticks = 250, yminorticks = 0.1,\n legend = True, use_cmap = False, verbose = False):\n \"\"\"\n Plots filters.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self, \"data_filters\"):\n\n utils.setup_plot_defaults()\n xaxis_label_string = r'$\\textnormal{Wavelength, (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Fractional Throughput}$'\n yminorLocator = MultipleLocator(yminorticks)\n xminorLocator = MultipleLocator(xminorticks)\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n ax1 = fig.add_subplot(111)\n\n ## Plot the throughput for each filter\n for i, filter_key in enumerate(self.data_filters):\n if verbose: print(i, self.data_filters[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n if hasattr(self.data_filters[filter_key], \"_plot_colour\") and use_cmap:\n ax1.plot((self.data_filters[filter_key].wavelength_u).to(u.angstrom),\n self.data_filters[filter_key].throughput,\n color = self.data_filters[filter_key]._plot_colour,\n lw = 2, label = plot_label_string)\n else:\n ax1.plot((self.data_filters[filter_key].wavelength_u).to(u.angstrom),\n self.data_filters[filter_key].throughput,\n lw = 2, label = plot_label_string)\n # if hasattr(self, \"_plot_colour\"):\n # ax1.plot(self.wavelength, self.throughput, color = self._plot_colour,\n # lw = 2, label = plot_label_string)\n # else:\n # ax1.plot(self.wavelength, self.throughput, lw = 2, label = plot_label_string)\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.yaxis.set_minor_locator(yminorLocator)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if legend:\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any filters here (empty self.filter_data)\")\n pass\n\n\nclass SpectrumClass(BaseSpectrumClass):\n \"\"\"\n Class for handling Spectra.\n Inherits from BaseSpectrumClass.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n self._default_data_dir_path = os.path.join(defaults._default_data_dir_path, \"spec/\")\n self._default_list_dir_path = defaults._default_list_dir_path\n\n ## Initialise using class methods\n self.set_data_directory(self._default_data_dir_path)\n self.set_list_directory(self._get_list_directory())\n pass\n\n\n def _get_data_directory(self, path=False):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the data data directory 
set as environment variable\n $PYCOCO_DATA_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $PYCOCO_DATA_DIR, or\n default datalocation: '../testdata/', with '/spec/' appended.\n \"\"\"\n\n return self.data_directory\n\n\n def set_data_directory(self, data_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(data_dir_path, self._default_data_dir_path)\n if os.path.isdir(os.path.abspath(data_dir_path)):\n self.data_directory = os.path.abspath(data_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(data_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_data_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.data_directory):\n if verbose: print(os.path.isdir(self.data_directory))\n raise errors.PathError(\"The default data directory '\" + self.data_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_data_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. Have\"\n + \" you messed with _default_data_dir_path?\")\n pass\n\n\nclass LCfitClass(BaseLightCurveClass):\n \"\"\"\n Small class to hold the output from CoCo LCfit.\n Inherits from BaseLightCurveClass\n \"\"\"\n\n def __init__(self):\n\n ## Initialise the class variables\n self._default_recon_dir_path = os.path.join(defaults._default_coco_dir_path, \"recon/\")\n self._default_filter_dir_path = defaults._default_filter_dir_path\n\n ## Initialise using class methods\n self.set_recon_directory(self._get_recon_directory())\n self.set_filter_directory(self._get_filter_directory())\n\n ## Initialise some other stuff\n self.data = OrderedDict()\n self.data_filters = OrderedDict()\n\n pass\n\n\n def _get_recon_directory(self):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the CoCo home directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default CoCo location: '~/Code/CoCo/', with 'recon/' appended.\n \"\"\"\n\n return os.path.join(self._default_recon_dir_path, os.path.pardir, \"recon/\")\n\n\n def set_recon_directory(self, recon_dir_path = '', verbose = False):\n \"\"\"\n Set a new recon directory path.\n\n Enables the recon directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(recon_dir_path, self._default_recon_dir_path)\n if os.path.isdir(os.path.abspath(recon_dir_path)):\n self.recon_directory = os.path.abspath(recon_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(recon_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_recon_dir_path, UserWarning)\n self.recon_directory = self._default_recon_dir_path\n\n if not os.path.isdir(self.recon_directory):\n if verbose: print(os.path.isdir(self.recon_directory))\n raise errors.PathError(\"The default recon directory '\" + self.recon_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default recon directory '\" + self._default_recon_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_recon_dir_path?\")\n pass\n\n\n def load_formatted_phot(self, path, names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', verbose = False):\n \"\"\"\n\n \"\"\"\n errors.StringWarning(path)\n\n try:\n phot_table = utils.load_formatted_phot(path, format = format, names = names,\n verbose = verbose)\n self.phot = phot_table\n\n self.phot['flux_upper'] = phot_table['flux'] + phot_table['flux_err']\n self.phot['flux_lower'] = phot_table['flux'] - phot_table['flux_err']\n\n except:\n raise Exception\n\n pass\n\n\n def plot(self, legend = True, xminorticks = 5, return_figure=False,\n verbose = False, *args, **kwargs):\n \"\"\"\n Plots phot.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n for i, filter_key in enumerate(self.data_filters):\n if verbose: print(i, self.data[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n\n # ax1.errorbar(self.data[filter_key]['MJD'], self.data[filter_key]['flux'],\n # yerr = self.data[filter_key]['flux_err'],\n # capsize = 0, fmt = 'o',\n # label = plot_label_string,\n # *args, **kwargs)\n\n # ## Best Fit\n # ax1.plot(self.data[filter_key]['MJD'], self.data[filter_key]['flux'],\n # lw = 2, label = plot_label_string,\n # *args, **kwargs)\n\n ## With error\n ax1.fill_between(self.data[filter_key]['MJD'], self.data[filter_key]['flux_upper'], self.data[filter_key]['flux_lower'],\n label = plot_label_string, color = self.data_filters[filter_key]._plot_colour,\n alpha = 0.8,\n *args, **kwargs)\n if legend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ## Use ap table groups instead? 
- can't; no support for mixin columns.\n ax1.set_ylim(np.nanmin(self.phot['flux']), np.nanmax(self.phot['flux']))\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n return fig\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def get_fit_splines(self, verbose = False):\n \"\"\"\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if hasattr(self, \"data\"):\n self.spline = OrderedDict()\n\n for i, filter_key in enumerate(self.data):\n try:\n if verbose: print(filter_key)\n self.spline[filter_key] = InterpolatedUnivariateSpline(self.data[filter_key][\"MJD\"], self.data[filter_key][\"flux\"])\n self.spline[filter_key+\"_err\"] = InterpolatedUnivariateSpline(self.data[filter_key][\"MJD\"], self.data[filter_key][\"flux_err\"])\n except:\n print(\"NOPE\")\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def colour_from_model(self, filter_key1, filter_key2):\n\n return phot_1 - phot_2\n\n\nclass specfitClass(BaseSpectrumClass):\n \"\"\"\n Small class to hold the output from CoCo spec.\n Inherits from BaseSpectrumClass.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n self._default_recon_dir_path = os.path.join(defaults._default_coco_dir_path, \"recon/\")\n # self._default_list_dir_path = self._default_data_dir_path\n\n ## Initialise using class methods\n self.set_recon_directory(self._get_recon_directory())\n\n pass\n\n\n def _get_recon_directory(self):\n \"\"\"\n Get the default path to the recon directory.\n\n Looks for the CoCo directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default datalocation: '../testdata/', with '/spec/' appended.\n \"\"\"\n\n return os.path.join(os.environ.get('COCO_ROOT_DIR', os.path.join(self._default_recon_dir_path, os.pardir)), \"recon/\")\n\n\n def set_recon_directory(self, recon_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(recon_dir_path, self._default_recon_dir_path)\n if os.path.isdir(os.path.abspath(recon_dir_path)):\n self.recon_directory = os.path.abspath(recon_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(recon_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_recon_dir_path, UserWarning)\n self.recon_directory = self._default_recon_dir_path\n\n if not os.path.isdir(self.recon_directory):\n if verbose: print(os.path.isdir(self.recon_directory))\n raise errors.PathError(\"The default data directory '\" + self.recon_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_recon_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_recon_dir_path?\")\n pass\n\n\n def set_orig_specpath(self, orig_specpath = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if not orig_specpath:\n self.orig_specpath = self.data.meta[\"comments\"][0].split(\"/\")[-1]\n\n else:\n self.orig_specpath = orig_specpath\n\n pass\n\n\n def plot_comparison(self, SpectrumClassInstance,\n xminorticks=250, legend=True,\n verbose=True, twoaxes=True, return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots spec.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.94, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n if verbose: print(self.data.__dict__)\n plot_label_string = r'$\\rm{' + self.data.meta[\"filename\"].replace('_', '\\_') + '}$'\n plot_label_string_compare = r'$\\rm{' + SpectrumClassInstance.data.meta[\"filename\"].replace('_', '\\_') + '}$'\n\n\n ax1.plot(self.data['wavelength'], self.flux, lw = 2,\n label = plot_label_string, color = 'Red',\n *args, **kwargs)\n if twoaxes:\n ax2 = ax1.twinx()\n ax2.plot(SpectrumClassInstance.data['wavelength'], SpectrumClassInstance.data['flux'],\n label = plot_label_string_compare, color = 'Blue',\n *args, **kwargs)\n\n else:\n ax1.plot(SpectrumClassInstance.data['wavelength'], SpectrumClassInstance.data['flux'],\n label = plot_label_string_compare, color = 'Blue',\n *args, **kwargs)\n\n maxplotydata = np.nanmax(np.append(self.flux, SpectrumClassInstance.data['flux']))\n minplotydata = np.nanmin(np.append(self.flux, SpectrumClassInstance.data['flux']))\n\n if legend:\n ## https://stackoverflow.com/a/10129461\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n\n # plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n ax1.legend(lines + lines2,labels + labels2, loc=0, scatterpoints=1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ax1.set_ylim(minplotydata*0.98, maxplotydata*1.02)\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n return fig\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\nclass FilterClass(BaseFilterClass):\n \"\"\"Docstring for FilterClass inherits from BaseFilterClass\"\"\"\n\n def __init__(self):\n self._wavelength_units = u.Angstrom\n self._wavelength_units._format['latex'] = r'\\rm{\\AA}'\n self._frequency_units = u.Hertz\n pass\n\n\n def read_filter_file(self, path, fmt = \"ascii\",\n names = (\"wavelength\", \"throughput\"),\n wavelength_u = u.angstrom, verbose = False):\n \"\"\"\n Assumes Response function is fractional rather than %.\n \"\"\"\n if utils.check_file_path(os.path.abspath(path), verbose = verbose):\n self.data = Table.read(path, format = fmt, names = names)\n self.wavelength = self.data[\"wavelength\"] * wavelength_u\n if verbose: print(\"1\", np.nanmax(self.wavelength))\n self.wavelength = self.wavelength.to(u.angstrom)\n self.throughput = self.data[\"throughput\"]\n if verbose: print(\"2\", np.nanmax(self.wavelength))\n\n self.wavelength_u = 
self.wavelength.to(wavelength_u)\n self._filter_file_path = path\n if verbose: print(\"3\", np.nanmax(self.wavelength))\n\n filename = path.split('/')[-1]\n filename_no_extension = filename.split('.')[0]\n self.filter_name = filename_no_extension\n if verbose: print(\"4\", np.nanmax(self.wavelength))\n\n self.set_plot_colour(verbose = verbose)\n if verbose: print(\"5\", np.nanmax(self.wavelength))\n self.calculate_effective_wavelength()\n if verbose: print(\"6\", np.nanmax(self.wavelength))\n self.calculate_edges()\n if verbose: print(\"7\", np.nanmax(self.wavelength))\n self.get_zeropoint()\n if verbose: print(\"8\", np.nanmax(self.wavelength))\n\n else:\n warnings.warn(\"Foo\")\n\n\n def calculate_edges_zero(self, verbose = False):\n \"\"\"\n calculates the first and last wavelength that has non-zero and steps one\n away\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n ## calculates the first and last wavelength that has non-zero\n # w = np.where(self.throughput > 0)[0]\n # if verbose: print(w)\n # self._upper_edge = self.wavelength[w[-1]]\n # self._lower_edge = self.wavelength[w[0]]\n\n w = np.where(self.throughput > 0)[0]\n if verbose: print(w)\n if w[0] - 1 < 0:\n w_low = 0\n else:\n w_low = w[0] - 1\n\n if w[-1] + 1 == len(self.throughput):\n w_high = w[-1]\n else:\n w_high = w[-1] + 1\n\n self._upper_edge = self.wavelength[w_high]\n self._lower_edge = self.wavelength[w_low]\n\n\n def calculate_edges(self, pc = 3., verbose = False):\n \"\"\"\n calculates edges by defining the region that contains (100 - pc)% of the\n flux.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n self._cumulative_throughput = np.cumsum(self.throughput)/np.sum(self.throughput)\n self._cumulative_throughput_spline = interp1d(self._cumulative_throughput, self.wavelength)\n\n self._upper_edge = self._cumulative_throughput_spline(1.0 - 0.5*(0.01*pc))\n self._lower_edge = self._cumulative_throughput_spline(0.0 + 0.5*(0.01*pc))\n\n pass\n\n\n def calculate_plot_colour(self, colourmap_name = \"plasma\", verbose = False):\n \"\"\"\n\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if not hasattr(self, \"_colourmap\"):\n self._colourmap = plt.get_cmap(defaults._colourmap_name)\n\n if hasattr(self, 'lambda_effective'):\n\n relative_lambda = self.lambda_effective - defaults._colour_upper_lambda_limit\n relative_lambda = relative_lambda / defaults._colour_upper_lambda_limit\n\n if verbose: print(\"relative_lambda = \", relative_lambda)\n\n self._plot_colour = self._colourmap(relative_lambda)\n\n else:\n warnings.warn(\"No self.lambda_effective set.\")\n\n\n def set_plot_colour(self, colour = False, verbose = False):\n \"\"\"\n\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if colour:\n self._plot_colour = colour\n\n else:\n\n try:\n if verbose:\n if self.filter_name in colours.hex.keys:\n print(colours.hex[self.filter_name])\n self._plot_colour = colours.hex[self.filter_name]\n except:\n if verbose: print(\"Nope\")\n self.calculate_plot_colour(verbose = verbose)\n\n pass\n\n\n def get_zeropoint(self, abpath=os.path.join(defaults._default_kcorr_data_path, \"AB_pseudospectrum.dat\")):\n \"\"\"\n\n :return:\n \"\"\"\n\n if hasattr(self, \"filter_name\"):\n # self.zp_AB = self.calculate_AB_zp()\n self.calculate_AB_zp(ABpath=abpath)\n # self.zp_vega = self.calc_vega_zp(filter_name)\n else:\n warnings.warn(\"No filter name - have you loaded in a bandpass?\")\n\n pass\n\n\n# #------------------------------------# #\n# # Model Classes # #\n# 
#------------------------------------# #\n#\n# class (BaseLCModelClass)\n\n\n# #------------------------------------# #\n# # Standalone Classes # #\n# #------------------------------------# #\n\n\nclass SNClass():\n \"\"\"docstring for SNClass.\"\"\"\n\n def __init__(self, snname):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n ## Initialise\n self.spec = OrderedDict()\n self.mangledspec = OrderedDict()\n # self.spec = SpectrumClass()\n self.phot = PhotometryClass()\n info = InfoClass()\n info.load()\n self.info = info.get_sn_info(snname)\n\n self.coco_directory = self._get_coco_directory()\n self.recon_directory = self._get_recon_directory()\n\n self.name = snname\n pass\n\n\n def _get_coco_directory(self):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the CoCo home directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default CoCo location: '~/Code/CoCo/', with appended.\n \"\"\"\n\n return os.path.abspath(os.environ.get('COCO_ROOT_DIR', os.path.abspath(defaults._default_coco_dir_path)))\n\n\n @staticmethod\n def _get_recon_directory():\n \"\"\"\n Get the default path to the recon directory.\n\n Looks for the CoCo directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default datalocation: '../testdata/', with '/spec/' appended.\n \"\"\"\n\n return os.path.join(os.path.abspath(os.environ.get('COCO_ROOT_DIR', os.path.join(defaults._default_recon_dir_path, os.pardir))), \"recon/\")\n\n\n def set_recon_directory(self, recon_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(recon_dir_path, self._default_recon_dir_path)\n if os.path.isdir(os.path.abspath(recon_dir_path)):\n self.recon_directory = os.path.abspath(recon_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(recon_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_recon_dir_path, UserWarning)\n self.recon_directory = self._default_recon_dir_path\n\n if not os.path.isdir(self.recon_directory):\n if verbose: print(os.path.isdir(self.recon_directory))\n raise errors.PathError(\"The default data directory '\" + self.recon_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_recon_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_recon_dir_path?\")\n pass\n\n\n def load_phot(self, phot_table = False, snname = False, path = False, file_type = '.dat',\n verbose = False):\n \"\"\"\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if not snname:\n snname = self.name\n if phot_table:\n self.phot.load_table(phot_table=phot_table, verbose=verbose)\n else:\n if not path:\n path = os.path.join(self.phot._default_data_dir_path, snname + file_type)\n if verbose: print(path)\n self.phot.load(path, verbose = verbose)\n\n pass\n\n\n def load_list(self, path, specfiletype = \".txt\", verbose = False):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n listdata = utils.read_list_file(path, verbose=verbose)\n listdata.sort('mjd_obs')\n\n phases = []\n\n # for item in listdata[\"spec_path\"]:\n # filename = item.split(\"/\")[-1]\n # filename = filename.split(\"_\")[1:][0]\n # filename = filename.strip(specfiletype)\n # try:\n # phase = float(filename)\n # except:\n # pass\n #\n # phases.append(phase)\n # if verbose: print(phase)\n # listdata[\"phase\"] = phases\n\n self.list = listdata\n\n\n def load_spec(self, snname = False, spec_dir_path = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n\n # if not snname:\n # snname = self.name\n #\n # if not spec_dir_path:\n # spec_dir_path = os.path.abspath(os.path.join(self._default_spec_data_dir_path, snname))\n #\n # if verbose: print(\"Loading spectra from: \", spec_dir_path)\n\n # spec_dir_path =\n\n\n if hasattr(self, 'coco_directory') and hasattr(self, 'list'):\n for i, path in enumerate(self.list['spec_path']):\n spec_fullpath = os.path.join(self.coco_directory, path)\n spec_filename = path.split('/')[-1]\n spec_dir_path = spec_fullpath.replace(spec_filename, '')\n if verbose: print(spec_fullpath, spec_dir_path, spec_filename)\n\n self.spec[spec_filename] = SpectrumClass()\n self.spec[spec_filename].load(spec_filename, directory = spec_dir_path,\n verbose = verbose)\n self.spec[spec_filename].set_infile(spec_filename)\n self.spec[spec_filename].set_MJD_obs(self.list['mjd_obs'][i])\n # self.spec[spec_filename].data.add_index('wavelength')\n\n else:\n warnings.warn(\"no coco or no listfile\")\n pass\n\n\n def load_mangledspec(self, snname = False, spec_dir_path = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if not snname:\n snname = self.name\n\n # self._mangledspeclist = functions.find_recon_spec(snname)\n self._mangledspeclist = find_specphase_spec(self.name)\n self.mangledspec = OrderedDict()\n if verbose: print(\"loading mangledspec\")\n if hasattr(self, 'recon_directory') and hasattr(self, '_mangledspeclist') and hasattr(self, \"mangledspec\"):\n for i, spec_filename in enumerate(self._mangledspeclist):\n\n if verbose: print(i, spec_filename)\n # self.mangledspec[spec_filename] = SpectrumClass()\n self.mangledspec[spec_filename] = specfitClass()\n self.mangledspec[spec_filename].load(spec_filename, directory = self.recon_directory,\n verbose = verbose)\n\n orig_specpath = self.mangledspec[spec_filename].data.meta['comments']\n orig_specname = orig_specpath\n print(orig_specpath)\n w = np.where(self.list[\"spec_path\"] == orig_specpath)\n if verbose: print(w[0], len(w[0]))\n\n if len(w[0]) > 0:\n self.mangledspec[spec_filename].set_MJD_obs(self.list['mjd_obs'][w].data[0])\n self.mangledspec[spec_filename].data.add_index('wavelength')\n #\n else:\n warnings.warn(\"no coco or no listfile\")\n pass\n\n\n def load_sndist(self, 
path = defaults._default_sn_dist_path, format = \"ascii\"):\n \"\"\"\n based on functions.read_sndist and load_sndist\n \"\"\"\n\n if hasattr(self, \"name\"):\n # sndist = load_sndist(self.name)\n # self.z = sndist[\"z\"].data[0]\n # self.distmod = sndist[\"mu\"].data[0]\n\n utils.check_file_path(path)\n sndistlist = Table.read(path, format = format)\n\n try:\n w = np.where(sndistlist[\"snname\"] == snname)\n sndist = sndistlist[w]\n\n self.z = sndist[\"z\"].data[0]\n self.distmod = sndist[\"mu\"].data[0]\n except:\n warnings.warn(\"Failed to find distance info for \" + snname + \". is it in the list?\")\n else:\n if verbose: print(\"self.name not defined.\")\n\n pass\n\n\n def plot_lc(self, filters = False, legend = True, xminorticks = 10, mark_spectra = True,\n simplespecphot = False, fade = False, xlims = False, insidelegend = True,\n fit = True, enforce_zero = True, multiplot = True, yaxis_lim_multiplier = 1.1,\n lock_axis = False, xextent = False, filter_uncertainty = 10, return_figure=False,\n savepng = False, savepdf = False, outpath = False, showsnname = False,\n verbose = False, extra_phot=False, extra_phot_label=r\"$\\textnormal{Spectrophotometry}$\",\n *args, **kwargs):\n \"\"\"\n\n :param filters:\n :param legend:\n :param xminorticks:\n :param mark_spectra:\n :param simplespecphot:\n :param fade:\n :param xlims:\n :param insidelegend:\n :param fit:\n :param enforce_zero:\n :param multiplot:\n :param yaxis_lim_multiplier:\n :param lock_axis:\n :param xextent:\n :param filter_uncertainty:\n :param savepng:\n :param savepdf:\n :param outpath:\n :param showsnname:\n :param verbose:\n :param extra_phot:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n\n if hasattr(self.phot, \"data\"):\n if not fade:\n alpha = 1.0\n else:\n alpha = 0.2\n\n if not filters:\n filters = self.phot.data_filters\n if type(filters) == str:\n filters = [filters]\n\n utils.setup_plot_defaults()\n if not multiplot:\n fig = plt.figure(figsize=[8, 4])\n else:\n fig = plt.figure(figsize=[8, len(filters)*1.5])\n\n if showsnname:\n fig.suptitle(r\"$\\textrm{\"+self.name+\"}$\")\n if verbose: print(self.name)\n\n fig.subplots_adjust(left = 0.1, bottom = 0.13, top = 0.93,\n right = 0.91, hspace=0, wspace = 0)\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n # yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{cm}^{-2}$'\n\n\n if not multiplot:\n ax1 = fig.add_subplot(111)\n axes_list = [ax1]\n else:\n axes_list = [plt.subplot2grid((len(filters), 1), (j, 0)) for j, k in enumerate(filters)]\n\n for i, filter_key in enumerate(filters):\n if multiplot:\n ax1 = axes_list[i]\n\n if filter_key in self.phot.data:\n if verbose: print(i, self.phot.data[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.phot.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n\n if filter_key in colours.hex.keys():\n self.phot.data_filters[filter_key]._plot_colour = colours.hex[filter_key]\n else:\n warnings.warn(\"Cannot find filter in the pycocosn colours registry\")\n self.phot.data_filters[filter_key]._plot_colour = \"C0\"\n ax1.errorbar(self.phot.data[filter_key]['MJD'], self.phot.data[filter_key]['flux'],\n yerr = self.phot.data[filter_key]['flux_err'],\n capsize = 0, fmt = 'o', color = self.phot.data_filters[filter_key]._plot_colour,\n label = plot_label_string, ecolor = colours.hex['batman'], mec = colours.hex[\"batman\"],\n alpha = alpha,\n *args, 
**kwargs)\n\n if fit and hasattr(self, 'lcfit'):\n ax1.fill_between(self.lcfit.data[filter_key]['MJD'], self.lcfit.data[filter_key]['flux_upper'], self.lcfit.data[filter_key]['flux_lower'],\n color = self.phot.data_filters[filter_key]._plot_colour,\n alpha = 0.8, zorder = 0,\n *args, **kwargs)\n\n if simplespecphot and hasattr (self, \"simplespecphot\"):\n ax1.errorbar(self.simplespecphot.data[filter_key]['MJD'], self.simplespecphot.data[filter_key]['flux'],\n yerr = self.simplespecphot.data[filter_key]['flux_err'],\n capsize = 0, fmt = 'o', color = colours.hex[\"batman\"],\n ecolor = colours.hex['batman'], mec = colours.hex[\"batman\"], label = r\"$\\textnormal{SpecPhot}$\",\n *args, **kwargs)\n\n if extra_phot:\n if hasattr(extra_phot, \"data\") and filter_key in extra_phot.data:\n ax1.errorbar(extra_phot.data[filter_key]['MJD'], extra_phot.data[filter_key]['flux'],\n yerr=extra_phot.data[filter_key]['flux_err'],\n capsize=0, fmt='x', color=self.phot.data_filters[filter_key]._plot_colour,\n label=extra_phot_label, ecolor=\"C1\",\n mec=\"C1\", alpha=alpha, zorder=99,\n *args, **kwargs)\n\n\n if legend:\n if multiplot or insidelegend:\n plot_legend = ax1.legend(loc = 'upper right', scatterpoints = 1, markerfirst = False,\n numpoints = 1, frameon = False, bbox_to_anchor=(1., 1.),\n fontsize = 12.)\n\n if i == len(axes_list)-1:\n ax1.set_xlabel(xaxis_label_string)\n\n else:\n if multiplot:\n ax1.set_xticklabels('')\n\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.spines['top'].set_visible(True)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if mark_spectra:\n\n for spec_key in self.spec:\n if verbose: print(np.nanmin(self.spec[spec_key].wavelength) - filter_uncertainty, self.phot.data_filters[filter_key]._lower_edge)\n if verbose: print(np.nanmax(self.spec[spec_key].wavelength) + filter_uncertainty, self.phot.data_filters[filter_key]._upper_edge)\n\n if verbose: print(self.spec[spec_key].data.meta[\"filename\"] )\n too_blue = self.phot.data_filters[filter_key]._lower_edge < np.nanmin(self.spec[spec_key].wavelength) - filter_uncertainty\n too_red = self.phot.data_filters[filter_key]._upper_edge > np.nanmax(self.spec[spec_key].wavelength) + filter_uncertainty\n # if self.spec[spec_key]. self.phot.data_filters[filter_key]._upper_edge and self.phot.data_filters[filter_key]._lower_edge\n if verbose: print(too_blue, too_red)\n if not too_red and not too_blue:\n ax1.plot([self.spec[spec_key].mjd_obs, self.spec[spec_key].mjd_obs],\n [0.0, np.nanmax(self.phot.phot['flux'])*1.5],\n ls = ':', color = colours.hex['batman'], zorder = 0)\n\n if enforce_zero:\n ## Use ap table groups instead? 
- can't; no support for mixin columns.\n if multiplot and not lock_axis:\n ax1.set_ylim(np.nanmin(np.append(self.phot.data[filter_key]['flux'], 0.0)), np.nanmax(self.phot.data[filter_key]['flux'])*yaxis_lim_multiplier)\n else:\n ax1.set_ylim(np.nanmin(np.append(self.phot.phot['flux'], 0.0)), np.nanmax(self.phot.phot['flux'])*yaxis_lim_multiplier)\n else:\n if multiplot and not lock_axis:\n ax1.set_ylim(np.nanmin(self.phot.data[filter_key]['flux']), np.nanmax(self.phot.data[filter_key]['flux'])*yaxis_lim_multiplier)\n else:\n ax1.set_ylim(np.nanmin(self.phot.phot['flux']), np.nanmax(self.phot.phot['flux'])*yaxis_lim_multiplier)\n\n if multiplot:\n if not xextent:\n ax1.set_xlim(np.nanmin(self.phot.phot[\"MJD\"])-10, np.nanmax(self.phot.phot[\"MJD\"]))\n if xextent:\n ax1.set_xlim(np.nanmin(self.phot.phot[\"MJD\"])-10,np.nanmin(self.phot.phot[\"MJD\"]) + xextent)\n else:\n pass\n\n if xlims:\n ax1.set_xlim(xlims)\n if verbose:\n print(\"xrange = \", ax1.get_xlim())\n print(\"yrange = \", ax1.get_ylim())\n\n else:\n if verbose: print(\"Filter '\" + filter_key + \"' not found\")\n warnings.warn(\"Filter '\" + filter_key + \"' not found\")\n\n\n\n if not multiplot:\n\n ax1.set_ylabel(yaxis_label_string)\n\n if legend and not insidelegend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n else:\n fig.text(0.0, 0.5, yaxis_label_string, va = 'center', ha = 'left', rotation = 'vertical')\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format = 'pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format = 'png', dpi=500)\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_spec(self, xminorticks = 250, legend = True,\n wmin = 3500, return_figure=False,\n savepng = False, savepdf = False, outpath = False,\n verbose = False, add_mjd = True,\n *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self, \"spec\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 10])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n cmap_indices = np.linspace(0,1, len(self.spec))\n\n j = 0\n for i, spec_key in enumerate(self.spec):\n # if verbose: print(self.spec[spec_key].data.__dict__)\n\n plot_label_string = r'$\\rm{' + self.spec[spec_key].data.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n\n\n v_eff = 5436.87 ##Angstrom - TODO ???\n w = np.logical_and(self.spec[spec_key].data['wavelength'] > (v_eff-100.),self.spec[spec_key].data['wavelength'] < v_eff+100.)\n\n if verbose: print(i, len(w[np.where(w == True)]), spec_key, len(self.spec[spec_key].data['wavelength']), len(self.spec[spec_key].data['flux']), len(self.spec[spec_key].flux))\n if len(w[np.where(w == True)]) > 0:\n if verbose: print(len(w), 'Foo')\n flux_norm = self.spec[spec_key].flux / np.nanmean(self.spec[spec_key].flux[w])\n\n ax1.plot(self.spec[spec_key].data['wavelength'], flux_norm - 0.5*j, lw = 2,\n label = plot_label_string, color = defaults.spec_colourmap(cmap_indices[i]),\n *args, **kwargs)\n\n maxspecxdata = np.nanmax(self.spec[spec_key].data['wavelength'])\n minspecxdata = np.nanmin(self.spec[spec_key].data['wavelength'])\n\n w = np.where(self.spec[spec_key].data['wavelength'] >= maxspecxdata - 200)\n yatmaxspecxdata = 
np.nanmean((flux_norm - 0.5*j)[w])\n w = np.where(self.spec[spec_key].data['wavelength'] <= minspecxdata + 200)\n yatminspecxdata = np.nanmean((flux_norm - 0.5*j)[w])\n if verbose: print(yatminspecxdata)\n # if i == 0:\n if j == 0:\n maxplotydata = np.nanmax(flux_norm - 0.5*j)\n # minplotydata = np.nanmin(flux_norm - 0.5*j)\n minplotydata = 0. - 0.5*j ## Assumes always positive flux\n\n\n maxplotxdata = maxspecxdata\n minplotxdata = np.nanmin(self.spec[spec_key].data['wavelength'])\n else:\n maxplotydata = np.nanmax(np.append(maxplotydata, np.append(yatminspecxdata, yatminspecxdata)))\n # minplotydata = np.nanmin(np.append(minplotydata, flux_norm - 0.5*j))\n minplotydata = 0. - 0.5*j ## Assumes always positive flux\n maxplotxdata = np.nanmax(np.append(maxplotxdata, np.nanmax(self.spec[spec_key].data['wavelength'])))\n minplotxdata = np.nanmin(np.append(minplotxdata, np.nanmin(self.spec[spec_key].data['wavelength'])))\n if add_mjd:\n # ax1.plot([maxspecxdata, 11000],[1 - 0.5*j, 1 - 0.5*j], ls = '--', color = hex['batman'])\n # ax1.plot([maxspecxdata, 11000],[yatmaxspecxdata, yatmaxspecxdata], ls = '--', color = hex['batman'])\n ax1.plot([2000, minspecxdata],[1 - 0.5*j, yatminspecxdata], ls = '--', color = colours.hex['batman'])\n # txt = ax1.text(1500, yatminspecxdata, r'$' + str(self.spec[spec_key].mjd_obs) + '$',\n # horizontalalignment = 'right', verticalalignment = 'center')\n txt = ax1.text(2000, 1 - 0.5*j, r'$' + str(self.spec[spec_key].mjd_obs) + '$',\n horizontalalignment = 'right', verticalalignment = 'center')\n # ax1.text(1000, 1 - 0.5*j, r'$' + str(self.spec[spec_key].mjd_obs) + '$', horizontalalignment = 'right')\n j = j + 1\n else:\n if verbose: print(\"Not enough data to normalise\")\n if legend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ax1.set_ylim(minplotydata - 0.5, maxplotydata + 0.5)\n ax1.set_xlim(1250, maxplotxdata*1.02)\n\n if verbose: print(minplotydata, maxplotydata)\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Flux, Arbitrary}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.set_yticklabels('')\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format = 'pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format = 'png', dpi=500)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_spec_coverage(self, xminorticks = 250, yminorticks = 5, legend = True,\n wmin = 3500, return_figure=False,\n savepng = False, savepdf = False, outpath = False,\n verbose = False,):\n \"\"\"\n\n :param xminorticks:\n :param yminorticks:\n :param legend:\n :param wmin:\n :param return_figure:\n :param savepng:\n :param savepdf:\n :param outpath:\n :param verbose:\n :return:\n \"\"\"\n if hasattr(self, \"spec\"):\n utils.setup_plot_defaults()\n\n y = [self.spec[i].mjd_obs for i in self.spec]\n xmax = [self.spec[i].max_wavelength for i in self.spec]\n xmin = [self.spec[i].min_wavelength for i in self.spec]\n\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n\n fig = plt.figure(figsize=[10, 3])\n fig.subplots_adjust(left=0.09, bottom=0.2, 
top=0.99,\n right=0.99, hspace=0, wspace=0)\n\n ax = fig.add_subplot(111)\n\n ax.hlines(y=y, xmin=xmin, xmax=xmax)\n\n ax.scatter(xmin, y, color=\"blue\")\n ax.scatter(xmax, y, color=\"red\")\n\n xminorLocator = MultipleLocator(xminorticks)\n ax.xaxis.set_minor_locator(xminorLocator)\n yminorLocator = MultipleLocator(yminorticks)\n ax.yaxis.set_minor_locator(yminorLocator)\n\n ax.set_xlabel(xaxis_label_string)\n ax.set_ylabel(yaxis_label_string)\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format='pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format='png', dpi=500)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_spectrum(self, spec_key):\n \"\"\"\n\n :param spec_key:\n :return:\n \"\"\"\n if hasattr(self, \"spec\"):\n if spec_key in self.spec:\n print(match)\n\n pass\n\n\n def plot_mangledspec(self, xminorticks = 250, legend = True,\n wmin = 3500, return_figure=False,\n savepng = False, savepdf = False, outpath = False,\n verbose = False, add_mjd = True,\n *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self, \"mangledspec\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 10])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n cmap_indices = np.linspace(0,1, len(self.mangledspec))\n if verbose: print(len(cmap_indices))\n\n j = 0\n for i, spec_key in enumerate(self.mangledspec):\n if hasattr(self.mangledspec[spec_key], \"mjd_obs\"):\n # if verbose: print(self.spec[spec_key].data.__dict__)\n\n plot_label_string = r'$\\rm{' + self.mangledspec[spec_key].data.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n\n\n v_eff = 5436.87 ##Angstrom\n w = np.logical_and(self.mangledspec[spec_key].data['wavelength'] > (v_eff-100.),self.mangledspec[spec_key].data['wavelength'] < v_eff+100.)\n\n if verbose: print(i, len(w[np.where(w == True)]), spec_key, len(self.mangledspec[spec_key].data['wavelength']), len(self.mangledspec[spec_key].data['flux']), len(self.mangledspec[spec_key].flux))\n if len(w[np.where(w == True)]) > 0:\n\n if verbose: print(len(w), 'Foo')\n\n flux_norm = self.mangledspec[spec_key].flux / np.nanmean(self.mangledspec[spec_key].flux[w])\n\n ax1.plot(self.mangledspec[spec_key].data['wavelength'], flux_norm - 0.5*j, lw = 2,\n label = plot_label_string, color = defaults.spec_colourmap(cmap_indices[j]),\n *args, **kwargs)\n\n maxspecxdata = np.nanmax(self.mangledspec[spec_key].data['wavelength'])\n minspecxdata = np.nanmin(self.mangledspec[spec_key].data['wavelength'])\n\n w = np.where(self.mangledspec[spec_key].data['wavelength'] >= maxspecxdata - 200)\n yatmaxspecxdata = np.nanmean((flux_norm - 0.5*j)[w])\n w = np.where(self.mangledspec[spec_key].data['wavelength'] <= minspecxdata + 200)\n yatminspecxdata = np.nanmean((flux_norm - 0.5*j)[w])\n if verbose: print(yatminspecxdata)\n # if i == 0:\n if j == 0:\n maxplotydata = np.nanmax(flux_norm - 0.5*j)\n # minplotydata = np.nanmin(flux_norm - 0.5*j)\n minplotydata = 0. 
- 0.5*j ## Assumes always positive flux\n\n\n maxplotxdata = maxspecxdata\n minplotxdata = np.nanmin(self.mangledspec[spec_key].data['wavelength'])\n else:\n maxplotydata = np.nanmax(np.append(maxplotydata, np.append(yatminspecxdata, yatminspecxdata)))\n # minplotydata = np.nanmin(np.append(minplotydata, flux_norm - 0.5*j))\n minplotydata = 0. - 0.5*j ## Assumes always positive flux\n maxplotxdata = np.nanmax(np.append(maxplotxdata, np.nanmax(self.mangledspec[spec_key].data['wavelength'])))\n minplotxdata = np.nanmin(np.append(minplotxdata, np.nanmin(self.mangledspec[spec_key].data['wavelength'])))\n if add_mjd:\n # ax1.plot([maxspecxdata, 11000],[1 - 0.5*j, 1 - 0.5*j], ls = '--', color = hex['batman'])\n # ax1.plot([maxspecxdata, 11000],[yatmaxspecxdata, yatmaxspecxdata], ls = '--', color = hex['batman'])\n ax1.plot([2000, minspecxdata],[1 - 0.5*j, yatminspecxdata], ls = '--', color = colours.hex['batman'])\n # txt = ax1.text(1500, yatminspecxdata, r'$' + str(self.mangledspec[spec_key].mjd_obs) + '$',\n # horizontalalignment = 'right', verticalalignment = 'center')\n txt = ax1.text(2000, 1 - 0.5*j, r'$' + str(self.mangledspec[spec_key].mjd_obs) + '$',\n horizontalalignment = 'right', verticalalignment = 'center')\n # ax1.text(1000, 1 - 0.5*j, r'$' + str(self.mangledspec[spec_key].mjd_obs) + '$', horizontalalignment = 'right')\n j = j + 1\n else:\n if verbose: print(\"Not enough data to normalise\")\n if legend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ax1.set_ylim(minplotydata - 0.5, maxplotydata + 0.5)\n ax1.set_xlim(1250, maxplotxdata*1.02)\n\n if verbose: print(minplotydata, maxplotydata)\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Flux, Arbitrary}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.set_yticklabels('')\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format = 'pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format = 'png', dpi=500)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_filters(self, filters = False, xminorticks = 250, yminorticks = 0.1,\n show_lims = False, return_figure=False, verbose=False,\n *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self.phot, 'data_filters'):\n\n if not filters:\n filters = self.phot.data_filters\n\n if verbose: print(filters)\n\n utils.setup_plot_defaults()\n xaxis_label_string = r'$\\textnormal{Wavelength, Angstrom }(\\AA)$'\n yaxis_label_string = r'$\\textnormal{Fractional Throughput}$'\n\n yminorLocator = MultipleLocator(yminorticks)\n xminorLocator = MultipleLocator(xminorticks)\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n for i, filter_key in enumerate(filters):\n ## Check if there is something in the class to plot\n if hasattr(self.phot.data_filters[filter_key], \"wavelength\") and hasattr(self.phot.data_filters[filter_key], \"throughput\"):\n\n plot_label_string = r'$\\textnormal{' + self.phot.data_filters[filter_key].filter_name.replace(\"_\", \"\\_\") + '}$'\n\n\n if 
hasattr(self.phot.data_filters[filter_key], \"_plot_colour\"):\n ax1.plot(self.phot.data_filters[filter_key].wavelength, self.phot.data_filters[filter_key].throughput, color = self.phot.data_filters[filter_key]._plot_colour,\n lw = 2, label = plot_label_string)\n else:\n ax1.plot(self.phot.data_filters[filter_key].wavelength, self.phot.data_filters[filter_key].throughput, lw = 2, label = plot_label_string)\n\n if show_lims:\n try:\n ax1.plot([self.phot.data_filters[filter_key]._upper_edge, self.phot.data_filters[filter_key]._upper_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = self.phot.data_filters[filter_key]._plot_colour, zorder = 0, )\n ax1.plot([self.phot.data_filters[filter_key]._lower_edge, self.phot.data_filters[filter_key]._lower_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = self.phot.data_filters[filter_key]._plot_colour, zorder = 0, )\n except:\n print(\"Failed\")\n else:\n warning.warn(\"Doesn't look like you have loaded a filter into the object\")\n\n default_xlims = ax1.get_xlim()\n ax1.plot(default_xlims, [0,0], color = colours.hex[\"black\"], ls = \":\")\n ax1.set_xlim(default_xlims)\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.yaxis.set_minor_locator(yminorLocator)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n ax1.legend(loc = 0)\n\n if return_figure:\n return fig\n\n plt.show()\n pass\n\n\n\n def get_lcfit(self, path, verbose=False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n errors.StringWarning(path)\n self.lcfit = LCfitClass()\n self.lcfit.load_formatted_phot(path, verbose=verbose)\n self.lcfit.unpack(verbose=verbose)\n self.lcfit._sort_phot(verbose=verbose)\n self.lcfit.get_fit_splines(verbose=verbose)\n\n pass\n\n\n def get_specfit(self, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n self.specfit = OrderedDict()\n\n if hasattr(self, \"name\"):\n specfit_list = functions.find_recon_spec(self.recon_directory, self.name, verbose = verbose)\n # if verbose: print(specfit_list)\n\n for i, specfit_file in enumerate(specfit_list):\n if verbose: print(i, specfit_file)\n self.specfit[specfit_file] = specfitClass()\n self.specfit[specfit_file].load(filename = specfit_file,\n directory = self.recon_directory, verbose = verbose)\n self.specfit[specfit_file].set_orig_specpath()\n\n else:\n warnings.warn(\"This SNClass object has no name\")\n if verbose: print(\"This SNClass object has no name\")\n\n pass\n\n\n def get_simplespecphot(self, verbose = False):\n \"\"\"\n When the SNClass has both lcfits and spec, sample the lcfits at the\n obsdate of the relevant (i.e. overlapping) spectra. 
Initially to\n recreate Fig 2 of Paper.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, 'lcfit') and hasattr(self, 'spec'):\n # if verbose: print(\"Foo\")\n\n # try:\n # # self.simplespecphot = LCfitClass()\n # self.simplespecphot = PhotometryClass()\n #\n # lenstring = np.nanmax([len(i) for i in self.lcfit.data_filters.keys()]) ## object dtype is slow\n # self.simplespecphot.phot = Table(names = ('MJD', 'flux', 'flux_err', 'filter'),\n # dtype = [float, float, float, '|S'+str(lenstring)])\n #\n # for i, spectrum in enumerate(self.spec):\n #\n # for filter_name in self.spec[spectrum]._overlapping_filter_list:\n # if verbose: print(i, spectrum, filter_name)\n #\n # mjd = self.spec[spectrum].mjd_obs\n # flux = self.lcfit.spline[filter_name](mjd)\n # flux_err = self.lcfit.spline[filter_name + \"_err\"](mjd)\n # newrow = {'MJD': mjd, 'flux': flux, 'flux_err': flux_err, 'filter':filter_name}\n # self.simplespecphot.phot.add_row([mjd, flux, flux_err, filter_name])\n #\n # self.simplespecphot.unpack()\n # except:\n # warnings.warn(\"simplespecphot failed\")\n\n\n # self.simplespecphot = LCfitClass()\n self.simplespecphot = PhotometryClass()\n\n lenstring = np.nanmax([len(i) for i in self.lcfit.data_filters.keys()]) ## object dtype is slow\n # self.simplespecphot.phot = Table(names=('MJD', 'flux', 'flux_err', 'filter'),\n # dtype=[float, float, float, '|S' + str(lenstring)])\n\n mjd_list = []\n flux_list = []\n flux_err_list = []\n filter_list = []\n\n for i, spectrum in enumerate(self.spec):\n\n for filter_name in self.spec[spectrum]._overlapping_filter_list:\n if verbose: print(i, spectrum, filter_name, type(filter_name))\n\n mjd = self.spec[spectrum].mjd_obs\n flux = self.lcfit.spline[filter_name](mjd)\n flux_err = self.lcfit.spline[filter_name + \"_err\"](mjd)\n # newrow = {'MJD': mjd, 'flux': flux, 'flux_err': flux_err, 'filter': filter_name}\n # if i == 0:\n # self.simplespecphot.phot = Table(newrow)\n # else:\n # self.simplespecphot.phot.add_row([mjd, flux, flux_err, filter_name])\n\n mjd_list.append(mjd)\n flux_list.append(flux)\n flux_err_list.append(flux_err)\n filter_list.append(filter_name)\n\n self.simplespecphot.phot = Table((mjd_list, flux_list, flux_err_list, filter_list), names=('MJD', 'flux', 'flux_err', 'filter'))\n\n self.simplespecphot.unpack(verbose=verbose)\n\n pass\n\n\n def check_overlaps(self, verbose = False):\n \"\"\"\n Checks the filters that the spectrum overlaps with.\n originally used functions.filter_within_spec\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... 
no data?\")\n pass\n\n\n def get_specphot(self, spectrum = False, filter_objects = False, verbose = False):\n \"\"\"\n\n :param spectrum:\n :param filter_objects:\n :param verbose:\n :return:\n \"\"\"\n if hasattr(self, \"spec\"):\n if spectrum:\n spec_list = [spectrum]\n else:\n spec_list = self.spec\n if not filter_objects:\n filter_objects = self.phot.data_filters\n\n for i, spec in enumerate(spec_list):\n self.spec[spec].get_specphot(filter_objects=filter_objects, verbose=verbose)\n\n else:\n warnings.warn(\"object has no spectra\")\n pass\n\n\nclass InfoClass():\n \"\"\"\n\n \"\"\"\n\n def __init__(self):\n pass\n\n def load(self, path = False):\n if not path:\n path = defaults._default_info_path\n\n self._data = Table.read(path, format = \"ascii.commented_header\")\n\n self.table = self._data\n\n self.table.meta[\"success\"] = True\n self.snname = self.table[\"snname\"]\n self.z_obs = self.table[\"z_obs\"]\n self.distmod = self.table[\"mu\"]\n self.distance = Distance(distmod = self.table[\"mu\"])\n self.table[\"z_distmod\"] = [i.z for i in self.distance]\n\n self.RA = self.table[\"RA\"]\n self.Dec = self.table[\"Dec\"]\n\n self.table[\"SkyCoords\"] = SkyCoord(self.table[\"RA\"], self.table[\"Dec\"], unit=(u.hourangle, u.deg))\n self.coords = self.table[\"SkyCoords\"]\n\n self.type = self.table[\"Type\"]\n\n def get_sn_info(self, snname):\n try:\n w = np.where(self.snname == snname)\n except:\n print(\"foo\")\n\n return self.table[w]\n\n\n\n# #----------------------------------------------------------------------------# #\n# # /CODE # #\n# #----------------------------------------------------------------------------# #\n\n## FUNCTIONS THAT ITS A PAIN TO SHIFT\n\ndef find_specphase_spec(snname, dir_path = defaults._default_specphase_dir_path, file_type = \".spec\", verbose = False):\n \"\"\"\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if verbose: print(dir_path)\n errors.StringWarning(dir_path)\n errors.StringWarning(snname)\n if type(snname) is not str and type(snname) is not np.string_:\n raise(errors.PathError)\n\n if not utils.check_dir_path(dir_path):\n print(\"utils.check_dir_path failed\")\n return False\n\n try:\n ls = np.array(os.listdir(dir_path))\n\n # wspec = np.where(np.char.find(ls, file_type, start = -len(file_type)) > -1)\n # spec_list = ls[wspec]\n spec_list = [i for i in ls if i[-5:] == \".spec\"]\n ## The last 18 chars are for the MJD and file_type\n # wsn = np.where([i[:-18] == snname for i in spec_list])\n # snmatch_list = spec_list[wsn]\n snmatch_list = [i for i in spec_list if i[:len(snname)] == snname ]\n\n if verbose:\n print(\"Found: \")\n print(ls)\n print(\"Spec:\")\n print(spec_list)\n print(\"Matched:\")\n print(snmatch_list)\n if len(snmatch_list) is 0:\n warnings.warn(\"No matches found.\")\n return snmatch_list\n\n except:\n warnings.warn(\"Something went wrong\")\n return False\n\n\ndef find_filter_phot(path = defaults._default_data_dir_path, snname = False,\n prefix = 'SN', file_type = '.dat',\n verbose = False):\n \"\"\"\n Tries to find photometry in the supplied directory.\n\n Looks in a directory for things that match SN*.dat. 
Uses regex via `re` -\n probably overkill.\n\n Parameters\n ----------\n\n path :\n\n snname :\n\n prefix :\n\n file_type :\n\n\n Returns\n -------\n\n phot_list :\n :param path:\n :param snname:\n :param prefix:\n :param file_type:\n :param verbose:\n\n \"\"\"\n # regex = re.compile(\"^SN.*.dat\")\n\n errors.StringWarning(path)\n if not utils.check_dir_path(path):\n # return False\n raise errors.PathError\n\n\n try:\n if snname:\n match_string = \"^\" + str(snname) + \".*\" + '.dat'\n else:\n match_string = \"^\" + str(prefix) + \".*\" + '.dat'\n except:\n raise TypeError\n\n regex = re.compile(match_string)\n\n ls = os.listdir(path)\n\n phot_list = [os.path.join(path, match.group(0)) for file_name in ls for match in [regex.search(file_name)] if match]\n\n if os.path.join(path, snname + file_type) in phot_list:\n phot_list.remove(os.path.join(path,snname + file_type))\n warnings.warn(\"Found \" + os.path.join(path,snname + file_type) + \" - you could just read that in.\")\n\n if verbose:\n print(\"searching for\", match_string)\n print(\"Found: \")\n print(ls)\n print(\"Matched:\")\n print(phot_list)\n if len(phot_list) is 0:\n warnings.warn(\"No matches found.\")\n return phot_list\n"
] | [
[
"numpy.nanmax",
"matplotlib.ticker.MultipleLocator",
"numpy.in1d",
"numpy.nanmin",
"numpy.cumsum",
"matplotlib.pyplot.get_cmap",
"numpy.concatenate",
"numpy.round",
"numpy.mean",
"numpy.nanmean",
"numpy.where",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.unique",
"scipy.integrate.trapz",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.figure",
"numpy.isnan",
"numpy.append",
"numpy.log10",
"scipy.integrate.simps",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.logical_and",
"numpy.array_equal",
"numpy.bitwise_and",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.9",
"1.7",
"1.8"
],
"tensorflow": []
}
] |
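The simplespecphot code in the record above builds synthetic photometry by evaluating a per-filter light-curve spline at each spectrum's MJD (self.lcfit.spline[filter_name](mjd)), and the record's API list includes scipy.interpolate.InterpolatedUnivariateSpline. A minimal standalone sketch of that pattern follows; the epochs, fluxes, and variable names are invented for illustration and are not taken from the record.

# Illustrative sketch (not from the record above): evaluate a light-curve
# spline at a spectrum's epoch to get a synthetic photometry point.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

mjd_phot = np.array([55000.0, 55005.0, 55010.0, 55020.0, 55035.0])  # made-up photometry epochs
flux_phot = np.array([1.0, 3.2, 4.1, 2.5, 0.9])                     # made-up fluxes

spline = InterpolatedUnivariateSpline(mjd_phot, flux_phot, k=3)      # cubic spline needs > 3 points

mjd_spec = 55012.3                        # made-up spectrum epoch
synthetic_flux = float(spline(mjd_spec))  # flux interpolated at the spectrum's MJD
print(synthetic_flux)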
TXM-DOOM/B.Tech-CSE-Y2 | [
"763436ae866f1f18fa8071c253d005bdf289532f"
] | [
"applied-statistics/python-revisited/libraries/numpy/sorting.py"
] | [
"import numpy as np\n\ntestArr1 = np.array([1, 20, 23, 14, 2, 1, 234, 12, 1, 3]) # Sorts in ascending order\ntestArr2 = np.array([True, False, False, True]) # False at the start of the array and then True\ntestArr3 = np.array(['C', 'A', 'Z', 'V']) # Sorts Alphabetically\n\nprint('1: {}\\n2: {}\\n3: {}'.format(np.sort(testArr1), np.sort(testArr2), np.sort(testArr3)))"
] | [
[
"numpy.array",
"numpy.sort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
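The sorting snippet in the record above uses np.sort, which returns a sorted copy rather than sorting in place. A short sketch of that distinction (array values here are arbitrary):

# Illustrative sketch (not from the record above): np.sort returns a sorted
# copy, while ndarray.sort() sorts the array in place.
import numpy as np

arr = np.array([3, 1, 2])
sorted_copy = np.sort(arr)   # new sorted array; arr is still [3 1 2]
arr.sort()                   # sorts arr in place; arr becomes [1 2 3]
print(sorted_copy, arr)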
benjamin-work/aristo-mini | [
"4d99fa4cb9eb1e64d0d21adfea15450d626cfcba"
] | [
"aristomini/common/wordtwovec.py"
] | [
"\"\"\"\na wrapper class for the gensim Word2Vec model that has extra features we need, as well as some\nhelper functions for tokenizing and stemming and things like that.\n\"\"\"\n\nfrom functools import lru_cache\nimport math\nfrom typing import Iterable, List\n\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom gensim.parsing.porter import PorterStemmer\nfrom gensim.models import Word2Vec\nfrom gensim.utils import simple_preprocess\n\nimport numpy as np\n\nstemmer = PorterStemmer()\n\n\n@lru_cache(maxsize=1024)\ndef stem(word: str) -> str:\n \"\"\"stemming words is not cheap, so use a cache decorator\"\"\"\n return stemmer.stem(word)\n\n\ndef tokenizer(sentence: str) -> List[str]:\n \"\"\"use gensim's `simple_preprocess` and `STOPWORDS` list\"\"\"\n return [stem(token) for token in simple_preprocess(sentence) if token not in STOPWORDS]\n\n\ndef cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:\n \"\"\"https://en.wikipedia.org/wiki/Cosine_similarity\"\"\"\n num = np.dot(v1, v2)\n d1 = np.dot(v1, v1)\n d2 = np.dot(v2, v2)\n\n if d1 > 0.0 and d2 > 0.0:\n return num / math.sqrt(d1 * d2)\n else:\n return 0.0\n\n\nclass WordTwoVec:\n \"\"\"\n a wrapper for gensim.Word2Vec with added functionality to embed phrases and compute the\n \"goodness\" of a question-answer pair based on embedding-vector similarity\n \"\"\"\n def __init__(self, model_file: str) -> None:\n if model_file.endswith(\".bin\"):\n self.model = Word2Vec.load_word2vec_format(model_file, binary=True)\n else:\n self.model = Word2Vec.load(model_file)\n\n def embed(self, words: Iterable[str]) -> np.ndarray:\n \"\"\"given a list of words, find their vector embeddings and return the vector mean\"\"\"\n # first find the vector embedding for each word\n vectors = [self.model[word] for word in words if word in self.model]\n\n if vectors:\n # if there are vector embeddings, take the vector average\n return np.average(vectors, axis=0)\n else:\n # otherwise just return a zero vector\n return np.zeros(self.model.vector_size)\n\n def goodness(self, question_stem: str, choice_text: str) -> float:\n \"\"\"how good is the choice for this question?\"\"\"\n question_words = {word for word in tokenizer(question_stem)}\n choice_words = {word for word in tokenizer(choice_text) if word not in question_words}\n\n score = cosine_similarity(self.embed(question_words), self.embed(choice_words))\n\n if \"Max is doing\" in question_stem:\n print(choice_text, score)\n\n return score\n"
] | [
[
"numpy.dot",
"numpy.average",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
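The WordTwoVec code in the record above embeds a phrase as the mean of its word vectors and scores a question/choice pair by cosine similarity. A minimal numpy-only sketch of that scoring step, with made-up word vectors and no gensim model loaded:

# Illustrative sketch (not from the record above): mean-of-word-vectors
# embedding plus cosine similarity, mirroring embed() and cosine_similarity().
import math
import numpy as np

question_vecs = [np.array([1.0, 0.0, 2.0]), np.array([0.0, 1.0, 0.0])]  # made-up word vectors
choice_vecs = [np.array([0.5, 0.5, 1.0])]

q = np.average(question_vecs, axis=0)   # phrase embedding = mean of its word vectors
c = np.average(choice_vecs, axis=0)

d1, d2 = np.dot(q, q), np.dot(c, c)
score = np.dot(q, c) / math.sqrt(d1 * d2) if d1 > 0.0 and d2 > 0.0 else 0.0
print(score)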
AishwaryaKalloli/koalas | [
"8d35a74508c1319996c8c27e2a5e24af52b9ee31",
"8d35a74508c1319996c8c27e2a5e24af52b9ee31"
] | [
"databricks/koalas/base.py",
"databricks/koalas/tests/test_groupby.py"
] | [
"#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nBase and utility classes for Koalas objects.\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nimport datetime\nfrom functools import wraps, partial\nfrom typing import Any, Callable, Tuple, Union, cast, TYPE_CHECKING\nimport warnings\n\nimport numpy as np\nimport pandas as pd # noqa: F401\nfrom pandas.api.types import is_list_like\nfrom pyspark import sql as spark\nfrom pyspark.sql import functions as F, Window, Column\nfrom pyspark.sql.types import (\n BooleanType,\n DateType,\n DoubleType,\n FloatType,\n IntegralType,\n LongType,\n StringType,\n TimestampType,\n)\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas import numpy_compat\nfrom databricks.koalas.internal import (\n InternalFrame,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_DEFAULT_INDEX_NAME,\n)\nfrom databricks.koalas.spark import functions as SF\nfrom databricks.koalas.spark.accessors import SparkIndexOpsMethods\nfrom databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype\nfrom databricks.koalas.utils import align_diff_series, same_anchor, scol_for, validate_axis\nfrom databricks.koalas.frame import DataFrame\n\nif TYPE_CHECKING:\n from databricks.koalas.indexes import Index\n from databricks.koalas.series import Series\n\n\ndef booleanize_null(left_scol, scol, f) -> Column:\n \"\"\"\n Booleanize Null in Spark Column\n \"\"\"\n comp_ops = [\n getattr(Column, \"__{}__\".format(comp_op))\n for comp_op in [\"eq\", \"ne\", \"lt\", \"le\", \"ge\", \"gt\"]\n ]\n\n if f in comp_ops:\n # if `f` is \"!=\", fill null with True otherwise False\n filler = f == Column.__ne__\n scol = F.when(scol.isNull(), filler).otherwise(scol)\n\n elif f == Column.__or__:\n scol = F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol)\n\n elif f == Column.__and__:\n scol = F.when(scol.isNull(), False).otherwise(scol)\n\n return scol\n\n\ndef column_op(f):\n \"\"\"\n A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be\n supported too. If this decorator is used for the `f` function that takes Spark Column and\n returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas\n Series.\n\n :param f: a function that takes Spark Column and returns Spark Column.\n :param self: Koalas Series\n :param args: arguments that the function `f` takes.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, *args):\n # It is possible for the function `f` takes other arguments than Spark Column.\n # To cover this case, explicitly check if the argument is Koalas Series and\n # extract Spark Column. 
For other arguments, they are used as are.\n cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]\n\n if all(same_anchor(self, col) for col in cols):\n # Same DataFrame anchors\n args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]\n scol = f(self.spark.column, *args)\n scol = booleanize_null(self.spark.column, scol, f)\n\n kser = self._with_new_scol(scol)\n else:\n # Different DataFrame anchors\n def apply_func(this_column, *that_columns):\n scol = f(this_column, *that_columns)\n return booleanize_null(this_column, scol, f)\n\n kser = align_diff_series(apply_func, self, *args, how=\"full\")\n\n if not all(self.name == col.name for col in cols):\n kser = kser.rename()\n\n return kser\n\n return wrapper\n\n\ndef numpy_column_op(f):\n @wraps(f)\n def wrapper(self, *args):\n # PySpark does not support NumPy type out of the box. For now, we convert NumPy types\n # into some primitive types understandable in PySpark.\n new_args = []\n for arg in args:\n # TODO: This is a quick hack to support NumPy type. We should revisit this.\n if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):\n new_args.append(float(arg / np.timedelta64(1, \"s\")))\n else:\n new_args.append(arg)\n return column_op(f)(self, *new_args)\n\n return wrapper\n\n\nclass IndexOpsMixin(object, metaclass=ABCMeta):\n \"\"\"common ops mixin to support a unified interface / docs for Series / Index\n\n Assuming there are following attributes or properties and function.\n \"\"\"\n\n @property\n @abstractmethod\n def _internal(self) -> InternalFrame:\n pass\n\n @property\n @abstractmethod\n def _kdf(self) -> DataFrame:\n pass\n\n @abstractmethod\n def _with_new_scol(self, scol: spark.Column):\n pass\n\n @property\n @abstractmethod\n def _column_label(self) -> Tuple:\n pass\n\n @property\n @abstractmethod\n def spark(self) -> SparkIndexOpsMethods:\n pass\n\n @property\n def spark_column(self) -> Column:\n warnings.warn(\n \"Series.spark_column is deprecated as of Series.spark.column. \"\n \"Please use the API instead.\",\n FutureWarning,\n )\n return self.spark.column\n\n spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__\n\n # arithmetic operators\n __neg__ = column_op(Column.__neg__)\n\n def __add__(self, other) -> Union[\"Series\", \"Index\"]:\n if not isinstance(self.spark.data_type, StringType) and (\n (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n if isinstance(self.spark.data_type, StringType):\n # Concatenate string columns\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType):\n return column_op(F.concat)(self, other)\n # Handle df['col'] + 'literal'\n elif isinstance(other, str):\n return column_op(F.concat)(self, F.lit(other))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__add__)(self, other)\n\n def __sub__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. 
pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(\n other.spark.data_type, TimestampType\n ):\n warnings.warn(msg, UserWarning)\n return self.astype(\"bigint\") - other.astype(\"bigint\")\n elif isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return self.astype(\"bigint\") - F.lit(other).cast(as_spark_type(\"bigint\"))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. \"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, other).astype(\"bigint\")\n elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, F.lit(other)).astype(\"bigint\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__sub__)(self, other)\n\n def __mul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if (\n isinstance(self.spark.data_type, IntegralType)\n and isinstance(other, IndexOpsMixin)\n and isinstance(other.spark.data_type, StringType)\n ):\n return column_op(SF.repeat)(other, self)\n\n if isinstance(self.spark.data_type, StringType):\n if (\n isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, IntegralType)\n ) or isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__mul__)(self, other)\n\n def __truediv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __truediv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. 
When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def truediv(left, right):\n return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n\n return numpy_column_op(truediv)(self, other)\n\n def __mod__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def mod(left, right):\n return ((left % right) + right) % right\n\n return column_op(mod)(self, other)\n\n def __radd__(self, other) -> Union[\"Series\", \"Index\"]:\n # Handle 'literal' + df['col']\n if not isinstance(self.spark.data_type, StringType) and isinstance(other, str):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, str):\n return self._with_new_scol(F.concat(F.lit(other), self.spark.column))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__radd__)(self, other)\n\n def __rsub__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -(self.astype(\"bigint\") - F.lit(other).cast(as_spark_type(\"bigint\")))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. 
\"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -column_op(F.datediff)(self, F.lit(other)).astype(\"bigint\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__rsub__)(self, other)\n\n def __rmul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__rmul__)(self, other)\n\n def __rtruediv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rtruediv(left, right):\n return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(\n F.lit(right).__truediv__(left)\n )\n\n return numpy_column_op(rtruediv)(self, other)\n\n def __floordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __floordiv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def floordiv(left, right):\n return F.when(F.lit(right is np.nan), np.nan).otherwise(\n F.when(\n F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))\n ).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n )\n\n return numpy_column_op(floordiv)(self, other)\n\n def __rfloordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rfloordiv(left, right):\n return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(\n F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))\n )\n\n return numpy_column_op(rfloordiv)(self, other)\n\n def __rmod__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def rmod(left, right):\n return ((right % left) + left) % left\n\n return column_op(rmod)(self, other)\n\n __pow__ = 
column_op(Column.__pow__)\n __rpow__ = column_op(Column.__rpow__)\n __abs__ = column_op(F.abs)\n\n # comparison operators\n __eq__ = column_op(Column.__eq__)\n __ne__ = column_op(Column.__ne__)\n __lt__ = column_op(Column.__lt__)\n __le__ = column_op(Column.__le__)\n __ge__ = column_op(Column.__ge__)\n __gt__ = column_op(Column.__gt__)\n\n # `and`, `or`, `not` cannot be overloaded in Python,\n # so use bitwise operators as boolean operators\n __and__ = column_op(Column.__and__)\n __or__ = column_op(Column.__or__)\n __invert__ = column_op(Column.__invert__)\n __rand__ = column_op(Column.__rand__)\n __ror__ = column_op(Column.__ror__)\n\n # NDArray Compat\n def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):\n # Try dunder methods first.\n result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n\n # After that, we try with PySpark APIs.\n if result is NotImplemented:\n result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(\n self, ufunc, method, *inputs, **kwargs\n )\n\n if result is not NotImplemented:\n return result\n else:\n # TODO: support more APIs?\n raise NotImplementedError(\"Koalas objects currently do not support %s.\" % ufunc)\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Return the dtype object of the underlying data.\n\n Examples\n --------\n >>> s = ks.Series([1, 2, 3])\n >>> s.dtype\n dtype('int64')\n\n >>> s = ks.Series(list('abc'))\n >>> s.dtype\n dtype('O')\n\n >>> s = ks.Series(pd.date_range('20130101', periods=3))\n >>> s.dtype\n dtype('<M8[ns]')\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.dtype\n dtype('<M8[ns]')\n \"\"\"\n return spark_type_to_pandas_dtype(self.spark.data_type)\n\n @property\n def empty(self) -> bool:\n \"\"\"\n Returns true if the current object is empty. Otherwise, returns false.\n\n >>> ks.range(10).id.empty\n False\n\n >>> ks.range(0).id.empty\n True\n\n >>> ks.DataFrame({}, index=list('abc')).index.empty\n False\n \"\"\"\n return self._internal.resolved_copy.spark_frame.rdd.isEmpty()\n\n @property\n def hasnans(self) -> bool:\n \"\"\"\n Return True if it has any missing values. Otherwise, it returns False.\n\n >>> ks.DataFrame({}, index=list('abc')).index.hasnans\n False\n\n >>> ks.Series(['a', None]).hasnans\n True\n\n >>> ks.Series([1.0, 2.0, np.nan]).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).hasnans\n False\n\n >>> (ks.Series([1.0, 2.0, np.nan]) + 1).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).rename(\"a\").to_frame().set_index(\"a\").index.hasnans\n False\n \"\"\"\n sdf = self._internal.spark_frame\n scol = self.spark.column\n\n if isinstance(self.spark.data_type, (DoubleType, FloatType)):\n return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]\n else:\n return sdf.select(F.max(scol.isNull())).collect()[0][0]\n\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically increasing.\n\n .. note:: the current implementation of is_monotonic requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. 
In case of multi-index, all data are\n transferred to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018'])\n >>> ser.is_monotonic\n True\n\n >>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})\n >>> df.dates.is_monotonic\n False\n\n >>> df.index.is_monotonic\n True\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic\n False\n\n >>> ser.index.is_monotonic\n True\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic\n True\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic\n False\n \"\"\"\n return self._is_monotonic(\"increasing\")\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically decreasing.\n\n .. note:: the current implementation of is_monotonic_decreasing requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. In case of multi-index, all data are transferred\n to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018'])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})\n >>> df.dates.is_monotonic_decreasing\n False\n\n >>> df.index.is_monotonic_decreasing\n False\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.index.is_monotonic_decreasing\n False\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n False\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... 
[('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n True\n \"\"\"\n return self._is_monotonic(\"decreasing\")\n\n def _is_locally_monotonic_spark_column(self, order):\n window = (\n Window.partitionBy(F.col(\"__partition_id\"))\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-1, -1)\n )\n\n if order == \"increasing\":\n return (F.col(\"__origin\") >= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n else:\n return (F.col(\"__origin\") <= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n\n def _is_monotonic(self, order):\n assert order in (\"increasing\", \"decreasing\")\n\n sdf = self._internal.spark_frame\n\n sdf = (\n sdf.select(\n F.spark_partition_id().alias(\n \"__partition_id\"\n ), # Make sure we use the same partition id in the whole job.\n F.col(NATURAL_ORDER_COLUMN_NAME),\n self.spark.column.alias(\"__origin\"),\n )\n .select(\n F.col(\"__partition_id\"),\n F.col(\"__origin\"),\n self._is_locally_monotonic_spark_column(order).alias(\n \"__comparison_within_partition\"\n ),\n )\n .groupby(F.col(\"__partition_id\"))\n .agg(\n F.min(F.col(\"__origin\")).alias(\"__partition_min\"),\n F.max(F.col(\"__origin\")).alias(\"__partition_max\"),\n F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True))).alias(\n \"__comparison_within_partition\"\n ),\n )\n )\n\n # Now we're windowing the aggregation results without partition specification.\n # The number of rows here will be as the same of partitions, which is expected\n # to be small.\n window = Window.orderBy(F.col(\"__partition_id\")).rowsBetween(-1, -1)\n if order == \"increasing\":\n comparison_col = F.col(\"__partition_min\") >= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n else:\n comparison_col = F.col(\"__partition_min\") <= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n\n sdf = sdf.select(\n comparison_col.alias(\"__comparison_between_partitions\"),\n F.col(\"__comparison_within_partition\"),\n )\n\n ret = sdf.select(\n F.min(F.coalesce(F.col(\"__comparison_between_partitions\"), F.lit(True)))\n & F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True)))\n ).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of array dimensions.\n\n Return 1 for Series / Index / MultiIndex.\n\n Examples\n --------\n\n For Series\n\n >>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])\n >>> s.ndim\n 1\n\n For Index\n\n >>> s.index.ndim\n 1\n\n For MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... 
[1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index.ndim\n 1\n \"\"\"\n return 1\n\n def astype(self, dtype) -> Union[\"Index\", \"Series\"]:\n \"\"\"\n Cast a Koalas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> ser = ks.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.astype('int64')\n Int64Index([1, 2], dtype='int64', name='a')\n \"\"\"\n spark_type = as_spark_type(dtype)\n if not spark_type:\n raise ValueError(\"Type {} not understood\".format(dtype))\n if isinstance(spark_type, BooleanType):\n if isinstance(self.spark.data_type, StringType):\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n F.length(self.spark.column) > 0\n )\n elif isinstance(self.spark.data_type, (FloatType, DoubleType)):\n scol = F.when(\n self.spark.column.isNull() | F.isnan(self.spark.column), F.lit(True)\n ).otherwise(self.spark.column.cast(spark_type))\n else:\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n self.spark.column.cast(spark_type)\n )\n elif isinstance(spark_type, StringType):\n scol = F.when(self.spark.column.isNull(), str(None)).otherwise(\n self.spark.column.cast(spark_type)\n )\n else:\n scol = self.spark.column.cast(spark_type)\n return self._with_new_scol(scol)\n\n def isin(self, values) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Check whether `values` are contained in Series or Index.\n\n Return a boolean Series or Index showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : list or set\n The sequence of values to test.\n\n Returns\n -------\n isin : Series (bool dtype) or Index (bool dtype)\n\n Examples\n --------\n >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.isin(['lama'])\n Index([True, False, True, False, True, False], dtype='object', name='a')\n \"\"\"\n if not is_list_like(values):\n raise TypeError(\n \"only list-like objects are allowed to be passed\"\n \" to isin(), you passed a [{values_type}]\".format(values_type=type(values).__name__)\n )\n\n return self._with_new_scol(self.spark.column.isin(list(values)))\n\n def isnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values. 
Characters such as empty strings '' or\n numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser.isna() # doctest: +NORMALIZE_WHITESPACE\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.isna()\n Index([False, False, True], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n if isinstance(self.spark.data_type, (FloatType, DoubleType)):\n return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column))\n else:\n return self._with_new_scol(self.spark.column.isNull())\n\n isna = isnull\n\n def notnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True.\n Characters such as empty strings '' or numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n NA values, such as None or numpy.NaN, get mapped to False values.\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n Show which entries in a Series are not NA.\n\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.notna()\n Index([True, True, False], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"notna is not defined for MultiIndex\")\n return (~self.isnull()).rename(\n self.name # type: ignore\n )\n\n notna = notnull\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def all(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether all elements are True.\n\n Returns True unless there at least one element within a series that is\n False or equivalent (e.g. 
zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([True, True]).all()\n True\n\n >>> ks.Series([True, False]).all()\n False\n\n >>> ks.Series([0, 1]).all()\n False\n\n >>> ks.Series([1, 2, 3]).all()\n True\n\n >>> ks.Series([True, True, None]).all()\n True\n\n >>> ks.Series([True, False, None]).all()\n False\n\n >>> ks.Series([]).all()\n True\n\n >>> ks.Series([np.nan]).all()\n True\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.all()\n False\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"every(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use min as its alternative:\n ret = sdf.select(F.min(F.coalesce(col.cast(\"boolean\"), F.lit(True)))).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def any(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether any element is True.\n\n Returns False unless there at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([False, False]).any()\n False\n\n >>> ks.Series([True, False]).any()\n True\n\n >>> ks.Series([0, 0]).any()\n False\n\n >>> ks.Series([0, 1, 2]).any()\n True\n\n >>> ks.Series([False, False, None]).any()\n False\n\n >>> ks.Series([True, False, None]).any()\n True\n\n >>> ks.Series([]).any()\n False\n\n >>> ks.Series([np.nan]).any()\n False\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.any()\n True\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"any(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use max as its alternative:\n ret = sdf.select(F.max(F.coalesce(col.cast(\"boolean\"), F.lit(False)))).collect()[0][0]\n if ret is None:\n return False\n else:\n return ret\n\n # TODO: add frep and axis parameter\n def shift(self, periods=1, fill_value=None) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Shift Series/Index by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. 
Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input Series/Index, shifted.\n\n Examples\n --------\n >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.Col1.shift(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 10.0\n 4 20.0\n Name: Col1, dtype: float64\n\n >>> df.Col2.shift(periods=3, fill_value=0)\n 0 0\n 1 0\n 2 0\n 3 13\n 4 23\n Name: Col2, dtype: int64\n\n >>> df.index.shift(periods=3, fill_value=0)\n Int64Index([0, 0, 0, 0, 1], dtype='int64')\n \"\"\"\n return self._shift(periods, fill_value)\n\n def _shift(self, periods, fill_value, part_cols=()):\n if not isinstance(periods, int):\n raise ValueError(\"periods should be an int; however, got [%s]\" % type(periods).__name__)\n\n col = self.spark.column\n window = (\n Window.partitionBy(*part_cols)\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-periods, -periods)\n )\n lag_col = F.lag(col, periods).over(window)\n col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)\n return self._with_new_scol(col)\n\n # TODO: Update Documentation for Bins Parameter when its supported\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ) -> \"Series\":\n \"\"\"\n Return a Series containing counts of unique values.\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : boolean, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : boolean, default True\n Sort by values.\n ascending : boolean, default False\n Sort in ascending order.\n bins : Not Yet Supported\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n\n Examples\n --------\n For Series\n\n >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})\n >>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n Name: x, dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE\n 1.0 0.6\n 0.0 0.4\n Name: x, dtype: float64\n\n **dropna**\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n NaN 1\n Name: x, dtype: int64\n\n For Index\n\n >>> idx = ks.Index([3, 1, 2, 3, 4, np.nan])\n >>> idx\n Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')\n\n >>> idx.value_counts().sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **sort**\n\n With `sort` set to `False`, the result wouldn't be sorted by number of count.\n\n >>> idx.value_counts(sort=True).sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **normalize**\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> idx.value_counts(normalize=True).sort_index()\n 1.0 0.2\n 2.0 0.2\n 3.0 0.4\n 4.0 0.2\n dtype: float64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index 
values.\n\n >>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n NaN 1\n dtype: int64\n\n For MultiIndex.\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index # doctest: +SKIP\n MultiIndex([( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'length'),\n ('falcon', 'weight'),\n ('falcon', 'length'),\n ('falcon', 'length')],\n )\n\n >>> s.index.value_counts().sort_index()\n (cow, length) 1\n (cow, weight) 2\n (falcon, length) 2\n (falcon, weight) 1\n (lama, weight) 3\n dtype: int64\n\n >>> s.index.value_counts(normalize=True).sort_index()\n (cow, length) 0.111111\n (cow, weight) 0.222222\n (falcon, length) 0.222222\n (falcon, weight) 0.111111\n (lama, weight) 0.333333\n dtype: float64\n\n If Index has name, keep the name up.\n\n >>> idx = ks.Index([0, 0, 0, 1, 1, 2, 3], name='koalas')\n >>> idx.value_counts().sort_index()\n 0 3\n 1 2\n 2 1\n 3 1\n Name: koalas, dtype: int64\n \"\"\"\n from databricks.koalas.series import first_series\n\n if bins is not None:\n raise NotImplementedError(\"value_counts currently does not support bins\")\n\n if dropna:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()\n else:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column)\n index_name = SPARK_DEFAULT_INDEX_NAME\n column_name = self._internal.data_spark_column_names[0]\n sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()\n if sort:\n if ascending:\n sdf = sdf.orderBy(F.col(\"count\"))\n else:\n sdf = sdf.orderBy(F.col(\"count\").desc())\n\n if normalize:\n sum = sdf_dropna.count()\n sdf = sdf.withColumn(\"count\", F.col(\"count\") / F.lit(sum))\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_column_names=[index_name],\n column_labels=self._internal.column_labels,\n data_spark_columns=[scol_for(sdf, \"count\")],\n column_label_names=self._internal.column_label_names,\n )\n\n return first_series(DataFrame(internal))\n\n def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:\n \"\"\"\n Return number of unique elements in the object.\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to Koalas and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to Koalas.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> ks.Series([1, 2, 3, np.nan]).nunique()\n 3\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)\n 4\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)\n 3\n\n >>> idx = ks.Index([1, 1, 2, None])\n >>> idx\n 
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')\n\n >>> idx.nunique()\n 2\n\n >>> idx.nunique(dropna=False)\n 3\n \"\"\"\n res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])\n return res.collect()[0][0]\n\n def _nunique(self, dropna=True, approx=False, rsd=0.05):\n colname = self._internal.data_spark_column_names[0]\n count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct\n if dropna:\n return count_fn(self.spark.column).alias(colname)\n else:\n return (\n count_fn(self.spark.column)\n + F.when(\n F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1\n ).otherwise(0)\n ).alias(colname)\n\n def take(self, indices) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n\n Series\n\n >>> kser = ks.Series([100, 200, 300, 400, 500])\n >>> kser\n 0 100\n 1 200\n 2 300\n 3 400\n 4 500\n dtype: int64\n\n >>> kser.take([0, 2, 4]).sort_index()\n 0 100\n 2 300\n 4 500\n dtype: int64\n\n Index\n\n >>> kidx = ks.Index([100, 200, 300, 400, 500])\n >>> kidx\n Int64Index([100, 200, 300, 400, 500], dtype='int64')\n\n >>> kidx.take([0, 2, 4]).sort_values()\n Int64Index([100, 300, 500], dtype='int64')\n\n MultiIndex\n\n >>> kmidx = ks.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\")])\n >>> kmidx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('x', 'c')],\n )\n\n >>> kmidx.take([0, 2]) # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'c')],\n )\n \"\"\"\n if not is_list_like(indices) or isinstance(indices, (dict, set)):\n raise ValueError(\"`indices` must be a list-like except dict or set\")\n if isinstance(self, ks.Series):\n return cast(ks.Series, self.iloc[indices])\n else:\n return self._kdf.iloc[indices].index\n",
"#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport unittest\nimport inspect\nfrom distutils.version import LooseVersion\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\n\nfrom databricks import koalas as ks\nfrom databricks.koalas.config import option_context\nfrom databricks.koalas.exceptions import PandasNotImplementedError, DataError\nfrom databricks.koalas.missing.groupby import (\n MissingPandasLikeDataFrameGroupBy,\n MissingPandasLikeSeriesGroupBy,\n)\nfrom databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils\nfrom databricks.koalas.groupby import is_multi_agg_with_relabel\n\n\nclass GroupByTest(ReusedSQLTestCase, TestUtils):\n def test_groupby_simple(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7],\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2],\n \"c\": [4, 2, 7, 3, None, 1, 1, 1, 2],\n \"d\": list(\"abcdefght\"),\n },\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n kdf = ks.from_pandas(pdf)\n\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(\"a\").reset_index(drop=True)\n self.assert_eq(\n sort(kdf.groupby(\"a\", as_index=as_index).sum()),\n sort(pdf.groupby(\"a\", as_index=as_index).sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"a\", as_index=as_index).b.sum()),\n sort(pdf.groupby(\"a\", as_index=as_index).b.sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"a\", as_index=as_index)[\"b\"].sum()),\n sort(pdf.groupby(\"a\", as_index=as_index)[\"b\"].sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"a\", as_index=as_index)[[\"b\", \"c\"]].sum()),\n sort(pdf.groupby(\"a\", as_index=as_index)[[\"b\", \"c\"]].sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"a\", as_index=as_index)[[]].sum()),\n sort(pdf.groupby(\"a\", as_index=as_index)[[]].sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"a\", as_index=as_index)[\"c\"].sum()),\n sort(pdf.groupby(\"a\", as_index=as_index)[\"c\"].sum()),\n )\n\n self.assert_eq(kdf.groupby(\"a\").a.sum().sort_index(), pdf.groupby(\"a\").a.sum().sort_index())\n self.assert_eq(\n kdf.groupby(\"a\")[\"a\"].sum().sort_index(), pdf.groupby(\"a\")[\"a\"].sum().sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"a\")[[\"a\"]].sum().sort_index(), pdf.groupby(\"a\")[[\"a\"]].sum().sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"a\")[[\"a\", \"c\"]].sum().sort_index(),\n pdf.groupby(\"a\")[[\"a\", \"c\"]].sum().sort_index(),\n )\n\n self.assert_eq(\n kdf.a.groupby(kdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()\n )\n\n for axis in [0, \"index\"]:\n self.assert_eq(\n kdf.groupby(\"a\", axis=axis).a.sum().sort_index(),\n pdf.groupby(\"a\", axis=axis).a.sum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"a\", axis=axis)[\"a\"].sum().sort_index(),\n pdf.groupby(\"a\", axis=axis)[\"a\"].sum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"a\", axis=axis)[[\"a\"]].sum().sort_index(),\n pdf.groupby(\"a\", axis=axis)[[\"a\"]].sum().sort_index(),\n )\n 
self.assert_eq(\n kdf.groupby(\"a\", axis=axis)[[\"a\", \"c\"]].sum().sort_index(),\n pdf.groupby(\"a\", axis=axis)[[\"a\", \"c\"]].sum().sort_index(),\n )\n\n self.assert_eq(\n kdf.a.groupby(kdf.b, axis=axis).sum().sort_index(),\n pdf.a.groupby(pdf.b, axis=axis).sum().sort_index(),\n )\n\n self.assertRaises(ValueError, lambda: kdf.groupby(\"a\", as_index=False).a)\n self.assertRaises(ValueError, lambda: kdf.groupby(\"a\", as_index=False)[\"a\"])\n self.assertRaises(ValueError, lambda: kdf.groupby(\"a\", as_index=False)[[\"a\"]])\n self.assertRaises(ValueError, lambda: kdf.groupby(\"a\", as_index=False)[[\"a\", \"c\"]])\n self.assertRaises(KeyError, lambda: kdf.groupby(\"z\", as_index=False)[[\"a\", \"c\"]])\n self.assertRaises(KeyError, lambda: kdf.groupby([\"z\"], as_index=False)[[\"a\", \"c\"]])\n\n self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.b, as_index=False))\n\n self.assertRaises(NotImplementedError, lambda: kdf.groupby(\"a\", axis=1))\n self.assertRaises(NotImplementedError, lambda: kdf.groupby(\"a\", axis=\"columns\"))\n self.assertRaises(ValueError, lambda: kdf.groupby(\"a\", \"b\"))\n self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.a, kdf.b))\n\n # we can't use column name/names as a parameter `by` for `SeriesGroupBy`.\n self.assertRaises(KeyError, lambda: kdf.a.groupby(by=\"a\"))\n self.assertRaises(KeyError, lambda: kdf.a.groupby(by=[\"a\", \"b\"]))\n self.assertRaises(KeyError, lambda: kdf.a.groupby(by=(\"a\", \"b\")))\n\n # we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`.\n self.assertRaises(ValueError, lambda: kdf.groupby(kdf))\n self.assertRaises(ValueError, lambda: kdf.a.groupby(kdf))\n self.assertRaises(ValueError, lambda: kdf.a.groupby((kdf,)))\n\n # non-string names\n pdf = pd.DataFrame(\n {\n 10: [1, 2, 6, 4, 4, 6, 4, 3, 7],\n 20: [4, 2, 7, 3, 3, 1, 1, 1, 2],\n 30: [4, 2, 7, 3, None, 1, 1, 1, 2],\n 40: list(\"abcdefght\"),\n },\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n kdf = ks.from_pandas(pdf)\n\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(10).reset_index(drop=True)\n self.assert_eq(\n sort(kdf.groupby(10, as_index=as_index).sum()),\n sort(pdf.groupby(10, as_index=as_index).sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(10, as_index=as_index)[20].sum()),\n sort(pdf.groupby(10, as_index=as_index)[20].sum()),\n )\n self.assert_eq(\n sort(kdf.groupby(10, as_index=as_index)[[20, 30]].sum()),\n sort(pdf.groupby(10, as_index=as_index)[[20, 30]].sum()),\n )\n\n def test_groupby_multiindex_columns(self):\n pdf = pd.DataFrame(\n {\n (10, \"a\"): [1, 2, 6, 4, 4, 6, 4, 3, 7],\n (10, \"b\"): [4, 2, 7, 3, 3, 1, 1, 1, 2],\n (20, \"c\"): [4, 2, 7, 3, None, 1, 1, 1, 2],\n (30, \"d\"): list(\"abcdefght\"),\n },\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby((10, \"a\")).sum().sort_index(), pdf.groupby((10, \"a\")).sum().sort_index()\n )\n self.assert_eq(\n kdf.groupby((10, \"a\"), as_index=False)\n .sum()\n .sort_values((10, \"a\"))\n .reset_index(drop=True),\n pdf.groupby((10, \"a\"), as_index=False)\n .sum()\n .sort_values((10, \"a\"))\n .reset_index(drop=True),\n )\n self.assert_eq(\n kdf.groupby((10, \"a\"))[[(20, \"c\")]].sum().sort_index(),\n pdf.groupby((10, \"a\"))[[(20, \"c\")]].sum().sort_index(),\n )\n\n # TODO: a pandas bug?\n # expected = pdf.groupby((10, \"a\"))[(20, \"c\")].sum().sort_index()\n expected = pd.Series(\n [4.0, 2.0, 1.0, 4.0, 8.0, 2.0],\n name=(20, \"c\"),\n 
index=pd.Index([1, 2, 3, 4, 6, 7], name=(10, \"a\")),\n )\n\n self.assert_eq(kdf.groupby((10, \"a\"))[(20, \"c\")].sum().sort_index(), expected)\n\n if LooseVersion(pd.__version__) < LooseVersion(\"1.1.3\"):\n self.assert_eq(\n kdf[(20, \"c\")].groupby(kdf[(10, \"a\")]).sum().sort_index(),\n pdf[(20, \"c\")].groupby(pdf[(10, \"a\")]).sum().sort_index(),\n )\n else:\n # seems like a pandas bug introduced in pandas 1.1.3.\n self.assert_eq(kdf[(20, \"c\")].groupby(kdf[(10, \"a\")]).sum().sort_index(), expected)\n\n def test_split_apply_combine_on_series(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7],\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2],\n \"c\": [4, 2, 7, 3, None, 1, 1, 1, 2],\n \"d\": list(\"abcdefght\"),\n },\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n kdf = ks.from_pandas(pdf)\n\n funcs = [\n ((True, False), [\"sum\", \"min\", \"max\", \"count\", \"first\", \"last\"]),\n ((True, True), [\"mean\"]),\n ((False, False), [\"var\", \"std\"]),\n ]\n funcs = [(check_exact, almost, f) for (check_exact, almost), fs in funcs for f in fs]\n\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)\n\n for check_exact, almost, func in funcs:\n for kkey, pkey in [(\"b\", \"b\"), (kdf.b, pdf.b)]:\n with self.subTest(as_index=as_index, func=func, key=pkey):\n if as_index is True or func != \"std\":\n self.assert_eq(\n sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),\n sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),\n sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),\n check_exact=check_exact,\n almost=almost,\n )\n else:\n # seems like a pandas' bug for as_index=False and func == \"std\"?\n self.assert_eq(\n sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),\n sort(pdf.groupby(pkey, as_index=True).a.std().reset_index()),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),\n sort(pdf.groupby(pkey, as_index=True).std().reset_index()),\n check_exact=check_exact,\n almost=almost,\n )\n\n for kkey, pkey in [(kdf.b + 1, pdf.b + 1), (kdf.copy().b, pdf.copy().b)]:\n with self.subTest(as_index=as_index, func=func, key=pkey):\n self.assert_eq(\n sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),\n sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),\n sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),\n check_exact=check_exact,\n almost=almost,\n )\n\n for check_exact, almost, func in funcs:\n for i in [0, 4, 7]:\n with self.subTest(as_index=as_index, func=func, i=i):\n self.assert_eq(\n sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index).a, func)()),\n sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index).a, func)()),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index), func)()),\n sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index), func)()),\n check_exact=check_exact,\n almost=almost,\n )\n\n for check_exact, almost, func in funcs:\n for kkey, pkey in [\n (kdf.b, pdf.b),\n (kdf.b + 1, pdf.b + 1),\n (kdf.copy().b, pdf.copy().b),\n (kdf.b.rename(), pdf.b.rename()),\n ]:\n with 
self.subTest(func=func, key=pkey):\n self.assert_eq(\n getattr(kdf.a.groupby(kkey), func)().sort_index(),\n getattr(pdf.a.groupby(pkey), func)().sort_index(),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n getattr((kdf.a + 1).groupby(kkey), func)().sort_index(),\n getattr((pdf.a + 1).groupby(pkey), func)().sort_index(),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n getattr((kdf.b + 1).groupby(kkey), func)().sort_index(),\n getattr((pdf.b + 1).groupby(pkey), func)().sort_index(),\n check_exact=check_exact,\n almost=almost,\n )\n self.assert_eq(\n getattr(kdf.a.rename().groupby(kkey), func)().sort_index(),\n getattr(pdf.a.rename().groupby(pkey), func)().sort_index(),\n check_exact=check_exact,\n almost=almost,\n )\n\n def test_aggregate(self):\n pdf = pd.DataFrame(\n {\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4], \"C\": [0.362, 0.227, 1.267, -0.562]}\n )\n kdf = ks.from_pandas(pdf)\n\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)\n\n for kkey, pkey in [(\"A\", \"A\"), (kdf.A, pdf.A)]:\n with self.subTest(as_index=as_index, key=pkey):\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg(\"sum\")),\n sort(pdf.groupby(pkey, as_index=as_index).agg(\"sum\")),\n )\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg({\"B\": \"min\", \"C\": \"sum\"})),\n sort(pdf.groupby(pkey, as_index=as_index).agg({\"B\": \"min\", \"C\": \"sum\"})),\n )\n self.assert_eq(\n sort(\n kdf.groupby(kkey, as_index=as_index).agg(\n {\"B\": [\"min\", \"max\"], \"C\": \"sum\"}\n )\n ),\n sort(\n pdf.groupby(pkey, as_index=as_index).agg(\n {\"B\": [\"min\", \"max\"], \"C\": \"sum\"}\n )\n ),\n )\n\n if as_index:\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg([\"sum\"])),\n sort(pdf.groupby(pkey, as_index=as_index).agg([\"sum\"])),\n )\n else:\n # seems like a pandas' bug for as_index=False and func_or_funcs is list?\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg([\"sum\"])),\n sort(pdf.groupby(pkey, as_index=True).agg([\"sum\"]).reset_index()),\n )\n\n for kkey, pkey in [(kdf.A + 1, pdf.A + 1), (kdf.copy().A, pdf.copy().A)]:\n with self.subTest(as_index=as_index, key=pkey):\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg(\"sum\")),\n sort(pdf.groupby(pkey, as_index=as_index).agg(\"sum\")),\n )\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg({\"B\": \"min\", \"C\": \"sum\"})),\n sort(pdf.groupby(pkey, as_index=as_index).agg({\"B\": \"min\", \"C\": \"sum\"})),\n )\n self.assert_eq(\n sort(\n kdf.groupby(kkey, as_index=as_index).agg(\n {\"B\": [\"min\", \"max\"], \"C\": \"sum\"}\n )\n ),\n sort(\n pdf.groupby(pkey, as_index=as_index).agg(\n {\"B\": [\"min\", \"max\"], \"C\": \"sum\"}\n )\n ),\n )\n self.assert_eq(\n sort(kdf.groupby(kkey, as_index=as_index).agg([\"sum\"])),\n sort(pdf.groupby(pkey, as_index=as_index).agg([\"sum\"])),\n )\n\n expected_error_message = (\n r\"aggs must be a dict mapping from column name to aggregate functions \"\n r\"\\(string or list of strings\\).\"\n )\n with self.assertRaisesRegex(ValueError, expected_error_message):\n kdf.groupby(\"A\", as_index=as_index).agg(0)\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(10, \"A\"), (10, \"B\"), (20, \"C\")])\n pdf.columns = columns\n kdf.columns = columns\n\n for as_index in [True, False]:\n stats_kdf = kdf.groupby((10, \"A\"), as_index=as_index).agg(\n {(10, \"B\"): \"min\", 
(20, \"C\"): \"sum\"}\n )\n stats_pdf = pdf.groupby((10, \"A\"), as_index=as_index).agg(\n {(10, \"B\"): \"min\", (20, \"C\"): \"sum\"}\n )\n self.assert_eq(\n stats_kdf.sort_values(by=[(10, \"B\"), (20, \"C\")]).reset_index(drop=True),\n stats_pdf.sort_values(by=[(10, \"B\"), (20, \"C\")]).reset_index(drop=True),\n )\n\n stats_kdf = kdf.groupby((10, \"A\")).agg({(10, \"B\"): [\"min\", \"max\"], (20, \"C\"): \"sum\"})\n stats_pdf = pdf.groupby((10, \"A\")).agg({(10, \"B\"): [\"min\", \"max\"], (20, \"C\"): \"sum\"})\n self.assert_eq(\n stats_kdf.sort_values(\n by=[(10, \"B\", \"min\"), (10, \"B\", \"max\"), (20, \"C\", \"sum\")]\n ).reset_index(drop=True),\n stats_pdf.sort_values(\n by=[(10, \"B\", \"min\"), (10, \"B\", \"max\"), (20, \"C\", \"sum\")]\n ).reset_index(drop=True),\n )\n\n # non-string names\n pdf.columns = [10, 20, 30]\n kdf.columns = [10, 20, 30]\n\n for as_index in [True, False]:\n stats_kdf = kdf.groupby(10, as_index=as_index).agg({20: \"min\", 30: \"sum\"})\n stats_pdf = pdf.groupby(10, as_index=as_index).agg({20: \"min\", 30: \"sum\"})\n self.assert_eq(\n stats_kdf.sort_values(by=[20, 30]).reset_index(drop=True),\n stats_pdf.sort_values(by=[20, 30]).reset_index(drop=True),\n )\n\n stats_kdf = kdf.groupby(10).agg({20: [\"min\", \"max\"], 30: \"sum\"})\n stats_pdf = pdf.groupby(10).agg({20: [\"min\", \"max\"], 30: \"sum\"})\n self.assert_eq(\n stats_kdf.sort_values(by=[(20, \"min\"), (20, \"max\"), (30, \"sum\")]).reset_index(\n drop=True\n ),\n stats_pdf.sort_values(by=[(20, \"min\"), (20, \"max\"), (30, \"sum\")]).reset_index(\n drop=True\n ),\n )\n\n def test_aggregate_func_str_list(self):\n # this is test for cases where only string or list is assigned\n pdf = pd.DataFrame(\n {\n \"kind\": [\"cat\", \"dog\", \"cat\", \"dog\"],\n \"height\": [9.1, 6.0, 9.5, 34.0],\n \"weight\": [7.9, 7.5, 9.9, 198.0],\n }\n )\n kdf = ks.from_pandas(pdf)\n\n agg_funcs = [\"max\", \"min\", [\"min\", \"max\"]]\n for aggfunc in agg_funcs:\n\n # Since in Koalas groupby, the order of rows might be different\n # so sort on index to ensure they have same output\n sorted_agg_kdf = kdf.groupby(\"kind\").agg(aggfunc).sort_index()\n sorted_agg_pdf = pdf.groupby(\"kind\").agg(aggfunc).sort_index()\n self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)\n\n # test on multi index column case\n pdf = pd.DataFrame(\n {\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4], \"C\": [0.362, 0.227, 1.267, -0.562]}\n )\n kdf = ks.from_pandas(pdf)\n\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\")])\n pdf.columns = columns\n kdf.columns = columns\n\n for aggfunc in agg_funcs:\n sorted_agg_kdf = kdf.groupby((\"X\", \"A\")).agg(aggfunc).sort_index()\n sorted_agg_pdf = pdf.groupby((\"X\", \"A\")).agg(aggfunc).sort_index()\n self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)\n\n @unittest.skipIf(pd.__version__ < \"0.25.0\", \"not supported before pandas 0.25.0\")\n def test_aggregate_relabel(self):\n # this is to test named aggregation in groupby\n pdf = pd.DataFrame({\"group\": [\"a\", \"a\", \"b\", \"b\"], \"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8]})\n kdf = ks.from_pandas(pdf)\n\n # different agg column, same function\n agg_pdf = pdf.groupby(\"group\").agg(a_max=(\"A\", \"max\"), b_max=(\"B\", \"max\")).sort_index()\n agg_kdf = kdf.groupby(\"group\").agg(a_max=(\"A\", \"max\"), b_max=(\"B\", \"max\")).sort_index()\n self.assert_eq(agg_pdf, agg_kdf)\n\n # same agg column, different functions\n agg_pdf = pdf.groupby(\"group\").agg(b_max=(\"B\", \"max\"), b_min=(\"B\", \"min\")).sort_index()\n agg_kdf 
= kdf.groupby(\"group\").agg(b_max=(\"B\", \"max\"), b_min=(\"B\", \"min\")).sort_index()\n self.assert_eq(agg_pdf, agg_kdf)\n\n # test on NamedAgg\n agg_pdf = (\n pdf.groupby(\"group\").agg(b_max=pd.NamedAgg(column=\"B\", aggfunc=\"max\")).sort_index()\n )\n agg_kdf = (\n kdf.groupby(\"group\").agg(b_max=ks.NamedAgg(column=\"B\", aggfunc=\"max\")).sort_index()\n )\n self.assert_eq(agg_kdf, agg_pdf)\n\n # test on NamedAgg multi columns aggregation\n agg_pdf = (\n pdf.groupby(\"group\")\n .agg(\n b_max=pd.NamedAgg(column=\"B\", aggfunc=\"max\"),\n b_min=pd.NamedAgg(column=\"B\", aggfunc=\"min\"),\n )\n .sort_index()\n )\n agg_kdf = (\n kdf.groupby(\"group\")\n .agg(\n b_max=ks.NamedAgg(column=\"B\", aggfunc=\"max\"),\n b_min=ks.NamedAgg(column=\"B\", aggfunc=\"min\"),\n )\n .sort_index()\n )\n self.assert_eq(agg_kdf, agg_pdf)\n\n def test_dropna(self):\n pdf = pd.DataFrame(\n {\"A\": [None, 1, None, 1, 2], \"B\": [1, 2, 3, None, None], \"C\": [4, 5, 6, 7, None]}\n )\n kdf = ks.from_pandas(pdf)\n\n # pd.DataFrame.groupby with dropna parameter is implemented since pandas 1.1.0\n if LooseVersion(pd.__version__) >= LooseVersion(\"1.1.0\"):\n for dropna in [True, False]:\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(\"A\").reset_index(drop=True)\n\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index, dropna=dropna).std()),\n sort(pdf.groupby(\"A\", as_index=as_index, dropna=dropna).std()),\n )\n\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index, dropna=dropna).B.std()),\n sort(pdf.groupby(\"A\", as_index=as_index, dropna=dropna).B.std()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index, dropna=dropna)[\"B\"].std()),\n sort(pdf.groupby(\"A\", as_index=as_index, dropna=dropna)[\"B\"].std()),\n )\n\n self.assert_eq(\n sort(\n kdf.groupby(\"A\", as_index=as_index, dropna=dropna).agg(\n {\"B\": \"min\", \"C\": \"std\"}\n )\n ),\n sort(\n pdf.groupby(\"A\", as_index=as_index, dropna=dropna).agg(\n {\"B\": \"min\", \"C\": \"std\"}\n )\n ),\n )\n\n for dropna in [True, False]:\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values([\"A\", \"B\"]).reset_index(drop=True)\n\n self.assert_eq(\n sort(\n kdf.groupby([\"A\", \"B\"], as_index=as_index, dropna=dropna).agg(\n {\"C\": [\"min\", \"std\"]}\n )\n ),\n sort(\n pdf.groupby([\"A\", \"B\"], as_index=as_index, dropna=dropna).agg(\n {\"C\": [\"min\", \"std\"]}\n )\n ),\n almost=True,\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\")])\n pdf.columns = columns\n kdf.columns = columns\n\n for dropna in [True, False]:\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values((\"X\", \"A\")).reset_index(drop=True)\n sorted_stats_kdf = sort(\n kdf.groupby((\"X\", \"A\"), as_index=as_index, dropna=dropna).agg(\n {(\"X\", \"B\"): \"min\", (\"Y\", \"C\"): \"std\"}\n )\n )\n sorted_stats_pdf = sort(\n pdf.groupby((\"X\", \"A\"), as_index=as_index, dropna=dropna).agg(\n {(\"X\", \"B\"): \"min\", (\"Y\", \"C\"): \"std\"}\n )\n )\n self.assert_eq(sorted_stats_kdf, sorted_stats_pdf)\n else:\n # Testing dropna=True (pandas default behavior)\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(\"A\").reset_index(drop=True)\n\n self.assert_eq(\n sort(kdf.groupby(\"A\", 
as_index=as_index, dropna=True)[\"B\"].min()),\n sort(pdf.groupby(\"A\", as_index=as_index)[\"B\"].min()),\n )\n\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values([\"A\", \"B\"]).reset_index(drop=True)\n\n self.assert_eq(\n sort(\n kdf.groupby([\"A\", \"B\"], as_index=as_index, dropna=True).agg(\n {\"C\": [\"min\", \"std\"]}\n )\n ),\n sort(pdf.groupby([\"A\", \"B\"], as_index=as_index).agg({\"C\": [\"min\", \"std\"]})),\n almost=True,\n )\n\n # Testing dropna=False\n index = pd.Index([1.0, 2.0, np.nan], name=\"A\")\n expected = pd.Series([2.0, np.nan, 1.0], index=index, name=\"B\")\n result = kdf.groupby(\"A\", as_index=True, dropna=False)[\"B\"].min().sort_index()\n self.assert_eq(expected, result)\n\n expected = pd.DataFrame({\"A\": [1.0, 2.0, np.nan], \"B\": [2.0, np.nan, 1.0]})\n result = (\n kdf.groupby(\"A\", as_index=False, dropna=False)[\"B\"]\n .min()\n .sort_values(\"A\")\n .reset_index(drop=True)\n )\n self.assert_eq(expected, result)\n\n index = pd.MultiIndex.from_tuples(\n [(1.0, 2.0), (1.0, None), (2.0, None), (None, 1.0), (None, 3.0)], names=[\"A\", \"B\"]\n )\n expected = pd.DataFrame(\n {\n (\"C\", \"min\"): [5.0, 7.0, np.nan, 4.0, 6.0],\n (\"C\", \"std\"): [np.nan, np.nan, np.nan, np.nan, np.nan],\n },\n index=index,\n )\n result = (\n kdf.groupby([\"A\", \"B\"], as_index=True, dropna=False)\n .agg({\"C\": [\"min\", \"std\"]})\n .sort_index()\n )\n self.assert_eq(expected, result)\n\n expected = pd.DataFrame(\n {\n (\"A\", \"\"): [1.0, 1.0, 2.0, np.nan, np.nan],\n (\"B\", \"\"): [2.0, np.nan, np.nan, 1.0, 3.0],\n (\"C\", \"min\"): [5.0, 7.0, np.nan, 4.0, 6.0],\n (\"C\", \"std\"): [np.nan, np.nan, np.nan, np.nan, np.nan],\n }\n )\n result = (\n kdf.groupby([\"A\", \"B\"], as_index=False, dropna=False)\n .agg({\"C\": [\"min\", \"std\"]})\n .sort_values([\"A\", \"B\"])\n .reset_index(drop=True)\n )\n self.assert_eq(expected, result)\n\n def test_describe(self):\n # support for numeric type, not support for string type yet\n datas = []\n datas.append({\"a\": [1, 1, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]})\n datas.append({\"a\": [-1, -1, -3], \"b\": [-4, -5, -6], \"c\": [-7, -8, -9]})\n datas.append({\"a\": [0, 0, 0], \"b\": [0, 0, 0], \"c\": [0, 8, 0]})\n # it is okay if string type column as a group key\n datas.append({\"a\": [\"a\", \"a\", \"c\"], \"b\": [4, 5, 6], \"c\": [7, 8, 9]})\n\n percentiles = [0.25, 0.5, 0.75]\n formatted_percentiles = [\"25%\", \"50%\", \"75%\"]\n non_percentile_stats = [\"count\", \"mean\", \"std\", \"min\", \"max\"]\n\n for data in datas:\n pdf = pd.DataFrame(data)\n kdf = ks.from_pandas(pdf)\n\n describe_pdf = pdf.groupby(\"a\").describe().sort_index()\n describe_kdf = kdf.groupby(\"a\").describe().sort_index()\n\n # since the result of percentile columns are slightly difference from pandas,\n # we should check them separately: non-percentile columns & percentile columns\n\n # 1. Check that non-percentile columns are equal.\n agg_cols = [col.name for col in kdf.groupby(\"a\")._agg_columns]\n self.assert_eq(\n describe_kdf.drop(list(product(agg_cols, formatted_percentiles))),\n describe_pdf.drop(columns=formatted_percentiles, level=1),\n check_exact=False,\n )\n\n # 2. 
Check that percentile columns are equal.\n # The interpolation argument is yet to be implemented in Koalas.\n quantile_pdf = pdf.groupby(\"a\").quantile(percentiles, interpolation=\"nearest\")\n quantile_pdf = quantile_pdf.unstack(level=1).astype(float)\n self.assert_eq(\n describe_kdf.drop(list(product(agg_cols, non_percentile_stats))),\n quantile_pdf.rename(columns=\"{:.0%}\".format, level=1),\n )\n\n # not support for string type yet\n datas = []\n datas.append({\"a\": [\"a\", \"a\", \"c\"], \"b\": [\"d\", \"e\", \"f\"], \"c\": [\"g\", \"h\", \"i\"]})\n datas.append({\"a\": [\"a\", \"a\", \"c\"], \"b\": [4, 0, 1], \"c\": [\"g\", \"h\", \"i\"]})\n for data in datas:\n pdf = pd.DataFrame(data)\n kdf = ks.from_pandas(pdf)\n\n self.assertRaises(NotImplementedError, lambda: kdf.groupby(\"a\").describe().sort_index())\n\n # multi-index columns\n pdf = pd.DataFrame({(\"x\", \"a\"): [1, 1, 3], (\"x\", \"b\"): [4, 5, 6], (\"y\", \"c\"): [7, 8, 9]})\n kdf = ks.from_pandas(pdf)\n\n describe_pdf = pdf.groupby((\"x\", \"a\")).describe().sort_index()\n describe_kdf = kdf.groupby((\"x\", \"a\")).describe().sort_index()\n\n # 1. Check that non-percentile columns are equal.\n agg_column_labels = [col._column_label for col in kdf.groupby((\"x\", \"a\"))._agg_columns]\n self.assert_eq(\n describe_kdf.drop(\n [\n tuple(list(label) + [s])\n for label, s in product(agg_column_labels, formatted_percentiles)\n ]\n ),\n describe_pdf.drop(columns=formatted_percentiles, level=2),\n check_exact=False,\n )\n\n # 2. Check that percentile columns are equal.\n # The interpolation argument is yet to be implemented in Koalas.\n quantile_pdf = pdf.groupby((\"x\", \"a\")).quantile(percentiles, interpolation=\"nearest\")\n quantile_pdf = quantile_pdf.unstack(level=1).astype(float)\n\n self.assert_eq(\n describe_kdf.drop(\n [\n tuple(list(label) + [s])\n for label, s in product(agg_column_labels, non_percentile_stats)\n ]\n ),\n quantile_pdf.rename(columns=\"{:.0%}\".format, level=2),\n )\n\n def test_aggregate_relabel_multiindex(self):\n pdf = pd.DataFrame({\"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8], \"group\": [\"a\", \"a\", \"b\", \"b\"]})\n pdf.columns = pd.MultiIndex.from_tuples([(\"y\", \"A\"), (\"y\", \"B\"), (\"x\", \"group\")])\n kdf = ks.from_pandas(pdf)\n\n if LooseVersion(pd.__version__) < LooseVersion(\"1.0.0\"):\n agg_pdf = pd.DataFrame(\n {\"a_max\": [1, 3]}, index=pd.Index([\"a\", \"b\"], name=(\"x\", \"group\"))\n )\n elif LooseVersion(pd.__version__) >= LooseVersion(\"1.0.0\"):\n agg_pdf = pdf.groupby((\"x\", \"group\")).agg(a_max=((\"y\", \"A\"), \"max\")).sort_index()\n agg_kdf = kdf.groupby((\"x\", \"group\")).agg(a_max=((\"y\", \"A\"), \"max\")).sort_index()\n self.assert_eq(agg_pdf, agg_kdf)\n\n # same column, different methods\n if LooseVersion(pd.__version__) < LooseVersion(\"1.0.0\"):\n agg_pdf = pd.DataFrame(\n {\"a_max\": [1, 3], \"a_min\": [0, 2]}, index=pd.Index([\"a\", \"b\"], name=(\"x\", \"group\"))\n )\n elif LooseVersion(pd.__version__) >= LooseVersion(\"1.0.0\"):\n agg_pdf = (\n pdf.groupby((\"x\", \"group\"))\n .agg(a_max=((\"y\", \"A\"), \"max\"), a_min=((\"y\", \"A\"), \"min\"))\n .sort_index()\n )\n agg_kdf = (\n kdf.groupby((\"x\", \"group\"))\n .agg(a_max=((\"y\", \"A\"), \"max\"), a_min=((\"y\", \"A\"), \"min\"))\n .sort_index()\n )\n self.assert_eq(agg_pdf, agg_kdf)\n\n # different column, different methods\n if LooseVersion(pd.__version__) < LooseVersion(\"1.0.0\"):\n agg_pdf = pd.DataFrame(\n {\"a_max\": [6, 8], \"a_min\": [0, 2]}, index=pd.Index([\"a\", \"b\"], name=(\"x\", 
\"group\"))\n )\n elif LooseVersion(pd.__version__) >= LooseVersion(\"1.0.0\"):\n agg_pdf = (\n pdf.groupby((\"x\", \"group\"))\n .agg(a_max=((\"y\", \"B\"), \"max\"), a_min=((\"y\", \"A\"), \"min\"))\n .sort_index()\n )\n agg_kdf = (\n kdf.groupby((\"x\", \"group\"))\n .agg(a_max=((\"y\", \"B\"), \"max\"), a_min=((\"y\", \"A\"), \"min\"))\n .sort_index()\n )\n self.assert_eq(agg_pdf, agg_kdf)\n\n def test_all_any(self):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n \"B\": [True, True, True, False, False, False, None, True, None, False],\n }\n )\n kdf = ks.from_pandas(pdf)\n\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values(\"A\").reset_index(drop=True)\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index).all()),\n sort(pdf.groupby(\"A\", as_index=as_index).all()),\n )\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index).any()),\n sort(pdf.groupby(\"A\", as_index=as_index).any()),\n )\n\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index).all()).B,\n sort(pdf.groupby(\"A\", as_index=as_index).all()).B,\n )\n self.assert_eq(\n sort(kdf.groupby(\"A\", as_index=as_index).any()).B,\n sort(pdf.groupby(\"A\", as_index=as_index).any()).B,\n )\n\n self.assert_eq(\n kdf.B.groupby(kdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()\n )\n self.assert_eq(\n kdf.B.groupby(kdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"Y\", \"B\")])\n pdf.columns = columns\n kdf.columns = columns\n\n for as_index in [True, False]:\n if as_index:\n sort = lambda df: df.sort_index()\n else:\n sort = lambda df: df.sort_values((\"X\", \"A\")).reset_index(drop=True)\n self.assert_eq(\n sort(kdf.groupby((\"X\", \"A\"), as_index=as_index).all()),\n sort(pdf.groupby((\"X\", \"A\"), as_index=as_index).all()),\n )\n self.assert_eq(\n sort(kdf.groupby((\"X\", \"A\"), as_index=as_index).any()),\n sort(pdf.groupby((\"X\", \"A\"), as_index=as_index).any()),\n )\n\n def test_raises(self):\n kdf = ks.DataFrame(\n {\"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7], \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n # test raises with incorrect key\n self.assertRaises(ValueError, lambda: kdf.groupby([]))\n self.assertRaises(KeyError, lambda: kdf.groupby(\"x\"))\n self.assertRaises(KeyError, lambda: kdf.groupby([\"a\", \"x\"]))\n self.assertRaises(KeyError, lambda: kdf.groupby(\"a\")[\"x\"])\n self.assertRaises(KeyError, lambda: kdf.groupby(\"a\")[\"b\", \"x\"])\n self.assertRaises(KeyError, lambda: kdf.groupby(\"a\")[[\"b\", \"x\"]])\n\n def test_nunique(self):\n pdf = pd.DataFrame(\n {\"a\": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], \"b\": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}\n )\n kdf = ks.from_pandas(pdf)\n self.assert_eq(\n kdf.groupby(\"a\").agg({\"b\": \"nunique\"}).sort_index(),\n pdf.groupby(\"a\").agg({\"b\": \"nunique\"}).sort_index(),\n )\n if LooseVersion(pd.__version__) < LooseVersion(\"1.1.0\"):\n expected = ks.DataFrame({\"b\": [2, 2]}, index=pd.Index([0, 1], name=\"a\"))\n self.assert_eq(kdf.groupby(\"a\").nunique().sort_index(), expected)\n self.assert_eq(\n kdf.groupby(\"a\").nunique(dropna=False).sort_index(), expected,\n )\n else:\n self.assert_eq(\n kdf.groupby(\"a\").nunique().sort_index(), pdf.groupby(\"a\").nunique().sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"a\").nunique(dropna=False).sort_index(),\n pdf.groupby(\"a\").nunique(dropna=False).sort_index(),\n 
)\n self.assert_eq(\n kdf.groupby(\"a\")[\"b\"].nunique().sort_index(),\n pdf.groupby(\"a\")[\"b\"].nunique().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"a\")[\"b\"].nunique(dropna=False).sort_index(),\n pdf.groupby(\"a\")[\"b\"].nunique(dropna=False).sort_index(),\n )\n\n nunique_kdf = kdf.groupby(\"a\", as_index=False).agg({\"b\": \"nunique\"})\n nunique_pdf = pdf.groupby(\"a\", as_index=False).agg({\"b\": \"nunique\"})\n self.assert_eq(\n nunique_kdf.sort_values([\"a\", \"b\"]).reset_index(drop=True),\n nunique_pdf.sort_values([\"a\", \"b\"]).reset_index(drop=True),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"y\", \"b\")])\n pdf.columns = columns\n kdf.columns = columns\n\n if LooseVersion(pd.__version__) < LooseVersion(\"1.1.0\"):\n expected = ks.DataFrame({(\"y\", \"b\"): [2, 2]}, index=pd.Index([0, 1], name=(\"x\", \"a\")))\n self.assert_eq(\n kdf.groupby((\"x\", \"a\")).nunique().sort_index(), expected,\n )\n self.assert_eq(\n kdf.groupby((\"x\", \"a\")).nunique(dropna=False).sort_index(), expected,\n )\n else:\n self.assert_eq(\n kdf.groupby((\"x\", \"a\")).nunique().sort_index(),\n pdf.groupby((\"x\", \"a\")).nunique().sort_index(),\n )\n self.assert_eq(\n kdf.groupby((\"x\", \"a\")).nunique(dropna=False).sort_index(),\n pdf.groupby((\"x\", \"a\")).nunique(dropna=False).sort_index(),\n )\n\n def test_unique(self):\n for pdf in [\n pd.DataFrame(\n {\"a\": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], \"b\": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}\n ),\n pd.DataFrame(\n {\n \"a\": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n \"b\": [\"w\", \"w\", \"w\", \"x\", \"x\", \"y\", \"y\", \"z\", \"z\", \"z\"],\n }\n ),\n ]:\n with self.subTest(pdf=pdf):\n kdf = ks.from_pandas(pdf)\n\n actual = kdf.groupby(\"a\")[\"b\"].unique().sort_index().to_pandas()\n expect = pdf.groupby(\"a\")[\"b\"].unique().sort_index()\n self.assert_eq(len(actual), len(expect))\n for act, exp in zip(actual, expect):\n self.assertTrue(sorted(act) == sorted(exp))\n\n def test_value_counts(self):\n pdf = pd.DataFrame({\"A\": [1, 2, 2, 3, 3, 3], \"B\": [1, 1, 2, 3, 3, 3]}, columns=[\"A\", \"B\"])\n kdf = ks.from_pandas(pdf)\n self.assert_eq(\n kdf.groupby(\"A\")[\"B\"].value_counts().sort_index(),\n pdf.groupby(\"A\")[\"B\"].value_counts().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"B\"].value_counts(sort=True, ascending=False).sort_index(),\n pdf.groupby(\"A\")[\"B\"].value_counts(sort=True, ascending=False).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"B\"].value_counts(sort=True, ascending=True).sort_index(),\n pdf.groupby(\"A\")[\"B\"].value_counts(sort=True, ascending=True).sort_index(),\n )\n self.assert_eq(\n kdf.B.rename().groupby(kdf.A).value_counts().sort_index(),\n pdf.B.rename().groupby(pdf.A).value_counts().sort_index(),\n )\n self.assert_eq(\n kdf.B.groupby(kdf.A.rename()).value_counts().sort_index(),\n pdf.B.groupby(pdf.A.rename()).value_counts().sort_index(),\n )\n self.assert_eq(\n kdf.B.rename().groupby(kdf.A.rename()).value_counts().sort_index(),\n pdf.B.rename().groupby(pdf.A.rename()).value_counts().sort_index(),\n )\n\n def test_size(self):\n pdf = pd.DataFrame({\"A\": [1, 2, 2, 3, 3, 3], \"B\": [1, 1, 2, 3, 3, 3]})\n kdf = ks.from_pandas(pdf)\n self.assert_eq(kdf.groupby(\"A\").size().sort_index(), pdf.groupby(\"A\").size().sort_index())\n self.assert_eq(\n kdf.groupby(\"A\")[\"B\"].size().sort_index(), pdf.groupby(\"A\")[\"B\"].size().sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"B\"]].size().sort_index(), 
pdf.groupby(\"A\")[[\"B\"]].size().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"A\", \"B\"]).size().sort_index(), pdf.groupby([\"A\", \"B\"]).size().sort_index()\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"Y\", \"B\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).size().sort_index(), pdf.groupby((\"X\", \"A\")).size().sort_index()\n )\n self.assert_eq(\n kdf.groupby([(\"X\", \"A\"), (\"Y\", \"B\")]).size().sort_index(),\n pdf.groupby([(\"X\", \"A\"), (\"Y\", \"B\")]).size().sort_index(),\n )\n\n def test_diff(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n }\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(kdf.groupby(\"b\").diff().sort_index(), pdf.groupby(\"b\").diff().sort_index())\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).diff().sort_index(), pdf.groupby([\"a\", \"b\"]).diff().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].diff().sort_index(), pdf.groupby([\"b\"])[\"a\"].diff().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"b\"]].diff().sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"b\"]].diff().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).diff().sort_index(), pdf.groupby(pdf.b // 5).diff().sort_index()\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].diff().sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].diff().sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).diff().sort_index(), pdf.groupby((\"x\", \"b\")).diff().sort_index()\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).diff().sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).diff().sort_index(),\n )\n\n def test_rank(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n index=np.random.rand(6 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(kdf.groupby(\"b\").rank().sort_index(), pdf.groupby(\"b\").rank().sort_index())\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).rank().sort_index(), pdf.groupby([\"a\", \"b\"]).rank().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].rank().sort_index(), pdf.groupby([\"b\"])[\"a\"].rank().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].rank().sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].rank().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).rank().sort_index(), pdf.groupby(pdf.b // 5).rank().sort_index()\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].rank().sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].rank().sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).rank().sort_index(), pdf.groupby((\"x\", \"b\")).rank().sort_index()\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).rank().sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).rank().sort_index(),\n )\n\n def test_cumcount(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n index=np.random.rand(6 * 3),\n )\n 
kdf = ks.from_pandas(pdf)\n\n for ascending in [True, False]:\n self.assert_eq(\n kdf.groupby(\"b\").cumcount(ascending=ascending).sort_index(),\n pdf.groupby(\"b\").cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).cumcount(ascending=ascending).sort_index(),\n pdf.groupby([\"a\", \"b\"]).cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].cumcount(ascending=ascending).sort_index(),\n pdf.groupby([\"b\"])[\"a\"].cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].cumcount(ascending=ascending).sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).cumcount(ascending=ascending).sort_index(),\n pdf.groupby(pdf.b // 5).cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].cumcount(ascending=ascending).sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\").cumcount(ascending=ascending).sum(),\n pdf.groupby(\"b\").cumcount(ascending=ascending).sum(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).cumcount(ascending=ascending).sort_index(),\n pdf.a.rename().groupby(pdf.b).cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),\n pdf.a.groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n for ascending in [True, False]:\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).cumcount(ascending=ascending).sort_index(),\n pdf.groupby((\"x\", \"b\")).cumcount(ascending=ascending).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cumcount(ascending=ascending).sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cumcount(ascending=ascending).sort_index(),\n )\n\n def test_cummin(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n index=np.random.rand(6 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"b\").cummin().sort_index(), pdf.groupby(\"b\").cummin().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).cummin().sort_index(),\n pdf.groupby([\"a\", \"b\"]).cummin().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].cummin().sort_index(),\n pdf.groupby([\"b\"])[\"a\"].cummin().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].cummin().sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].cummin().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).cummin().sort_index(),\n pdf.groupby(pdf.b // 5).cummin().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].cummin().sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].cummin().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\").cummin().sum().sort_index(),\n pdf.groupby(\"b\").cummin().sum().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).cummin().sort_index(),\n 
pdf.a.rename().groupby(pdf.b).cummin().sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).cummin().sort_index(),\n pdf.a.groupby(pdf.b.rename()).cummin().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).cummin().sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).cummin().sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).cummin().sort_index(),\n pdf.groupby((\"x\", \"b\")).cummin().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cummin().sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cummin().sort_index(),\n )\n\n kdf = ks.DataFrame([[\"a\"], [\"b\"], [\"c\"]], columns=[\"A\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"]).cummin())\n kdf = ks.DataFrame([[1, \"a\"], [2, \"b\"], [3, \"c\"]], columns=[\"A\", \"B\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"])[\"B\"].cummin())\n\n def test_cummax(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n index=np.random.rand(6 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"b\").cummax().sort_index(), pdf.groupby(\"b\").cummax().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).cummax().sort_index(),\n pdf.groupby([\"a\", \"b\"]).cummax().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].cummax().sort_index(),\n pdf.groupby([\"b\"])[\"a\"].cummax().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].cummax().sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].cummax().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).cummax().sort_index(),\n pdf.groupby(pdf.b // 5).cummax().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].cummax().sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].cummax().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\").cummax().sum().sort_index(),\n pdf.groupby(\"b\").cummax().sum().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).cummax().sort_index(),\n pdf.a.rename().groupby(pdf.b).cummax().sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).cummax().sort_index(),\n pdf.a.groupby(pdf.b.rename()).cummax().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).cummax().sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).cummax().sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).cummax().sort_index(),\n pdf.groupby((\"x\", \"b\")).cummax().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cummax().sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cummax().sort_index(),\n )\n\n kdf = ks.DataFrame([[\"a\"], [\"b\"], [\"c\"]], columns=[\"A\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"]).cummax())\n kdf = ks.DataFrame([[1, \"a\"], [2, \"b\"], [3, \"c\"]], columns=[\"A\", \"B\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"])[\"B\"].cummax())\n\n def test_cumsum(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n 
index=np.random.rand(6 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"b\").cumsum().sort_index(), pdf.groupby(\"b\").cumsum().sort_index()\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).cumsum().sort_index(),\n pdf.groupby([\"a\", \"b\"]).cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].cumsum().sort_index(),\n pdf.groupby([\"b\"])[\"a\"].cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].cumsum().sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).cumsum().sort_index(),\n pdf.groupby(pdf.b // 5).cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].cumsum().sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\").cumsum().sum().sort_index(),\n pdf.groupby(\"b\").cumsum().sum().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).cumsum().sort_index(),\n pdf.a.rename().groupby(pdf.b).cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).cumsum().sort_index(),\n pdf.a.groupby(pdf.b.rename()).cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).cumsum().sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).cumsum().sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).cumsum().sort_index(),\n pdf.groupby((\"x\", \"b\")).cumsum().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cumsum().sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cumsum().sort_index(),\n )\n\n kdf = ks.DataFrame([[\"a\"], [\"b\"], [\"c\"]], columns=[\"A\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"]).cumsum())\n kdf = ks.DataFrame([[1, \"a\"], [2, \"b\"], [3, \"c\"]], columns=[\"A\", \"B\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"])[\"B\"].cumsum())\n\n def test_cumprod(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6] * 3,\n \"b\": [1, 1, 2, 3, 5, 8] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n index=np.random.rand(6 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"b\").cumprod().sort_index(),\n pdf.groupby(\"b\").cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).cumprod().sort_index(),\n pdf.groupby([\"a\", \"b\"]).cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].cumprod().sort_index(),\n pdf.groupby([\"b\"])[\"a\"].cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].cumprod().sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 3).cumprod().sort_index(),\n pdf.groupby(pdf.b // 3).cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 3)[\"a\"].cumprod().sort_index(),\n pdf.groupby(pdf.b // 3)[\"a\"].cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby(\"b\").cumprod().sum().sort_index(),\n pdf.groupby(\"b\").cumprod().sum().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).cumprod().sort_index(),\n pdf.a.rename().groupby(pdf.b).cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n 
kdf.a.groupby(kdf.b.rename()).cumprod().sort_index(),\n pdf.a.groupby(pdf.b.rename()).cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).cumprod().sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).cumprod().sort_index(),\n almost=True,\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).cumprod().sort_index(),\n pdf.groupby((\"x\", \"b\")).cumprod().sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cumprod().sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).cumprod().sort_index(),\n almost=True,\n )\n\n kdf = ks.DataFrame([[\"a\"], [\"b\"], [\"c\"]], columns=[\"A\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"]).cumprod())\n kdf = ks.DataFrame([[1, \"a\"], [2, \"b\"], [3, \"c\"]], columns=[\"A\", \"B\"])\n self.assertRaises(DataError, lambda: kdf.groupby([\"A\"])[\"B\"].cumprod())\n\n def test_nsmallest(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,\n \"b\": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,\n \"c\": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,\n \"d\": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,\n },\n index=np.random.rand(9 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby([\"a\"])[\"b\"].nsmallest(1).sort_values(),\n pdf.groupby([\"a\"])[\"b\"].nsmallest(1).sort_values(),\n )\n self.assert_eq(\n kdf.groupby([\"a\"])[\"b\"].nsmallest(2).sort_index(),\n pdf.groupby([\"a\"])[\"b\"].nsmallest(2).sort_index(),\n )\n self.assert_eq(\n (kdf.b * 10).groupby(kdf.a).nsmallest(2).sort_index(),\n (pdf.b * 10).groupby(pdf.a).nsmallest(2).sort_index(),\n )\n self.assert_eq(\n kdf.b.rename().groupby(kdf.a).nsmallest(2).sort_index(),\n pdf.b.rename().groupby(pdf.a).nsmallest(2).sort_index(),\n )\n self.assert_eq(\n kdf.b.groupby(kdf.a.rename()).nsmallest(2).sort_index(),\n pdf.b.groupby(pdf.a.rename()).nsmallest(2).sort_index(),\n )\n self.assert_eq(\n kdf.b.rename().groupby(kdf.a.rename()).nsmallest(2).sort_index(),\n pdf.b.rename().groupby(pdf.a.rename()).nsmallest(2).sort_index(),\n )\n with self.assertRaisesRegex(ValueError, \"nsmallest do not support multi-index now\"):\n kdf.set_index([\"a\", \"b\"]).groupby([\"c\"])[\"d\"].nsmallest(1)\n\n def test_nlargest(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,\n \"b\": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,\n \"c\": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,\n \"d\": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,\n },\n index=np.random.rand(9 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby([\"a\"])[\"b\"].nlargest(1).sort_values(),\n pdf.groupby([\"a\"])[\"b\"].nlargest(1).sort_values(),\n )\n self.assert_eq(\n kdf.groupby([\"a\"])[\"b\"].nlargest(2).sort_index(),\n pdf.groupby([\"a\"])[\"b\"].nlargest(2).sort_index(),\n )\n self.assert_eq(\n (kdf.b * 10).groupby(kdf.a).nlargest(2).sort_index(),\n (pdf.b * 10).groupby(pdf.a).nlargest(2).sort_index(),\n )\n self.assert_eq(\n kdf.b.rename().groupby(kdf.a).nlargest(2).sort_index(),\n pdf.b.rename().groupby(pdf.a).nlargest(2).sort_index(),\n )\n self.assert_eq(\n kdf.b.groupby(kdf.a.rename()).nlargest(2).sort_index(),\n pdf.b.groupby(pdf.a.rename()).nlargest(2).sort_index(),\n )\n self.assert_eq(\n kdf.b.rename().groupby(kdf.a.rename()).nlargest(2).sort_index(),\n pdf.b.rename().groupby(pdf.a.rename()).nlargest(2).sort_index(),\n )\n with 
self.assertRaisesRegex(ValueError, \"nlargest do not support multi-index now\"):\n kdf.set_index([\"a\", \"b\"]).groupby([\"c\"])[\"d\"].nlargest(1)\n\n def test_fillna(self):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2] * 3,\n \"B\": [2, 4, None, 3] * 3,\n \"C\": [None, None, None, 1] * 3,\n \"D\": [0, 1, 5, 4] * 3,\n }\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"A\").fillna(0).sort_index(), pdf.groupby(\"A\").fillna(0).sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"C\"].fillna(0).sort_index(),\n pdf.groupby(\"A\")[\"C\"].fillna(0).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"C\"]].fillna(0).sort_index(),\n pdf.groupby(\"A\")[[\"C\"]].fillna(0).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\").fillna(method=\"bfill\").sort_index(),\n pdf.groupby(\"A\").fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"C\"].fillna(method=\"bfill\").sort_index(),\n pdf.groupby(\"A\")[\"C\"].fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"C\"]].fillna(method=\"bfill\").sort_index(),\n pdf.groupby(\"A\")[[\"C\"]].fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\").fillna(method=\"ffill\").sort_index(),\n pdf.groupby(\"A\").fillna(method=\"ffill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"C\"].fillna(method=\"ffill\").sort_index(),\n pdf.groupby(\"A\")[\"C\"].fillna(method=\"ffill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"C\"]].fillna(method=\"ffill\").sort_index(),\n pdf.groupby(\"A\")[[\"C\"]].fillna(method=\"ffill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.A // 5).fillna(method=\"bfill\").sort_index(),\n pdf.groupby(pdf.A // 5).fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.A // 5)[\"C\"].fillna(method=\"bfill\").sort_index(),\n pdf.groupby(pdf.A // 5)[\"C\"].fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.A // 5)[[\"C\"]].fillna(method=\"bfill\").sort_index(),\n pdf.groupby(pdf.A // 5)[[\"C\"]].fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.A // 5).fillna(method=\"ffill\").sort_index(),\n pdf.groupby(pdf.A // 5).fillna(method=\"ffill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.A // 5)[\"C\"].fillna(method=\"ffill\").sort_index(),\n pdf.groupby(pdf.A // 5)[\"C\"].fillna(method=\"ffill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.A // 5)[[\"C\"]].fillna(method=\"ffill\").sort_index(),\n pdf.groupby(pdf.A // 5)[[\"C\"]].fillna(method=\"ffill\").sort_index(),\n )\n self.assert_eq(\n kdf.C.rename().groupby(kdf.A).fillna(0).sort_index(),\n pdf.C.rename().groupby(pdf.A).fillna(0).sort_index(),\n )\n self.assert_eq(\n kdf.C.groupby(kdf.A.rename()).fillna(0).sort_index(),\n pdf.C.groupby(pdf.A.rename()).fillna(0).sort_index(),\n )\n self.assert_eq(\n kdf.C.rename().groupby(kdf.A.rename()).fillna(0).sort_index(),\n pdf.C.rename().groupby(pdf.A.rename()).fillna(0).sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\"), (\"Z\", \"D\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).fillna(0).sort_index(),\n pdf.groupby((\"X\", \"A\")).fillna(0).sort_index(),\n )\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).fillna(method=\"bfill\").sort_index(),\n pdf.groupby((\"X\", \"A\")).fillna(method=\"bfill\").sort_index(),\n )\n self.assert_eq(\n kdf.groupby((\"X\", 
\"A\")).fillna(method=\"ffill\").sort_index(),\n pdf.groupby((\"X\", \"A\")).fillna(method=\"ffill\").sort_index(),\n )\n\n def test_ffill(self):\n idx = np.random.rand(4 * 3)\n pdf = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2] * 3,\n \"B\": [2, 4, None, 3] * 3,\n \"C\": [None, None, None, 1] * 3,\n \"D\": [0, 1, 5, 4] * 3,\n },\n index=idx,\n )\n kdf = ks.from_pandas(pdf)\n\n if LooseVersion(pd.__version__) <= LooseVersion(\"0.24.2\"):\n self.assert_eq(\n kdf.groupby(\"A\").ffill().sort_index(),\n pdf.groupby(\"A\").ffill().sort_index().drop(\"A\", 1),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"B\"]].ffill().sort_index(),\n pdf.groupby(\"A\")[[\"B\"]].ffill().sort_index().drop(\"A\", 1),\n )\n else:\n self.assert_eq(\n kdf.groupby(\"A\").ffill().sort_index(), pdf.groupby(\"A\").ffill().sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"B\"]].ffill().sort_index(),\n pdf.groupby(\"A\")[[\"B\"]].ffill().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"B\"].ffill().sort_index(), pdf.groupby(\"A\")[\"B\"].ffill().sort_index()\n )\n self.assert_eq(kdf.groupby(\"A\")[\"B\"].ffill()[idx[6]], pdf.groupby(\"A\")[\"B\"].ffill()[idx[6]])\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\"), (\"Z\", \"D\")])\n pdf.columns = columns\n kdf.columns = columns\n\n if LooseVersion(pd.__version__) <= LooseVersion(\"0.24.2\"):\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).ffill().sort_index(),\n pdf.groupby((\"X\", \"A\")).ffill().sort_index().drop((\"X\", \"A\"), 1),\n )\n else:\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).ffill().sort_index(),\n pdf.groupby((\"X\", \"A\")).ffill().sort_index(),\n )\n\n def test_bfill(self):\n idx = np.random.rand(4 * 3)\n pdf = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2] * 3,\n \"B\": [2, 4, None, 3] * 3,\n \"C\": [None, None, None, 1] * 3,\n \"D\": [0, 1, 5, 4] * 3,\n },\n index=idx,\n )\n kdf = ks.from_pandas(pdf)\n\n if LooseVersion(pd.__version__) <= LooseVersion(\"0.24.2\"):\n self.assert_eq(\n kdf.groupby(\"A\").bfill().sort_index(),\n pdf.groupby(\"A\").bfill().sort_index().drop(\"A\", 1),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"B\"]].bfill().sort_index(),\n pdf.groupby(\"A\")[[\"B\"]].bfill().sort_index().drop(\"A\", 1),\n )\n else:\n self.assert_eq(\n kdf.groupby(\"A\").bfill().sort_index(), pdf.groupby(\"A\").bfill().sort_index()\n )\n self.assert_eq(\n kdf.groupby(\"A\")[[\"B\"]].bfill().sort_index(),\n pdf.groupby(\"A\")[[\"B\"]].bfill().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"A\")[\"B\"].bfill().sort_index(), pdf.groupby(\"A\")[\"B\"].bfill().sort_index(),\n )\n self.assert_eq(kdf.groupby(\"A\")[\"B\"].bfill()[idx[6]], pdf.groupby(\"A\")[\"B\"].bfill()[idx[6]])\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"X\", \"A\"), (\"X\", \"B\"), (\"Y\", \"C\"), (\"Z\", \"D\")])\n pdf.columns = columns\n kdf.columns = columns\n\n if LooseVersion(pd.__version__) <= LooseVersion(\"0.24.2\"):\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).bfill().sort_index(),\n pdf.groupby((\"X\", \"A\")).bfill().sort_index().drop((\"X\", \"A\"), 1),\n )\n else:\n self.assert_eq(\n kdf.groupby((\"X\", \"A\")).bfill().sort_index(),\n pdf.groupby((\"X\", \"A\")).bfill().sort_index(),\n )\n\n @unittest.skipIf(pd.__version__ < \"0.24.0\", \"not supported before pandas 0.24.0\")\n def test_shift(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 1, 2, 2, 3, 3] * 3,\n \"b\": [1, 1, 2, 2, 3, 4] * 3,\n \"c\": [1, 4, 9, 16, 25, 36] * 3,\n },\n index=np.random.rand(6 * 3),\n )\n kdf = 
ks.from_pandas(pdf)\n\n self.assert_eq(kdf.groupby(\"a\").shift().sort_index(), pdf.groupby(\"a\").shift().sort_index())\n # TODO: seems like a pandas' bug when fill_value is not None?\n # self.assert_eq(kdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(),\n # pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index())\n self.assert_eq(\n kdf.groupby([\"b\"])[\"a\"].shift().sort_index(),\n pdf.groupby([\"b\"])[\"a\"].shift().sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"])[\"c\"].shift().sort_index(),\n pdf.groupby([\"a\", \"b\"])[\"c\"].shift().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).shift().sort_index(),\n pdf.groupby(pdf.b // 5).shift().sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].shift().sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].shift().sort_index(),\n )\n # TODO: known pandas' bug when fill_value is not None pandas>=1.0.0\n # https://github.com/pandas-dev/pandas/issues/31971#issue-565171762\n if LooseVersion(pd.__version__) < LooseVersion(\"1.0.0\"):\n self.assert_eq(\n kdf.groupby([\"b\"])[[\"a\", \"c\"]].shift(periods=-1, fill_value=0).sort_index(),\n pdf.groupby([\"b\"])[[\"a\", \"c\"]].shift(periods=-1, fill_value=0).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).shift().sort_index(),\n pdf.a.rename().groupby(pdf.b).shift().sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).shift().sort_index(),\n pdf.a.groupby(pdf.b.rename()).shift().sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).shift().sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).shift().sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"a\")).shift().sort_index(),\n pdf.groupby((\"x\", \"a\")).shift().sort_index(),\n )\n # TODO: seems like a pandas' bug when fill_value is not None?\n # self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,\n # fill_value=0).sort_index(),\n # pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,\n # fill_value=0).sort_index())\n\n def test_apply(self):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6], \"b\": [1, 1, 2, 3, 5, 8], \"c\": [1, 4, 9, 16, 25, 36]},\n columns=[\"a\", \"b\", \"c\"],\n )\n kdf = ks.from_pandas(pdf)\n self.assert_eq(\n kdf.groupby(\"b\").apply(lambda x: x + x.min()).sort_index(),\n pdf.groupby(\"b\").apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\").apply(len).sort_index(), pdf.groupby(\"b\").apply(len).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\")[\"a\"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),\n pdf.groupby(\"b\")[\"a\"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\")[[\"a\"]].apply(lambda x: x + x.min()).sort_index(),\n pdf.groupby(\"b\")[[\"a\"]].apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),\n pdf.groupby([\"a\", \"b\"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"c\"].apply(lambda x: 1).sort_index(),\n pdf.groupby([\"b\"])[\"c\"].apply(lambda x: 1).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"c\"].apply(len).sort_index(),\n pdf.groupby([\"b\"])[\"c\"].apply(len).sort_index(),\n )\n 
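# grouping by a derived key (kdf.b // 5) rather than a column name is compared against the pandas equivalent below\n 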
self.assert_eq(\n kdf.groupby(kdf.b // 5).apply(lambda x: x + x.min()).sort_index(),\n pdf.groupby(pdf.b // 5).apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].apply(lambda x: x + x.min()).sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[[\"a\"]].apply(lambda x: x + x.min()).sort_index(),\n pdf.groupby(pdf.b // 5)[[\"a\"]].apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[[\"a\"]].apply(len).sort_index(),\n pdf.groupby(pdf.b // 5)[[\"a\"]].apply(len).sort_index(),\n almost=True,\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).apply(lambda x: x + x.min()).sort_index(),\n pdf.a.rename().groupby(pdf.b).apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),\n pdf.a.groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),\n )\n\n with self.assertRaisesRegex(TypeError, \"int object is not callable\"):\n kdf.groupby(\"b\").apply(1)\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).apply(lambda x: 1).sort_index(),\n pdf.groupby((\"x\", \"b\")).apply(lambda x: 1).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).apply(lambda x: x + x.min()).sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).apply(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).apply(len).sort_index(),\n pdf.groupby((\"x\", \"b\")).apply(len).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).apply(len).sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).apply(len).sort_index(),\n )\n\n def test_apply_without_shortcut(self):\n with option_context(\"compute.shortcut_limit\", 0):\n self.test_apply()\n\n def test_apply_negative(self):\n def func(_) -> ks.Series[int]:\n return pd.Series([1])\n\n with self.assertRaisesRegex(TypeError, \"Series as a return type hint at frame groupby\"):\n ks.range(10).groupby(\"id\").apply(func)\n\n def test_apply_with_new_dataframe(self):\n pdf = pd.DataFrame(\n {\"timestamp\": [0.0, 0.5, 1.0, 0.0, 0.5], \"car_id\": [\"A\", \"A\", \"A\", \"B\", \"B\"]}\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"car_id\").apply(lambda _: pd.DataFrame({\"column\": [0.0]})).sort_index(),\n pdf.groupby(\"car_id\").apply(lambda _: pd.DataFrame({\"column\": [0.0]})).sort_index(),\n )\n\n self.assert_eq(\n kdf.groupby(\"car_id\")\n .apply(lambda df: pd.DataFrame({\"mean\": [df[\"timestamp\"].mean()]}))\n .sort_index(),\n pdf.groupby(\"car_id\")\n .apply(lambda df: pd.DataFrame({\"mean\": [df[\"timestamp\"].mean()]}))\n .sort_index(),\n )\n\n # dataframe with 1000+ records\n pdf = pd.DataFrame(\n {\n \"timestamp\": [0.0, 0.5, 1.0, 0.0, 0.5] * 300,\n \"car_id\": [\"A\", \"A\", \"A\", \"B\", \"B\"] * 300,\n }\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"car_id\").apply(lambda _: pd.DataFrame({\"column\": [0.0]})).sort_index(),\n pdf.groupby(\"car_id\").apply(lambda _: pd.DataFrame({\"column\": [0.0]})).sort_index(),\n )\n\n 
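# the applied function may also build a brand-new single-row DataFrame per group (here, the per-group mean of \"timestamp\")\n 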
self.assert_eq(\n kdf.groupby(\"car_id\")\n .apply(lambda df: pd.DataFrame({\"mean\": [df[\"timestamp\"].mean()]}))\n .sort_index(),\n pdf.groupby(\"car_id\")\n .apply(lambda df: pd.DataFrame({\"mean\": [df[\"timestamp\"].mean()]}))\n .sort_index(),\n )\n\n def test_apply_with_new_dataframe_without_shortcut(self):\n with option_context(\"compute.shortcut_limit\", 0):\n self.test_apply_with_new_dataframe()\n\n def test_apply_key_handling(self):\n pdf = pd.DataFrame(\n {\"d\": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], \"v\": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"d\").apply(sum).sort_index(), pdf.groupby(\"d\").apply(sum).sort_index()\n )\n\n with ks.option_context(\"compute.shortcut_limit\", 1):\n self.assert_eq(\n kdf.groupby(\"d\").apply(sum).sort_index(), pdf.groupby(\"d\").apply(sum).sort_index()\n )\n\n def test_apply_with_side_effect(self):\n pdf = pd.DataFrame(\n {\"d\": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], \"v\": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}\n )\n kdf = ks.from_pandas(pdf)\n\n acc = ks.utils.default_session().sparkContext.accumulator(0)\n\n def sum_with_acc_frame(x) -> ks.DataFrame[np.float64, np.float64]:\n nonlocal acc\n acc += 1\n return np.sum(x)\n\n actual = kdf.groupby(\"d\").apply(sum_with_acc_frame).sort_index()\n actual.columns = [\"d\", \"v\"]\n self.assert_eq(actual, pdf.groupby(\"d\").apply(sum).sort_index().reset_index(drop=True))\n self.assert_eq(acc.value, 2)\n\n def sum_with_acc_series(x) -> np.float64:\n nonlocal acc\n acc += 1\n return np.sum(x)\n\n self.assert_eq(\n kdf.groupby(\"d\")[\"v\"].apply(sum_with_acc_series).sort_index(),\n pdf.groupby(\"d\")[\"v\"].apply(sum).sort_index().reset_index(drop=True),\n )\n self.assert_eq(acc.value, 4)\n\n def test_transform(self):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6], \"b\": [1, 1, 2, 3, 5, 8], \"c\": [1, 4, 9, 16, 25, 36]},\n columns=[\"a\", \"b\", \"c\"],\n )\n kdf = ks.from_pandas(pdf)\n self.assert_eq(\n kdf.groupby(\"b\").transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby(\"b\").transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\")[\"a\"].transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby(\"b\")[\"a\"].transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\")[[\"a\"]].transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby(\"b\")[[\"a\"]].transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby([\"a\", \"b\"]).transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"b\"])[\"c\"].transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby([\"b\"])[\"c\"].transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5).transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby(pdf.b // 5).transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[\"a\"].transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby(pdf.b // 5)[\"a\"].transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf.b // 5)[[\"a\"]].transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby(pdf.b // 5)[[\"a\"]].transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).transform(lambda x: x + x.min()).sort_index(),\n pdf.a.rename().groupby(pdf.b).transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n 
kdf.a.groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),\n pdf.a.groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby((\"x\", \"b\")).transform(lambda x: x + x.min()).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).transform(lambda x: x + x.min()).sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")]).transform(lambda x: x + x.min()).sort_index(),\n )\n\n def test_transform_without_shortcut(self):\n with option_context(\"compute.shortcut_limit\", 0):\n self.test_transform()\n\n def test_filter(self):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6], \"b\": [1, 1, 2, 3, 5, 8], \"c\": [1, 4, 9, 16, 25, 36]},\n columns=[\"a\", \"b\", \"c\"],\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"b\").filter(lambda x: any(x.a == 2)).sort_index(),\n pdf.groupby(\"b\").filter(lambda x: any(x.a == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\")[\"a\"].filter(lambda x: any(x == 2)).sort_index(),\n pdf.groupby(\"b\")[\"a\"].filter(lambda x: any(x == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(\"b\")[[\"a\"]].filter(lambda x: any(x.a == 2)).sort_index(),\n pdf.groupby(\"b\")[[\"a\"]].filter(lambda x: any(x.a == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([\"a\", \"b\"]).filter(lambda x: any(x.a == 2)).sort_index(),\n pdf.groupby([\"a\", \"b\"]).filter(lambda x: any(x.a == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf[\"b\"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),\n pdf.groupby(pdf[\"b\"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf[\"b\"] // 5)[\"a\"].filter(lambda x: any(x == 2)).sort_index(),\n pdf.groupby(pdf[\"b\"] // 5)[\"a\"].filter(lambda x: any(x == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby(kdf[\"b\"] // 5)[[\"a\"]].filter(lambda x: any(x.a == 2)).sort_index(),\n pdf.groupby(pdf[\"b\"] // 5)[[\"a\"]].filter(lambda x: any(x.a == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b).filter(lambda x: any(x == 2)).sort_index(),\n pdf.a.rename().groupby(pdf.b).filter(lambda x: any(x == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.a.groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),\n pdf.a.groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.a.rename().groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),\n pdf.a.rename().groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),\n )\n\n with self.assertRaisesRegex(TypeError, \"int object is not callable\"):\n kdf.groupby(\"b\").filter(1)\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n kdf.groupby((\"x\", \"b\")).filter(lambda x: any(x[(\"x\", \"a\")] == 2)).sort_index(),\n pdf.groupby((\"x\", \"b\")).filter(lambda x: any(x[(\"x\", \"a\")] == 2)).sort_index(),\n )\n self.assert_eq(\n kdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")])\n 
.filter(lambda x: any(x[(\"x\", \"a\")] == 2))\n .sort_index(),\n pdf.groupby([(\"x\", \"a\"), (\"x\", \"b\")])\n .filter(lambda x: any(x[(\"x\", \"a\")] == 2))\n .sort_index(),\n )\n\n def test_idxmax(self):\n pdf = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3] * 3, \"b\": [1, 2, 3, 4, 5] * 3, \"c\": [5, 4, 3, 2, 1] * 3}\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n pdf.groupby([\"a\"]).idxmax().sort_index(), kdf.groupby([\"a\"]).idxmax().sort_index()\n )\n self.assert_eq(\n pdf.groupby([\"a\"]).idxmax(skipna=False).sort_index(),\n kdf.groupby([\"a\"]).idxmax(skipna=False).sort_index(),\n )\n self.assert_eq(\n pdf.groupby([\"a\"])[\"b\"].idxmax().sort_index(),\n kdf.groupby([\"a\"])[\"b\"].idxmax().sort_index(),\n )\n self.assert_eq(\n pdf.b.rename().groupby(pdf.a).idxmax().sort_index(),\n kdf.b.rename().groupby(kdf.a).idxmax().sort_index(),\n )\n self.assert_eq(\n pdf.b.groupby(pdf.a.rename()).idxmax().sort_index(),\n kdf.b.groupby(kdf.a.rename()).idxmax().sort_index(),\n )\n self.assert_eq(\n pdf.b.rename().groupby(pdf.a.rename()).idxmax().sort_index(),\n kdf.b.rename().groupby(kdf.a.rename()).idxmax().sort_index(),\n )\n\n with self.assertRaisesRegex(ValueError, \"idxmax only support one-level index now\"):\n kdf.set_index([\"a\", \"b\"]).groupby([\"c\"]).idxmax()\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).idxmax().sort_index(),\n kdf.groupby((\"x\", \"a\")).idxmax().sort_index(),\n )\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).idxmax(skipna=False).sort_index(),\n kdf.groupby((\"x\", \"a\")).idxmax(skipna=False).sort_index(),\n )\n\n def test_idxmin(self):\n pdf = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3] * 3, \"b\": [1, 2, 3, 4, 5] * 3, \"c\": [5, 4, 3, 2, 1] * 3}\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n pdf.groupby([\"a\"]).idxmin().sort_index(), kdf.groupby([\"a\"]).idxmin().sort_index()\n )\n self.assert_eq(\n pdf.groupby([\"a\"]).idxmin(skipna=False).sort_index(),\n kdf.groupby([\"a\"]).idxmin(skipna=False).sort_index(),\n )\n self.assert_eq(\n pdf.groupby([\"a\"])[\"b\"].idxmin().sort_index(),\n kdf.groupby([\"a\"])[\"b\"].idxmin().sort_index(),\n )\n self.assert_eq(\n pdf.b.rename().groupby(pdf.a).idxmin().sort_index(),\n kdf.b.rename().groupby(kdf.a).idxmin().sort_index(),\n )\n self.assert_eq(\n pdf.b.groupby(pdf.a.rename()).idxmin().sort_index(),\n kdf.b.groupby(kdf.a.rename()).idxmin().sort_index(),\n )\n self.assert_eq(\n pdf.b.rename().groupby(pdf.a.rename()).idxmin().sort_index(),\n kdf.b.rename().groupby(kdf.a.rename()).idxmin().sort_index(),\n )\n\n with self.assertRaisesRegex(ValueError, \"idxmin only support one-level index now\"):\n kdf.set_index([\"a\", \"b\"]).groupby([\"c\"]).idxmin()\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).idxmin().sort_index(),\n kdf.groupby((\"x\", \"a\")).idxmin().sort_index(),\n )\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).idxmin(skipna=False).sort_index(),\n kdf.groupby((\"x\", \"a\")).idxmin(skipna=False).sort_index(),\n )\n\n def test_head(self):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,\n \"b\": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,\n \"c\": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,\n },\n index=np.random.rand(10 * 3),\n )\n kdf = ks.from_pandas(pdf)\n\n 
self.assert_eq(pdf.groupby(\"a\").head(2).sort_index(), kdf.groupby(\"a\").head(2).sort_index())\n self.assert_eq(\n pdf.groupby(\"a\").head(-2).sort_index(), kdf.groupby(\"a\").head(-2).sort_index()\n )\n self.assert_eq(\n pdf.groupby(\"a\").head(100000).sort_index(), kdf.groupby(\"a\").head(100000).sort_index()\n )\n\n self.assert_eq(\n pdf.groupby(\"a\")[\"b\"].head(2).sort_index(), kdf.groupby(\"a\")[\"b\"].head(2).sort_index()\n )\n self.assert_eq(\n pdf.groupby(\"a\")[\"b\"].head(-2).sort_index(), kdf.groupby(\"a\")[\"b\"].head(-2).sort_index()\n )\n self.assert_eq(\n pdf.groupby(\"a\")[\"b\"].head(100000).sort_index(),\n kdf.groupby(\"a\")[\"b\"].head(100000).sort_index(),\n )\n\n self.assert_eq(\n pdf.groupby(\"a\")[[\"b\"]].head(2).sort_index(),\n kdf.groupby(\"a\")[[\"b\"]].head(2).sort_index(),\n )\n self.assert_eq(\n pdf.groupby(\"a\")[[\"b\"]].head(-2).sort_index(),\n kdf.groupby(\"a\")[[\"b\"]].head(-2).sort_index(),\n )\n self.assert_eq(\n pdf.groupby(\"a\")[[\"b\"]].head(100000).sort_index(),\n kdf.groupby(\"a\")[[\"b\"]].head(100000).sort_index(),\n )\n\n self.assert_eq(\n pdf.groupby(pdf.a // 2).head(2).sort_index(),\n kdf.groupby(kdf.a // 2).head(2).sort_index(),\n )\n self.assert_eq(\n pdf.groupby(pdf.a // 2)[\"b\"].head(2).sort_index(),\n kdf.groupby(kdf.a // 2)[\"b\"].head(2).sort_index(),\n )\n self.assert_eq(\n pdf.groupby(pdf.a // 2)[[\"b\"]].head(2).sort_index(),\n kdf.groupby(kdf.a // 2)[[\"b\"]].head(2).sort_index(),\n )\n\n self.assert_eq(\n pdf.b.rename().groupby(pdf.a).head(2).sort_index(),\n kdf.b.rename().groupby(kdf.a).head(2).sort_index(),\n )\n self.assert_eq(\n pdf.b.groupby(pdf.a.rename()).head(2).sort_index(),\n kdf.b.groupby(kdf.a.rename()).head(2).sort_index(),\n )\n self.assert_eq(\n pdf.b.rename().groupby(pdf.a.rename()).head(2).sort_index(),\n kdf.b.rename().groupby(kdf.a.rename()).head(2).sort_index(),\n )\n\n # multi-index\n midx = pd.MultiIndex(\n [[\"x\", \"y\"], [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]],\n [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],\n )\n pdf = pd.DataFrame(\n {\n \"a\": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"b\": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],\n \"c\": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],\n },\n columns=[\"a\", \"b\", \"c\"],\n index=midx,\n )\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(pdf.groupby(\"a\").head(2).sort_index(), kdf.groupby(\"a\").head(2).sort_index())\n self.assert_eq(\n pdf.groupby(\"a\").head(-2).sort_index(), kdf.groupby(\"a\").head(-2).sort_index()\n )\n self.assert_eq(\n pdf.groupby(\"a\").head(100000).sort_index(), kdf.groupby(\"a\").head(100000).sort_index()\n )\n\n self.assert_eq(\n pdf.groupby(\"a\")[\"b\"].head(2).sort_index(), kdf.groupby(\"a\")[\"b\"].head(2).sort_index()\n )\n self.assert_eq(\n pdf.groupby(\"a\")[\"b\"].head(-2).sort_index(), kdf.groupby(\"a\")[\"b\"].head(-2).sort_index()\n )\n self.assert_eq(\n pdf.groupby(\"a\")[\"b\"].head(100000).sort_index(),\n kdf.groupby(\"a\")[\"b\"].head(100000).sort_index(),\n )\n\n # multi-index columns\n columns = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n pdf.columns = columns\n kdf.columns = columns\n\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).head(2).sort_index(),\n kdf.groupby((\"x\", \"a\")).head(2).sort_index(),\n )\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).head(-2).sort_index(),\n kdf.groupby((\"x\", \"a\")).head(-2).sort_index(),\n )\n self.assert_eq(\n pdf.groupby((\"x\", \"a\")).head(100000).sort_index(),\n kdf.groupby((\"x\", 
\"a\")).head(100000).sort_index(),\n )\n\n def test_missing(self):\n kdf = ks.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9]})\n\n # DataFrameGroupBy functions\n missing_functions = inspect.getmembers(\n MissingPandasLikeDataFrameGroupBy, inspect.isfunction\n )\n unsupported_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"unsupported_function\"\n ]\n for name in unsupported_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"method.*GroupBy.*{}.*not implemented( yet\\\\.|\\\\. .+)\".format(name),\n ):\n getattr(kdf.groupby(\"a\"), name)()\n\n deprecated_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"deprecated_function\"\n ]\n for name in deprecated_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"method.*GroupBy.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.groupby(\"a\"), name)()\n\n # SeriesGroupBy functions\n missing_functions = inspect.getmembers(MissingPandasLikeSeriesGroupBy, inspect.isfunction)\n unsupported_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"unsupported_function\"\n ]\n for name in unsupported_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"method.*GroupBy.*{}.*not implemented( yet\\\\.|\\\\. .+)\".format(name),\n ):\n getattr(kdf.a.groupby(kdf.a), name)()\n\n deprecated_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"deprecated_function\"\n ]\n for name in deprecated_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"method.*GroupBy.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.a.groupby(kdf.a), name)()\n\n # DataFrameGroupBy properties\n missing_properties = inspect.getmembers(\n MissingPandasLikeDataFrameGroupBy, lambda o: isinstance(o, property)\n )\n unsupported_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"unsupported_property\"\n ]\n for name in unsupported_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"property.*GroupBy.*{}.*not implemented( yet\\\\.|\\\\. .+)\".format(name),\n ):\n getattr(kdf.groupby(\"a\"), name)\n deprecated_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"deprecated_property\"\n ]\n for name in deprecated_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"property.*GroupBy.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.groupby(\"a\"), name)\n\n # SeriesGroupBy properties\n missing_properties = inspect.getmembers(\n MissingPandasLikeSeriesGroupBy, lambda o: isinstance(o, property)\n )\n unsupported_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"unsupported_property\"\n ]\n for name in unsupported_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"property.*GroupBy.*{}.*not implemented( yet\\\\.|\\\\. 
.+)\".format(name),\n ):\n getattr(kdf.a.groupby(kdf.a), name)\n deprecated_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"deprecated_property\"\n ]\n for name in deprecated_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"property.*GroupBy.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.a.groupby(kdf.a), name)\n\n @staticmethod\n def test_is_multi_agg_with_relabel():\n\n assert is_multi_agg_with_relabel(a=\"max\") is False\n assert is_multi_agg_with_relabel(a_min=(\"a\", \"max\"), a_max=(\"a\", \"min\")) is True\n\n def test_get_group(self):\n pdf = pd.DataFrame(\n [\n (\"falcon\", \"bird\", 389.0),\n (\"parrot\", \"bird\", 24.0),\n (\"lion\", \"mammal\", 80.5),\n (\"monkey\", \"mammal\", np.nan),\n ],\n columns=[\"name\", \"class\", \"max_speed\"],\n index=[0, 2, 3, 1],\n )\n pdf.columns.name = \"Koalas\"\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(\n kdf.groupby(\"class\").get_group(\"bird\"), pdf.groupby(\"class\").get_group(\"bird\"),\n )\n self.assert_eq(\n kdf.groupby(\"class\")[\"name\"].get_group(\"mammal\"),\n pdf.groupby(\"class\")[\"name\"].get_group(\"mammal\"),\n )\n self.assert_eq(\n kdf.groupby(\"class\")[[\"name\"]].get_group(\"mammal\"),\n pdf.groupby(\"class\")[[\"name\"]].get_group(\"mammal\"),\n )\n self.assert_eq(\n kdf.groupby([\"class\", \"name\"]).get_group((\"mammal\", \"lion\")),\n pdf.groupby([\"class\", \"name\"]).get_group((\"mammal\", \"lion\")),\n )\n self.assert_eq(\n kdf.groupby([\"class\", \"name\"])[\"max_speed\"].get_group((\"mammal\", \"lion\")),\n pdf.groupby([\"class\", \"name\"])[\"max_speed\"].get_group((\"mammal\", \"lion\")),\n )\n self.assert_eq(\n kdf.groupby([\"class\", \"name\"])[[\"max_speed\"]].get_group((\"mammal\", \"lion\")),\n pdf.groupby([\"class\", \"name\"])[[\"max_speed\"]].get_group((\"mammal\", \"lion\")),\n )\n self.assert_eq(\n (kdf.max_speed + 1).groupby(kdf[\"class\"]).get_group(\"mammal\"),\n (pdf.max_speed + 1).groupby(pdf[\"class\"]).get_group(\"mammal\"),\n )\n self.assert_eq(\n kdf.groupby(\"max_speed\").get_group(80.5), pdf.groupby(\"max_speed\").get_group(80.5),\n )\n\n self.assertRaises(KeyError, lambda: kdf.groupby(\"class\").get_group(\"fish\"))\n self.assertRaises(TypeError, lambda: kdf.groupby(\"class\").get_group([\"bird\", \"mammal\"]))\n self.assertRaises(KeyError, lambda: kdf.groupby(\"class\")[\"name\"].get_group(\"fish\"))\n self.assertRaises(\n TypeError, lambda: kdf.groupby(\"class\")[\"name\"].get_group([\"bird\", \"mammal\"])\n )\n self.assertRaises(\n KeyError, lambda: kdf.groupby([\"class\", \"name\"]).get_group((\"lion\", \"mammal\"))\n )\n self.assertRaises(ValueError, lambda: kdf.groupby([\"class\", \"name\"]).get_group((\"lion\",)))\n self.assertRaises(ValueError, lambda: kdf.groupby([\"class\", \"name\"]).get_group((\"mammal\",)))\n self.assertRaises(ValueError, lambda: kdf.groupby([\"class\", \"name\"]).get_group(\"mammal\"))\n\n # MultiIndex columns\n pdf.columns = pd.MultiIndex.from_tuples([(\"A\", \"name\"), (\"B\", \"class\"), (\"C\", \"max_speed\")])\n pdf.columns.names = [\"Hello\", \"Koalas\"]\n kdf = ks.from_pandas(pdf)\n self.assert_eq(\n kdf.groupby((\"B\", \"class\")).get_group(\"bird\"),\n pdf.groupby((\"B\", \"class\")).get_group(\"bird\"),\n )\n self.assert_eq(\n kdf.groupby((\"B\", \"class\"))[[(\"A\", \"name\")]].get_group(\"mammal\"),\n pdf.groupby((\"B\", \"class\"))[[(\"A\", \"name\")]].get_group(\"mammal\"),\n )\n self.assert_eq(\n kdf.groupby([(\"B\", \"class\"), (\"A\", 
\"name\")]).get_group((\"mammal\", \"lion\")),\n pdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")]).get_group((\"mammal\", \"lion\")),\n )\n self.assert_eq(\n kdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")])[[(\"C\", \"max_speed\")]].get_group(\n (\"mammal\", \"lion\")\n ),\n pdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")])[[(\"C\", \"max_speed\")]].get_group(\n (\"mammal\", \"lion\")\n ),\n )\n self.assert_eq(\n (kdf[(\"C\", \"max_speed\")] + 1).groupby(kdf[(\"B\", \"class\")]).get_group(\"mammal\"),\n (pdf[(\"C\", \"max_speed\")] + 1).groupby(pdf[(\"B\", \"class\")]).get_group(\"mammal\"),\n )\n self.assert_eq(\n kdf.groupby((\"C\", \"max_speed\")).get_group(80.5),\n pdf.groupby((\"C\", \"max_speed\")).get_group(80.5),\n )\n\n self.assertRaises(KeyError, lambda: kdf.groupby((\"B\", \"class\")).get_group(\"fish\"))\n self.assertRaises(\n TypeError, lambda: kdf.groupby((\"B\", \"class\")).get_group([\"bird\", \"mammal\"])\n )\n self.assertRaises(\n KeyError, lambda: kdf.groupby((\"B\", \"class\"))[(\"A\", \"name\")].get_group(\"fish\")\n )\n self.assertRaises(\n KeyError,\n lambda: kdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")]).get_group((\"lion\", \"mammal\")),\n )\n self.assertRaises(\n ValueError, lambda: kdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")]).get_group((\"lion\",)),\n )\n self.assertRaises(\n ValueError, lambda: kdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")]).get_group((\"mammal\",))\n )\n self.assertRaises(\n ValueError, lambda: kdf.groupby([(\"B\", \"class\"), (\"A\", \"name\")]).get_group(\"mammal\")\n )\n"
] | [
[
"numpy.timedelta64",
"pandas.api.types.is_list_like"
],
[
"pandas.Series",
"pandas.MultiIndex",
"pandas.Index",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.random.rand",
"pandas.NamedAgg",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.24"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.5",
"2.0",
"1.4"
],
"scipy": [],
"tensorflow": []
}
] |
taolon2018/Tensorflow2_Models | [
"b931b6779e8d2fc775bcaa1e9cbcad6edf0438f4"
] | [
"object_detection/YOLO_v3/model/yolov3.py"
] | [
"import tensorflow as tf\n\nfrom object_detection.YOLO_v3.backbone.darknet53 import Darknet53, ConvLayer\n\n\nclass ConvSet(tf.keras.layers.Layer):\n def __init__(self, output_dim):\n super(ConvSet, self).__init__()\n self.conv_1 = ConvLayer(output_dim, 1)\n self.conv_2 = ConvLayer(output_dim * 2, 3)\n self.conv_3 = ConvLayer(output_dim, 1)\n self.conv_4 = ConvLayer(output_dim * 2, 3)\n self.conv_5 = ConvLayer(output_dim, 1)\n\n def __call__(self, x):\n x = self.conv_1(x)\n x = self.conv_2(x)\n x = self.conv_3(x)\n x = self.conv_4(x)\n x = self.conv_5(x)\n return x\n\n\nclass Yolov3(tf.keras.Model):\n def __init__(self, predict_class_number=21):\n super(Yolov3, self).__init__()\n self.darknet53 = Darknet53()\n self.convset_1 = ConvSet(512)\n self.small_branch_conv_1 = ConvLayer(1024, 1)\n self.small_branch_conv_2 = tf.keras.layers.Conv2D(\n 3 * (predict_class_number + 5), 1, activation=None\n )\n self.conv_1 = ConvLayer(256, 1)\n self.convset_2 = ConvSet(256)\n self.medium_branch_conv_1 = ConvLayer(512, 1)\n self.medium_branch_conv_2 = tf.keras.layers.Conv2D(\n 3 * (predict_class_number + 5), 1, activation=None\n )\n self.conv_2 = ConvLayer(512, 1)\n self.convset_3 = ConvSet(128)\n self.large_branch_conv_1 = ConvLayer(256, 1)\n self.large_branch_conv_2 = tf.keras.layers.Conv2D(\n 3 * (predict_class_number + 5), 1, activation=None\n )\n self.conv_3 = ConvLayer(1024, 1)\n\n def __call__(self, x):\n input_1, input_2, input_3 = self.darknet53(x)\n x = input_3\n\n x = self.convset_1(x)\n\n output_1 = self.small_branch_conv_1(x)\n output_1 = self.small_branch_conv_2(output_1)\n\n x = self.conv_1(x)\n x = tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=\"nearest\")\n x = tf.concat([x, input_2], axis=-1)\n\n x = self.convset_2(x)\n\n output_2 = self.medium_branch_conv_1(x)\n output_2 = self.medium_branch_conv_2(output_2)\n\n x = self.conv_2(x)\n x = tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=\"nearest\")\n x = tf.concat([x, input_1], axis=-1)\n\n x = self.convset_3(x)\n\n output_3 = self.large_branch_conv_1(x)\n output_3 = self.large_branch_conv_2(output_3)\n\n return output_1, output_2, output_3\n"
] | [
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.concat",
"tensorflow.image.resize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jeffreyblair/ML | [
"9cf810cbe9a1720de9013740650f9d72b61bde59"
] | [
"logistic_regression/logistic.py"
] | [
"\"\"\" Methods for doing logistic regression.\"\"\"\n\nimport numpy as np\nfrom utils import sigmoid\n\ndef logistic_predict(weights, data):\n \"\"\"\n Compute the probabilities predicted by the logistic classifier.\n\n Note: N is the number of examples and\n M is the number of features per example.\n\n Inputs:\n weights: (M+1) x 1 vector of weights, where the last element\n corresponds to the bias (intercepts).\n data: N x M data matrix where each row corresponds\n to one data point.\n Outputs:\n y: :N x 1 vector of probabilities. This is the output of the classifier.\n \"\"\"\n\n ones = np.array([[1] for i in range(data.shape[0])])\n n_data = np.c_[data, ones]\n y = n_data @ weights\n return sigmoid(y)\n\ndef evaluate(targets, y):\n \"\"\"\n Compute evaluation metrics.\n Inputs:\n targets : N x 1 vector of targets.\n y : N x 1 vector of probabilities.\n Outputs:\n ce : (scalar) Cross entropy. CE(p, q) = E_p[-log q]. Here we want to compute CE(targets, y)\n frac_correct : (scalar) Fraction of inputs classified correctly.\n \"\"\"\n ce = -np.sum(targets * np.log(y) + (1-targets) * np.log(1-y))/len(targets)\n\n predictions = [1 if y_i > 0.5 else 0 for y_i in y]\n correct = [1 if predictions[i] == targets[i] else 0 for i in range(len(predictions))]\n\n frac_correct = sum(correct)/len(correct)\n\n return ce, frac_correct\n\ndef logistic(weights, data, targets, hyperparameters):\n \"\"\"\n Calculate negative log likelihood and its derivatives with respect to weights.\n Also return the predictions.\n\n Note: N is the number of examples and\n M is the number of features per example.\n\n Inputs:\n weights: (M+1) x 1 vector of weights, where the last element\n corresponds to bias (intercepts).\n data: N x M data matrix where each row corresponds\n to one data point.\n targets: N x 1 vector of targets class probabilities.\n hyperparameters: The hyperparameters dictionary.\n\n Outputs:\n f: The sum of the loss over all data points. This is the objective that we want to minimize.\n df: (M+1) x 1 vector of derivative of f w.r.t. weights.\n y: N x 1 vector of probabilities.\n \"\"\"\n\n y = logistic_predict(weights, data)\n f, frac = evaluate(targets, y)\n d = data.T @ (y - targets)\n db = np.array([sum((y - targets))])\n zero = np.array([[0]])\n df = np.r_[d, db]\n\n return f, df, y\n\n\ndef logistic_pen(weights, data, targets, hyperparameters):\n \"\"\"\n Calculate negative log likelihood and its derivatives with respect to weights.\n Also return the predictions.\n\n Note: N is the number of examples and\n M is the number of features per example.\n\n Inputs:\n weights: (M+1) x 1 vector of weights, where the last element\n corresponds to bias (intercepts).\n data: N x M data matrix where each row corresponds\n to one data point.\n targets: N x 1 vector of targets class probabilities.\n hyperparameters: The hyperparameters dictionary.\n\n Outputs:\n f: The sum of the loss over all data points. This is the objective that we want to minimize.\n df: (M+1) x 1 vector of derivative of f w.r.t. weights.\n \"\"\"\n\n lambd = hyperparameters['weight_regularization']\n f, dwb, y = logistic(weights, data, targets, hyperparameters)\n\n regularizer = hyperparameters['weight_regularization'] * weights\n regularizer[-1] = 0 # do not penalize bias\n df = dwb + regularizer\n\n return f, df, y\n"
] | [
[
"numpy.log",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lukevolpatti/xarray | [
"65ca92a5c0a4143d00dd7a822bcb1d49738717f1"
] | [
"xarray/core/common.py"
] | [
"import warnings\nfrom contextlib import suppress\nfrom html import escape\nfrom textwrap import dedent\nfrom typing import (\n Any,\n Callable,\n Dict,\n Hashable,\n Iterable,\n Iterator,\n List,\n Mapping,\n Tuple,\n TypeVar,\n Union,\n)\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dtypes, duck_array_ops, formatting, formatting_html, ops\nfrom .arithmetic import SupportsArithmetic\nfrom .npcompat import DTypeLike\nfrom .options import OPTIONS, _get_keep_attrs\nfrom .pycompat import dask_array_type\nfrom .rolling_exp import RollingExp\nfrom .utils import Frozen, either_dict_or_kwargs, is_scalar\n\n# Used as a sentinel value to indicate a all dimensions\nALL_DIMS = ...\n\n\nC = TypeVar(\"C\")\nT = TypeVar(\"T\")\n\n\nclass ImplementsArrayReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs):\n return self.reduce(func, dim, axis, skipna=skipna, **kwargs)\n\n else:\n\n def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore\n return self.reduce(func, dim, axis, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `{name}` is calculated over axes.\"\"\"\n )\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\"\n )\n\n\nclass ImplementsDatasetReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, skipna=None, **kwargs):\n return self.reduce(\n func, dim, skipna=skipna, numeric_only=numeric_only, **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, **kwargs): # type: ignore\n return self.reduce(func, dim, numeric_only=numeric_only, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`. By default `{name}` is\n applied over all dimensions.\n \"\"\"\n ).strip()\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. 
Only one of the 'dim'\n and 'axis' arguments can be supplied.\n \"\"\"\n ).strip()\n\n\nclass AbstractArray(ImplementsArrayReduce):\n \"\"\"Shared base class for DataArray and Variable.\n \"\"\"\n\n __slots__ = ()\n\n def __bool__(self: Any) -> bool:\n return bool(self.values)\n\n def __float__(self: Any) -> float:\n return float(self.values)\n\n def __int__(self: Any) -> int:\n return int(self.values)\n\n def __complex__(self: Any) -> complex:\n return complex(self.values)\n\n def __array__(self: Any, dtype: DTypeLike = None) -> np.ndarray:\n return np.asarray(self.values, dtype=dtype)\n\n def __repr__(self) -> str:\n return formatting.array_repr(self)\n\n def _repr_html_(self):\n if OPTIONS[\"display_style\"] == \"text\":\n return f\"<pre>{escape(repr(self))}</pre>\"\n return formatting_html.array_repr(self)\n\n def _iter(self: Any) -> Iterator[Any]:\n for n in range(len(self)):\n yield self[n]\n\n def __iter__(self: Any) -> Iterator[Any]:\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n return self._iter()\n\n def get_axis_num(\n self, dim: Union[Hashable, Iterable[Hashable]]\n ) -> Union[int, Tuple[int, ...]]:\n \"\"\"Return axis number(s) corresponding to dimension(s) in this array.\n\n Parameters\n ----------\n dim : str or iterable of str\n Dimension name(s) for which to lookup axes.\n\n Returns\n -------\n int or tuple of int\n Axis number or numbers corresponding to the given dimensions.\n \"\"\"\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n return tuple(self._get_axis_num(d) for d in dim)\n else:\n return self._get_axis_num(dim)\n\n def _get_axis_num(self: Any, dim: Hashable) -> int:\n try:\n return self.dims.index(dim)\n except ValueError:\n raise ValueError(f\"{dim!r} not found in array dimensions {self.dims!r}\")\n\n @property\n def sizes(self: Any) -> Mapping[Hashable, int]:\n \"\"\"Ordered mapping from dimension names to lengths.\n\n Immutable.\n\n See also\n --------\n Dataset.sizes\n \"\"\"\n return Frozen(dict(zip(self.dims, self.shape)))\n\n\nclass AttrAccessMixin:\n \"\"\"Mixin class that allows getting keys with attribute access\n \"\"\"\n\n __slots__ = ()\n\n def __init_subclass__(cls):\n \"\"\"Verify that all subclasses explicitly define ``__slots__``. 
If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n \"\"\"\n if not hasattr(object.__new__(cls), \"__dict__\"):\n pass\n elif cls.__module__.startswith(\"xarray.\"):\n raise AttributeError(\"%s must explicitly define __slots__\" % cls.__name__)\n else:\n cls.__setattr__ = cls._setattr_dict\n warnings.warn(\n \"xarray subclass %s should explicitly define __slots__\" % cls.__name__,\n FutureWarning,\n stacklevel=2,\n )\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return []\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-autocompletion\n \"\"\"\n return []\n\n def __getattr__(self, name: str) -> Any:\n if name not in {\"__dict__\", \"__setstate__\"}:\n # this avoids an infinite loop when pickle looks for the\n # __setstate__ attribute before the xarray object is initialized\n for source in self._attr_sources:\n with suppress(KeyError):\n return source[name]\n raise AttributeError(\n \"{!r} object has no attribute {!r}\".format(type(self).__name__, name)\n )\n\n # This complicated two-method design boosts overall performance of simple operations\n # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by\n # a whopping 8% compared to a single method that checks hasattr(self, \"__dict__\") at\n # runtime before every single assignment. All of this is just temporary until the\n # FutureWarning can be changed into a hard crash.\n def _setattr_dict(self, name: str, value: Any) -> None:\n \"\"\"Deprecated third party subclass (see ``__init_subclass__`` above)\n \"\"\"\n object.__setattr__(self, name, value)\n if name in self.__dict__:\n # Custom, non-slotted attr, or improperly assigned variable?\n warnings.warn(\n \"Setting attribute %r on a %r object. Explicitly define __slots__ \"\n \"to suppress this warning for legitimate custom attributes and \"\n \"raise an error when attempting variables assignments.\"\n % (name, type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n \"\"\"\n try:\n object.__setattr__(self, name, value)\n except AttributeError as e:\n # Don't accidentally shadow custom AttributeErrors, e.g.\n # DataArray.dims.setter\n if str(e) != \"{!r} object has no attribute {!r}\".format(\n type(self).__name__, name\n ):\n raise\n raise AttributeError(\n \"cannot set attribute %r on a %r object. Use __setitem__ style\"\n \"assignment (e.g., `ds['name'] = ...`) instead of assigning variables.\"\n % (name, type(self).__name__)\n ) from e\n\n def __dir__(self) -> List[str]:\n \"\"\"Provide method name lookup and completion. 
Only provide 'public'\n methods.\n \"\"\"\n extra_attrs = [\n item\n for sublist in self._attr_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return sorted(set(dir(type(self)) + extra_attrs))\n\n def _ipython_key_completions_(self) -> List[str]:\n \"\"\"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n \"\"\"\n item_lists = [\n item\n for sublist in self._item_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return list(set(item_lists))\n\n\ndef get_squeeze_dims(\n xarray_obj,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n axis: Union[int, Iterable[int], None] = None,\n) -> List[Hashable]:\n \"\"\"Get a list of dimensions to squeeze out.\n \"\"\"\n if dim is not None and axis is not None:\n raise ValueError(\"cannot use both parameters `axis` and `dim`\")\n if dim is None and axis is None:\n return [d for d, s in xarray_obj.sizes.items() if s == 1]\n\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n dim = list(dim)\n elif dim is not None:\n dim = [dim]\n else:\n assert axis is not None\n if isinstance(axis, int):\n axis = [axis]\n axis = list(axis)\n if any(not isinstance(a, int) for a in axis):\n raise TypeError(\"parameter `axis` must be int or iterable of int.\")\n alldims = list(xarray_obj.sizes.keys())\n dim = [alldims[a] for a in axis]\n\n if any(xarray_obj.sizes[k] > 1 for k in dim):\n raise ValueError(\n \"cannot select a dimension to squeeze out \"\n \"which has length greater than one\"\n )\n return dim\n\n\nclass DataWithCoords(SupportsArithmetic, AttrAccessMixin):\n \"\"\"Shared base class for Dataset and DataArray.\"\"\"\n\n __slots__ = ()\n\n _rolling_exp_cls = RollingExp\n\n def squeeze(\n self,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n drop: bool = False,\n axis: Union[int, Iterable[int], None] = None,\n ):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = get_squeeze_dims(self, dim, axis)\n return self.isel(drop=drop, **{d: 0 for d in dims})\n\n def get_index(self, key: Hashable) -> pd.Index:\n \"\"\"Get an index for a dimension, with fall-back to a default RangeIndex\n \"\"\"\n if key not in self.dims:\n raise KeyError(key)\n\n try:\n return self.indexes[key]\n except KeyError:\n # need to ensure dtype=int64 in case range is empty on Python 2\n return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)\n\n def _calc_assign_results(\n self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]]\n ) -> Dict[Hashable, T]:\n return {k: v(self) if callable(v) else v for k, v in kwargs.items()}\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict where the keys are the names of the coordinates\n with the new values to assign. If the values are callable, they are\n computed on this object and assigned to new coordinate variables.\n If the values are not callable, (e.g. a ``DataArray``, scalar, or\n array), they are simply assigned. A new coordinate can also be\n defined and attached to an existing dimension using a tuple with\n the first element the dimension name and the second element the\n values for this new coordinate.\n\n **coords_kwargs : keyword, value pairs, optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(\n ... np.random.rand(4), coords=[np.array([358, 359, 0, 1])], dims=\"lon\",\n ... )\n >>> da\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({\"lon\": (((da.lon + 180) % 360) - 180)})\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n New coordinate can also be attached to an existing dimension:\n\n >>> lon_2 = np.array([300, 289, 0, 1])\n >>> da.assign_coords(lon_2=(\"lon\", lon_2))\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n lon_2 (lon) int64 300 289 0 1\n\n Note that the same result can also be obtained with a dict e.g.\n\n >>> _ = da.assign_coords({\"lon_2\": (\"lon\", lon_2)})\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments\n may not be preserved, and so the order of the new variables is not well\n defined. 
Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See also\n --------\n Dataset.assign\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n data = self.copy(deep=False)\n results = self._calc_assign_results(coords_kwargs)\n data.coords.update(results)\n return data\n\n def assign_attrs(self, *args, **kwargs):\n \"\"\"Assign new attrs to this object.\n\n Returns a new object equivalent to ``self.attrs.update(*args, **kwargs)``.\n\n Parameters\n ----------\n args : positional arguments passed into ``attrs.update``.\n kwargs : keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n out = self.copy(deep=False)\n out.attrs.update(*args, **kwargs)\n return out\n\n def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs,\n ) -> T:\n \"\"\"\n Apply ``func(self, *args, **kwargs)``\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : function\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the xarray object.\n args : positional arguments passed into ``func``.\n kwargs : a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n >>> f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (ds.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c))\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (ds.pipe(h).pipe(g, arg1=a).pipe((f, \"arg2\"), arg1=a, arg3=c))\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": (\n ... (\"lat\", \"lon\"),\n ... 20 * np.random.rand(4).reshape(2, 2),\n ... ),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... 
return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> x.pipe(adder, arg=2)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> (\n ... x.pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... )\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n See Also\n --------\n pandas.DataFrame.pipe\n \"\"\"\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword \" \"argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range(\"1/1/2000\", \"31/12/2004\", freq=\"D\")],\n ... dims=\"time\",\n ... 
)\n >>> da\n <xarray.DataArray (time: 1827)>\n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n >>> da.groupby(\"time.dayofyear\") - da.groupby(\"time.dayofyear\").mean(\"time\")\n <xarray.DataArray (time: 1827)>\n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n \"\"\"\n # While we don't generally check the type of every arg, passing\n # multiple dimensions as multiple arguments is common enough, and the\n # consequences hidden enough (strings evaluate as true) to warrant\n # checking here.\n # A future version could make squeeze kwarg only, but would face\n # backward-compat issues.\n if not isinstance(squeeze, bool):\n raise TypeError(\n f\"`squeeze` must be True or False, but {squeeze} was supplied\"\n )\n\n return self._groupby_cls(\n self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims\n )\n\n def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n ):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array of scalars\n If bins is an int, it defines the number of equal-width bins in the\n range of x. However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : boolean, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. 
[1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n \"\"\"\n return self._groupby_cls(\n self,\n group,\n squeeze=squeeze,\n bins=bins,\n restore_coord_dims=restore_coord_dims,\n cut_kwargs={\n \"right\": right,\n \"labels\": labels,\n \"precision\": precision,\n \"include_lowest\": include_lowest,\n },\n )\n\n def weighted(self, weights):\n \"\"\"\n Weighted operations.\n\n Parameters\n ----------\n weights : DataArray\n An array of weights associated with the values in this Dataset.\n Each value in the data contributes to the reduction operation\n according to its associated weight.\n\n Notes\n -----\n ``weights`` must be a DataArray and cannot contain missing values.\n Missing values can be replaced by ``weights.fillna(0)``.\n \"\"\"\n\n return self._weighted_cls(self, weights)\n\n def rolling(\n self,\n dim: Mapping[Hashable, int] = None,\n min_periods: int = None,\n center: bool = False,\n keep_attrs: bool = None,\n **window_kwargs: int,\n ):\n \"\"\"\n Rolling window object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. `time`) to its moving window size.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n Rolling object (core.rolling.DataArrayRolling for DataArray,\n core.rolling.DatasetRolling for Dataset.)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 11, num=12),\n ... coords=[\n ... pd.date_range(\n ... \"15/12/1999\", periods=12, freq=pd.DateOffset(months=1),\n ... )\n ... ],\n ... dims=\"time\",\n ... 
)\n >>> da\n <xarray.DataArray (time: 12)>\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.rolling(time=3, center=True).mean()\n <xarray.DataArray (time: 12)>\n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna(\"time\")\n <xarray.DataArray (time: 10)>\n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n \"\"\"\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n dim = either_dict_or_kwargs(dim, window_kwargs, \"rolling\")\n return self._rolling_cls(\n self, dim, min_periods=min_periods, center=center, keep_attrs=keep_attrs\n )\n\n def rolling_exp(\n self,\n window: Mapping[Hashable, int] = None,\n window_type: str = \"span\",\n **window_kwargs,\n ):\n \"\"\"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : A single mapping from a dimension name to window value,\n optional\n\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'],\n default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n \"\"\"\n window = either_dict_or_kwargs(window, window_kwargs, \"rolling_exp\")\n\n return self._rolling_exp_cls(self, window, window_type)\n\n def coarsen(\n self,\n dim: Mapping[Hashable, int] = None,\n boundary: str = \"exact\",\n side: Union[str, Mapping[Hashable, str]] = \"left\",\n coord_func: str = \"mean\",\n keep_attrs: bool = None,\n **window_kwargs: int,\n ):\n \"\"\"\n Coarsen object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to the window size.\n\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func : function (name) that is applied to the coordinates,\n or a mapping from coordinate name to function (name).\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n Coarsen object (core.rolling.DataArrayCoarsen for DataArray,\n core.rolling.DatasetCoarsen for Dataset.)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(\n ... 
np.linspace(0, 364, num=364),\n ... dims=\"time\",\n ... coords={\"time\": pd.date_range(\"15/12/1999\", periods=364)},\n ... )\n >>> da\n <xarray.DataArray (time: 364)>\n array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,\n 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>>\n >>> da.coarsen(time=3, boundary=\"trim\").mean()\n <xarray.DataArray (time: 121)>\n array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,\n 361.99449 ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n \"\"\"\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n dim = either_dict_or_kwargs(dim, window_kwargs, \"coarsen\")\n return self._coarsen_cls(\n self,\n dim,\n boundary=boundary,\n side=side,\n coord_func=coord_func,\n keep_attrs=keep_attrs,\n )\n\n def resample(\n self,\n indexer: Mapping[Hashable, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str,\n ):\n \"\"\"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. The resampled\n dimension must be a datetime-like coordinate. If any intervals\n contain no values from the original object, they will be given\n the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency [1]_. The\n dimension must be datetime-like.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : 'left' or 'right', optional\n Side of each interval to treat as closed.\n label : 'left or 'right', optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '24H' frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 11, num=12),\n ... coords=[\n ... pd.date_range(\n ... \"15/12/1999\", periods=12, freq=pd.DateOffset(months=1),\n ... )\n ... ],\n ... dims=\"time\",\n ... 
)\n >>> da\n <xarray.DataArray (time: 12)>\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.resample(time=\"QS-DEC\").mean()\n <xarray.DataArray (time: 4)>\n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time=\"1D\").interpolate(\"linear\")\n <xarray.DataArray (time: 337)>\n array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...\n\n Limit scope of upsampling method\n\n >>> da.resample(time=\"1D\").nearest(tolerance=\"1D\")\n <xarray.DataArray (time: 337)>\n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n See Also\n --------\n pandas.Series.resample\n pandas.DataFrame.resample\n\n References\n ----------\n\n .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \"\"\"\n # TODO support non-string indexer after removing the old API.\n\n from .dataarray import DataArray\n from .resample import RESAMPLE_DIM\n from ..coding.cftimeindex import CFTimeIndex\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n # note: the second argument (now 'skipna') use to be 'dim'\n if (\n (skipna is not None and not isinstance(skipna, bool))\n or (\"how\" in indexer_kwargs and \"how\" not in self.dims)\n or (\"dim\" in indexer_kwargs and \"dim\" not in self.dims)\n ):\n raise TypeError(\n \"resample() no longer supports the `how` or \"\n \"`dim` arguments. Instead call methods on resample \"\n \"objects, e.g., data.resample(time='1D').mean()\"\n )\n\n indexer = either_dict_or_kwargs(indexer, indexer_kwargs, \"resample\")\n if len(indexer) != 1:\n raise ValueError(\"Resampling only supported along single dimensions.\")\n dim, freq = next(iter(indexer.items()))\n\n dim_name = dim\n dim_coord = self[dim]\n\n if isinstance(self.indexes[dim_name], CFTimeIndex):\n from .resample_cftime import CFTimeGrouper\n\n grouper = CFTimeGrouper(freq, closed, label, base, loffset)\n else:\n grouper = pd.Grouper(\n freq=freq, closed=closed, label=label, base=base, loffset=loffset\n )\n group = DataArray(\n dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM\n )\n resampler = self._resample_cls(\n self,\n group=group,\n dim=dim_name,\n grouper=grouper,\n resample_dim=RESAMPLE_DIM,\n restore_coord_dims=restore_coord_dims,\n )\n\n return resampler\n\n def where(self, cond, other=dtypes.NA, drop: bool = False):\n \"\"\"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : boolean, optional\n If True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. 
Mutually exclusive with\n ``other``.\n\n Returns\n -------\n Same xarray type as caller, with dtype float64.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=(\"x\", \"y\"))\n >>> a\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 4)\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [ 10., 11., nan, nan, nan],\n [ 15., nan, nan, nan, nan],\n [ nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 5, -1)\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 4, drop=True)\n <xarray.DataArray (x: 4, y: 4)>\n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n >>> a.where(lambda x: x.x + x.y < 4, drop=True)\n <xarray.DataArray (x: 4, y: 4)>\n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n \"\"\"\n from .alignment import align\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if callable(cond):\n cond = cond(self)\n\n if drop:\n if other is not dtypes.NA:\n raise ValueError(\"cannot set `other` if drop=True\")\n\n if not isinstance(cond, (Dataset, DataArray)):\n raise TypeError(\n \"cond argument is %r but must be a %r or %r\"\n % (cond, Dataset, DataArray)\n )\n\n # align so we can use integer indexing\n self, cond = align(self, cond)\n\n # get cond with the minimal size needed for the Dataset\n if isinstance(cond, Dataset):\n clipcond = cond.to_array().any(\"variable\")\n else:\n clipcond = cond\n\n # clip the data corresponding to coordinate dims that are not used\n nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))\n indexers = {k: np.unique(v) for k, v in nonzeros}\n\n self = self.isel(**indexers)\n cond = cond.isel(**indexers)\n\n return ops.where_method(self, cond, other)\n\n def close(self: Any) -> None:\n \"\"\"Close any files linked to this object\n \"\"\"\n if self._file_obj is not None:\n self._file_obj.close()\n self._file_obj = None\n\n def isin(self, test_elements):\n \"\"\"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n -------\n isin : same as object, bool\n Has the same shape as this object.\n\n Examples\n --------\n\n >>> array = xr.DataArray([1, 2, 3], dims=\"x\")\n >>> array.isin([1, 3])\n <xarray.DataArray (x: 3)>\n array([ True, False, True])\n Dimensions without coordinates: x\n\n See also\n --------\n numpy.isin\n \"\"\"\n from .computation import apply_ufunc\n from .dataset import Dataset\n from .dataarray import DataArray\n from .variable import Variable\n\n if isinstance(test_elements, Dataset):\n raise TypeError(\n \"isin() argument must be convertible to an array: {}\".format(\n test_elements\n )\n )\n elif 
isinstance(test_elements, (Variable, DataArray)):\n # need to explicitly pull out data to support dask arrays as the\n # second argument\n test_elements = test_elements.data\n\n return apply_ufunc(\n duck_array_ops.isin,\n self,\n kwargs=dict(test_elements=test_elements),\n dask=\"allowed\",\n )\n\n def __enter__(self: T) -> T:\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n self.close()\n\n def __getitem__(self, value):\n # implementations of this class should implement this method\n raise NotImplementedError()\n\n\ndef full_like(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Return a new object with the same shape and type as a given object.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object in input\n fill_value : scalar\n Value to fill the new object with before returning it.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object with the same shape and type as other, with the data\n filled with fill_value. Coords will be copied from other.\n If other is based on dask, the new one will be as well, and will be\n split in the same chunks.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 1)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5, dtype=np.double)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, np.nan, dtype=np.double)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[nan, nan, nan],\n [nan, nan, nan]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n ones_like\n\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n from .variable import Variable\n\n if not is_scalar(fill_value):\n raise ValueError(f\"fill_value must be scalar. 
Received {fill_value} instead.\")\n\n if isinstance(other, Dataset):\n data_vars = {\n k: _full_like_variable(v, fill_value, dtype)\n for k, v in other.data_vars.items()\n }\n return Dataset(data_vars, coords=other.coords, attrs=other.attrs)\n elif isinstance(other, DataArray):\n return DataArray(\n _full_like_variable(other.variable, fill_value, dtype),\n dims=other.dims,\n coords=other.coords,\n attrs=other.attrs,\n name=other.name,\n )\n elif isinstance(other, Variable):\n return _full_like_variable(other, fill_value, dtype)\n else:\n raise TypeError(\"Expected DataArray, Dataset, or Variable\")\n\n\ndef _full_like_variable(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Inner function of full_like, where other must be a variable\n \"\"\"\n from .variable import Variable\n\n if isinstance(other.data, dask_array_type):\n import dask.array\n\n if dtype is None:\n dtype = other.dtype\n data = dask.array.full(\n other.shape, fill_value, dtype=dtype, chunks=other.data.chunks\n )\n else:\n data = np.full_like(other, fill_value, dtype=dtype)\n\n return Variable(dims=other.dims, data=data, attrs=other.attrs)\n\n\ndef zeros_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of zeros with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of zeros with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x, dtype=float)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0., 0., 0.],\n [0., 0., 0.]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n ones_like\n full_like\n\n \"\"\"\n return full_like(other, 0, dtype)\n\n\ndef ones_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of ones with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of ones with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... 
)\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.ones_like(x)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n full_like\n\n \"\"\"\n return full_like(other, 1, dtype)\n\n\ndef is_np_datetime_like(dtype: DTypeLike) -> bool:\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\n \"\"\"\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)\n\n\ndef is_np_timedelta_like(dtype: DTypeLike) -> bool:\n \"\"\"Check whether dtype is of the timedelta64 dtype.\n \"\"\"\n return np.issubdtype(dtype, np.timedelta64)\n\n\ndef _contains_cftime_datetimes(array) -> bool:\n \"\"\"Check if an array contains cftime.datetime objects\n \"\"\"\n try:\n from cftime import datetime as cftime_datetime\n except ImportError:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n if isinstance(sample, dask_array_type):\n sample = sample.compute()\n if isinstance(sample, np.ndarray):\n sample = sample.item()\n return isinstance(sample, cftime_datetime)\n else:\n return False\n\n\ndef contains_cftime_datetimes(var) -> bool:\n \"\"\"Check if an xarray.Variable contains cftime.datetime objects\n \"\"\"\n return _contains_cftime_datetimes(var.data)\n\n\ndef _contains_datetime_like_objects(var) -> bool:\n \"\"\"Check if a variable contains datetime like objects (either\n np.datetime64, np.timedelta64, or cftime.datetime)\n \"\"\"\n return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)\n"
] | [
[
"numpy.nonzero",
"numpy.unique",
"numpy.asarray",
"numpy.issubdtype",
"pandas.Grouper",
"numpy.dtype",
"numpy.full_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
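The `where(..., drop=True)` path in the code cell above clips the result to the coordinate positions where the condition holds, using `np.nonzero` and `np.unique` to build one integer indexer per dimension. A minimal NumPy-only sketch of that clipping step (the dimension names and the boolean array below are made up for illustration):

import numpy as np

# Boolean condition over two hypothetical dimensions 'x' and 'y'.
cond = np.array([[True, False, False],
                 [False, False, False],
                 [True, True, False]])

# Same pattern as where(..., drop=True) above: one integer indexer per
# dimension, keeping only positions that contain at least one True.
indexers = {dim: np.unique(idx) for dim, idx in zip(("x", "y"), np.nonzero(cond))}
print(indexers)  # {'x': array([0, 2]), 'y': array([0, 1])}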
jungwoohan72/DGN_pytorch | [
"65fe7ab4df661d97725f2a72a1fdb49df1b2ea44"
] | [
"Starcraft/DGN/test.py"
] | [
"import os, sys\nimport numpy as np\nfrom smac.env import StarCraft2Env\nfrom model import DGN\nfrom buffer import ReplayBuffer\nfrom config import *\nfrom utilis import *\nimport torch\nimport torch.optim as optim\n\ntest_env = StarCraft2Env(map_name='25m')\nenv_info = test_env.get_env_info()\nn_ant = env_info[\"n_agents\"]\nn_actions = env_info[\"n_actions\"]\nobs_space = env_info[\"obs_shape\"] + n_ant\n\nmodel = DGN(n_ant,obs_space,hidden_dim,n_actions).cuda()\ntask_path = os.path.dirname(os.path.realpath(__file__))\nload_path = task_path + \"/Weights/25/full_\" + str(482139) + \".pt\"\nmodel.load_state_dict(torch.load(load_path)[\"actor_architecture_state_dict\"])\n\ntest_r, test_win = 0, 0\nfor _ in range(20):\n test_env.reset()\n test_obs = get_obs(test_env.get_obs(),n_ant)\n test_adj = test_env.get_visibility_matrix()[:,0:n_ant]*1 + np.eye(n_ant)\n test_mask = np.array([test_env.get_avail_agent_actions(i) for i in range(n_ant)])\n terminated = False\n while terminated == False:\n test_env.render()\n time.sleep(0.05)\n action=[]\n q = model(torch.Tensor(np.array([test_obs])).cuda(), torch.Tensor(np.array([test_adj])).cuda())[0]\n for i in range(n_ant):\n a = np.argmax(q[i].cpu().detach().numpy() - 9e15*(1 - test_mask[i]))\n action.append(a)\n reward, terminated, winner = test_env.step(action)\n test_r += reward\n if winner.get('battle_won') == True:\n test_win += 1\n test_obs = get_obs(test_env.get_obs(),n_ant)\n test_adj = test_env.get_visibility_matrix()[:,0:n_ant]*1 + np.eye(n_ant)\n test_mask = np.array([test_env.get_avail_agent_actions(i) for i in range(n_ant)])\n\nprint(test_r/20, test_win/20)\n"
] | [
[
"numpy.eye",
"numpy.array",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
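The test script above selects each agent's action greedily after masking out unavailable actions (`q[i] - 9e15 * (1 - test_mask[i])`). A self-contained NumPy sketch of that masked argmax, with made-up Q-values standing in for the StarCraft II environment and the DGN model:

import numpy as np

# Hypothetical Q-values for 3 agents and 5 discrete actions.
q = np.array([[0.2, 1.5, 0.7, 0.1, 0.9],
              [1.1, 0.3, 0.8, 2.0, 0.4],
              [0.5, 0.6, 0.2, 0.1, 1.3]])

# Availability mask: 1 = action allowed this step, 0 = unavailable.
mask = np.array([[1, 0, 1, 1, 1],
                 [1, 1, 0, 0, 1],
                 [0, 1, 1, 1, 1]])

# Unavailable actions get a huge negative offset so argmax never picks them.
actions = np.argmax(q - 9e15 * (1 - mask), axis=1)
print(actions)  # [4 0 4]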
alexjungaalto/nLassoExpFamPDSimulations | [
"c07b441fff6473d7c650e511e210aaebf7fe061b"
] | [
"get_obs_csv.py"
] | [
"import requests\nimport datetime as dt\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport re\nimport argparse\n\ndef get_param_names(url):\n \"\"\" Get parameters metadata \"\"\"\n req = requests.get(url)\n params = {}\n\n if req.status_code == 200:\n xmlstring = req.content\n tree = ET.ElementTree(ET.fromstring(xmlstring))\n for p in tree.iter(tag='{http://inspire.ec.europa.eu/schemas/omop/2.9}ObservableProperty'):\n params[p.get('{http://www.opengis.net/gml/3.2}id')] = p.find('{http://inspire.ec.europa.eu/schemas/omop/2.9}label').text\n return params\n\ndef get_params(tree):\n \"\"\" Get parameters from response xml tree \"\"\"\n\n retParams = []\n for el in tree.iter(tag='{http://www.opengis.net/om/2.0}observedProperty'):\n url = el.get('{http://www.w3.org/1999/xlink}href')\n params = re.findall(r\"(?<=param=).*,.*(?=&)\", url)[0].split(',')\n\n param_names = get_param_names(url)\n for p in params:\n retParams.append('{} ({})'.format(param_names[p], p))\n\n return retParams\n\ndef get_positions(tree):\n \"\"\"\n Function to get times and coordinates from multipointcoverage answer\n \"\"\"\n positions = []\n for el in tree.iter(tag='{http://www.opengis.net/gmlcov/1.0}positions'):\n pos = el.text.split()\n i = 0\n while len(pos) > 0:\n lat = float(pos.pop(0))\n lon = float(pos.pop(0))\n timestamp = int(pos.pop(0))\n positions.append([lat,lon,timestamp])\n return np.array(positions)\n\ndef main():\n \"\"\"\n Get data from db and save it as csv\n \"\"\"\n\n url = 'http://opendata.fmi.fi/wfs'\n daystep = 1\n\n starttime = dt.datetime.strptime(options.starttime, '%Y-%m-%d')\n endtime = dt.datetime.strptime(options.endtime, '%Y-%m-%d')\n\n start = starttime\n end = start + dt.timedelta(days=daystep)\n if end > endtime: end = endtime\n\n while end <= endtime and start < end:\n startStr = start.strftime('%Y-%m-%d')\n endStr = end.strftime('%Y-%m-%d')\n\n # Get data\n payload = {\n 'request': 'getFeature',\n 'storedquery_id': 'fmi::observations::weather::multipointcoverage',\n 'bbox': '19,59,35,75',\n 'starttime': startStr,\n 'endtime': endStr,\n }\n r = requests.get(url, params=payload)\n\n # Construct XML tree\n tree = ET.ElementTree(ET.fromstring(r.content))\n\n # Get geospatial and temporal positions of data elements\n positions = get_positions(tree)\n\n # Extract data from XML tree\n d = []\n for el in tree.iter(tag='{http://www.opengis.net/gml/3.2}doubleOrNilReasonTupleList'):\n for pos in el.text.strip().split(\"\\n\"):\n d.append(pos.strip().split(' '))\n\n # Assign data values to positions\n junk = np.append(positions, np.array(d), axis=1)\n try:\n data = np.append(data, junk, axis=0)\n except NameError:\n data = junk\n\n print('Time interval {} - {} provided {} rows'.format(startStr, endStr, junk.shape[0]))\n\n start = end\n end = start + dt.timedelta(days=daystep)\n if end > endtime: end = endtime\n\n print('Done fetching data. 
Final dimensions of the result: {}'.format(data.shape))\n\n # Get params from the last XML tree element (they don't change over time)\n params = ['lat', 'lon', 'timestamp'] + get_params(tree)\n\n # Save\n np.savetxt(options.filename, data.astype(np.float32), fmt='%.5f', header=';'.join(params), delimiter=\";\")\n\nif __name__=='__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--filename', type=str, default=None, help='Filename to save the data')\n parser.add_argument('--starttime', type=str, default=None, help='Starttime in format Y-m-d')\n parser.add_argument('--endtime', type=str, default=None, help='Endtime in format Y-m-d')\n\n options = parser.parse_args()\n\n main()\n"
] | [
[
"numpy.append",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
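The fetch loop above grows `data` with `np.append(data, junk, axis=0)` inside a `try/except NameError`. A hedged alternative sketch (not what the script does) that collects per-interval chunks in a list and stacks them once at the end, avoiding a full-array copy on every iteration; the shapes and values are stand-ins:

import numpy as np

chunks = []
for i in range(3):                    # stand-in for the date-interval loop
    junk = np.full((2, 4), float(i))  # stand-in for one interval's parsed rows
    print('interval {} provided {} rows'.format(i, junk.shape[0]))
    chunks.append(junk)

data = np.vstack(chunks)              # single concatenation at the end
print('Final dimensions of the result: {}'.format(data.shape))  # (6, 4)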
attackgnome/SpaceX | [
"1005f8a24bc44ee3d19cc4500d9674666f07621b"
] | [
"spacex_dash_app.py"
] | [
"# Import required libraries\nimport pandas as pd\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\n\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',\n style={'textAlign': 'center', 'color': '#503D36',\n 'font-size': 40}),\n # TASK 1: Add a dropdown list to enable Launch Site selection\n # The default select value is for ALL sites\n dcc.Dropdown(id='site-dropdown',\n options=[\n {'label': 'All Sites', 'value': 'All Sites'},\n {'label': 'CCAFS LC-40', 'value': 'CCAFS LC-40'},\n {'label': 'VAFB SLC-4E', 'value': 'VAFB SLC-4E'},\n {'label': 'KSC LC-39A', 'value': 'KSC LC-39A'},\n {'label': 'CCAFS SLC-40', 'value': 'CCAFS SLC-40'},\n ],\n value='All Sites',\n placeholder=\"Select Launch Site\",\n searchable=True),\n html.Br(),\n\n # TASK 2: Add a pie chart to show the total successful launches count for all sites\n # If a specific launch site was selected, show the Success vs. Failed counts for the site\n html.Div(dcc.Graph(id='success-pie-chart')),\n html.Br(),\n\n html.P(\"Payload range (Kg):\"),\n # TASK 3: Add a slider to select payload range\n dcc.RangeSlider(id='payload_slider',\n min=0, max=10000, step=1000,\n marks={0: '0',\n 1000: '1000',\n 2000: '2000',\n 3000: '3000',\n 4000: '42000',\n 5000: '5000',\n 6000: '6000',\n 7000: '7000',\n 8000: '8000',\n 9000: '9000',\n 10000: '10000'\n },\n value=['min_payload', 'max_Payload']),\n\n # TASK 4: Add a scatter chart to show the correlation between payload and launch success\n html.Div(dcc.Graph(id='success-payload-scatter-chart')),\n ])\n\n# TASK 2:\n# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output\n#siteList = sorted(list(set(spacex_df['Launch Site']))) \n\n# Function decorator to specify function input and output\[email protected](Output(component_id='success-pie-chart', component_property='figure'),\n Input(component_id='site-dropdown', component_property='value')\n )\n\ndef get_pie_chart(entered_site):\n filtered_df = spacex_df[spacex_df['Launch Site'] == entered_site ]\n if entered_site == 'All Sites':\n data = spacex_df\n fig1 = px.pie(data, values='class', \n names='Launch Site', \n title='Successful Launches per Site')\n else:\n # return the outcomes piechart for a selected site\n data = filtered_df\n fig1 = px.pie(data, \n names='class', \n title=entered_site)\n \n return fig1\n\n# TASK 4:\n# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output\[email protected](Output(component_id='success-payload-scatter-chart', component_property='figure'),\n [Input(component_id='site-dropdown', component_property='value'), \n Input(component_id=\"payload-slider\", component_property=\"value\")])\n\ndef get_scatter_chart(entered_site, slider):\n filtered_df = spacex_df\n # low, high = slider_range\n # mask = (filtered_df['Payload Mass (kg)'] >= low) & (filtered_df['Payload Mass (kg)'] <= high)\n\n if entered_site == 'All Sites':\n fig2 = px.scatter(filtered_df, #[mask], \n x = 'Payload Mass (kg)',\n y = 'class',\n color = 'Booster Version Category', \n title = 'Correlation Across all sites') \n else:\n # return the outcomes 
piechart for a selected site\n data = filtered_df # [mask]\n fig2 = px.scatter(data[data[\"Launch Site\"]==entered_site], \n x = 'Payload Mass (kg)',\n y = 'class',\n color = 'Booster Version Category', \n title='Correlation For ' + entered_site)\n return fig2\n\n# Run the app\nif __name__ == '__main__':\n app.run_server()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
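In `get_scatter_chart` above, the payload-range filter appears only as commented-out `mask` lines. A small pandas sketch of that filter on made-up data (column names follow the dashboard's CSV; the slider bounds are assumed values, not read from the app):

import pandas as pd

df = pd.DataFrame({'Payload Mass (kg)': [500, 2500, 9500, 15000],
                   'class': [0, 1, 1, 0]})

low, high = 1000, 10000   # assumed slider values
mask = (df['Payload Mass (kg)'] >= low) & (df['Payload Mass (kg)'] <= high)
print(df[mask])           # keeps the 2500 kg and 9500 kg rows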
simonwey/DecoupleNet | [
"3e9e09d512230cb0d95e9db98c5838ca9ff799da"
] | [
"lib/dataset/crowdpose.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bowen Cheng ([email protected]) and Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport logging\nimport os\nimport os.path\n\nimport cv2\nimport json_tricks as json\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom crowdposetools.cocoeval import COCOeval\nfrom lib.dataset.JointsDataset import JointsDataset\n\nfrom lib.utils import zipreader\nfrom lib.nms.nms import oks_nms\n\n# -------------------------------------------\ncrowdpose_sigmas = np.array([.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .25, .25]) / 10.0\n\n# -------------------------------------------\n\nlogger = logging.getLogger(__name__)\n\n\nclass COCODataset(JointsDataset):\n \"\"\"`CrowdPose`_ Dataset.\n\n Args:\n root (string): Root directory where dataset is located to.\n dataset (string): Dataset name(train2017, val2017, test2017).\n data_format(string): Data format for reading('jpg', 'zip')\n transform (callable, optional): A function/transform that takes in an opencv image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n\n def __init__(self, cfg, image_dir, annotation_file, dataset_type, image_set, is_train, transform=None):\n super().__init__(cfg, image_dir, annotation_file, image_set, is_train, transform)\n\n\n\n # def __init__(self, root, dataset, data_format, transform=None,\n # target_transform=None):\n from crowdposetools.coco import COCO\n self.nms_thre = cfg.TEST.NMS_THRE\n self.image_thre = cfg.TEST.IMAGE_THRE\n self.soft_nms = cfg.TEST.SOFT_NMS\n self.oks_thre = cfg.TEST.OKS_THRE\n self.in_vis_thre = cfg.TEST.IN_VIS_THRE\n self.bbox_file = cfg.TEST.COCO_BBOX_FILE\n self.use_gt_bbox = cfg.TEST.USE_GT_BBOX\n self.image_width = cfg.MODEL.IMAGE_SIZE[0]\n self.image_height = cfg.MODEL.IMAGE_SIZE[1]\n self.aspect_ratio = self.image_width * 1.0 / self.image_height\n self.pixel_std = 200\n self.scale_thre = cfg.TEST.SCALE_THRE\n\n\n self.dataset_type = dataset_type\n self.coco = COCO(self._get_anno_file_keypoint())\n\n self.ids = list(self.coco.imgs.keys())\n self.transform = transform\n\n cats = [cat['name']\n for cat in self.coco.loadCats(self.coco.getCatIds())]\n self.classes = ['__background__'] + cats\n logger.info('=> classes: {}'.format(self.classes))\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n [\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:]\n ]\n )\n\n # load image file names\n self.image_set_index = self._load_image_set_index()\n self.num_images = len(self.image_set_index)\n logger.info('=> num_images: {}'.format(self.num_images))\n\n self.num_joints = 14\n self.flip_pairs = [[0, 1], [2, 3], [4, 5], [6, 7],\n [8, 9], [10, 11]]\n self.parent_ids = None\n self.upper_body_ids = (0, 1, 2, 3, 4, 5, 12, 13)\n self.lower_body_ids = (6, 7, 8, 9, 10, 11)\n\n self.joints_weight = np.array(\n [\n 1., 1., 1.2, 1.2,\n 1.5, 1.5, 1., 1., 
\n 1.2, 1.2, 1.5, 1.5,\n 1., 1.\n ],\n dtype=np.float32\n ).reshape((self.num_joints, 1))\n\n self.db = self._get_db()\n\n if is_train and cfg.DATASET.SELECT_DATA:\n self.db = self.select_data(self.db)\n\n logger.info('=> load {} samples'.format(len(self.db)))\n\n return\n\n def _load_image_set_index(self):\n \"\"\" image id: int \"\"\"\n image_ids = self.coco.getImgIds()\n return image_ids\n\n def _get_anno_file_keypoint(self):\n # example: root/json/crowdpose_{train,val,test}.json\n return self.annotation_file\n\n def _get_db(self):\n if self.is_train or self.use_gt_bbox:\n # use ground truth bbox\n gt_db = self._load_coco_keypoint_annotations()\n else:\n # use bbox from detection\n gt_db = self._load_coco_person_detection_results()\n return gt_db\n\n def _load_coco_keypoint_annotations(self):\n \"\"\" ground truth bbox and keypoints \"\"\"\n gt_db = []\n for index in self.image_set_index:\n gt_db.extend(self._load_coco_keypoint_annotation_kernal(index))\n return gt_db\n\n def _get_image_path(self, file_name):\n return os.path.join(self.image_dir, file_name)\n\n def _load_coco_keypoint_annotation_kernal(self, index):\n \"\"\"\n coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']\n iscrowd:\n crowd instances are handled by marking their overlaps with all categories to -1\n and later excluded in training\n bbox:\n [x1, y1, w, h]\n :param index: coco image id\n :return: db entry\n \"\"\"\n im_ann = self.coco.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)\n objs = self.coco.loadAnns(annIds)\n\n # sanitize bboxes\n valid_objs = []\n for obj in objs:\n x, y, w, h = obj['bbox']\n x1 = np.max((0, x))\n y1 = np.max((0, y))\n x2 = np.min((width - 1, x1 + np.max((0, w - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, h - 1))))\n if x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2-x1, y2-y1]\n valid_objs.append(obj)\n objs = valid_objs\n\n rec = []\n for obj in objs:\n cls = self._coco_ind_to_class_ind[obj['category_id']]\n if cls != 1:\n continue\n\n # ignore objs without keypoints annotation\n if max(obj['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)\n for ipt in range(self.num_joints):\n joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = obj['keypoints'][ipt * 3 + 2]\n if t_vis > 1:\n t_vis = 1\n joints_3d_vis[ipt, 0] = t_vis\n joints_3d_vis[ipt, 1] = t_vis\n joints_3d_vis[ipt, 2] = 0\n\n center, scale = self._box2cs(obj['clean_bbox'][:4])\n image_file_name = im_ann['file_name'].split('/')[-1]\n image_path = os.path.join(self.image_dir, image_file_name)\n\n rec.append({\n 'image': image_path,\n 'center': center,\n 'scale': scale,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n 'filename': '',\n 'imgnum': 0,\n 'annotation_id': obj['id']\n })\n\n return rec\n\n def _box2cs(self, box):\n x, y, w, h = box[:4]\n return self._xywh2cs(x, y, w, h)\n\n def _xywh2cs(self, x, y, w, h):\n center = np.zeros((2), dtype=np.float32)\n center[0] = x + w * 0.5\n center[1] = y + h * 0.5\n\n if w > self.aspect_ratio * h:\n h = w * 1.0 / self.aspect_ratio\n elif w < self.aspect_ratio * h:\n w = h * self.aspect_ratio\n scale = np.array(\n [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n dtype=np.float32)\n if center[0] != -1:\n # scale = scale * 1.25\n scale = scale * 
self.scale_thre\n\n return center, scale\n\n\n def _load_coco_person_detection_results(self):\n all_boxes = None\n with open(self.bbox_file, 'r') as f:\n all_boxes = json.load(f)\n\n if not all_boxes:\n logger.error('=> Load %s fail!' % self.bbox_file)\n return None\n\n logger.info('=> Total boxes: {}'.format(len(all_boxes)))\n\n image_id_to_image_path = {}\n\n for index in self.image_set_index:\n im_ann = self.coco.loadImgs(index)[0]\n img_path_val = os.path.join(self.image_dir, im_ann['file_name'])\n image_id_to_image_path[im_ann['id']] = img_path_val\n\n kpt_db = []\n num_boxes = 0\n for n_img in range(0, len(all_boxes)):\n det_res = all_boxes[n_img]\n if det_res['category_id'] != 1:\n continue\n img_name = image_id_to_image_path[det_res['image_id']]\n\n box = det_res['bbox']\n score = det_res['score']\n\n if score < self.image_thre:\n continue\n\n num_boxes = num_boxes + 1\n\n center, scale = self._box2cs(box)\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.ones(\n (self.num_joints, 3), dtype=np.float)\n kpt_db.append({\n 'image': img_name,\n 'center': center,\n 'scale': scale,\n 'score': score,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n })\n\n logger.info('=> Total boxes after filter low score@{}: {}'.format(\n self.image_thre, num_boxes))\n return kpt_db\n # def __getitem__(self, index):\n # \"\"\"\n # Args:\n # index (int): Index\n\n # Returns:\n # tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.\n # \"\"\"\n # coco = self.coco\n # img_id = self.ids[index]\n # ann_ids = coco.getAnnIds(imgIds=img_id)\n # target = coco.loadAnns(ann_ids)\n\n # file_name = coco.loadImgs(img_id)[0]['file_name']\n\n # if self.data_format == 'zip':\n # img = zipreader.imread(\n # self._get_image_path(file_name),\n # cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n # )\n # else:\n # img = cv2.imread(\n # self._get_image_path(file_name),\n # cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n # )\n\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # import pdb; pdb.set_trace()\n\n\n # if self.transform is not None:\n # img = self.transform(img)\n\n # if self.target_transform is not None:\n # target = self.target_transform(target)\n\n # return img, target\n\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n def processKeypoints(self, keypoints):\n tmp = keypoints.copy()\n if keypoints[:, 2].max() > 0:\n p = keypoints[keypoints[:, 2] > 0][:, :2].mean(axis=0)\n num_keypoints = keypoints.shape[0]\n for i in range(num_keypoints):\n tmp[i][0:3] = [\n float(keypoints[i][0]),\n float(keypoints[i][1]),\n float(keypoints[i][2])\n ]\n\n return tmp\n\n # def evaluate(self, cfg, preds, scores, output_dir,\n # *args, **kwargs):\n def evaluate(self, cfg, preds, output_dir, all_boxes, img_path, epoch=-1,\n *args, **kwargs):\n '''\n Perform evaluation on COCO keypoint task\n :param cfg: cfg dictionary\n :param preds: prediction\n :param output_dir: output directory\n :param args: \n :param kwargs: \n :return: \n '''\n if all_boxes.shape[1] == 8:\n return self.evaluate_lambda(cfg, preds, 
output_dir, all_boxes, img_path, epoch, *args, **kwargs)\n\n res_folder = os.path.join(output_dir, 'val_results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n \n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n image_path_to_image_id = {}\n\n for index in self.image_set_index:\n im_ann = self.coco.loadImgs(index)[0]\n img_path_key = os.path.join(self.image_dir, im_ann['file_name'])\n image_path_to_image_id[img_path_key] = im_ann['id']\n\n # preds is a numpy array: person x (keypoints): N x 14 x 3\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n _kpts.append({\n 'keypoints': kpt[:, 0:3],\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'image': image_path_to_image_id[img_path[idx]],\n 'annotation_id': int(all_boxes[idx][6]),\n })\n\n # keypoints: num_joints * 4 (x, y, score, tag)\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # for idx, _kpts in enumerate(preds):\n # img_id = self.ids[idx]\n # file_name = self.coco.loadImgs(img_id)[0]['file_name']\n # for idx_kpt, kpt in enumerate(_kpts):\n # area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n # kpt = self.processKeypoints(kpt)\n \n\n # kpts[int(file_name.split('.')[0])].append(\n # {\n # 'keypoints': kpt[:, 0:3],\n # 'score': scores[idx][idx_kpt],\n # 'tags': kpt[:, 3],\n # 'image': int(file_name.split('.')[0]),\n # 'area': area\n # }\n # )\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n\n # image x person x (keypoints)\n for img in kpts.keys():\n # person x (keypoints)\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n n_p['box_score'] = box_score\n n_p['keypoint_score'] = kpt_score\n \n # person x (keypoints)\n # do not use nms, keep all detections\n keep = []\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file\n )\n\n # CrowdPose `test` set has annotation.\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder\n )\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n\n # --------------------------------------------------------------------\n def evaluate_lambda(self, cfg, preds, output_dir, all_boxes, img_path, epoch=-1,\n *args, **kwargs):\n\n res_folder = os.path.join(output_dir, 'val_results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n\n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n res_file_mode0 = os.path.join(\n res_folder, 'keypoints_{}_results_mode0_epoch{}.json'.format(\n self.image_set, 
epoch)\n )\n\n res_file_mode1 = os.path.join(\n res_folder, 'keypoints_{}_results_mode1_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n res_file_mode2 = os.path.join(\n res_folder, 'keypoints_{}_results_mode2_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n res_file_mode3 = os.path.join(\n res_folder, 'keypoints_{}_results_mode3_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n image_path_to_image_id = {}\n\n for index in self.image_set_index:\n im_ann = self.coco.loadImgs(index)[0]\n img_path_key = os.path.join(self.image_dir, im_ann['file_name'])\n image_path_to_image_id[img_path_key] = im_ann['id']\n\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n _kpts.append({\n 'keypoints': kpt[:, 0:3],\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'image': image_path_to_image_id[img_path[idx]],\n 'annotation_id': int(all_boxes[idx][6]),\n 'mode': int(all_boxes[idx][7])\n })\n\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n oks_nmsed_kpts_mode0 = []\n oks_nmsed_kpts_mode1 = []\n oks_nmsed_kpts_mode2 = []\n oks_nmsed_kpts_mode3 = []\n\n before_len_kps = 0\n for img in kpts:\n img_kpts = kpts[img]\n before_len_kps += len(img_kpts) \n\n for img in kpts.keys():\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n n_p['box_score'] = box_score\n n_p['keypoint_score'] = kpt_score\n\n img_kpts_mode0 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 0]\n img_kpts_mode1 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 1]\n img_kpts_mode2 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 2]\n img_kpts_mode3 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 3]\n\n # # # ------------------------------\n # keep_mode0 = oks_nms(img_kpts_mode0, oks_thre)\n # keep_mode1 = oks_nms(img_kpts_mode1, oks_thre)\n # keep = oks_nms(img_kpts, oks_thre)\n \n # oks_img_kpts_mode0 = [img_kpts_mode0[_keep] for _keep in keep_mode0]\n # oks_img_kpts_mode1 = [img_kpts_mode1[_keep] for _keep in keep_mode1]\n\n # oks_img_kpts_merged = oks_img_kpts_mode0 + oks_img_kpts_mode1\n # # oks_img_kpts_merged = oks_merge(kpts_db_mode0=img_kpts_mode0, kpts_db_mode1=img_kpts_mode1, min_oks_thres=0.95)\n\n # # ------------------------------\n # img_kpts_merged = img_kpts_mode0 + img_kpts_mode1\n # keep = oks_nms(img_kpts_merged, oks_thre)\n # keep = []\n # oks_img_kpts_merged = [img_kpts_merged[_keep] for _keep in keep]\n\n # keep_mode0 = oks_nms(img_kpts_mode0, oks_thre)\n # keep_mode0 = []\n # oks_img_kpts_mode0 = [img_kpts_mode0[_keep] for _keep in keep_mode0]\n\n # keep_mode1 = oks_nms(img_kpts_mode1, oks_thre)\n # keep_mode1 = []\n # oks_img_kpts_mode1 = [img_kpts_mode1[_keep] for _keep in keep_mode1]\n #\n # keep_mode2 = []\n # oks_img_kpts_mode2 = [img_kpts_mode2[_keep] for _keep in 
keep_mode2]\n #\n # keep_mode3 = []\n # oks_img_kpts_mode3 = [img_kpts_mode3[_keep] for _keep in keep_mode3]\n\n\n # ------------------------------\n # if len(keep_mode0) == 0:\n oks_nmsed_kpts_mode0.append(img_kpts_mode0)\n # else:\n # oks_nmsed_kpts_mode0.append(oks_img_kpts_mode0)\n\n # if len(keep_mode1) == 0:\n oks_nmsed_kpts_mode1.append(img_kpts_mode1)\n # else:\n # oks_nmsed_kpts_mode1.append(oks_img_kpts_mode1)\n #\n # if len(keep_mode2) == 0:\n oks_nmsed_kpts_mode2.append(img_kpts_mode2)\n # else:\n # oks_nmsed_kpts_mode2.append(oks_img_kpts_mode2)\n #\n # if len(keep_mode3) == 0:\n oks_nmsed_kpts_mode3.append(img_kpts_mode3)\n # else:\n # oks_nmsed_kpts_mode3.append(oks_img_kpts_mode3)\n #\n # # ------------------------------\n # if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n # else:\n # oks_nmsed_kpts.append(oks_img_kpts_merged)\n\n oks_len_kps = sum([len(kps) for kps in oks_nmsed_kpts])\n oks_len_kps_mode0 = sum([len(kps) for kps in oks_nmsed_kpts_mode0])\n oks_len_kps_mode1 = sum([len(kps) for kps in oks_nmsed_kpts_mode1])\n oks_len_kps_mode2 = sum([len(kps) for kps in oks_nmsed_kpts_mode2])\n oks_len_kps_mode3 = sum([len(kps) for kps in oks_nmsed_kpts_mode3])\n\n print('before #kps:{}, after #kps:{}'.format(before_len_kps, oks_len_kps))\n\n ##------------------------------\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode0, res_file_mode0)\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode1, res_file_mode1)\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode2, res_file_mode2)\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode3, res_file_mode3)\n self._write_coco_keypoint_results(oks_nmsed_kpts, res_file) ## merged\n\n ##------------------------------\n # if 'test' not in self.image_set:\n info_str = self._do_python_keypoint_eval(res_file, res_folder)\n name_value = OrderedDict(info_str)\n\n info_str_mode0 = self._do_python_keypoint_eval(res_file_mode0, res_folder)\n name_value_mode0 = OrderedDict(info_str_mode0)\n\n info_str_mode1 = self._do_python_keypoint_eval(res_file_mode1, res_folder)\n name_value_mode1 = OrderedDict(info_str_mode1)\n\n if oks_len_kps_mode2 == 0:\n name_value_mode2 = {'Null': 0}\n else:\n info_str_mode2 = self._do_python_keypoint_eval(res_file_mode2, res_folder)\n name_value_mode2 = OrderedDict(info_str_mode2)\n\n if oks_len_kps_mode3 == 0:\n name_value_mode3 = {'Null': 0}\n else:\n info_str_mode3 = self._do_python_keypoint_eval(res_file_mode3, res_folder)\n name_value_mode3 = OrderedDict(info_str_mode3)\n\n return name_value, name_value_mode0, name_value_mode1, name_value_mode2, name_value_mode3, name_value['AP']\n # else:\n # return {'Null': 0}, {'Null': 0}, {'Null': 0}, {'Null': 0}, {'Null': 0}, 0\n\n # --------------------------------------------------------------------\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n data_pack = [\n {\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n }\n for cls_ind, cls in enumerate(self.classes) if not cls == '__background__'\n ]\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n logger.info('=> Writing results json to %s' % res_file)\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n try:\n json.load(open(res_file))\n except Exception:\n content = []\n with open(res_file, 'r') as f:\n for line in f:\n content.append(line)\n content[-1] = ']'\n with open(res_file, 'w') as f:\n for c in content:\n f.write(c)\n\n def 
_coco_keypoint_results_one_category_kernel(self, data_pack):\n cat_id = data_pack['cat_id']\n keypoints = data_pack['keypoints']\n cat_results = []\n num_joints = 14\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array(\n [img_kpts[k]['keypoints'] for k in range(len(img_kpts))]\n )\n key_points = np.zeros(\n (_key_points.shape[0], num_joints * 3),\n dtype=np.float\n )\n\n for ipt in range(num_joints):\n key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0]\n key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1]\n key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score.\n\n for k in range(len(img_kpts)):\n kpt = key_points[k].reshape((num_joints, 3))\n left_top = np.amin(kpt, axis=0)\n right_bottom = np.amax(kpt, axis=0)\n\n w = right_bottom[0] - left_top[0]\n h = right_bottom[1] - left_top[1]\n\n cat_results.append({\n 'image_id': img_kpts[k]['image'],\n 'category_id': cat_id,\n 'keypoints': list(key_points[k]),\n 'score': img_kpts[k]['score'],\n 'bbox': list([left_top[0], left_top[1], w, h])\n })\n\n return cat_results\n\n def _do_python_keypoint_eval(self, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AR', 'AR .5', 'AR .75', 'AP (easy)', 'AP (medium)', 'AP (hard)']\n stats_index = [0, 1, 2, 5, 6, 7, 8, 9, 10]\n\n info_str = []\n for ind, name in enumerate(stats_names):\n info_str.append((name, coco_eval.stats[stats_index[ind]]))\n # info_str.append(coco_eval.stats[ind])\n\n return info_str"
] | [
[
"numpy.amax",
"numpy.min",
"numpy.amin",
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
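The dataset class above converts each detection box to a center/scale pair in `_box2cs`/`_xywh2cs`. A standalone sketch of that conversion with the instance attributes passed as arguments; the default aspect ratio of 192/256 and `scale_thre=1.25` are assumptions for illustration, not values read from the record:

import numpy as np

def xywh2cs(x, y, w, h, aspect_ratio=192 / 256, pixel_std=200, scale_thre=1.25):
    # Box center.
    center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
    # Pad the shorter side so the box matches the target aspect ratio.
    if w > aspect_ratio * h:
        h = w * 1.0 / aspect_ratio
    elif w < aspect_ratio * h:
        w = h * aspect_ratio
    # Scale relative to pixel_std, enlarged by scale_thre as in the class above.
    scale = np.array([w / pixel_std, h / pixel_std], dtype=np.float32) * scale_thre
    return center, scale

print(xywh2cs(10, 20, 50, 100))  # center [35. 70.], scale [0.469 0.625]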
bioShaun/omsCabinet | [
"741179a06cbd5200662cd03bc2e0115f4ad06917"
] | [
"bioinformatics/analysis/snp/replace_score.py"
] | [
"import fire\nimport gzip\nimport pandas as pd\nfrom pathlib import PurePath\n\n\ndef extract_vcf_header(vcf):\n if vcf.suffix == '.gz':\n vcf_inf = gzip.open(vcf)\n else:\n vcf_inf = open(vcf)\n prefix = ''\n for eachline in vcf_inf:\n if vcf.suffix == '.gz':\n eachline = eachline.decode()\n prefix += eachline\n if eachline[:6] == '#CHROM':\n return eachline.strip().split('\\t'), prefix\n\n\ndef replace_score(vcf, map_file, outfile):\n vcf = PurePath(vcf)\n vcf_header, vcf_prefix = extract_vcf_header(vcf)\n vcf_df = pd.read_csv(vcf, sep='\\t', comment='#',\n header=None, names=vcf_header)\n map_df = pd.read_csv(\n map_file, sep='\\t', header=None,\n names=['chrom', 'start', 'ref', 'alt', 'sample_id', ],\n index_col=0)\n vcf_df.loc[:, 'QUAL'] = [\n map_df.loc[each].sample_id if each in map_df.index\n else 'NA'\n for each in vcf_df.loc[:, 'ID']\n ]\n with gzip.open(outfile, 'wt') as out_inf:\n out_inf.write(vcf_prefix)\n\n vcf_df.to_csv(outfile, sep='\\t', header=False,\n compression='gzip', mode='a', index=False)\n\n\nif __name__ == '__main__':\n fire.Fire(replace_score)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
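`replace_score` above fills the QUAL column with a per-row `map_df.loc[...]` lookup inside a list comprehension. A vectorized sketch of the same ID-to-sample lookup using `Series.map`; the IDs and sample names below are invented:

import pandas as pd

ids = pd.Series(['rs1', 'rs2', 'rs9'], name='ID')        # IDs from the VCF
id_to_sample = pd.Series({'rs1': 'S01', 'rs2': 'S02'})   # map-file lookup table

# Missing IDs become 'NA', matching the else branch in the comprehension above.
qual = ids.map(id_to_sample).fillna('NA')
print(qual.tolist())  # ['S01', 'S02', 'NA']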
bvaisvil/lefse | [
"6e325486b9d72a3d489b472691d171377d85b589"
] | [
"format_input.py"
] | [
"#!/usr/bin/env python3\n\nimport sys,os,argparse,pickle,re,numpy\n\nimport functools\n\n\n#***************************************************************************************************************\n#* Log of change *\n#* January 16, 2014 - George Weingart - [email protected] *\n#* *\n#* biom Support *\n#* Modified the program to enable it to accept biom files as input *\n#* *\n#* Added two optional input parameters: *\n#* 1. biom_c is the name of the biom metadata to be used as class *\n#* 2. biom_s is the name of the biom metadata to be used as subclass *\n#* class and subclass are used in the same context as the original *\n#* parameters class and subclass *\n#* These parameters are totally optional, the default is the program *\n#* chooses as class the first metadata received from the conversion *\n#* of the biom file into a sequential (pcl) file as generated by *\n#* breadcrumbs, and similarly, the second metadata is selected as *\n#* subclass. *\n#* The syntax or logic for the original non-biom case was NOT changed. *\n#* *\n#* <******************* IMPORTANT NOTE *************************> *\n#* The biom case requires breadcrumbs and therefore there is a *\n#* a conditional import of the breadcrumbs modules *\n#* If the User uses a biom input and breadcrumbs is not detected, *\n#* the run is abnormally ended *\n#* breadcrumbs itself needs a biom environment, so if the immport *\n#* of biom in breadcrumbs fails, the run is also abnormally\n#* ended (Only if the input file was biom) *\n#* *\n#* USAGE EXAMPLES *\n#* -------------- *\n#* Case #1: Using a sequential file as input (Old version - did not change *\n#* ./format_input.py hmp_aerobiosis_small.txt hmp_aerobiosis_small.in -c 1 -s 2 -u 3 -o 1000000 *\n#* Case #2: Using a biom file as input *\n#* ./format_input.py hmp_aerobiosis_small.biom hmp_aerobiosis_small.in -o 1000000 *\n#* Case #3: Using a biom file as input and override the class and subclass *\n#* ./format_input.py lefse.biom hmp_aerobiosis_small.in -biom_c oxygen_availability -biom_s body_site -o 1000000\n#* *\n#***************************************************************************************************************\n\ndef read_input_file(inp_file, CommonArea):\n\n if inp_file.endswith('.biom'): #* If the file format is biom:\n CommonArea = biom_processing(inp_file) #* Process in biom format\n return CommonArea #* And return the CommonArea\n\n with open(inp_file) as inp:\n CommonArea['ReturnedData'] = [[v.strip() for v in line.strip().split(\"\\t\")] for line in inp.readlines()]\n return CommonArea\n\ndef transpose(data):\n return list(zip(*data))\n\ndef read_params(args):\n parser = argparse.ArgumentParser(description='LEfSe formatting modules')\n parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help=\"the input file, feature hierarchical level can be specified with | or . 
and those symbols must not be present for other reasons in the input file.\")\n parser.add_argument('output_file', metavar='OUTPUT_FILE', type=str,\n help=\"the output file containing the data for LEfSe\")\n parser.add_argument('--output_table', type=str, required=False, default=\"\",\n help=\"the formatted table in txt format\")\n parser.add_argument('-f',dest=\"feats_dir\", choices=[\"c\",\"r\"], type=str, default=\"r\",\n help=\"set whether the features are on rows (default) or on columns\")\n parser.add_argument('-c',dest=\"class\", metavar=\"[1..n_feats]\", type=int, default=1,\n help=\"set which feature use as class (default 1)\")\n parser.add_argument('-s',dest=\"subclass\", metavar=\"[1..n_feats]\", type=int, default=None,\n help=\"set which feature use as subclass (default -1 meaning no subclass)\")\n parser.add_argument('-o',dest=\"norm_v\", metavar=\"float\", type=float, default=-1.0,\n help=\"set the normalization value (default -1.0 meaning no normalization)\")\n parser.add_argument('-u',dest=\"subject\", metavar=\"[1..n_feats]\", type=int, default=None,\n help=\"set which feature use as subject (default -1 meaning no subject)\")\n parser.add_argument('-m',dest=\"missing_p\", choices=[\"f\",\"s\"], type=str, default=\"d\",\n help=\"set the policy to adopt with missin values: f removes the features with missing values, s removes samples with missing values (default f)\")\n parser.add_argument('-n',dest=\"subcl_min_card\", metavar=\"int\", type=int, default=10,\n help=\"set the minimum cardinality of each subclass (subclasses with low cardinalities will be grouped together, if the cardinality is still low, no pairwise comparison will be performed with them)\")\n\n parser.add_argument('-biom_c',dest=\"biom_class\", type=str,\n help=\"For biom input files: Set which feature use as class \")\n parser.add_argument('-biom_s',dest=\"biom_subclass\", type=str,\n help=\"For biom input files: set which feature use as subclass \")\n\n args = parser.parse_args()\n\n return vars(args)\n\ndef remove_missing(data,roc):\n if roc == \"c\": data = transpose(data)\n max_len = max([len(r) for r in data])\n to_rem = []\n for i,r in enumerate(data):\n if len([v for v in r if not( v == \"\" or v.isspace())]) < max_len: to_rem.append(i)\n if len(to_rem):\n for i in to_rem.reverse():\n data.pop(i)\n if roc == \"c\": return transpose(data)\n return data\n\n\ndef sort_by_cl(data,n,c,s,u):\n def sort_lines1(a,b):\n return int(a[c] > b[c])*2-1\n\n def sort_lines2u(a,b):\n if a[c] != b[c]:\n return int(a[c] > b[c])*2-1\n\n return int(a[u] > b[u])*2-1\n\n def sort_lines2s(a,b):\n if a[c] != b[c]:\n return int(a[c] > b[c])*2-1\n\n return int(a[s] > b[s])*2-1\n\n def sort_lines3(a,b):\n if a[c] != b[c]:\n return int(a[c] > b[c])*2-1\n\n if a[s] != b[s]:\n return int(a[s] > b[s])*2-1\n\n return int(a[u] > b[u])*2-1\n\n if n == 3:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines3(a,b)))\n\n if n == 2:\n if s is None:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines2u(a,b)))\n else:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines2s(a,b)))\n\n if n == 1:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines1(a,b)))\n\n return data\n\ndef group_small_subclasses(cls,min_subcl):\n last = \"\"\n n = 0\n repl = []\n dd = [list(cls['class']),list(cls['subclass'])]\n for d in dd:\n if d[1] != last:\n if n < min_subcl and last != \"\":\n repl.append(d[1])\n last = d[1]\n n = 1\n for i,d in enumerate(dd):\n if d[1] in repl: dd[i][1] = \"other\"\n dd[i][1] = 
str(dd[i][0])+\"_\"+str(dd[i][1])\n cls['class'] = dd[0]\n cls['subclass'] = dd[1]\n return cls\n\ndef get_class_slices(data):\n previous_class = data[0][0]\n previous_subclass = data[0][1]\n subclass_slices = []\n class_slices = []\n last_cl = 0\n last_subcl = 0\n class_hierarchy = []\n subcls = []\n for i,d in enumerate(data):\n if d[1] != previous_subclass:\n subclass_slices.append((previous_subclass,(last_subcl,i)))\n last_subcl = i\n subcls.append(previous_subclass)\n if d[0] != previous_class:\n class_slices.append((previous_class,(last_cl,i)))\n class_hierarchy.append((previous_class,subcls))\n subcls = []\n last_cl = i\n previous_subclass = d[1]\n previous_class = d[0]\n subclass_slices.append((previous_subclass,(last_subcl,i+1)))\n subcls.append(previous_subclass)\n class_slices.append((previous_class,(last_cl,i+1)))\n class_hierarchy.append((previous_class,subcls))\n return dict(class_slices), dict(subclass_slices), dict(class_hierarchy)\n\ndef numerical_values(feats,norm):\n mm = []\n for k,v in feats.items():\n feats[k] = [float(val) for val in v]\n if norm < 0.0: return feats\n tr = list(zip(*(list(feats.values()))))\n mul = []\n fk = list(feats.keys())\n hie = True if sum([k.count(\".\") for k in fk]) > len(fk) else False\n for i in range(len(list(feats.values())[0])):\n if hie: mul.append(sum([t for j,t in enumerate(tr[i]) if fk[j].count(\".\") < 1 ]))\n else: mul.append(sum(tr[i]))\n if hie and sum(mul) == 0:\n mul = []\n for i in range(len(list(feats.values())[0])):\n mul.append(sum(tr[i])) \n for i,m in enumerate(mul):\n if m == 0: mul[i] = 0.0\n else: mul[i] = float(norm) / m\n for k,v in feats.items():\n feats[k] = [val*mul[i] for i,val in enumerate(v)]\n if numpy.mean(feats[k]) and (numpy.std(feats[k])/numpy.mean(feats[k])) < 1e-10:\n feats[k] = [ float(round(kv*1e6)/1e6) for kv in feats[k]]\n return feats\n\ndef add_missing_levels2(ff):\n\n if sum( [f.count(\".\") for f in ff] ) < 1: return ff\n\n dn = {}\n\n added = True\n while added:\n added = False\n for f in ff:\n lev = f.count(\".\")\n if lev == 0: continue\n if lev not in dn: dn[lev] = [f]\n else: dn[lev].append(f)\n for fn in sorted(dn,reverse=True):\n for f in dn[fn]:\n fc = \".\".join(f.split('.')[:-1])\n if fc not in ff:\n ab_all = [ff[fg] for fg in ff if (fg.count(\".\") == 0 and fg == fc) or (fg.count(\".\") > 0 and fc == \".\".join(fg.split('.')[:-1]))]\n ab =[]\n for l in [f for f in zip(*ab_all)]:\n ab.append(sum([float(ll) for ll in l]))\n ff[fc] = ab\n added = True\n if added:\n break\n\n return ff\n\n\ndef add_missing_levels(ff):\n if sum( [f.count(\".\") for f in ff] ) < 1: return ff\n\n clades2leaves = {}\n for f in ff:\n fs = f.split(\".\")\n if len(fs) < 2:\n continue\n for l in range(len(fs)):\n n = \".\".join( fs[:l] )\n if n in clades2leaves:\n clades2leaves[n].append( f )\n else:\n clades2leaves[n] = [f]\n for k,v in clades2leaves.items():\n if k and k not in ff:\n ff[k] = [sum(a) for a in zip(*[[float(fn) for fn in ff[vv]] for vv in v])]\n return ff\n\n\ndef modify_feature_names(fn):\n ret = fn\n\n for v in [' ',r'\\$',r'\\@',r'#',r'%',r'\\^',r'\\&',r'\\*',r'\\\"',r'\\'']:\n ret = [re.sub(v,\"\",f) for f in ret]\n\n for v in [\"/\",r'\\(',r'\\)',r'-',r'\\+',r'=',r'{',r'}',r'\\[',r'\\]',\n r',',r'\\.',r';',r':',r'\\?',r'\\<',r'\\>',r'\\.',r'\\,']:\n ret = [re.sub(v,\"_\",f) for f in ret]\n\n for v in [\"\\|\"]:\n ret = [re.sub(v,\".\",f) for f in ret]\n\n ret2 = []\n for r in ret:\n if r[0] in ['0','1','2','3','4','5','6','7','8','9','_']:\n ret2.append(\"f_\"+r)\n else:\n ret2.append(r)\n\n 
return ret2\n\n\ndef rename_same_subcl(cl,subcl):\n toc = []\n for sc in set(subcl):\n if len(set([cl[i] for i in range(len(subcl)) if sc == subcl[i]])) > 1:\n toc.append(sc)\n new_subcl = []\n for i,sc in enumerate(subcl):\n if sc in toc: new_subcl.append(cl[i]+\"_\"+sc)\n else: new_subcl.append(sc)\n return new_subcl\n\n\n#*************************************************************************************\n#* Modifications by George Weingart, Jan 15, 2014 *\n#* If the input file is biom: *\n#* a. Load an AbundanceTable (Using breadcrumbs) *\n#* b. Create a sequential file from the AbundanceTable (de-facto - pcl) *\n#* c. Use that file as input to the rest of the program *\n#* d. Calculate the c,s,and u parameters, either from the values the User entered *\n#* from the meta data values in the biom file or set up defaults *\n#* <<<------------- I M P O R T A N T N O T E ------------------->> *\n#* breadcrumbs src directory must be included in the PYTHONPATH *\n#* <<<------------- I M P O R T A N T N O T E ------------------->> *\n#*************************************************************************************\ndef biom_processing(inp_file):\n CommonArea = dict() #* Set up a dictionary to return\n CommonArea['abndData'] = AbundanceTable.funcMakeFromFile(inp_file, #* Create AbundanceTable from input biom file\n cDelimiter = None,\n sMetadataID = None,\n sLastMetadataRow = None,\n sLastMetadata = None,\n strFormat = None)\n\n #****************************************************************\n #* Building the data element here *\n #****************************************************************\n ResolvedData = list() #This is the Resolved data that will be returned\n IDMetadataName = CommonArea['abndData'].funcGetIDMetadataName() #* ID Metadataname\n IDMetadata = [CommonArea['abndData'].funcGetIDMetadataName()] #* The first Row\n IDMetadata.extend([IDMetadataEntry for IDMetadataEntry in CommonArea['abndData'].funcGetMetadataCopy()[IDMetadataName]]) #* Loop on all the metadata values\n\n ResolvedData.append(IDMetadata) #Add the IDMetadata with all its values to the resolved area\n for key, value in CommonArea['abndData'].funcGetMetadataCopy().items():\n if key != IDMetadataName:\n MetadataEntry = [key] + value #* Set it up\n ResolvedData.append(MetadataEntry)\n for AbundanceDataEntry in CommonArea['abndData'].funcGetAbundanceCopy(): #* The Abundance Data\n lstAbundanceDataEntry = list(AbundanceDataEntry) #Convert tuple to list\n ResolvedData.append(lstAbundanceDataEntry) #Append the list to the metadata list\n CommonArea['ReturnedData'] = ResolvedData #Post the results\n return CommonArea\n\n\n#*******************************************************************************\n#* Check the params and override in the case of biom *\n#*******************************************************************************\ndef check_params_for_biom_case(params, CommonArea):\n CommonArea['MetadataNames'] = list() #Metadata names\n params['original_class'] = params['class'] #Save the original class\n params['original_subclass'] = params['subclass'] #Save the original subclass\n params['original_subject'] = params['subject'] #Save the original subclass\n\n\n TotalMetadataEntriesAndIDInBiomFile = len(CommonArea['abndData'].funcGetMetadataCopy()) # The number of metadata entries\n for i in range(0,TotalMetadataEntriesAndIDInBiomFile): #* Populate the meta data names table\n CommonArea['MetadataNames'].append(CommonArea['ReturnedData'][i][0]) #Add the metadata name\n\n\n 
#****************************************************\n #* Setting the params here *\n #****************************************************\n\n if TotalMetadataEntriesAndIDInBiomFile > 0: #If there is at least one entry - has to be the subject\n params['subject'] = 1\n if TotalMetadataEntriesAndIDInBiomFile == 2: #If there are 2 - The first is the subject and the second has to be the metadata, and that is the class\n params['class'] = 2\n if TotalMetadataEntriesAndIDInBiomFile == 3: #If there are 3: Set up default that the second entry is the class and the third is the subclass\n params['class'] = 2\n params['subclass'] = 3\n FlagError = False #Set up error flag\n\n if not params['biom_class'] is None and not params['biom_subclass'] is None: #Check if the User passed a valid class and subclass\n if params['biom_class'] in CommonArea['MetadataNames']:\n params['class'] = CommonArea['MetadataNames'].index(params['biom_class'])+1 #* Set up the index for that metadata\n else:\n FlagError = True\n if params['biom_subclass'] in CommonArea['MetadataNames']:\n params['subclass'] = CommonArea['MetadataNames'].index(params['biom_subclass'])+1 #* Set up the index for that metadata\n else:\n FlagError = True\n if FlagError == True: #* If the User passed an invalid class\n print(\"**Invalid biom class or subclass passed - Using defaults: First metadata=class, Second Metadata=subclass\\n\")\n params['class'] = 2\n params['subclass'] = 3\n return params\n\n\n\nif __name__ == '__main__':\n CommonArea = dict() #Build a Common Area to pass variables in the biom case\n params = read_params(sys.argv)\n\n #*************************************************************\n #* Conditionally import breadcrumbs if file is a biom file *\n #* If it is and no breadcrumbs found - abnormally exit *\n #*************************************************************\n if params['input_file'].endswith('.biom'):\n try:\n from lefsebiom.ConstantsBreadCrumbs import *\n from lefsebiom.AbundanceTable import *\n except ImportError:\n sys.stderr.write(\"************************************************************************************************************ \\n\")\n sys.stderr.write(\"* Error: Breadcrumbs libraries not detected - required to process biom files - run abnormally terminated * \\n\")\n sys.stderr.write(\"************************************************************************************************************ \\n\")\n exit(1)\n\n\n if type(params['subclass']) is int and int(params['subclass']) < 1:\n params['subclass'] = None\n if type(params['subject']) is int and int(params['subject']) < 1:\n params['subject'] = None\n\n\n CommonArea = read_input_file(sys.argv[1], CommonArea) #Pass The CommonArea to the Read\n data = CommonArea['ReturnedData'] #Select the data\n\n if sys.argv[1].endswith('biom'): #* Check if biom:\n params = check_params_for_biom_case(params, CommonArea) #Check the params for the biom case\n\n if params['feats_dir'] == \"c\":\n data = transpose(data)\n\n ncl = 1\n if not params['subclass'] is None: ncl += 1\n if not params['subject'] is None: ncl += 1\n\n first_line = list(zip(*data))[0]\n\n first_line = modify_feature_names(list(first_line))\n\n data = list(zip( first_line,\n *sort_by_cl(list(zip(*data))[1:],\n ncl,\n params['class']-1,\n params['subclass']-1 if not params['subclass'] is None else None,\n params['subject']-1 if not params['subject'] is None else None)))\n# data.insert(0,first_line)\n# data = remove_missing(data,params['missing_p'])\n cls = {}\n\n cls_i = 
[('class',params['class']-1)]\n if params['subclass'] is not None and params['subclass'] > 0:\n cls_i.append(('subclass',params['subclass']-1))\n\n if params['subject'] is not None and params['subject'] > 0:\n cls_i.append(('subject',params['subject']-1))\n\n cls_i.sort(key = functools.cmp_to_key(lambda x,y: -((x[1] > y[1]) - (x[1] < y[1]))))\n\n for v in cls_i: \n cls[v[0]] = data.pop(v[1])[1:]\n \n if params['subclass'] is None:\n cls['subclass'] = [str(cl)+\"_subcl\" for cl in cls['class']]\n\n cls['subclass'] = rename_same_subcl(cls['class'],cls['subclass'])\n# if 'subclass' in cls.keys(): cls = group_small_subclasses(cls,params['subcl_min_card'])\n class_sl,subclass_sl,class_hierarchy = get_class_slices(list(zip(cls['class'], cls['subclass'], cls['subject'])))\n\n feats = dict([(d[0],d[1:]) for d in data])\n\n feats = add_missing_levels(feats)\n\n feats = numerical_values(feats,params['norm_v'])\n out = {}\n out['feats'] = feats\n out['norm'] = params['norm_v']\n out['cls'] = cls\n out['class_sl'] = class_sl\n out['subclass_sl'] = subclass_sl\n out['class_hierarchy'] = class_hierarchy\n\n if params['output_table']:\n with open( params['output_table'], \"w\") as outf:\n if 'class' in cls: outf.write( \"\\t\".join(list([\"class\"])+list(cls['class'])) + \"\\n\" )\n if 'subclass' in cls: outf.write( \"\\t\".join(list([\"subclass\"])+list(cls['subclass'])) + \"\\n\" )\n if 'subject' in cls: outf.write( \"\\t\".join(list([\"subject\"])+list(cls['subject'])) + \"\\n\" )\n for k,v in out['feats'].items(): outf.write( \"\\t\".join([k]+[str(vv) for vv in v]) + \"\\n\" )\n\n with open(params['output_file'], 'wb') as back_file:\n pickle.dump(out,back_file)\n\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samuelwestlake/deeplodocus-dev | [
"12b283ca4eb39abf13ddc56eabc78e01e90627ff"
] | [
"deeplodocus/data/load/loader.py"
] | [
"# Python imports\nfrom typing import Optional\nfrom typing import List\nfrom typing import Union\nfrom typing import Any\nimport numpy as np\nimport mimetypes\nimport weakref\n\n# Deeplodocus imports\nfrom deeplodocus.utils.notification import Notification\nfrom deeplodocus.utils.generic_utils import get_int_or_float\nfrom deeplodocus.utils.generic_utils import is_np_array\nfrom deeplodocus.utils.generic_utils import get_corresponding_flag\n\n# Deeplodocus flags\nfrom deeplodocus.flags import *\n\n\nclass Loader(object):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Load the unloaded data after being select by the Dataset\n \"\"\"\n\n def __init__(self,\n data_entry: weakref,\n load_as: Optional[str] = None,\n cv_library: Union[str, None, Flag] = DEEP_LIB_OPENCV\n ):\n\n # Weakref of the Entry instance\n self.data_entry = data_entry\n\n # Optional type of data to load (Still highly recommended to define it)\n self.load_as = load_as\n\n # Computer Vision library\n self.warning_video = None\n self.cv_library = None\n self.set_cv_library(cv_library)\n\n # Checked\n self.checked = False\n\n def check(self):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Check the Loader\n\n PARAMETERS:\n -----------\n\n None\n\n RETURN:\n -------\n\n :return: None\n \"\"\"\n # Check the load_as argument\n self.load_as = self.__check_load_as(self.load_as)\n\n # Set self.checked as True\n self.checked = True\n\n def __check_load_as(self, load_as: Union[str, int, Flag, None]) -> Flag:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Check the data type\n If the data type given is None we try to estimate it (errors can occur with complex types)\n Else we directly get the data type given by the user\n\n PARAMETERS:\n -----------\n\n :param load_as (Union[str, int, None]): The data type in a raw format given by the user\n\n RETURN:\n -------\n\n :return load_as(Flag): The data type of the entry\n \"\"\"\n\n if load_as is None:\n # Get an instance\n instance_example, is_loaded, _ = self.data_entry().__get_first_item()\n\n if is_loaded is True:\n load_as = None\n else:\n # Automatically check the data type\n load_as = self.__estimate_load_as(instance_example)\n else:\n load_as = get_corresponding_flag(\n flag_list=DEEP_LIST_LOAD_AS,\n info=load_as\n )\n return load_as\n\n def __estimate_load_as(self, data: str) -> Flag:\n \"\"\"\n AUTHORS:\n --------\n\n author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Find the type of the given data\n\n PARAMETERS:\n -----------\n\n :param data: The data to analyze\n\n RETURN:\n -------\n\n :return: The integer flag of the corresponding type\n \"\"\"\n\n # If we have a list of item, we check that they all contain the same type\n if isinstance(data, list):\n load_as_list = []\n # Get all the data type\n for d in data:\n dt = self.__estimate_load_as(d)\n load_as_list.append(dt)\n\n # Check the data types are all the same\n for dt in load_as_list:\n if load_as_list[0].corresponds(dt) is False:\n Notification(DEEP_NOTIF_FATAL, \"Data type in your sequence of data are not all the same\")\n\n # If all the same then return the data type\n return load_as_list[0]\n\n # If not a list\n else:\n mime = mimetypes.guess_type(data)\n if mime[0] is not None:\n mime = mime[0].split(\"/\")[0]\n\n # IMAGE\n if mime == \"image\":\n return DEEP_LOAD_AS_IMAGE\n # VIDEO\n elif mime == \"video\":\n return DEEP_LOAD_AS_VIDEO\n # FLOAT\n elif 
DEEP_LOAD_AS_FLOAT.corresponds(get_int_or_float(data)):\n return DEEP_LOAD_AS_FLOAT\n # INTEGER\n elif DEEP_LOAD_AS_INTEGER.corresponds(get_int_or_float(data)):\n return DEEP_LOAD_AS_INTEGER\n # NUMPY ARRAY\n if is_np_array(data) is True:\n return DEEP_LOAD_AS_NP_ARRAY\n # Type not handled\n else:\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_DATA_NOT_HANDLED % data)\n\n def load_from_str(self, data: Union[str, List[str], Any]) -> Union[Any, List[Any]]:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Load a data from a string format to the actual content\n Loads either one item or a list of items\n\n PARAMETERS:\n -----------\n\n :param data(Union[str, List[str]]): The data to transform\n\n RETURN:\n -------\n\n :return loaded_data(Union[Any, List[Any]]): The loaded data\n \"\"\"\n\n loaded_data = None\n\n # Make sure the data contains something\n if data is not None:\n\n # SEQUENCE\n if isinstance(data, list):\n # If data is a sequence we use the function in a recursive fashion\n loaded_data = []\n for d in data:\n ld = self.load_from_str(data=d)\n loaded_data.append(ld)\n\n # IMAGE\n elif DEEP_LOAD_AS_IMAGE.corresponds(self.load_as):\n # Load image\n loaded_data = self.__load_image(data)\n\n # VIDEO\n elif DEEP_LOAD_AS_VIDEO.corresponds(self.load_as):\n loaded_data = self.__load_video(data)\n\n # INTEGER\n elif DEEP_LOAD_AS_INTEGER.corresponds(self.load_as):\n loaded_data = int(data)\n\n # FLOAT NUMBER\n elif DEEP_LOAD_AS_FLOAT.corresponds(self.load_as):\n loaded_data = float(data)\n\n elif DEEP_LOAD_AS_STRING.corresponds(self.load_as):\n loaded_data = str(data)\n\n # NUMPY ARRAY\n elif DEEP_LOAD_AS_NP_ARRAY.corresponds(self.load_as):\n loaded_data = np.load(data)\n\n # LOAD AS GIVEN (unchanged)\n elif DEEP_LOAD_AS_GIVEN.corresponds(self.load_as):\n loaded_data = data\n\n # Data type not recognized\n else:\n\n Notification(DEEP_NOTIF_FATAL,\n \"The following data could not be loaded because its type is not recognized : %s.\\n\"\n \"Please check the documentation online to see the supported types\" % data)\n # If the data is None\n else:\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_DATA_IS_NONE % data)\n\n return loaded_data\n\n\n \"\"\"\n \"\n \" DATA LOADERS\n \"\n \"\"\"\n\n def __load_image(self, image_path: str):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Load the image in the image_path\n\n PARAMETERS:\n -----------\n\n :param image_path(str): The path of the image to load\n\n RETURN:\n -------\n\n :return: The loaded image\n \"\"\"\n if DEEP_LIB_OPENCV.corresponds(self.cv_library):\n image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)\n elif DEEP_LIB_PIL.corresponds(self.cv_library):\n image = np.array(Image.open(image_path))\n else:\n # Notify the user of invalid cv library\n image = None\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_CV_LIBRARY_NOT_IMPLEMENTED % self.cv_library.name)\n\n # Notify the user that the image failed to load\n if image is None:\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_DATA_CANNOT_LOAD_IMAGE % (self.cv_library.name, image_path))\n\n # If image is is grayscale add a new dimension\n if image.ndim > 2:\n # If image loaded using openCV, convert to RGB(a)\n if DEEP_LIB_OPENCV.corresponds(self.cv_library):\n image = self.__convert_bgra2rgba(image)\n else:\n image = image[:, :, np.newaxis]\n\n return image\n\n @staticmethod\n def __convert_bgra2rgba(image):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Convert BGR(alpha) image to 
RGB(alpha) image\n\n PARAMETERS:\n -----------\n\n :param image: image to convert\n\n RETURN:\n -------\n\n :return: a RGB(alpha) image\n \"\"\"\n\n # Get the number of channels in the image\n _, _, channels = image.shape\n\n # Handle BGR and BGR(A) images\n if channels == 3:\n image = image[:, :, (2, 1, 0)]\n elif channels == 4:\n image = image[:, :, (2, 1, 0, 3)]\n return image\n\n def __load_video(self, video_path: str):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n Load a video\n\n PARAMETERS:\n -----------\n\n :param video_path->str: absolute path to a video\n\n RETURN:\n -------\n\n :return: a list of frame from the video\n \"\"\"\n self.__throw_warning_video()\n video = []\n # If the computer vision library selected is OpenCV\n if DEEP_LIB_OPENCV.corresponds(self.cv_library):\n # try to load the file\n cap = cv2.VideoCapture(video_path)\n while True:\n _, frame = cap.read()\n if frame is None:\n break\n video.append(self.__convert_bgra2rgba(frame))\n cap.release()\n else:\n Notification(DEEP_NOTIF_FATAL,\n \"The video could not be loaded because OpenCV is not selected as the Computer Vision library\")\n return video\n\n def __throw_warning_video(self):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Warn the user of the unstable video mode.\n\n PARAMETERS:\n -----------\n\n None\n\n RETURN:\n -------\n\n :return: None\n \"\"\"\n if self.warning_video is None:\n Notification(DEEP_NOTIF_WARNING, \"The video mode is not fully supported. \"\n \"We deeply suggest you to use sequences of images.\")\n self.warning_video = 1\n\n def set_cv_library(self, cv_library: Flag) -> None:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Samuel Westlake\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Set self.cv_library to the given value and import the corresponding cv library\n\n PARAMETERS:\n -----------\n\n :param cv_library: (Flag): The flag of the computer vision library selected\n\n RETURN:\n -------\n\n None\n \"\"\"\n # Set the cv_library argument to the corresponding Flag\n self.cv_library = get_corresponding_flag(flag_list=DEEP_LIST_CV_LIB, info=cv_library)\n\n # Import globally the required CV library\n self.__import_cv_library(cv_library=cv_library)\n\n @staticmethod\n def __import_cv_library(cv_library : Flag) -> None:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Samuel Westlake\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Imports either cv2 or PIL.Image dependant on the value of cv_library\n\n PARAMETERS:\n -----------\n\n None\n\n RETURN:\n -------\n\n None\n \"\"\"\n if DEEP_LIB_OPENCV.corresponds(info=cv_library):\n try:\n global cv2\n import cv2\n except ImportError as e:\n Notification(DEEP_NOTIF_ERROR, str(e))\n elif DEEP_LIB_PIL.corresponds(info=cv_library):\n try:\n global Image\n from PIL import Image\n except ImportError as e:\n Notification(DEEP_NOTIF_ERROR, str(e))\n else:\n Notification(DEEP_NOTIF_ERROR, DEEP_MSG_CV_LIBRARY_NOT_IMPLEMENTED % cv_library)"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fernandezdaniel/Spearmint | [
"3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84",
"3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84",
"3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84"
] | [
"spearmint/kernels/matern.py",
"spearmint/utils/priors.py",
"examples/toy/toy.py"
] | [
"# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: [email protected]\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. 
End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. 
IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. 
End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\n\nimport numpy as np\nimport kernel_utils\n\nfrom .abstract_kernel import AbstractKernel\nfrom ..utils import priors\nfrom ..utils.param import Param as Hyperparameter\n\nSQRT_3 = np.sqrt(3.0)\nSQRT_5 = np.sqrt(5.0)\n\n\nclass Matern52(AbstractKernel):\n def __init__(self, num_dims, value=None, name='Matern52', prior=None):\n self.name = name\n self.num_dims = num_dims\n\n self.ls = Hyperparameter(\n initial_value = np.ones(self.num_dims) if value is None else value,\n prior = priors.Tophat(0.0, 10.0) if prior is None else prior,\n name = 'ls'\n )\n\n assert self.ls.value.shape[0] == self.num_dims\n\n @property\n def hypers(self):\n return self.ls\n\n def cov(self, inputs):\n return self.cross_cov(inputs, inputs)\n\n def diag_cov(self, inputs):\n return np.ones(inputs.shape[0])\n\n def cross_cov(self, inputs_1, inputs_2):\n r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))\n r = np.sqrt(r2)\n cov = (1.0 + SQRT_5*r + (5.0/3.0)*r2) * np.exp(-SQRT_5*r)\n\n return cov\n\n def cross_cov_grad_data(self, inputs_1, inputs_2):\n # NOTE: This is the gradient wrt the inputs of inputs_2\n # The gradient wrt the inputs of inputs_1 is -1 times this\n # This is sloppily coded -- the gradient that comes from kernel_utils is w.r.t. inputs_1\n # but a minus sign is dropped to make it w.r.t. inputs_2\n # oh well...\n r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))\n r = np.sqrt(r2)\n grad_r2 = (5.0/6.0)*np.exp(-SQRT_5*r)*(1 + SQRT_5*r)\n\n return grad_r2[:,:,np.newaxis] * kernel_utils.grad_dist2(self.ls.value, inputs_1, inputs_2)\n\n",
"# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: [email protected]\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. 
End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. 
IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. 
End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nimport numpy.random as npr\nimport scipy.stats as sps\nfrom operator import add # same as lambda x,y:x+y I think\n# import scipy.special.gammaln as log_gamma\n\n\n\nclass AbstractPrior(object):\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def logprob(self, x):\n pass\n\n # Some of these are \"improper priors\" and I cannot sample from them\n # In this case the sample method will just return None\n # (or could raise an exception)\n # In any case the sampling should only be used for debugging\n # Unless we want to initialize the hypers by sampling from the prior?\n # def sample(self, n_samples):\n # # raise Exception(\"Sampling not implemented for composed prior\")\n # return None\n\n\nclass Tophat(AbstractPrior):\n def __init__(self, xmin, xmax):\n self.xmin = xmin\n self.xmax = xmax\n if not (xmax > xmin):\n raise Exception(\"xmax must be greater than xmin\")\n\n def logprob(self, x):\n if np.any(x < self.xmin) or np.any(x > self.xmax):\n return -np.inf\n else:\n return 0. # More correct is -np.log(self.xmax-self.xmin), but constants don't matter\n\n def sample(self, n_samples):\n return self.xmin + npr.rand(n_samples) * (self.xmax-self.xmin)\n\n# This is the Horseshoe prior for a scalar entity\n# The multivariate Horseshoe distribution is not properly implemented right now\n# None of these are, really. We should fix that up at some point, you might\n# have to tell it the size in the constructor (e.g. with kwarg: dims=1)\n# (Not that we ever really want the multivariate one as a prior, do we?)\n# (I think more often we'd just want to vectorize the univariate one)\nclass Horseshoe(AbstractPrior):\n def __init__(self, scale):\n self.scale = scale\n\n # THIS IS INEXACT\n def logprob(self, x):\n if np.any(x == 0.0):\n return np.inf # POSITIVE infinity (this is the \"spike\")\n # We don't actually have an analytical form for this\n # But we have a bound between 2 and 4, so I just use 3.....\n # (or am I wrong and for the univariate case we have it analytically?)\n return np.sum(np.log(np.log(1 + 3.0 * (self.scale/x)**2) ) )\n\n def sample(self, n_samples):\n # Sample from standard half-cauchy distribution\n lamda = np.abs(npr.standard_cauchy(size=n_samples))\n\n # I think scale is the thing called Tau^2 in the paper.\n return npr.randn() * lamda * self.scale\n # return npr.multivariate_normal()\n\nclass Lognormal(AbstractPrior):\n def __init__(self, scale, mean=1):\n self.scale = scale\n self.mean = mean\n\n def logprob(self, x):\n return np.sum(sps.lognorm.logpdf(x, self.scale, scale=self.mean))\n\n def sample(self, n_samples):\n return npr.lognormal(mean=self.mean, sigma=self.scale, size=n_samples)\n\nclass LognormalTophat(AbstractPrior):\n def __init__(self, scale, xmin, xmax, mean=1):\n self.scale = scale\n self.mean = mean\n self.xmin = xmin\n self.xmax = xmax\n\n if not (xmax > xmin):\n raise Exception(\"xmax must be greater than xmin\")\n\n def logprob(self, x):\n if np.any(x < self.xmin) or np.any(x > self.xmax):\n return -np.inf\n else:\n return np.sum(sps.lognorm.logpdf(x, self.scale, scale=self.mean))\n\n def sample(self, n_samples):\n raise Exception('Sampling of LognormalTophat is not implemented.')\n\n# Let X~lognormal and Y=X^2. 
This is distribution of Y.\nclass LognormalOnSquare(Lognormal):\n def logprob(self, y):\n if np.any(y < 0): # Need this here or else sqrt(y) may occur with y < 0\n return -np.inf\n\n x = np.sqrt(y)\n dy_dx = 2*x # this is the Jacobean or inverse Jacobean, whatever\n # p_y(y) = p_x(sqrt(x)) / (dy/dx)\n # log p_y(y) = log p_x(x) - log(dy/dx)\n return Lognormal.logprob(self, x) - np.log(dy_dx)\n\n def sample(self, n_samples):\n return Lognormal.sample(self, n_samples)**2\n\nclass LogLogistic(AbstractPrior):\n def __init__(self, shape, scale=1):\n self.shape = shape\n self.scale = scale\n\n def logprob(self, x):\n return np.sum(sps.fisk.logpdf(x, self.shape, scale=self.scale))\n\nclass Exponential(AbstractPrior):\n def __init__(self, mean):\n self.mean = mean\n\n def logprob(self, x):\n return np.sum(sps.expon.logpdf(x, scale=self.mean))\n\n def sample(self, n_samples):\n return npr.exponential(scale=self.mean, size=n_samples)\n\n# We put sums everywhere so that we can use priors for multidimensional variables\n# But this makes the sampling not OK, because it only samples a scalar...\n# should fix this at some point... TODO\nclass Beta(AbstractPrior):\n def __init__(self, alpha, beta):\n self.alpha = alpha\n self.beta = beta\n\n def logprob(self, x):\n return np.sum(sps.beta.logpdf(x, self.alpha, self.beta))\n\n def sample(self, n_samples):\n return npr.beta(self.alpha, self.beta, size=n_samples)\n\nclass Gaussian(AbstractPrior):\n def __init__(self, mu, sigma):\n self.mu = mu\n self.sigma = sigma\n\n def logprob(self, x):\n return np.sum(sps.norm.logpdf(x, loc=self.mu, scale=self.sigma))\n\n def sample(self, n_samples):\n return self.mu + npr.randn(n_samples) * self.sigma\n\nclass MultivariateNormal(AbstractPrior):\n def __init__(self, mu, cov):\n self.mu = mu\n self.cov = cov\n\n if mu.size != cov.shape[0] or cov.shape[0] != cov.shape[1]:\n raise Exception(\"mu should be a vector and cov a matrix, of matching sizes\")\n\n def logprob(self, x):\n return sps.multivariate_normal.logpdf(x, mean=self.mu, cov=self.cov)\n\n def sample(self, n_samples):\n return npr.multivariate_normal(self.mu, self.cov, size=n_samples).T.squeeze()\n\nclass NoPrior(AbstractPrior):\n def __init__(self):\n pass\n\n def logprob(self, x):\n return 0.0\n\n# This class takes in another prior in its constructor and linearly scales the input variable\nclass Scale(AbstractPrior):\n def __init__(self, prior, scale):\n self.prior = prior\n self.scale = scale\n\n if hasattr(prior, 'sample'):\n self.sample = lambda n_samples: self.prior.sample(n_samples)*self.scale\n\n def logprob(self, x):\n return self.prior.logprob(x/self.scale)\n\n\n# This class takes in another prior in its constructor\n# And gives you the nonnegative version (actually the positive version, to be numerically safe)\nclass NonNegative(AbstractPrior):\n def __init__(self, prior):\n self.prior = prior\n\n if hasattr(prior, 'sample'):\n self.sample = lambda n_samples: np.abs(self.prior.sample(n_samples))\n\n def logprob(self, x):\n if np.any(x <= 0): \n return -np.inf\n else:\n return self.prior.logprob(x)# + np.log(2.0)\n # Above: the log(2) makes it correct, but we don't ever care about it I think\n \n\n# This class allows you to compose a list priors\n# (meaning, take the product of their PDFs)\n# The resulting distribution is \"improper\" -- i.e. 
not normalized\nclass ProductOfPriors(AbstractPrior):\n def __init__(self, priors):\n self.priors = priors\n\n def logprob(self, x):\n lp = 0.0\n for prior in self.priors:\n lp += prior.logprob(x)\n return lp\n\n# class Binomial(AbstractPrior):\n# def __init__(self, p, n):\n# self.p = p\n# self.n = n\n\n# def logprob(self, k):\n# pos = k\n# neg = self.n-k\n\n# with np.errstate(divide='ignore'): # suppress warnings about log(0)\n# return np.sum( pos[pos>0]*np.log(self.p[pos>0]) ) + np.sum( neg[neg>0]*np.log(1-self.p[neg>0]) )\n\n# def sample(self, n_samples):\n# return np.sum(npr.rand(n, n_samples) < p, axis=0)\n\n# class Bernoulli(Binomial):\n# def __init__(self, p):\n# super(Bernoulli, self).__init__(p, 1)\n\n",
"# The toy problem in http://arxiv.org/abs/1403.4890\nimport numpy as np\n\ndef main(job_id, params):\n x1 = params['x']\n x2 = params['y']\n\n f = x1 + x2\n c1 = 1.5 - x1 - 2.0*x2 - 0.5*np.sin(2*np.pi*(x1**2 - 2.0*x2))\n c2 = x1**2 + x2**2 - 1.5 \n c1 = -c1\n c2 = -c2\n\n return {'f':f, 'c1':c1, 'c2':c2}\n\n\n\"\"\"\nEverything below this point is optional. It is used to specify\nthe true solution so that one can plot the error curve using \nprogress_curve.py in the visualizations/ directory.\n\"\"\"\ndef true_val():\n return 0.5998\ndef true_sol():\n return {'x' : 0.1954, 'y' : 0.4044}\ntrue_func = main \n"
] | [
[
"numpy.exp",
"numpy.sqrt",
"numpy.ones"
],
[
"numpy.random.lognormal",
"numpy.log",
"numpy.random.beta",
"numpy.sqrt",
"scipy.stats.lognorm.logpdf",
"numpy.random.exponential",
"numpy.random.multivariate_normal",
"scipy.stats.fisk.logpdf",
"numpy.random.standard_cauchy",
"scipy.stats.norm.logpdf",
"scipy.stats.multivariate_normal.logpdf",
"numpy.any",
"numpy.random.rand",
"numpy.random.randn",
"scipy.stats.expon.logpdf",
"scipy.stats.beta.logpdf"
],
[
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iamabhishek0/sympy | [
"c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd"
] | [
"sympy/parsing/autolev/test-examples/ruletest12.py"
] | [
"import sympy.physics.mechanics as me\nimport sympy as sm\nimport math as m\nimport numpy as np\n\nx, y = me.dynamicsymbols('x y')\na, b, r = sm.symbols('a b r', real=True)\neqn = sm.Matrix([[0]])\neqn[0] = a*x**3+b*y**2-r\neqn = eqn.row_insert(eqn.shape[0], sm.Matrix([[0]]))\neqn[eqn.shape[0]-1] = a*sm.sin(x)**2+b*sm.cos(2*y)-r**2\nmatrix_list = []\nfor i in eqn:matrix_list.append(i.subs({a:2.0, b:3.0, r:1.0}))\nprint(sm.nsolve(matrix_list,(x,y),(np.deg2rad(30),3.14)))\n"
] | [
[
"numpy.deg2rad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ACWI-SSWD/nldi_xstool | [
"f201befc6454202042d2ed76e82c3c07edcf4c48"
] | [
"nldi_xstool/__openChannel.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 5 16:26:25 2015\n\n@author: mweier\n\"\"\"\n\nimport numpy as np\nfrom numba import jit\n\n\n@jit\ndef channelBuilder(wsDepth, rightSS, leftSS, widthBottom):\n \"\"\"\n Builds trapziodal channel station/elevation array given depth,\n right side slope, left side slope, and bottom width\n \"\"\"\n leftToe = wsDepth*1.25*leftSS\n rightToe = wsDepth*1.25*rightSS\n staElev = np.array([(0.0, wsDepth*1.25),\n (leftToe, 0.0),\n (leftToe + widthBottom, 0.0),\n (leftToe+widthBottom+rightToe, wsDepth*1.25)])\n return staElev\n\n\ndef lineIntersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n x = y = np.nan\n# print 'lines do not intersect'\n return x, y\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y\n\n\n@jit\ndef polygonArea(corners):\n area = 0.0\n for i in range(len(corners)):\n j = (i + 1) % len(corners)\n area += corners[i][0] * corners[j][1]\n area -= corners[j][0] * corners[i][1]\n area = abs(area) / 2.0\n return area\n\n\n@jit\ndef channelPerimeter(corners):\n P = 0.0\n for i in range(len(corners)-1):\n P += np.sqrt((np.power((corners[i+1][0]-corners[i][0]), 2) +\n np.power((corners[i+1][1]-corners[i][1]), 2)))\n return P\n\n\ndef flowEst(wsElev, n, slope, staElev, units):\n \"\"\"\n Estimates uniform flow using the Manning equation for\n a user defined trapziodal channel or a manually defined channel using\n a station/elevation file\n \"\"\"\n\n if units == \"m\":\n const = 1.0\n else:\n const = 1.49\n\n intersectList = []\n for i in range(0, len(staElev)):\n x, y = lineIntersection(\n (staElev[i-1], staElev[i]),\n ([staElev[0][0], wsElev], [staElev[-1][0], wsElev]))\n if x >= staElev[i-1][0] and x <= staElev[i][0] and abs(y - wsElev) < 0.01:\n # print (x,y)\n intersectList.append((x, y))\n else:\n # print ('line segments do not intersect')\n pass\n\n try:\n intersectArray = np.array(intersectList)\n intersectArray = intersectArray[intersectArray[:, 0].argsort()]\n # print 'more than two points intersect'\n staMinElev = staElev[np.where(\n staElev[:, 1] == min(staElev[:, 1]))][0][0]\n startPoint = intersectArray[np.where(\n intersectArray[:, 0] < staMinElev)][-1]\n endPoint = intersectArray[np.where(\n intersectArray[:, 0] > staMinElev)][0]\n intersectArray = np.vstack([startPoint, endPoint])\n except Exception as e:\n print(e)\n return 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n\n staMin = np.min(intersectArray[:, 0])\n staMax = np.max(intersectArray[:, 0])\n\n thalweig = staElev[np.where(staElev[:, 1] == np.min(staElev[:, 1]))]\n\n minElev = thalweig[:, 1][0]\n maxDepth = wsElev-minElev\n\n if len(intersectArray) < 2:\n return 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n\n staElevTrim = np.vstack([intersectArray[0], staElev, intersectArray[1]])\n # staElevTrim = staElevTrim[staElevTrim[:,0].argsort()]\n staElevTrim = staElevTrim[np.where(\n (staElevTrim[:, 0] >= staMin) & (staElevTrim[:, 0] <= staMax))]\n\n area = polygonArea(staElevTrim)\n R = area/channelPerimeter(staElevTrim)\n v = (const/n)*np.power(R, (2./3.0))*np.sqrt(slope)\n Q = v*area\n topWidth = staMax-staMin\n xGround = staElev[:, 0]\n yGround = staElev[:, 1]\n yGround0 = np.ones(len(xGround))*np.min(yGround)\n xWater = staElevTrim[:, 0]\n yWater = np.ones(len(xWater))*wsElev\n yWater0 = staElevTrim[:, 1]\n args = R, area, topWidth, Q, v, 
maxDepth, xGround, yGround, yGround0, xWater, yWater, yWater0\n return args\n"
] | [
[
"numpy.sqrt",
"numpy.min",
"numpy.power",
"numpy.max",
"numpy.array",
"numpy.where",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
whq-hqw/detr_change | [
"142f75cc5e0b59ca6e07928ddcbed3e461816611"
] | [
"models/matcher.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nModules to compute the matching cost and solve the corresponding LSAP.\n\"\"\"\nimport torch\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou\n\n\nclass HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\" Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n out_prob = outputs[\"pred_logits\"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n        # but approximate it in 1 - proba[target class].\n        # The 1 is a constant that doesn't change the matching, it can be omitted.\n        # In out_prob, each entry along dim 1 is the predicted probability that this bbox belongs to that class; with 92 classes there are 92 numbers per box\n        # Because there are far more candidate boxes than ground-truth boxes, we do not know in advance which candidate will match which gt box\n        # So we first gather all tgt_ids and take the corresponding probabilities from out_prob, since among the many candidates some bbox must match a given gt bbox best\n        # The minus sign measures the gap from the ideal probability of 1, although adding the constant 1 or not makes no difference here\n        cost_class = -out_prob[:, tgt_ids]\n\n        # Compute the L1 cost between boxes\n        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n        # Compute the giou cost between boxes\n        cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n        # Final cost matrix\n        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n        C = C.view(bs, num_queries, -1).cpu()\n\n        sizes = [len(v[\"boxes\"]) for v in targets]\n        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\n\ndef build_matcher(args):\n    return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)\n"
] | [
[
"torch.cat",
"torch.cdist",
"torch.no_grad",
"scipy.optimize.linear_sum_assignment",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
lupantech/InterGPS | [
"0f326027d16d7d50a9c189f897739dfb95085021"
] | [
"theorem_predict/eval_transformer.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\nimport json\nimport ast\nfrom tqdm import tqdm\n\nimport torch\nfrom transformers import BartForConditionalGeneration, BartTokenizerFast\n\n\ndef evaluate(diagram_logic_file, text_logic_file, tokenizer_name, model_name, check_point, seq_num):\n\n test_lst = range(2401, 3002)\n\n ## read logic form files\n with open(diagram_logic_file) as f:\n diagram_logic_forms = json.load(f)\n with open(text_logic_file) as f:\n text_logic_forms = json.load(f)\n\n combined_logic_forms = {}\n for pid in test_lst:\n combined_logic_forms[pid] = diagram_logic_forms[str(pid)]['diagram_logic_forms'] + \\\n text_logic_forms[str(pid)]['text_logic_forms']\n\n ## build tokenizer and model\n tokenizer = BartTokenizerFast.from_pretrained(tokenizer_name) # 'facebook/bart-base'\n model = BartForConditionalGeneration.from_pretrained(model_name).to(device) # 'facebook/bart-base'\n model.load_state_dict(torch.load(check_point))\n\n final = dict()\n for pid in tqdm(test_lst):\n input = str(combined_logic_forms[pid])\n tmp = tokenizer.encode(input)\n if len(tmp) > 1024:\n tmp = tmp[:1024]\n input = torch.LongTensor(tmp).unsqueeze(0).to(device)\n\n output = model.generate(input, bos_token_id=0, eos_token_id=2,\n max_length=20, num_beams=10, num_return_sequences=seq_num)\n # print(out.size())\n\n ## refine output sequence\n seq = []\n for j in range(seq_num):\n res = tokenizer.decode(output[j].tolist())\n res = res.replace(\"</s>\", \"\").replace(\"<s>\", \"\").replace(\"<pad>\", \"\")\n # print(res)\n try:\n res = ast.literal_eval(res) # string class to list class\n except Exception as e:\n res = []\n seq.append(res)\n\n final[str(pid)] = {\"id\": str(pid), \"num_seqs\": seq_num, \"seq\": seq}\n\n return final\n\n\nif __name__ == '__main__':\n\n diagram_logic_file = '../data/geometry3k/logic_forms/diagram_logic_forms_annot.json'\n text_logic_file = '../data/geometry3k/logic_forms/text_logic_forms_annot_dissolved.json'\n\n check_point = 'models/tp_model_best.pt'\n output_file = 'results/test/pred_seqs_test_bart_best.json'\n\n tokenizer_name = 'facebook/bart-base'\n model_name = 'facebook/bart-base'\n\n SEQ_NUM = 5\n\n device = torch.device('cuda:0')\n\n result = evaluate(diagram_logic_file, text_logic_file, tokenizer_name, model_name, check_point, SEQ_NUM)\n\n with open(output_file, 'w') as f:\n json.dump(result, f)\n\n"
] | [
[
"torch.device",
"torch.LongTensor",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zajaczajac/metaworld | [
"4febbc4f702c3145b73b012b58b111b2c439032a",
"4febbc4f702c3145b73b012b58b111b2c439032a"
] | [
"metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_lever_pull.py",
"metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py"
] | [
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.env_util import get_asset_full_path\nfrom metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set\n\n\nclass SawyerLeverPullEnv(SawyerXYZEnv):\n\n def __init__(self):\n\n hand_low = (-0.5, 0.40, -0.15)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.1, 0.7, 0.05)\n obj_high = (0.1, 0.8, 0.05)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_pos': np.array([0, 0.7, 0.05]),\n 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),\n }\n self.goal = np.array([0, 0.75, -0.12])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n goal_low = self.hand_low\n goal_high = self.hand_high\n\n \n\n self._random_reset_space = Box(\n np.array(obj_low),\n np.array(obj_high),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n\n @property\n def model_name(self):\n return get_asset_full_path('sawyer_xyz/sawyer_lever_pull.xml')\n\n @_assert_task_is_set\n def step(self, action):\n ob = super().step(action)\n reward, reachDist, pullDist = self.compute_reward(action, ob)\n self.curr_path_length += 1\n\n info = {\n 'reachDist': reachDist,\n 'goalDist': pullDist,\n 'epRew': reward,\n 'pickRew': None,\n 'success': float(pullDist <= 0.05)\n }\n\n return ob, reward, False, info\n\n def _get_pos_objects(self):\n return self._get_site_pos('leverStart')\n\n def reset_model(self):\n self._reset_hand()\n self._target_pos = self.goal.copy()\n self.obj_init_pos = self.init_config['obj_init_pos']\n\n if self.random_init:\n goal_pos = self._get_state_rand_vec()\n self.obj_init_pos = goal_pos[:3]\n final_pos = goal_pos.copy()\n final_pos[1] += 0.05\n final_pos[2] -= 0.17\n self._target_pos = final_pos\n\n self.sim.model.body_pos[self.model.body_name2id('lever')] = self.obj_init_pos\n self.maxPullDist = np.linalg.norm(self._target_pos - self.obj_init_pos)\n\n return self._get_obs()\n\n def _reset_hand(self):\n super()._reset_hand(10)\n\n rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')\n self.init_fingerCOM = (rightFinger + leftFinger)/2\n self.reachCompleted = False\n\n def compute_reward(self, actions, obs):\n del actions\n\n objPos = obs[3:6]\n\n rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')\n fingerCOM = (rightFinger + leftFinger)/2\n\n pullGoal = self._target_pos\n\n pullDist = np.linalg.norm(objPos - pullGoal)\n reachDist = np.linalg.norm(objPos - fingerCOM)\n reachRew = -reachDist\n\n self.reachCompleted = reachDist < 0.05\n\n def pullReward():\n c1 = 1000\n c2 = 0.01\n c3 = 0.001\n\n if self.reachCompleted:\n pullRew = 1000*(self.maxPullDist - pullDist) + c1*(np.exp(-(pullDist**2)/c2) + np.exp(-(pullDist**2)/c3))\n pullRew = max(pullRew,0)\n return pullRew\n else:\n return 0\n\n pullRew = pullReward()\n reward = reachRew + pullRew\n\n return [reward, reachDist, pullDist]\n",
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.env_util import get_asset_full_path\nfrom metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set\n\n\nclass SawyerPushEnvV2(SawyerXYZEnv):\n \"\"\"\n Motivation for V2:\n V1 was very difficult to solve because the observation didn't say where\n to move after reaching the puck.\n Changelog from V1 to V2:\n - (7/7/20) Removed 3 element vector. Replaced with 3 element position\n of the goal (for consistency with other environments)\n - (6/15/20) Added a 3 element vector to the observation. This vector\n points from the end effector to the goal coordinate.\n i.e. (self._target_pos - pos_hand)\n - (6/15/20) Separated reach-push-pick-place into 3 separate envs.\n \"\"\"\n def __init__(self):\n lift_thresh = 0.04\n\n hand_low = (-0.5, 0.40, 0.05)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.1, 0.6, 0.02)\n obj_high = (0.1, 0.7, 0.02)\n goal_low = (-0.1, 0.8, 0.01)\n goal_high = (0.1, 0.9, 0.02)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_angle': .3,\n 'obj_init_pos': np.array([0., 0.6, 0.02]),\n 'hand_init_pos': np.array([0., 0.6, 0.2]),\n }\n\n self.goal = np.array([0.1, 0.8, 0.02])\n\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self.liftThresh = lift_thresh\n self.max_path_length = 200\n\n self.action_space = Box(\n np.array([-1, -1, -1, -1]),\n np.array([+1, +1, +1, +1]),\n )\n\n self._random_reset_space = Box(\n np.hstack((obj_low, goal_low)),\n np.hstack((obj_high, goal_high)),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n\n self.num_resets = 0\n\n @property\n def model_name(self):\n return get_asset_full_path('sawyer_xyz/sawyer_push_v2.xml')\n\n @_assert_task_is_set\n def step(self, action):\n ob = super().step(action)\n\n rew, reach_dist, push_dist = self.compute_reward(action, ob)\n success = float(push_dist <= 0.07)\n\n info = {\n 'reachDist': reach_dist,\n 'epRew': rew,\n 'goalDist': push_dist,\n 'success': success\n }\n\n self.curr_path_length += 1\n return ob, rew, False, info\n\n def _get_pos_objects(self):\n return self.get_body_com('obj')\n\n def fix_extreme_obj_pos(self, orig_init_pos):\n # This is to account for meshes for the geom and object are not\n # aligned. 
If this is not done, the object could be initialized in an\n # extreme position\n diff = self.get_body_com('obj')[:2] - \\\n self.get_body_com('obj')[:2]\n adjusted_pos = orig_init_pos[:2] + diff\n # The convention we follow is that body_com[2] is always 0,\n # and geom_pos[2] is the object height\n return [\n adjusted_pos[0],\n adjusted_pos[1],\n self.get_body_com('obj')[-1]\n ]\n\n def reset_model(self):\n self._reset_hand()\n self._target_pos = self.goal.copy()\n self.obj_init_pos = self.fix_extreme_obj_pos(self.init_config['obj_init_pos'])\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.objHeight = self.get_body_com('obj')[2]\n self.heightTarget = self.objHeight + self.liftThresh\n\n if self.random_init:\n goal_pos = self._get_state_rand_vec()\n self._target_pos = goal_pos[3:]\n while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:\n goal_pos = self._get_state_rand_vec()\n self._target_pos = goal_pos[3:]\n self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))\n self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))\n\n self._set_obj_xyz(self.obj_init_pos)\n self.maxPushDist = np.linalg.norm(\n self.obj_init_pos[:2] - np.array(self._target_pos)[:2])\n self.target_reward = 1000*self.maxPushDist + 1000*2\n self.num_resets += 1\n\n return self._get_obs()\n\n def _reset_hand(self):\n super()._reset_hand()\n\n finger_right, finger_left = (\n self._get_site_pos('rightEndEffector'),\n self._get_site_pos('leftEndEffector')\n )\n self.init_finger_center = (finger_right + finger_left) / 2\n self.pickCompleted = False\n\n def compute_reward(self, actions, obs):\n pos_obj = obs[3:6]\n\n finger_right, finger_left = (\n self._get_site_pos('rightEndEffector'),\n self._get_site_pos('leftEndEffector')\n )\n finger_center = (finger_right + finger_left) / 2\n\n goal = self._target_pos\n assert np.all(goal == self._get_site_pos('goal'))\n\n c1 = 1000\n c2 = 0.01\n c3 = 0.001\n reach_dist = np.linalg.norm(finger_center - pos_obj)\n reach_rew = -reach_dist\n\n push_dist = np.linalg.norm(pos_obj[:2] - goal[:2])\n if reach_dist < 0.05:\n push_rew = c1 * (self.maxPushDist - push_dist) + \\\n c1 * (np.exp(-(push_dist ** 2) / c2) +\n np.exp(-(push_dist ** 2) / c3))\n push_rew = max(push_rew, 0)\n else:\n push_rew = 0\n\n reward = reach_rew + push_rew\n return [reward, reach_dist, push_dist]\n"
] | [
[
"numpy.exp",
"numpy.array",
"numpy.linalg.norm"
],
[
"numpy.hstack",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
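Both sawyer environments above share the same reward-shaping pattern: a negative reach term, plus a pull/push bonus that only switches on once the gripper is close to the object (reach distance below 0.05), combining a linear progress term with two sharply peaked exponentials. A minimal standalone sketch of that shaping, reusing the constants from the code above (the function itself is illustrative, not part of metaworld):

import numpy as np

def shaped_reward(reach_dist, push_dist, max_push_dist,
                  c1=1000, c2=0.01, c3=0.001, reach_thresh=0.05):
    # Reach term pulls the gripper toward the object; the bonus is gated on proximity.
    reach_rew = -reach_dist
    if reach_dist < reach_thresh:
        push_rew = c1 * (max_push_dist - push_dist) + \
                   c1 * (np.exp(-push_dist ** 2 / c2) + np.exp(-push_dist ** 2 / c3))
        push_rew = max(push_rew, 0.0)
    else:
        push_rew = 0.0
    return reach_rew + push_rew

print(shaped_reward(0.04, 0.20, 0.30))   # far from the goal: mostly the linear term
print(shaped_reward(0.04, 0.01, 0.30))   # near the goal: the exponential terms dominate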
realspacekolle/pyScattData | [
"585376761ef380c1f006bc8a0d23adaed5e9258d"
] | [
"h5_extract_write_plot/h5_extract_write_plot.py"
] | [
"import sys\nfrom pathlib import Path\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nTWOTHETA_KEYS = [\"2th\", \"2theta\", \"twotheta\"]\nQ_KEYS = [\"q\"]\nINTENSITY_KEYS = [\"i\", \"intensity\", \"int\"]\nSTACK_INDICES_KEY = \"stack_indices\"\n\nDPI = 300\nFIGSIZE = (12,4)\nFONTSIZE_LABELS = 20\nFONTSIZE_TICKS = 14\nLINEWIDTH = 1\nCOLORS = dict(bg_blue='#0B3C5D', bg_red='#B82601', bg_green='#1c6b0a',\n bg_lightblue='#328CC1', bg_darkblue='#062F4F',\n bg_yellow='#D9B310', bg_darkred='#984B43', bg_bordeaux='#76323F',\n bg_olivegreen='#626E60', bg_yellowgrey='#AB987A',\n bg_brownorange='#C09F80')\nCOLOR = COLORS[\"bg_blue\"]\n\n\ndef h5_extract_to_dict(h5_file):\n f = h5py.File(h5_file, mode=\"r\")\n d = {}\n fkeys = list(f.keys())\n if \"entry\" in fkeys:\n fkeys = list(f[\"entry\"].keys())\n for k in fkeys:\n d[k.lower()] = np.array(f[k])\n\n return d\n\n\ndef dict_to_xy_write(d, fname):\n twotheta, q, intensity = None, None, None\n dkeys = d.keys()\n for k in TWOTHETA_KEYS:\n if k in dkeys:\n twotheta = d[k]\n for k in Q_KEYS:\n if k in dkeys:\n q = d[k]\n for k in INTENSITY_KEYS:\n if k in dkeys:\n intensity = d[k]\n if STACK_INDICES_KEY in dkeys:\n stack_indices = d[STACK_INDICES_KEY]\n if isinstance(twotheta, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = twotheta, intensity[i,:]\n else:\n x, y = twotheta, intensity\n xy = np.column_stack((x,y))\n h = \"2theta\\tintensity\"\n if STACK_INDICES_KEY in dkeys:\n np.savetxt(f\"xy/{fname}_{stack_indices[i]}.xy\", xy,\n encoding=\"utf-8\", header=h)\n else:\n np.savetxt(f\"xy/{fname}_{str(i).zfill(zfill)}.xy\", xy,\n encoding=\"utf-8\", header=h)\n elif isinstance(q, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = q, intensity[i,:]\n else:\n x, y = q, intensity\n xy = np.column_stack((x,y))\n h = \"q\\tintensity\"\n if STACK_INDICES_KEY in dkeys:\n np.savetxt(f\"xy/{fname}_{stack_indices[i]}.xy\", xy,\n encoding=\"utf-8\", header=h)\n else:\n np.savetxt(f\"xy/{fname}_{str(i).zfill(zfill)}.xy\", xy,\n encoding=\"utf-8\", header=h)\n\n return None\n\n\ndef dict_to_plot(d, fname):\n twotheta, q, intensity = None, None, None\n dkeys = d.keys()\n for k in TWOTHETA_KEYS:\n if k in dkeys:\n twotheta = d[k]\n for k in Q_KEYS:\n if k in dkeys:\n q = d[k]\n for k in INTENSITY_KEYS:\n if k in dkeys:\n intensity = d[k]\n if STACK_INDICES_KEY in dkeys:\n stack_indices = d[STACK_INDICES_KEY]\n if isinstance(twotheta, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = twotheta, intensity[i,:]\n else:\n x, y = twotheta, intensity\n plt.figure(dpi=DPI, figsize=FIGSIZE)\n plt.plot(x, y, c=COLOR, lw=LINEWIDTH)\n plt.xlim(np.amin(x), np.amax(x))\n 
plt.xlabel(r\"$2\\theta$ $[\\degree]$\", fontsize=FONTSIZE_LABELS)\n plt.ylabel(r\"$I$ $[\\mathrm{arb. u.}]$\", fontsize=FONTSIZE_LABELS)\n plt.tick_params(axis='both', which='major',\n labelsize=FONTSIZE_LABELS)\n plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n if STACK_INDICES_KEY in dkeys:\n plt.savefig(f\"png/{fname}_{stack_indices[i]}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{stack_indices[i]}.pdf\",\n bbox_inches=\"tight\")\n else:\n plt.savefig(f\"png/{fname}_{str(i).zfill(zfill)}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{str(i).zfill(zfill)}.pdf\",\n bbox_inches=\"tight\")\n plt.close()\n if isinstance(q, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = q, intensity[i,:]\n else:\n x, y = q, intensity\n plt.figure(dpi=DPI, figsize=FIGSIZE)\n plt.plot(x, y, c=COLOR, lw=LINEWIDTH)\n plt.xlim(np.amin(x), np.amax(x))\n if np.amax(q) > 40 :\n plt.xlabel(r\"$Q$ $[\\mathrm{nm}^{-1}]$\",\n fontsize=FONTSIZE_LABELS)\n else:\n plt.xlabel(r\"$Q$ $[\\mathrm{\\AA}^{-1}]$\",\n fontsize=FONTSIZE_LABELS)\n plt.ylabel(r\"$I$ $[\\mathrm{arb. u.}]$\", fontsize=FONTSIZE_LABELS)\n plt.tick_params(axis='both', which='major',\n labelsize=FONTSIZE_LABELS)\n plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n if STACK_INDICES_KEY in dkeys:\n plt.savefig(f\"png/{fname}_{stack_indices[i]}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{stack_indices[i]}.pdf\",\n bbox_inches=\"tight\")\n else:\n plt.savefig(f\"png/{fname}_{str(i).zfill(zfill)}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{str(i).zfill(zfill)}.pdf\",\n bbox_inches=\"tight\")\n plt.close()\n\n return None\n\n\ndef merge_dict(d):\n twotheta, q, intensity = None, None, None\n dkeys = d.keys()\n d_merged = {}\n for k in TWOTHETA_KEYS:\n if k in dkeys:\n twotheta = d[k]\n d_merged[k] = twotheta\n for k in Q_KEYS:\n if k in dkeys:\n q = d[k]\n d_merged[k] = q\n for k in INTENSITY_KEYS:\n if k in dkeys:\n intensity = d[k]\n intensity_key = k\n if isinstance(intensity, np.ndarray):\n zfill = len(str(intensity.shape[0]))\n number_of_scans = intensity.shape[0]\n scans_to_stack = int(input(\"\\t\\t\\tHow many scans should be stacked \"\n \"together?: \"))\n full_stacks = number_of_scans // scans_to_stack\n remainder_to_stack = number_of_scans % scans_to_stack\n stack_indices = []\n for i in range(full_stacks):\n stack = intensity[i*scans_to_stack, :]\n stack_indices_str = str(i*scans_to_stack).zfill(zfill)\n for j in range(1, scans_to_stack):\n stack += intensity[i*scans_to_stack+j, :]\n stack_indices.append(f\"{stack_indices_str}-\"\n f\"{str(i*scans_to_stack+j).zfill(zfill)}\")\n if i == 0:\n d_merged[intensity_key] = stack\n else:\n d_merged[intensity_key] = np.vstack((d_merged[intensity_key],\n stack))\n if remainder_to_stack != 0:\n stack = intensity[(full_stacks * scans_to_stack),:]\n stack_indices_str = str(full_stacks * scans_to_stack).zfill(zfill)\n for j in range(1, remainder_to_stack-1):\n stack = intensity[(full_stacks * scans_to_stack) + 1 + j,:]\n if remainder_to_stack == 1:\n stack_indices.append(f\"{stack_indices_str}\")\n else:\n last_scan = str((full_stacks*scans_to_stack)+1+j).zfill(zfill)\n 
stack_indices.append(f\"{stack_indices_str}-{last_scan}\")\n d_merged[intensity_key] = np.vstack((d_merged[intensity_key],\n stack))\n d_merged[STACK_INDICES_KEY] = stack_indices\n\n return d_merged\n\n\ndef main():\n h5_path = Path.cwd() / \"h5\"\n if not h5_path.exists():\n h5_path.mkdir()\n print(f\"{80*'-'}\\nA folder called 'h5' has been created. Please \"\n f\"place your .h5 files there and\\nrerun the code.\\n{80*'-'}\")\n sys.exit()\n h5_files = list(h5_path.glob(\"*.h5\"))\n if len(h5_files) == 0:\n print(f\"{80*'-'}\\nNo .h5 files were found in the 'h5' folder. Please \"\n f\"place your .h5 files there\\nand rerun the code.\\n{80*'-'}\")\n sys.exit()\n output_paths = [\"xy\", \"png\", \"pdf\"]\n for e in output_paths:\n p = Path.cwd() / e\n if not p.exists():\n p.mkdir()\n print(\"Working w. files...\")\n for h5_file in h5_files:\n try:\n print(f\"{80*'-'}\\n\\tFile: {h5_file.name}\")\n fname = h5_file.stem\n d = h5_extract_to_dict(h5_file)\n for k in INTENSITY_KEYS:\n if k in d.keys():\n print(f\"\\t\\tNumber of scans: {d[k].shape[0]}\")\n mergereq = input(\"\\t\\tDo you want to merge any of the scans? \"\n \"(y/n): \")\n while mergereq not in [\"y\", \"n\"]:\n mergereq = input(\"\\t\\tDo you want to merge any of the scans? \"\n \"(y/n): \")\n if mergereq == \"y\":\n writereq = input(\"\\t\\tDo you want to write .xy files for all \"\n \"merged scans? (y/n): \")\n while writereq not in [\"y\", \"n\"]:\n writereq = input(\"\\t\\tDo you want to write .xy files for \"\n \"all merged scans? (y/n): \")\n else:\n writereq = input(\"\\t\\tDo you want to write .xy files for all \"\n \"scans? (y/n): \")\n while writereq not in [\"y\", \"n\"]:\n writereq = input(\"\\t\\tDo you want to write .xy files for \"\n \"merged scans? (y/n): \")\n if mergereq == \"y\":\n plotreq = input(\"\\t\\tDo you want to plot all merged scans? \"\n \"(y/n): \")\n while plotreq not in [\"y\", \"n\"]:\n plotreq = input(\"\\t\\tDo you want to plot all merged scans? \"\n \"(y/n): \")\n else:\n plotreq = input(\"\\t\\tDo you want to plot all scans? (y/n): \")\n while plotreq not in [\"y\", \"n\"]:\n plotreq = input(\"\\t\\tDo you want to plot all scans? \"\n \"(y/n): \")\n if mergereq.lower() == \"y\":\n d_merged = merge_dict(d)\n if writereq == \"y\":\n print(\"\\t\\tWriting to two-column files of merged scans...\")\n dict_to_xy_write(d_merged, fname)\n print(\"\\t\\tPlotting merged scans...\")\n if plotreq == \"y\":\n dict_to_plot(d_merged, fname)\n else:\n if writereq == \"y\":\n print(\"\\t\\tWriting to two-column files for each scan...\")\n dict_to_xy_write(d, fname)\n if plotreq == \"y\":\n print(\"\\t\\tPlotting each scan...\")\n dict_to_plot(d, fname)\n except KeyError:\n print(f\"\\t\\tThis file seems to contain non-integrated data. File \"\n \"skipped.\")\n print(f\"{80*'-'}\\nDone working w. files.\\n{80*'-'}\")\n\n return None\n\n\nif __name__ == \"__main__\":\n main()\n\n# End of file.\n"
] | [
[
"numpy.amax",
"numpy.amin",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.close",
"numpy.column_stack",
"numpy.savetxt",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.tick_params",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ilopata1/matplotlib-scalebar | [
"7e0d7b668021f01501b47b6eeecc1e8808d81c29",
"7e0d7b668021f01501b47b6eeecc1e8808d81c29",
"7e0d7b668021f01501b47b6eeecc1e8808d81c29"
] | [
"doc/example_dimension.py",
"doc/nomenclature.py",
"doc/example1.py"
] | [
"import matplotlib.pyplot as plt\nfrom matplotlib_scalebar.scalebar import ScaleBar\nfrom matplotlib_scalebar.dimension import _Dimension, _PREFIXES_FACTORS, _LATEX_MU\n\n\nclass TimeDimension(_Dimension):\n def __init__(self):\n super().__init__(\"s\")\n for prefix, factor in _PREFIXES_FACTORS.items():\n latexrepr = None\n if prefix == \"\\u00b5\" or prefix == \"u\":\n latexrepr = _LATEX_MU + \"s\"\n self.add_units(prefix + \"s\", factor, latexrepr)\n\n\nplt.figure()\nplt.gca().add_artist(\n ScaleBar(5, units=\"ms\", dimension=TimeDimension(), location=\"lower right\")\n)\n\nplt.savefig(\"example_dimension.png\")\n",
"import matplotlib.pyplot as plt\nfrom matplotlib.patches import FancyArrowPatch\nfrom matplotlib_scalebar.scalebar import ScaleBar\n\n\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 4))\n\nfor ax in [ax1, ax2, ax3]:\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n scalebar = ScaleBar(\n 1,\n \"cm\",\n width_fraction=0.05,\n location=\"center left\",\n label=\"label\",\n box_color=\"0.8\",\n pad=5,\n border_pad=2,\n font_properties={\"size\": \"xx-large\"},\n fixed_value=2,\n fixed_units=\"mm\",\n )\n ax.add_artist(scalebar)\n\n# Names\nax1.annotate(\n \"label\",\n (0.4, 0.6),\n (0.65, 0.65),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=0.1\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\nax1.annotate(\n \"scale bar\",\n (0.42, 0.5),\n (0.65, 0.5),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=0.0\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\nax1.annotate(\n \"scale\",\n (0.4, 0.42),\n (0.65, 0.35),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=-0.1\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\nax1.annotate(\n \"box\",\n (0.3, 0.75),\n (0.4, 0.9),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=0.1\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\n\n# Fractions\npatch = FancyArrowPatch(\n (0.19, 0.3), (0.405, 0.3), arrowstyle=\"|-|,widthA=6,widthB=6\", zorder=7, lw=2\n)\nax2.add_patch(patch)\nax2.annotate(\n \"length\",\n (0.3, 0.25),\n (0.5, 0.05),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=-0.3\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\n\npatch = FancyArrowPatch(\n (0.45, 0.46), (0.45, 0.54), arrowstyle=\"|-|,widthA=6,widthB=6\", zorder=7, lw=2\n)\nax2.add_patch(patch)\nax2.annotate(\n \"width\",\n (0.5, 0.5),\n (0.65, 0.5),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\n\n# Pad\npatch = FancyArrowPatch(\n (0.45, 0.44), (0.45, 0.485), arrowstyle=\"|-|,widthA=6,widthB=6\", zorder=7, lw=2\n)\nax3.add_patch(patch)\npatch = FancyArrowPatch(\n (0.45, 0.52), (0.45, 0.565), arrowstyle=\"|-|,widthA=6,widthB=6\", zorder=7, lw=2\n)\nax3.add_patch(patch)\nax3.annotate(\n \"sep\",\n (0.47, 0.465),\n (0.65, 0.5),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=-0.1\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\nax3.annotate(\n \"sep\",\n (0.47, 0.545),\n (0.65, 0.5),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3,rad=0.1\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\n\npatch = FancyArrowPatch(\n (0.0, 0.1), (0.065, 0.1), arrowstyle=\"|-|,widthA=6,widthB=6\", zorder=7, lw=2\n)\nax3.add_patch(patch)\nax3.annotate(\n \"border pad\",\n (0.08, 0.1),\n (0.2, 0.05),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\n\npatch = FancyArrowPatch(\n (0.065, 0.9), (0.19, 0.9), arrowstyle=\"|-|,widthA=6,widthB=6\", zorder=7, lw=2\n)\nax3.add_patch(patch)\nax3.annotate(\n \"pad\",\n (0.22, 0.9),\n (0.35, 0.9),\n arrowprops={\"arrowstyle\": \"->\", \"connectionstyle\": \"arc3\", \"lw\": 3},\n fontsize=20,\n zorder=7,\n)\n\nplt.tight_layout(pad=2)\n\nfig.savefig(\"nomenclature.png\", dpi=60)\n",
"import matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\nfrom matplotlib_scalebar.scalebar import ScaleBar\n\nplt.figure()\nimage = plt.imread(cbook.get_sample_data(\"grace_hopper.png\"))\nplt.imshow(image)\nscalebar = ScaleBar(0.2) # 1 pixel = 0.2 meter\nplt.gca().add_artist(scalebar)\nplt.savefig(\"example1.png\")\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"matplotlib.cbook.get_sample_data",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
whigg/BeringSeaIce2018 | [
"b5404cfbb51cb6f893be78d53b94de8092b25a7b"
] | [
"Scripts/BeringSeaIce_NSIDC_Feb.py"
] | [
"\"\"\"\nScript calculates sea ice extent in the Bering Sea from SIC fields\nNotes\n-----\n Author : Zachary Labe\n Date : 12 March 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nimport datetime\nimport statsmodels.api as sm\n\n### Define directories\ndirectorydata2 = '/home/zlabe/Documents/Projects/BeringSeaIce2018/Data/'\ndirectoryfigure = '/home/zlabe/Documents/Projects/BeringSeaIce2018/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Bering SIE - %s----' % titletime)\n\n### Define years\nyears = np.arange(1850,2018+1,1)\nyearsat = np.arange(1979,2018+1,1)\n\n### Retrieve data from NSIDC regional extent in Bering Sea\nberingoldf = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Feb.txt')\nberingf = beringoldf/1e6\n\nberingoldd = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Dec.txt')\nberingd = beringoldd/1e6\n\nberingoldj = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Jan.txt')\nberingj = beringoldj/1e6\n\n#beringoldn = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Nov.txt')\n#beringn = beringoldn/1e6\n\nbering = (beringd + beringj + beringf)/3.\n#bering = (beringn + beringd + beringj + beringf)/4.\n#bering = (beringj + beringf)/2.\n\nprint('Completed: Data read!')\n\n### Calculate loess \nsmoothed = sm.nonparametric.lowess(bering,np.arange(yearsat.shape[0]))\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot figures\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \nplt.rc('savefig',facecolor='black')\nplt.rc('axes',edgecolor='darkgrey')\nplt.rc('xtick',color='white')\nplt.rc('ytick',color='white')\nplt.rc('axes',labelcolor='white')\nplt.rc('axes',facecolor='black')\n\nfig = plt.figure()\nax = plt.subplot(111)\n\n### Adjust axes in time series plots \ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n \nax.tick_params('both',length=5.5,width=2,which='major',color='darkgrey') \nadjust_spines(ax, ['left','bottom']) \nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none') \nax.spines['bottom'].set_linewidth(2)\nax.spines['left'].set_linewidth(2) \n\nplt.plot(yearsat,bering,linewidth=3.5,color='orangered',\n marker='o',markersize=6,\n label=r'\\textbf{NSIDC Sea Ice Index, Version 3}')\nplt.scatter(yearsat[-1],bering[-1],s=45,color='r',zorder=3)\nplt.text(2012.5,0.1823,r'\\textbf{2018}',color='r',fontsize=15)\n\nplt.plot(np.arange(1987,1990,2),np.array([bering[8],bering[10]]),linewidth=1.7,color='orangered',\n label=r'Missing Data',linestyle='--',\n dashes=(1, 0.4))\nxlabels = list(map(str,np.arange(1979,2021,5)))\nplt.xticks(np.arange(1979,2021,5),xlabels,rotation=0,color='darkgrey')\nplt.xlim([1979,2019])\n\nplt.yticks(np.arange(0,2.5,0.1),list(map(str,np.arange(0,2.5,0.1))),\n 
color='darkgrey')\nplt.ylim([0.1,0.8])\n\nax.yaxis.grid(zorder=1,color='w',alpha=0.35,linewidth=0.5)\n\nplt.title(r'\\textbf{DEC-FEB : \\underline{BERING} SEA ICE}',\n fontsize=26,color='darkgrey') \nplt.ylabel(r'\\textbf{Extent [$\\bf{\\times 10^{6}}$\\ \\textbf{km}$\\bf{^2}$]}',\n fontsize=17,alpha=1,color='darkgrey',rotation=90) \n\nle = plt.legend(shadow=False,fontsize=8,loc='upper center',\n bbox_to_anchor=(0.212, 0.13),fancybox=True,frameon=False,ncol=1)\nfor text in le.get_texts():\n text.set_color('darkgrey') \n\nplt.savefig(directoryfigure + 'Bering_SeaIceExtent_DecJanFeb.png',dpi=600)\n\nprint('Completed: Figure plotted!')\nprint('Completed: Script done!')"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.text",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mvaz/textacy | [
"760b96a561eb3379b3a211a0353c9bc47127e99c"
] | [
"textacy/corpus.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nA class for working with a collection of spaCy docs. Includes functionality for\neasily adding, getting, and removing documents; saving to / loading their data\nfrom disk; and tracking basic corpus statistics.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport collections\nimport itertools\nimport logging\nimport math\n\nimport numpy as np\nimport spacy\nimport srsly\nfrom cytoolz import itertoolz\nfrom thinc.neural.ops import NumpyOps\n\nfrom . import cache\nfrom . import compat\nfrom . import io as tio\nfrom . import utils\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Corpus(object):\n \"\"\"\n An ordered collection of :class:`spacy.tokens.Doc`, all of the same language\n and sharing the same :class:`spacy.language.Language` processing pipeline\n and vocabulary, with data held *in-memory*.\n\n Initialize from a language / ``Language`` and (optionally) one or a stream\n of texts or (text, metadata) pairs:\n\n .. code-block:: pycon\n\n >>> ds = textacy.datasets.CapitolWords()\n >>> records = ds.records(limit=50)\n >>> corpus = textacy.Corpus(\"en\", data=records)\n >>> corpus\n Corpus(50 docs, 32175 tokens)\n\n Add or remove documents, with automatic updating of corpus statistics:\n\n .. code-block:: pycon\n\n >>> texts = ds.texts(congress=114, limit=25)\n >>> corpus.add(texts)\n >>> corpus.add(\"If Burton were a member of Congress, here's what he'd say.\")\n >>> corpus\n Corpus(76 docs, 55906 tokens)\n >>> corpus.remove(lambda doc: doc._.meta.get(\"speaker_name\") == \"Rick Santorum\")\n >>> corpus\n Corpus(61 docs, 48567 tokens)\n\n Get subsets of documents matching your particular use case:\n\n .. code-block:: pycon\n\n >>> match_func = lambda doc: doc._.meta.get(\"speaker_name\") == \"Bernie Sanders\"\n >>> for doc in corpus.get(match_func, limit=3):\n ... print(doc._.preview)\n Doc(159 tokens: \"Mr. Speaker, 480,000 Federal employees are work...\")\n Doc(336 tokens: \"Mr. Speaker, I thank the gentleman for yielding...\")\n Doc(177 tokens: \"Mr. Speaker, if we want to understand why in th...\")\n\n Get or remove documents by indexing, too:\n\n .. code-block:: pycon\n\n >>> corpus[0]._.preview\n 'Doc(159 tokens: \"Mr. Speaker, 480,000 Federal employees are work...\")'\n >>> [doc._.preview for doc in corpus[:3]]\n ['Doc(159 tokens: \"Mr. Speaker, 480,000 Federal employees are work...\")',\n 'Doc(219 tokens: \"Mr. Speaker, a relationship, to work and surviv...\")',\n 'Doc(336 tokens: \"Mr. Speaker, I thank the gentleman for yielding...\")']\n >>> del corpus[:5]\n >>> corpus\n Corpus(56 docs, 41573 tokens)\n\n Compute basic corpus statistics:\n\n .. code-block:: pycon\n\n >>> corpus.n_docs, corpus.n_sents, corpus.n_tokens\n (56, 1771, 41573)\n >>> word_counts = corpus.word_counts(as_strings=True)\n >>> sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:5]\n [('-PRON-', 2553), ('people', 215), ('year', 148), ('Mr.', 139), ('$', 137)]\n >>> word_doc_counts = corpus.word_doc_counts(weighting=\"freq\", as_strings=True)\n >>> sorted(word_doc_counts.items(), key=lambda x: x[1], reverse=True)[:5]\n [('-PRON-', 0.9821428571428571),\n ('Mr.', 0.7678571428571429),\n ('President', 0.5),\n ('people', 0.48214285714285715),\n ('need', 0.44642857142857145)]\n\n Save corpus data to and load from disk:\n\n .. 
code-block:: pycon\n\n >>> corpus.save(\"~/Desktop/capitol_words_sample.bin.gz\")\n >>> corpus = textacy.Corpus.load(\"en\", \"~/Desktop/capitol_words_sample.bin.gz\")\n >>> corpus\n Corpus(56 docs, 41573 tokens)\n\n Args:\n lang (str or :class:`spacy.language.Language`):\n Language with which spaCy processes (or processed) all documents\n added to the corpus, whether as ``data`` now or later.\n\n Pass a standard 2-letter language code (e.g. \"en\"),\n or the name of a spacy language pipeline (e.g. \"en_core_web_md\"),\n or an already-instantiated :class:`spacy.language.Language` object.\n\n A given / detected language string is then used to instantiate\n a corresponding ``Language`` with all default components enabled.\n data (obj or Iterable[obj]): One or a stream of texts, records,\n or :class:`spacy.tokens.Doc` s to be added to the corpus.\n\n .. seealso:: :meth:`Corpus.add()`\n\n Attributes:\n lang (str)\n spacy_lang (:class:`spacy.language.Language`)\n docs (List[:class:`spacy.tokens.Doc`])\n n_docs (int)\n n_sents (int)\n n_tokens (int)\n \"\"\"\n\n def __init__(self, lang, data=None):\n self.spacy_lang = _get_spacy_lang(lang)\n self.lang = self.spacy_lang.lang\n self.docs = []\n self._doc_ids = []\n self.n_docs = 0\n self.n_sents = 0\n self.n_tokens = 0\n if data:\n self.add(data)\n\n # dunder\n\n def __repr__(self):\n return \"Corpus({} docs, {} tokens)\".format(self.n_docs, self.n_tokens)\n\n def __len__(self):\n return self.n_docs\n\n def __iter__(self):\n for doc in self.docs:\n yield doc\n\n def __contains__(self, doc):\n return id(doc) in self._doc_ids\n\n def __getitem__(self, idx_or_slice):\n return self.docs[idx_or_slice]\n\n def __delitem__(self, idx_or_slice):\n if isinstance(idx_or_slice, int):\n self._remove_one_doc_by_index(idx_or_slice)\n elif isinstance(idx_or_slice, slice):\n start, end, step = idx_or_slice.indices(self.n_docs)\n idxs = compat.range_(start, end, step)\n self._remove_many_docs_by_index(idxs)\n else:\n raise TypeError(\n \"list indices must be integers or slices, not {}\".format(type(idx_or_slice))\n )\n\n # add documents\n\n def add(self, data, batch_size=1000):\n \"\"\"\n Add one or a stream of texts, records, or :class:`spacy.tokens.Doc` s\n to the corpus, ensuring that all processing is or has already been done\n by the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n data (obj or Iterable[obj]):\n str or Iterable[str]\n Tuple[str, dict] or Iterable[Tuple[str, dict]]\n :class:`spacy.tokens.Doc` or Iterable[:class:`spacy.tokens.Doc`]\n batch_size (int)\n\n See Also:\n * :meth:`Corpus.add_text()`\n * :meth:`Corpus.add_texts()`\n * :meth:`Corpus.add_record()`\n * :meth:`Corpus.add_records()`\n * :meth:`Corpus.add_doc()`\n * :meth:`Corpus.add_docs()`\n \"\"\"\n if isinstance(data, compat.unicode_):\n self.add_text(data)\n elif isinstance(data, spacy.tokens.Doc):\n self.add_doc(data)\n elif utils.is_record(data):\n self.add_record(data)\n elif isinstance(data, compat.Iterable):\n first, data = itertoolz.peek(data)\n if isinstance(first, compat.unicode_):\n self.add_texts(data, batch_size=batch_size)\n elif isinstance(first, spacy.tokens.Doc):\n self.add_docs(data)\n elif utils.is_record(first):\n self.add_records(data, batch_size=batch_size)\n else:\n raise TypeError(\n \"data must be one of {} or an interable thereof, not {}\".format(\n {compat.unicode_, spacy.tokens.Doc, tuple},\n type(data),\n )\n )\n else:\n raise TypeError(\n \"data must be one of {} or an interable thereof, not {}\".format(\n {compat.unicode_, spacy.tokens.Doc, tuple},\n 
type(data),\n )\n )\n\n def add_text(self, text):\n \"\"\"\n Add one text to the corpus, processing it into a :class:`spacy.tokens.Doc`\n using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n text (str)\n \"\"\"\n self._add_valid_doc(self.spacy_lang(text))\n\n def add_texts(self, texts, batch_size=1000):\n \"\"\"\n Add a stream of texts to the corpus, efficiently processing them into\n :class:`spacy.tokens.Doc` s using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n texts (Iterable[str])\n batch_size (int)\n \"\"\"\n for doc in self.spacy_lang.pipe(texts, as_tuples=False, batch_size=batch_size):\n self._add_valid_doc(doc)\n\n def add_record(self, record):\n \"\"\"\n Add one record to the corpus, processing it into a :class:`spacy.tokens.Doc`\n using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n record (Tuple[str, dict])\n \"\"\"\n doc = self.spacy_lang(record[0])\n doc._.meta = record[1]\n self._add_valid_doc(doc)\n\n def add_records(self, records, batch_size=1000):\n \"\"\"\n Add a stream of records to the corpus, efficiently processing them into\n :class:`spacy.tokens.Doc` s using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n records (Iterable[Tuple[str, dict]])\n batch_size (int)\n \"\"\"\n for doc, meta in self.spacy_lang.pipe(records, as_tuples=True, batch_size=batch_size):\n doc._.meta = meta\n self._add_valid_doc(doc)\n\n def add_doc(self, doc):\n \"\"\"\n Add one :class:`spacy.tokens.Doc` to the corpus, provided it was processed\n using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n doc (:class:`spacy.tokens.Doc`)\n \"\"\"\n if not isinstance(doc, spacy.tokens.Doc):\n raise TypeError(\n \"doc must be a {}, not {}\".format(spacy.tokens.Doc, type(doc))\n )\n if doc.vocab is not self.spacy_lang.vocab:\n raise ValueError(\n \"doc.vocab ({}) must be the same as corpus.vocab ({})\".format(\n doc.vocab, self.spacy_lang.vocab,\n )\n )\n self._add_valid_doc(doc)\n\n def add_docs(self, docs):\n \"\"\"\n Add a stream of :class:`spacy.tokens.Doc` s to the corpus, provided\n they were processed using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n doc (Iterable[:class:`spacy.tokens.Doc`])\n \"\"\"\n for doc in docs:\n self.add_doc(doc)\n\n def _add_valid_doc(self, doc):\n self.docs.append(doc)\n self._doc_ids.append(id(doc))\n self.n_docs += 1\n self.n_tokens += len(doc)\n if doc.is_sentenced:\n self.n_sents += itertoolz.count(doc.sents)\n\n # get documents\n\n def get(self, match_func, limit=None):\n \"\"\"\n Get all (or N <= ``limit``) docs in :class:`Corpus` for which\n ``match_func(doc)`` is True.\n\n Args:\n match_func (Callable): Function that takes a :class:`spacy.tokens.Doc`\n as input and returns a boolean value. For example::\n\n Corpus.get(lambda x: len(x) >= 100)\n\n gets all docs with at least 100 tokens. And::\n\n Corpus.get(lambda doc: doc._.meta[\"author\"] == \"Burton DeWilde\")\n\n gets all docs whose author was given as 'Burton DeWilde'.\n limit (int): Maximum number of matched docs to return.\n\n Yields:\n :class:`spacy.tokens.Doc`: Next document passing ``match_func``.\n\n .. 
tip:: To get doc(s) by index, treat :class:`Corpus` as a list and use\n Python's usual indexing and slicing: ``Corpus[0]`` gets the first\n document in the corpus; ``Corpus[:5]`` gets the first 5; etc.\n \"\"\"\n matched_docs = (doc for doc in self if match_func(doc) is True)\n for doc in itertools.islice(matched_docs, limit):\n yield doc\n\n # remove documents\n\n def remove(self, match_func, limit=None):\n \"\"\"\n Remove all (or N <= ``limit``) docs in :class:`Corpus` for which\n ``match_func(doc)`` is True. Corpus doc/sent/token counts are adjusted\n accordingly.\n\n Args:\n match_func (func): Function that takes a :class:`spacy.tokens.Doc`\n and returns a boolean value. For example::\n\n Corpus.remove(lambda x: len(x) >= 100)\n\n removes docs with at least 100 tokens. And::\n\n Corpus.remove(lambda doc: doc._.meta[\"author\"] == \"Burton DeWilde\")\n\n removes docs whose author was given as \"Burton DeWilde\".\n limit (int): Maximum number of matched docs to remove.\n\n .. tip:: To remove doc(s) by index, treat :class:`Corpus` as a list and use\n Python's usual indexing and slicing: ``del Corpus[0]`` removes the\n first document in the corpus; ``del Corpus[:5]`` removes the first\n 5; etc.\n \"\"\"\n matched_docs = (doc for doc in self if match_func(doc) is True)\n self._remove_many_docs_by_index(\n self._doc_ids.index(id(doc))\n for doc in itertools.islice(matched_docs, limit)\n )\n\n def _remove_many_docs_by_index(self, idxs):\n for idx in sorted(idxs, reverse=True):\n self._remove_one_doc_by_index(idx)\n\n def _remove_one_doc_by_index(self, idx):\n doc = self.docs[idx]\n self.n_docs -= 1\n self.n_tokens -= len(doc)\n if doc.is_sentenced:\n self.n_sents -= itertoolz.count(doc.sents)\n del self.docs[idx]\n del self._doc_ids[idx]\n\n # useful properties\n\n @property\n def vectors(self):\n \"\"\"Constituent docs' word vectors stacked in a 2d array.\"\"\"\n return np.vstack((doc.vector for doc in self))\n\n @property\n def vector_norms(self):\n \"\"\"Constituent docs' L2-normalized word vectors stacked in a 2d array.\"\"\"\n return np.vstack((doc.vector_norm for doc in self))\n\n # useful methods\n\n def word_counts(self, normalize=\"lemma\", weighting=\"count\", as_strings=False):\n \"\"\"\n Map the set of unique words in :class:`Corpus` to their counts as\n absolute, relative, or binary frequencies of occurence, similar to\n :meth:`Doc._.to_bag_of_words() <textacy.spacier.doc_extensions.to_bag_of_words>`\n but aggregated over all docs.\n\n Args:\n normalize (str): If \"lemma\", lemmatize words before counting; if\n \"lower\", lowercase words before counting; otherwise, words are\n counted using the form with which they appear.\n weighting ({\"count\", \"freq\"}): Type of weight to assign to words.\n If \"count\" (default), weights are the absolute number of\n occurrences (count) of word in corpus.\n If \"freq\", word counts are normalized by the total token count,\n giving their relative frequencies of occurrence.\n\n .. 
note:: The resulting set of frequencies won't (necessarily) sum\n to 1.0, since punctuation and stop words are filtered out after\n counts are normalized.\n\n as_strings (bool): If True, words are returned as strings; if False\n (default), words are returned as their unique integer ids.\n\n Returns:\n dict: mapping of a unique word id or string (depending on the value\n of ``as_strings``) to its absolute, relative, or binary frequency\n of occurrence (depending on the value of ``weighting``).\n\n See Also:\n * :func:`textacy.vsm.get_term_freqs() <textacy.vsm.matrix_utils.get_term_freqs>`\n \"\"\"\n word_counts_ = collections.Counter()\n for doc in self:\n word_counts_.update(\n doc._.to_bag_of_words(\n normalize=normalize, weighting=\"count\", as_strings=as_strings\n )\n )\n if weighting == \"count\":\n word_counts_ = dict(word_counts_)\n elif weighting == \"freq\":\n n_tokens = self.n_tokens\n word_counts_ = {\n word: count / n_tokens for word, count in word_counts_.items()\n }\n else:\n raise ValueError(\n \"weighting='{}' is invalid; valid values are {}\".format(\n weighting, {\"count\", \"freq\"}\n )\n )\n return word_counts_\n\n def word_doc_counts(\n self, normalize=\"lemma\", weighting=\"count\", smooth_idf=True, as_strings=False\n ):\n \"\"\"\n Map the set of unique words in :class:`Corpus` to their *document* counts\n as absolute, relative, inverse, or binary frequencies of occurence.\n\n Args:\n normalize (str): If \"lemma\", lemmatize words before counting; if\n \"lower\", lowercase words before counting; otherwise, words are\n counted using the form with which they appear.\n weighting ({\"count\", \"freq\", \"idf\"}): Type of weight to assign to words.\n If \"count\" (default), weights are the absolute number (count)\n of documents in which word appears. If \"freq\", word doc counts\n are normalized by the total document count, giving their relative\n frequencies of occurrence. 
If \"idf\", weights are the log of the\n inverse relative frequencies: ``log(n_docs / word_doc_count)``\n or (if ``smooth_idf`` is True) ``log(1 + (n_docs / word_doc_count))`` .\n smooth_idf (bool): If True, add 1 to all word doc counts when\n calculating \"idf\" weighting, equivalent to adding a single\n document to the corpus containing every unique word.\n as_strings (bool): If True, words are returned as strings; if False\n (default), words are returned as their unique integer ids\n\n Returns:\n dict: mapping of a unique word id or string (depending on the value\n of ``as_strings``) to the number of documents in which it appears\n weighted as absolute, relative, or binary frequencies (depending\n on the value of ``weighting``).\n\n See Also:\n * :func:`textacy.vsm.get_doc_freqs() <textacy.vsm.matrix_utils.get_doc_freqs>`\n \"\"\"\n word_doc_counts_ = collections.Counter()\n for doc in self:\n word_doc_counts_.update(\n doc._.to_bag_of_words(\n normalize=normalize, weighting=\"binary\", as_strings=as_strings\n )\n )\n if weighting == \"count\":\n word_doc_counts_ = dict(word_doc_counts_)\n elif weighting == \"freq\":\n n_docs = self.n_docs\n word_doc_counts_ = {\n word: count / n_docs for word, count in word_doc_counts_.items()\n }\n elif weighting == \"idf\":\n n_docs = self.n_docs\n if smooth_idf is True:\n word_doc_counts_ = {\n word: math.log(1 + (n_docs / count))\n for word, count in word_doc_counts_.items()\n }\n else:\n word_doc_counts_ = {\n word: math.log(n_docs / count)\n for word, count in word_doc_counts_.items()\n }\n else:\n raise ValueError(\n \"weighting='{}' is invalid; valid values are {}\".format(\n weighting, {\"count\", \"freq\", \"idf\"}\n )\n )\n return word_doc_counts_\n\n # file io\n\n def save(self, filepath):\n \"\"\"\n Save :class:`Corpus` to disk as binary data.\n\n Args:\n filepath (str): Full path to file on disk where :class:`Corpus` data\n will be saved as a binary file.\n\n See Also:\n :meth:`Corpus.load()`\n \"\"\"\n attrs = [\n spacy.attrs.ORTH,\n spacy.attrs.SPACY,\n spacy.attrs.LEMMA,\n spacy.attrs.ENT_IOB,\n spacy.attrs.ENT_TYPE,\n ]\n if self[0].is_tagged:\n attrs.append(spacy.attrs.TAG)\n if self[0].is_parsed:\n attrs.append(spacy.attrs.HEAD)\n attrs.append(spacy.attrs.DEP)\n else:\n attrs.append(spacy.attrs.SENT_START)\n\n tokens = []\n lengths = []\n strings = set()\n user_datas = []\n for doc in self:\n tokens.append(doc.to_array(attrs))\n lengths.append(len(doc))\n strings.update(tok.text for tok in doc)\n user_datas.append(doc.user_data)\n\n msg = {\n \"meta\": self.spacy_lang.meta,\n \"attrs\": attrs,\n \"tokens\": np.vstack(tokens).tobytes(\"C\"),\n \"lengths\": np.asarray(lengths, dtype=\"int32\").tobytes(\"C\"),\n \"strings\": list(strings),\n \"user_datas\": user_datas,\n }\n with tio.open_sesame(filepath, mode=\"wb\") as f:\n f.write(srsly.msgpack_dumps(msg))\n\n @classmethod\n def load(cls, lang, filepath):\n \"\"\"\n Load previously saved :class:`Corpus` binary data, reproduce the original\n `:class:`spacy.tokens.Doc`s tokens and annotations, and instantiate\n a new :class:`Corpus` from them.\n\n Args:\n lang (str or :class:`spacy.language.Language`)\n filepath (str): Full path to file on disk where :class:`Corpus` data\n was previously saved as a binary file.\n\n Returns:\n :class:`Corpus`\n\n See Also:\n :meth:`Corpus.save()`\n \"\"\"\n spacy_lang = _get_spacy_lang(lang)\n with tio.open_sesame(filepath, mode=\"rb\") as f:\n msg = srsly.msgpack_loads(f.read())\n if spacy_lang.meta != msg[\"meta\"]:\n LOGGER.warning(\"the spacy langs 
are different!\")\n for string in msg[\"strings\"]:\n spacy_lang.vocab[string]\n attrs = msg[\"attrs\"]\n lengths = np.frombuffer(msg[\"lengths\"], dtype=\"int32\")\n flat_tokens = np.frombuffer(msg[\"tokens\"], dtype=\"uint64\")\n flat_tokens = flat_tokens.reshape(\n (flat_tokens.size // len(attrs), len(attrs))\n )\n tokens = np.asarray(NumpyOps().unflatten(flat_tokens, lengths))\n user_datas = msg[\"user_datas\"]\n\n def _make_spacy_docs(tokens, user_datas):\n for toks, user_data in compat.zip_(tokens, user_datas):\n doc = spacy.tokens.Doc(\n spacy_lang.vocab,\n words=[spacy_lang.vocab.strings[orth] for orth in toks[:, 0]],\n spaces=np.ndarray.tolist(toks[:, 1]),\n )\n doc = doc.from_array(attrs[2:], toks[:, 2:])\n doc.user_data = user_data\n yield doc\n\n return cls(spacy_lang, data=_make_spacy_docs(tokens, user_datas))\n\n\ndef _get_spacy_lang(lang):\n if isinstance(lang, compat.unicode_):\n return cache.load_spacy_lang(lang)\n elif isinstance(lang, spacy.language.Language):\n return lang\n else:\n raise TypeError(\n \"`lang` must be {}, not {}\".format(\n {compat.unicode_, spacy.language.Language}, type(lang)\n )\n )\n"
] | [
[
"numpy.asarray",
"numpy.frombuffer",
"numpy.ndarray.tolist",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
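The Corpus.word_doc_counts docstring above defines the "idf" weighting as log(n_docs / doc_count), or log(1 + n_docs / doc_count) when smooth_idf is enabled. A minimal sketch of that computation on hypothetical counts (the dictionary below is made up for illustration, not taken from the corpus):

import math

n_docs = 56                                          # e.g. corpus.n_docs
doc_counts = {"people": 27, "Mr.": 43, "need": 25}   # hypothetical word -> document count

idf = {word: math.log(n_docs / count) for word, count in doc_counts.items()}
smooth_idf = {word: math.log(1 + n_docs / count) for word, count in doc_counts.items()}

# Rarer words get larger weights; the smoothed variant stays positive even for a
# word that appears in every document (log(2) instead of log(1) == 0).
print(idf)
print(smooth_idf)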
TomKingsfordUoA/ResidualMaskingNetwork | [
"b77abb6e548b9a09b5c96b1592d71332b45d050e"
] | [
"rmn/models/residual_attention_network.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.autograd import Variable\nimport numpy as np\nfrom .basic_layers import ResidualBlock\nfrom .attention_module import AttentionModule\n\n\nclass ResidualAttentionModel(nn.Module):\n def __init__(self, in_channels=3, num_classes=1000):\n super(ResidualAttentionModel, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n )\n self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.residual_block1 = ResidualBlock(64, 256)\n self.attention_module1 = AttentionModule(256, 256, (56, 56), (28, 28), (14, 14))\n self.residual_block2 = ResidualBlock(256, 512, 2)\n self.attention_module2 = AttentionModule(512, 512, (28, 28), (14, 14), (7, 7))\n self.residual_block3 = ResidualBlock(512, 1024, 2)\n self.attention_module3 = AttentionModule(1024, 1024, (14, 14), (7, 7), (4, 4))\n self.residual_block4 = ResidualBlock(1024, 2048, 2)\n self.residual_block5 = ResidualBlock(2048, 2048)\n self.residual_block6 = ResidualBlock(2048, 2048)\n self.mpool2 = nn.Sequential(\n nn.BatchNorm2d(2048),\n nn.ReLU(inplace=True),\n nn.AvgPool2d(kernel_size=7, stride=1),\n )\n self.fc = nn.Linear(2048, num_classes)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.mpool1(out)\n # print(out.data)\n out = self.residual_block1(out)\n out = self.attention_module1(out)\n out = self.residual_block2(out)\n out = self.attention_module2(out)\n out = self.residual_block3(out)\n # print(out.data)\n out = self.attention_module3(out)\n out = self.residual_block4(out)\n out = self.residual_block5(out)\n out = self.residual_block6(out)\n out = self.mpool2(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n\n return out\n\n\ndef res_attention(in_channels=3, num_classes=1000):\n return ResidualAttentionModel(in_channels, num_classes)\n"
] | [
[
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kumiori/stability-bifurcation | [
"9a82bf40742a9b16122b7a476ad8aec65fe22539",
"9a82bf40742a9b16122b7a476ad8aec65fe22539"
] | [
"scripts/parametric_1d.py",
"src/time_stepping.py"
] | [
"from traction_1d import *\nimport numpy as np\nfrom utils import ColorPrint\n\n# ell_list = np.linspace(.1, .5, 20)\n# ell_min = 0.1\n#ell_max = 2.\nell_list = np.logspace(np.log10(.15), np.log10(1.5), 20)\n\ndef t_stab(ell, q=2):\n\tcoeff_stab = 2.*np.pi*q/(q+1)**(3./2.)*np.sqrt(2)\n\tif 1/ell > coeff_stab:\n\t\treturn 1.\n\telse:\n\t\treturn coeff_stab*ell/1.\n\n\ndef t_bif(ell, q=2):\n\t# coeff = t_stab(ell, q)*(q+1)/(2.*q)\t\n\tcoeff_bif = 2.*np.pi*q/(q+1)**(3./2.)*np.sqrt(2)*(q+1)/(2.*q)\n\tif 1/ell > coeff_bif:\n\t\treturn 1.\n\telse:\n\t\treturn coeff_bif*ell/1.\n\nprint([t_stab(ell) for ell in ell_list])\nprint([t_bif(ell) for ell in ell_list])\nprint([3./4.*t_stab(ell) for ell in ell_list])\n\n# sys.exit()\nfor ell in ell_list:\n\t# tstab = 1./ell*4*np.pi/3.**(3./2.)\n\teps = .3\n\tell_r = ell*np.sqrt(2)\n\t# *np.sqrt(2)\n\ttstab = t_stab(ell_r, 2)\n\ttbif = t_bif(ell_r, 2)\n\tprint('tstab, tbif', tstab, tbif)\n\t# sys.exit(//z)\n\t# tstab = 1.\n\t# lmin = tstab - 1.\n\t# load_min = load_min if lmin > 0. else 0.\n\t# load_min = tstab - 1. - tstab/10 \n\tload_min = .5 \n\t# load_max = tstab + 1. + tstab/5\n\tload_max = 5.\n\t# loads = [tstab-2*eps, tstab+2*eps]\n\tColorPrint.print_info('Solving ell {}'.format(ell))\n\tColorPrint.print_info('Load: [{} {}]'.format(load_min, load_max))\n\tColorPrint.print_info('stab limit: {} '.format(tstab))\n\tColorPrint.print_info('uniq limit: {} '.format(tbif))\n\tloads = np.logspace(load_min, load_max, 50)\n\ttry:\n\t\ttraction_1d(\n\t\t\tell=ell,\n\t\t\tload_min=load_min,\n\t\t\tload_max=load_max,\n\t\t\t# loads = loads,\n\t\t\tnsteps=50,\n\t\t\tn=7,\n\t\t\t# Lx=Lx,\n\t\t\t# outdir='../output/parametric-traction-plane-stress/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-1d-validation-paper/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-1d-validation-paperdoublecheck/ell-{:2f}'.format(ell),\n\t\t\toutdir='../output/parametric-traction-1d-validation-auto/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-1d-validation-paper-auto/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-n-10/ell-{:2f}'.format(ell),\n\t\t\tbreakifunstable = True\n\t\t)\n\texcept:\n\t\tColorPrint.print_warn(\"Something went somewhere, most likely an instability\")\n\n",
"import sys\nsys.path.append(\"../src/\")\nfrom utils import ColorPrint\nfrom dolfin import Function, assemble, plot, norm\nimport numpy as np\nimport _post_processing as pp\nimport os\nimport pandas as pd\nimport json\nfrom distutils.util import strtobool\n\nclass TimeStepping(object):\n \"\"\"This class performs the incremental time stepping to solve the evolution problem\"\"\"\n def __init__(self,\n model,\n solver,\n stability,\n load_param,\n outfiles,\n parameters,\n user_density=None):\n\n super(TimeStepping, self).__init__()\n\n self.model = model\n self.solver = solver\n self.stability = stability\n self.load_param = load_param\n self.file_out = outfiles[0]\n self.file_con = outfiles[1]\n self.file_eig = outfiles[2]\n # if user_density:\n self.user_density = user_density\n self.parameters = parameters\n self.time_data, self.continuation_data = [], []\n self.load_steps = np.linspace(self.parameters['load_min'],\n self.parameters['load_max'],\n self.parameters['nsteps'])\n\n def user_postprocess_stability(self, load):\n pass\n\n def user_postprocess_timestep(self, load):\n pass\n\n def compile_continuation_data(self, load, iteration, perturbed):\n model = self.model\n u = self.solver.u\n alpha = self.solver.alpha\n\n if not perturbed:\n self.continuation_data_i = {}\n\n self.continuation_data_i[\"elastic_energy\"] = assemble(model.elastic_energy_density(model.eps(u), alpha)*model.dx)\n if self.user_density is not None:\n self.continuation_data_i[\"elastic_energy\"] += assemble(self.user_density*model.dx)\n self.continuation_data_i[\"dissipated_energy\"] = assemble(model.damage_dissipation_density(alpha)*model.dx)\n\n self.continuation_data_i[\"total_energy\"] = self.continuation_data_i[\"elastic_energy\"] + self.continuation_data_i[\"dissipated_energy\"]\n self.continuation_data_i[\"load\"] = load\n self.continuation_data_i[\"iteration\"] = iteration\n self.continuation_data_i[\"alpha_l2\"] = alpha.vector().norm('l2')\n self.continuation_data_i[\"alpha_h1\"] = norm(alpha, 'h1')\n self.continuation_data_i[\"alpha_max\"] = np.max(alpha.vector()[:])\n self.continuation_data_i[\"eigs\"] = self.stability.eigs\n\n else:\n elastic_post = assemble(model.elastic_energy_density(model.eps(u), alpha)*model.dx)\n if self.user_density is not None:\n elastic_post += assemble(self.user_density*model.dx)\n\n dissipated_post = assemble(model.damage_dissipation_density(alpha)*model.dx)\n\n self.continuation_data_i[\"elastic_energy_diff\"] = elastic_post-self.continuation_data_i[\"elastic_energy\"]\n self.continuation_data_i[\"dissipated_energy_diff\"] = dissipated_post-self.continuation_data_i[\"dissipated_energy\"]\n self.continuation_data_i[\"total_energy_diff\"] = self.continuation_data_i[\"elastic_energy_diff\"]\\\n +self.continuation_data_i[\"dissipated_energy_diff\"]\n\n # ColorPrint.print_info('energy {:4e}'.format(elastic_energy_post + dissipated_energy_post))\n # ColorPrint.print_info('estimate {:4e}'.format(stability.en_estimate))\n # ColorPrint.print_info('en-est {:4e}'.format(elastic_energy_post + dissipated_energy_post-stability.en_estimate))\n pass\n\n def compile_time_data(self, load):\n time_data_i = self.time_data_i\n model = self.model\n u = self.solver.u\n alpha = self.solver.alpha\n\n time_data_i[\"load\"] = load\n time_data_i[\"stable\"] = self.stability.stable\n time_data_i[\"# neg ev\"] = self.stability.negev\n time_data_i[\"elastic_energy\"] = assemble(\n model.elastic_energy_density(model.eps(u), alpha)*model.dx)\n if self.user_density is not None:\n 
time_data_i[\"elastic_energy\"] += assemble(self.user_density*model.dx)\n time_data_i[\"dissipated_energy\"] = assemble(\n model.damage_dissipation_density(alpha)*model.dx)\n # else:\n # time_data_i[\"dissipated_energy\"] = assemble(disspated_energy*model.dx)\n time_data_i[\"eigs\"] = self.stability.eigs if hasattr(self.stability, 'eigs') else np.inf\n ColorPrint.print_pass(\n \"Time step {:.4g}: it {:3d}, err_alpha={:.4g}\".format(\n time_data_i[\"load\"],\n time_data_i[\"iterations\"],\n time_data_i[\"alpha_error\"]))\n self.time_data.append(time_data_i)\n time_data_pd = pd.DataFrame(self.time_data)\n\n return time_data_pd\n\n def run(self):\n outdir = self.parameters['outdir']\n savelag = self.parameters['savelag']\n solver = self.solver\n stability = self.stability\n alpha = solver.alpha\n u = solver.u\n\n\n load_steps = self.load_steps \n alpha_old = Function(alpha.function_space())\n self.time_data_i = []\n stable = None; negev = -1; mineig = np.inf; iteration = 0\n diff = alpha.copy(deepcopy=True)\n for it, load in enumerate(load_steps):\n self.load_param.t = load\n alpha_old.assign(alpha)\n print('')\n ColorPrint.print_warn('Solving load = {:.2f}'.format(load))\n self.time_data_i, am_iter = solver.solve()\n\n diff.vector()[:] = alpha.vector() - alpha_old.vector()\n try:\n assert all(alpha.vector()[:]>=alpha_old.vector()[:])\n except AssertionError:\n print('check alpha.vector()[:]>=alpha_old.vector()')\n\n try:\n assert all(solver.problem_alpha.lb.vector()[:]==alpha_old.vector()[:])\n except AssertionError:\n print('check all(solver.problem_alpha.lb.vector()[:]==alpha_old.vector()[:])')\n\n if bool(strtobool(str(stability.parameters['checkstability']))):\n (stable, negev) = stability.solve(solver.problem_alpha.lb)\n ColorPrint.print_pass('Current state is{}stable'.format(' ' if stable else ' not '))\n if hasattr(stability, 'eigs') and len(stability.eigs)>0 and min(stability.eigs)<0:\n pp.plot_eigenmodes(stability.eigendata, alpha, load, outdir)\n self.user_postprocess_stability(load)\n else:\n solver.update()\n alpha.copy(deepcopy = True)\n\n if stable:\n solver.update()\n elif not stable and not bool(strtobool(str(stability.parameters['continuation']))):\n solver.update()\n\n elif not stable and bool(strtobool(str(stability.parameters['continuation']))):\n while stable == False:\n adm_pert = np.where(np.array([e['en_diff'] for e in stability.eigendata]) < 0)[0]\n if len(adm_pert)==0:\n ColorPrint.print_warn('No admissible perturbations found')\n ColorPrint.print_pass('Continuing load program')\n break\n else:\n continuation_data = []\n steepest = np.argmin([e['en_diff'] for e in stability.eigendata])\n if self.parameters['perturbation_choice']=='first':\n mode = 0\n elif self.parameters['perturbation_choice'] == 'steepest':\n mode = steepest\n elif isinstance(self.parameters['perturbation_choice'], int):\n mode = self.parameters['perturbation_choice']\n\n perturbation_v = stability.eigendata[mode]['v_n']\n perturbation_beta = stability.eigendata[mode]['beta_n']\n hstar = stability.eigendata[mode]['hstar']\n perturbation_v.rename('step displacement perturbation', 'step displacement perturbation')\n perturbation_beta.rename('step damage perturbation', 'step damage perturbation')\n ColorPrint.print_pass('Perturbation choice {}'.format(self.parameters['perturbation_choice']))\n ColorPrint.print_pass('Perturbing current state with mode {} Delta E={:.5%} (estimated)'.format(mode, stability.eigendata[mode]['en_diff']))\n ColorPrint.print_pass('...... chosen mode {} vs. 
steepest {} Delta E={:.5%} (estimated)'.format(mode, steepest, stability.eigendata[mode]['en_diff']/stability.eigendata[steepest]['en_diff']))\n ColorPrint.print_pass('...... steepest descent mode {} Delta E={:.5%} (estimated)'.format(steepest,stability.eigendata[steepest]['en_diff']))\n\n # perturb current state\n self.compile_continuation_data(load, iteration, perturbed=False)\n solver.alpha.copy(deepcopy=True)\n\n ColorPrint.print_pass('Perturbing current state, looking for stability, iteration {}'.format(iteration))\n uval = u.vector()[:] + hstar * perturbation_v.vector()[:]\n aval = alpha.vector()[:] + hstar * perturbation_beta.vector()[:]\n\n alpha.vector().vec().ghostUpdate()\n u.vector().vec().ghostUpdate()\n\n self.time_data_i, am_iter = solver.solve()\n\n if self.file_con is not None:\n with self.file_con as f:\n f.write(alpha, iteration)\n f.write(u, iteration)\n\n self.compile_continuation_data(load, iteration, perturbed=True)\n\n ColorPrint.print_pass('Energy diff {}, rel thhreshold {}'\n .format(self.continuation_data_i[\"total_energy_diff\"]/self.continuation_data_i[\"total_energy\"], \n self.stability.parameters['cont_rtol']))\n continuation_data.append(self.continuation_data_i)\n\n if np.mod(it, self.parameters['savelag']) == 0:\n continuation_data_pd=pd.DataFrame(continuation_data)\n continuation_data_pd.to_json(os.path.join(outdir + \"/continuation_data.json\"))\n\n if self.continuation_data_i[\"total_energy_diff\"]/self.continuation_data_i[\"total_energy\"] < - self.stability.parameters['cont_rtol']:\n ColorPrint.print_pass('Updating irreversibility')\n solver.update()\n else:\n ColorPrint.print_pass('Threshold not met, continuing load program')\n break\n\n (stable, negev) = stability.solve(alpha_old)\n\n if self.file_eig is not None:\n with self.file_eig as f:\n f.write(perturbation_beta, iteration)\n f.write(perturbation_v, iteration)\n\n iteration += 1\n\n time_data_pd = self.compile_time_data(load)\n\n if np.mod(it, self.parameters['savelag']) == 0:\n time_data_pd.to_json(os.path.join(outdir + \"/time_data.json\"))\n ColorPrint.print_pass('written data to file {}'.format(str(os.path.join(outdir + \"/time_data.json\"))))\n\n if self.file_out is not None:\n with self.file_out as f:\n f.write(alpha, load)\n f.write(u, load)\n\n pp.plot_global_data(time_data_pd,load,outdir)\n self.user_postprocess_timestep(load)\n return time_data_pd\n\n"
] | [
[
"numpy.logspace",
"numpy.log10",
"numpy.sqrt"
],
[
"numpy.linspace",
"pandas.DataFrame",
"numpy.argmin",
"numpy.mod",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hishamsajid/vsketch | [
"1b35b794972097b8fb5af94ea6e93f3e8c69448c"
] | [
"examples/random_flower/sketch_random_flower.py"
] | [
"import math\n\nimport numpy as np\n\nimport vsketch\n\n\nclass RandomFlowerSketch(vsketch.SketchClass):\n num_line = vsketch.Param(200, 1)\n point_per_line = vsketch.Param(100, 1)\n rdir_range = vsketch.Param(math.pi / 6)\n\n def draw(self, vsk: vsketch.Vsketch) -> None:\n vsk.size(\"a4\", landscape=True)\n vsk.scale(\"cm\")\n\n vsk.rotate(-90, degrees=True)\n\n noise_coord = np.linspace(0, 1, self.point_per_line)\n dirs = np.linspace(0, 2 * math.pi, self.num_line)\n perlin = vsk.noise(noise_coord, dirs, [0, 100])\n\n for i, direction in enumerate(dirs):\n rdir = vsk.map(\n perlin[:, i, 0], 0, 1, direction - self.rdir_range, direction + self.rdir_range\n )\n roffset = vsk.map(perlin[:, i, 1], 0, 1, 0.05, 0.12)\n\n xoffset = roffset * np.cos(rdir)\n yoffset = roffset * np.sin(rdir)\n\n vsk.polygon(np.cumsum(xoffset), np.cumsum(yoffset))\n\n def finalize(self, vsk: vsketch.Vsketch) -> None:\n vsk.vpype(\"linemerge linesimplify reloop linesort\")\n\n\nif __name__ == \"__main__\":\n RandomFlowerSketch.display()\n"
] | [
[
"numpy.cumsum",
"numpy.cos",
"numpy.linspace",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mpsampat/kaggle-ds-bowl-2018-baseline | [
"c769ba7b4db4d1f38f7190db9a11bf812b2b983a"
] | [
"bowl_config.py"
] | [
"from config import Config\nimport numpy as np\nclass BowlConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"bowl\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n LEARNING_RATE = 0.00005\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # background + nuclei\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 256\n IMAGE_MAX_DIM = 256\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 500\n\n STEPS_PER_EPOCH = 600\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 10\n MEAN_PIXEL = np.array([0.0, 0.0, 0.0])\n USE_MINI_MASK = False\n MINI_MASK_SHAPE = (28, 28)\t\n MAX_GT_INSTANCES = 500\n\n DETECTION_MAX_INSTANCES = 512\n\n RESNET_ARCHITECTURE = \"resnet50\"\n\n\nbowl_config = BowlConfig()\nbowl_config.display()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
flatironinstitute/sparse_dot | [
"d04a277016ec4af4e507131a3751daca028edc1d"
] | [
"sparse_dot_mkl/tests/test_gram_matrix.py"
] | [
"import unittest\nimport numpy as np\nimport numpy.testing as npt\nfrom sparse_dot_mkl import gram_matrix_mkl\nfrom sparse_dot_mkl.tests.test_mkl import MATRIX_1\n\n\nclass TestGramMatrix(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n gram_ut = np.dot(MATRIX_1.A.T, MATRIX_1.A)\n gram_ut[np.tril_indices(gram_ut.shape[0], k=-1)] = 0.\n cls.gram_ut = gram_ut\n\n gram_ut_t = np.dot(MATRIX_1.A, MATRIX_1.A.T)\n gram_ut_t[np.tril_indices(gram_ut_t.shape[0], k=-1)] = 0.\n cls.gram_ut_t = gram_ut_t\n\n def setUp(self):\n self.mat1 = MATRIX_1.copy()\n self.mat1_d = MATRIX_1.A\n\n def test_gram_matrix_sp(self):\n mat2 = gram_matrix_mkl(self.mat1)\n npt.assert_array_almost_equal(mat2.A, self.gram_ut)\n\n with self.assertRaises(ValueError):\n gram_matrix_mkl(self.mat1, out=np.zeros((self.mat1.shape[0], self.mat1.shape[0])))\n\n def test_gram_matrix_sp_single(self):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32))\n npt.assert_array_almost_equal(mat2.A, self.gram_ut)\n\n def test_gram_matrix_d_single(self):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32), dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32), dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float32), out_scalar=1.)\n mat2[np.tril_indices(mat2.shape[0], k=-1)] = 0.\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n with self.assertRaises(ValueError):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32), dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1])),\n out_scalar=1.)\n\n def test_gram_matrix_d(self):\n mat2 = gram_matrix_mkl(self.mat1, dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1, dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float64), out_scalar=1.)\n mat2[np.tril_indices(mat2.shape[0], k=-1)] = 0.\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_sp_t(self):\n mat2 = gram_matrix_mkl(self.mat1, transpose=True)\n npt.assert_array_almost_equal(mat2.A, self.gram_ut_t)\n\n def test_gram_matrix_d_t(self):\n mat2 = gram_matrix_mkl(self.mat1, dense=True, transpose=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut_t)\n\n def test_gram_matrix_csc_sp(self):\n mat2 = gram_matrix_mkl(self.mat1.tocsc(), cast=True)\n npt.assert_array_almost_equal(mat2.A, self.gram_ut)\n\n def test_gram_matrix_csc_d(self):\n mat2 = gram_matrix_mkl(self.mat1.tocsc(), dense=True, cast=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_double(self):\n mat2 = gram_matrix_mkl(self.mat1.A, dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1.A, dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float64), out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_single(self):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32).A, dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32).A, dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float32), out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_double_F(self):\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.A, order=\"F\"), dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.A, order=\"F\"), dense=True,\n 
out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float64, order=\"F\"),\n out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_single_F(self):\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.astype(np.float32).A, order=\"F\"), dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.astype(np.float32).A, order=\"F\"), dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float32, order=\"F\"),\n out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n"
] | [
[
"numpy.dot",
"numpy.asarray",
"numpy.tril_indices",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
slamer59/awesome-panel | [
"91c30bd6d6859eadf9c65b1e143952f7e64d5290"
] | [
"application/pages/awesome_panel_express_tests/test_perspective.py"
] | [
"\"\"\"\r\n# Perspective Viewer\r\n\r\n[Perspective](https://github.com/finos/perspective#readme) is an interactive visualization\r\ncomponent for large, real-time datasets. It comes with the `perspective-viewer` web component.\r\n\r\nIt enables analysts and traders at large banks like J.P.Morgan to understand their data. But it is\r\nalso very usefull for analysts, engineers, scientists, data engineers and data scientists in\r\ngeneral.\r\n\r\n[Panel](https://panel.holoviz.org/) is a pwerful framework for creating awesome analytics apps\r\nin Python.\r\n\r\nIn this example we demonstrate how to use the `perspective-viewer` web component with Panel.\r\n\r\nIf you want Perspective supported in Panel, then go to GitHub and upvote\r\n\r\n- [Panel Feature 1107](https://github.com/holoviz/panel/issues/1107): Add Perspective widget.\r\n- [Perspective Feature 942](https://github.com/finos/perspective/issues/942): Enable Perspective in\r\nPanel.\r\n- [Panel PR 1261](https://github.com/holoviz/panel/pull/1261): Perspective-Viewer WebComponent\r\nExample.\r\n\r\n**Author:** [Marc Skov Madsen](https://datamodelanalytics.com)\r\n([awesome-panel.org](https://awesome-panel.org))\r\n\r\n**Tags:**\r\n[Perspective](https://github.com/finos/perspective#readme),\r\n[Panel](https://panel.holoviz.org/),\r\n[Python](https://www.python.org/)\r\n\r\n**Resources:**\r\n[Code](https://github.com/MarcSkovMadsen/awesome-panel/blob/master/application/pages/\\\r\nawesome_panel_express_tests/test_perspective.py),\r\n[Data](https://datahub.io/core/s-and-p-500-companies-financials)\r\n\"\"\"\r\n\r\nimport pathlib\r\n\r\nimport pandas as pd\r\nimport panel as pn\r\nfrom awesome_panel.express.components import PerspectiveViewer\r\n\r\nDARK_BACKGROUND = \"rgb(42, 44, 47)\"\r\nDARK_COLOR = \"white\"\r\nPERSPECTIVE_LOGO = \"https://perspective.finos.org/img/logo.png\"\r\nPANEL_LOGO = \"https://panel.holoviz.org/_static/logo_horizontal.png\"\r\nROOT = pathlib.Path(__file__).parent\r\n# Source: https://datahub.io/core/s-and-p-500-companies-financials\r\nDATA = ROOT / \"PerspectiveViewerData.csv\"\r\n\r\ndataframe = pd.read_csv(DATA)\r\n\r\n\r\ndef create_app(**params) -> pn.Column:\r\n \"\"\"Returns app using PerspectiveViewer\r\n\r\n Returns:\r\n pn.Column: The app\r\n \"\"\"\r\n\r\n perspective_viewer = PerspectiveViewer(sizing_mode=\"stretch_both\", data=dataframe)\r\n\r\n top_app_bar = pn.Row(\r\n pn.pane.PNG(PERSPECTIVE_LOGO, height=50, margin=(10, 25, 10, 10)),\r\n # pn.pane.PNG(PANEL_LOGO, height=40, margin=(10, 0, 10, 0)),\r\n pn.layout.HSpacer(),\r\n margin=0,\r\n background=DARK_BACKGROUND,\r\n )\r\n\r\n settings_parameters = [\r\n \"theme\",\r\n \"row_pivots\",\r\n \"plugin\",\r\n \"columns\",\r\n \"aggregates\",\r\n \"filters\",\r\n \"sort\",\r\n \"rows\",\r\n \"column_pivots\",\r\n ]\r\n\r\n settings_pane = pn.Param(\r\n perspective_viewer,\r\n parameters=settings_parameters,\r\n width=200,\r\n sizing_mode=\"stretch_height\",\r\n background=\"#9E9E9E\",\r\n )\r\n\r\n return pn.Column(\r\n pn.pane.Markdown(__doc__),\r\n top_app_bar,\r\n pn.Row(\r\n perspective_viewer,\r\n pn.layout.VSpacer(width=10),\r\n settings_pane,\r\n sizing_mode=\"stretch_both\",\r\n margin=0,\r\n background=DARK_BACKGROUND,\r\n ),\r\n pn.layout.HSpacer(height=50),\r\n **params\r\n )\r\n\r\n\r\ndef view() -> pn.Column:\r\n \"\"\"Return a PerspectiveViewer Test App for inclusion in the Gallery at awesome-panel.org\r\n\r\n Returns:\r\n pn.Column: The app\r\n \"\"\"\r\n return create_app(height=800, sizing_mode=\"stretch_width\")\r\n\r\n\r\nif 
__name__.startswith(\"bokeh\"):\r\n PerspectiveViewer.config()\r\n view().servable()\r\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
OscarPellicer/probreg | [
"8f1dd23dd86371b8040abad580332ff36967c078",
"8f1dd23dd86371b8040abad580332ff36967c078"
] | [
"tests/test_svr.py",
"probreg/gauss_transform.py"
] | [
"import unittest\nimport numpy as np\nimport transforms3d as t3d\nimport open3d as o3\nfrom probreg import l2dist_regs\nfrom probreg import transformation as tf\n\n\nclass SVRTest(unittest.TestCase):\n def setUp(self):\n pcd = o3.io.read_point_cloud('data/horse.ply')\n pcd = pcd.voxel_down_sample(voxel_size=0.01)\n self._source = np.asarray(pcd.points)\n rot = t3d.euler.euler2mat(*np.random.uniform(0.0, np.pi / 4, 3))\n self._tf = tf.RigidTransformation(rot, np.zeros(3))\n self._target = self._tf.transform(self._source)\n\n def test_svr_registration(self):\n res = l2dist_regs.registration_svr(self._source, self._target)\n self.assertTrue(np.allclose(t3d.euler.mat2euler(res.rot),\n t3d.euler.mat2euler(self._tf.rot), atol=1.0e-1, rtol=1.0e-1))\n self.assertTrue(np.allclose(res.t, self._tf.t, atol=1.0e-2, rtol=1.0e-3))\n\nif __name__ == \"__main__\":\n unittest.main()",
"from __future__ import division, print_function\n\nimport numpy as np\n\nfrom . import _ifgt\n\n\ndef _gauss_transform_direct(source, target, weights, h):\n \"\"\"\n \\sum_{j} weights[j] * \\exp{ - \\frac{||target[i] - source[j]||^2}{h^2} }\n \"\"\"\n h2 = h * h\n fn = lambda t: np.dot(weights, np.exp(-np.sum(np.square(t - source), axis=1) / h2))\n return np.apply_along_axis(fn, 1, target)\n\n\nclass Direct(object):\n def __init__(self, source, h):\n self._source = source\n self._h = h\n\n def compute(self, target, weights):\n return _gauss_transform_direct(self._source, target, weights, self._h)\n\n\nclass GaussTransform(object):\n \"\"\"Calculate Gauss Transform\n\n Args:\n source (numpy.ndarray): Source data.\n h (float): Bandwidth parameter of the Gaussian.\n eps (float): Small floating point used in Gauss Transform.\n sw_h (float): Value of the bandwidth parameter to\n switch between direct method and IFGT.\n \"\"\"\n\n def __init__(self, source, h, eps=1.0e-4, sw_h=0.01):\n self._m = source.shape[0]\n if h < sw_h:\n self._impl = Direct(source, h)\n else:\n self._impl = _ifgt.Ifgt(source, h, eps)\n\n def compute(self, target, weights=None):\n \"\"\"Compute gauss transform\n\n Args:\n target (numpy.ndarray): Target data.\n weights (numpy.ndarray): Weights of Gauss Transform.\n \"\"\"\n if weights is None:\n weights = np.ones(self._m)\n if weights.ndim == 1:\n return self._impl.compute(target, weights)\n elif weights.ndim == 2:\n return np.r_[[self._impl.compute(target, w) for w in weights]]\n else:\n raise ValueError(\"weights.ndim must be 1 or 2.\")\n"
] | [
[
"numpy.asarray",
"numpy.random.uniform",
"numpy.zeros",
"numpy.allclose"
],
[
"numpy.square",
"numpy.apply_along_axis",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anishacharya/BGMD | [
"03dee098217d2b9a209fea5759e2e0a2237390a5"
] | [
"src/aggregation_manager/trimmed_mean.py"
] | [
"# Copyright (c) Anish Acharya.\n# Licensed under the MIT License\nimport numpy as np\nfrom .base_gar import GAR\nfrom scipy import stats\nfrom typing import List\n\"\"\"\nComputes Trimmed mean estimates\nCite: Yin, Chen, Ramchandran, Bartlett : Byzantine-Robust Distributed Learning: Towards Optimal Statistical Rates \n\"\"\"\n\n\nclass TrimmedMean(GAR):\n def __init__(self, aggregation_config):\n GAR.__init__(self, aggregation_config=aggregation_config)\n self.trimmed_mean_config = aggregation_config.get('trimmed_mean_config', {})\n self.proportion = self.trimmed_mean_config.get('proportion', 0.1)\n\n def aggregate(self, G: np.ndarray, ix: List[int] = None, axis=0) -> np.ndarray:\n agg_grad = stats.trim_mean(a=G, proportiontocut=self.proportion, axis=axis)\n if ix is not None:\n return agg_grad[ix]\n else:\n return agg_grad\n"
] | [
[
"scipy.stats.trim_mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
guiomar/mne-python | [
"2d19800a07904cfe69c1ba290c3eaf712625c6ab",
"8714cda45d0f0269c15026323a9ac689b47722f8",
"8714cda45d0f0269c15026323a9ac689b47722f8",
"2d19800a07904cfe69c1ba290c3eaf712625c6ab",
"8714cda45d0f0269c15026323a9ac689b47722f8"
] | [
"mne/io/nirx/tests/test_nirx.py",
"examples/inverse/mixed_source_space_inverse.py",
"tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py",
"tutorials/clinical/10_ieeg_localize.py",
"tutorials/stats-sensor-space/20_erp_stats.py"
] | [
"# -*- coding: utf-8 -*-\n# Authors: Robert Luke <[email protected]>\n# Eric Larson <[email protected]>\n# simplified BSD-3 license\n\nimport os.path as op\nimport shutil\nimport os\nimport datetime as dt\nimport numpy as np\n\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom mne import pick_types\nfrom mne.datasets.testing import data_path, requires_testing_data\nfrom mne.io import read_raw_nirx, read_raw_snirf\nfrom mne.utils import requires_h5py\nfrom mne.io.tests.test_raw import _test_raw_reader\nfrom mne.preprocessing import annotate_nan\nfrom mne.transforms import apply_trans, _get_trans\nfrom mne.preprocessing.nirs import source_detector_distances,\\\n short_channels\nfrom mne.io.constants import FIFF\n\nfname_nirx_15_0 = op.join(data_path(download=False),\n 'NIRx', 'nirscout', 'nirx_15_0_recording')\nfname_nirx_15_2 = op.join(data_path(download=False),\n 'NIRx', 'nirscout', 'nirx_15_2_recording')\nfname_nirx_15_2_short = op.join(data_path(download=False),\n 'NIRx', 'nirscout',\n 'nirx_15_2_recording_w_short')\nfname_nirx_15_3_short = op.join(data_path(download=False),\n 'NIRx', 'nirscout', 'nirx_15_3_recording')\n\n\n# This file has no saturated sections\nnirsport1_wo_sat = op.join(data_path(download=False), 'NIRx', 'nirsport_v1',\n 'nirx_15_3_recording_wo_saturation')\n# This file has saturation, but not on the optode pairing in montage\nnirsport1_w_sat = op.join(data_path(download=False), 'NIRx', 'nirsport_v1',\n 'nirx_15_3_recording_w_saturation_'\n 'not_on_montage_channels')\n# This file has saturation in channels of interest\nnirsport1_w_fullsat = op.join(data_path(download=False), 'NIRx', 'nirsport_v1',\n 'nirx_15_3_recording_w_'\n 'saturation_on_montage_channels')\n\n# NIRSport2 device using Aurora software and matching snirf file\nnirsport2 = op.join(data_path(download=False), 'NIRx', 'nirsport_v2',\n 'aurora_recording _w_short_and_acc')\nnirsport2_snirf = op.join(data_path(download=False), 'SNIRF', 'NIRx',\n 'NIRSport2', '1.0.3', '2021-05-05_001.snirf')\n\nnirsport2_2021_9 = op.join(data_path(download=False), 'NIRx', 'nirsport_v2',\n 'aurora_2021_9')\nsnirf_nirsport2_20219 = op.join(data_path(download=False),\n 'SNIRF', 'NIRx', 'NIRSport2', '2021.9',\n '2021-10-01_002.snirf')\n\n\n@requires_h5py\n@requires_testing_data\[email protected]('ignore:.*Extraction of measurement.*:')\[email protected]('fname_nirx, fname_snirf', (\n [nirsport2, nirsport2_snirf],\n [nirsport2_2021_9, snirf_nirsport2_20219],\n))\ndef test_nirsport_v2_matches_snirf(fname_nirx, fname_snirf):\n \"\"\"Test NIRSport2 raw files return same data as snirf.\"\"\"\n raw = read_raw_nirx(fname_nirx, preload=True)\n raw_snirf = read_raw_snirf(fname_snirf, preload=True)\n\n assert_allclose(raw._data, raw_snirf._data)\n\n # Check the timing of annotations match (naming is different)\n assert_allclose(raw.annotations.onset, raw_snirf.annotations.onset)\n\n assert_array_equal(raw.ch_names, raw_snirf.ch_names)\n\n # This test fails as snirf encodes name incorrectly.\n # assert raw.info[\"subject_info\"][\"first_name\"] ==\n # raw_snirf.info[\"subject_info\"][\"first_name\"]\n\n\n@requires_testing_data\[email protected]('ignore:.*Extraction of measurement.*:')\ndef test_nirsport_v2():\n \"\"\"Test NIRSport2 file.\"\"\"\n raw = read_raw_nirx(nirsport2, preload=True)\n assert raw._data.shape == (40, 128)\n\n # Test distance between optodes matches values from\n # nirsite https://github.com/mne-tools/mne-testing-data/pull/86\n # figure 3\n allowed_distance_error = 0.005\n distances = 
source_detector_distances(raw.info)\n assert_allclose(distances[::2][:14],\n [0.0304, 0.0411, 0.008, 0.0400, 0.008, 0.0310, 0.0411,\n 0.008, 0.0299, 0.008, 0.0370, 0.008, 0.0404, 0.008],\n atol=allowed_distance_error)\n\n # Test location of detectors\n # The locations of detectors can be seen in the first\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/86\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][2][3:5] == 'D6'\n assert_allclose(\n mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][34][3:5] == 'D5'\n assert_allclose(\n mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error)\n\n # Test location of sensors\n # The locations of sensors can be seen in the second\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/86\n locs = [ch['loc'][3:6] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][:2] == 'S1'\n assert_allclose(\n mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][9][:2] == 'S2'\n assert_allclose(\n mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][34][:2] == 'S8'\n assert_allclose(\n mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error)\n\n assert len(raw.annotations) == 3\n assert raw.annotations.description[0] == '1.0'\n assert raw.annotations.description[2] == '6.0'\n # Lose tolerance as I am eyeballing the time differences on screen\n assert_allclose(\n np.diff(raw.annotations.onset), [2.3, 3.1], atol=0.1)\n\n mon = raw.get_montage()\n assert len(mon.dig) == 43\n\n\n@requires_testing_data\[email protected]('ignore:.*Extraction of measurement.*:')\ndef test_nirsport_v1_wo_sat():\n \"\"\"Test NIRSport1 file with no saturation.\"\"\"\n raw = read_raw_nirx(nirsport1_wo_sat, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 164)\n assert raw.info['sfreq'] == 10.416667\n\n # By default real data is returned\n assert np.sum(np.isnan(raw.get_data())) == 0\n\n raw = read_raw_nirx(nirsport1_wo_sat, preload=True, saturated='nan')\n data = raw.get_data()\n assert data.shape == (26, 164)\n assert np.sum(np.isnan(data)) == 0\n\n raw = read_raw_nirx(nirsport1_wo_sat, saturated='annotate')\n data = raw.get_data()\n assert data.shape == (26, 164)\n assert np.sum(np.isnan(data)) == 0\n\n\[email protected]('ignore:.*Extraction of measurement.*:')\n@requires_testing_data\ndef test_nirsport_v1_w_sat():\n \"\"\"Test NIRSport1 file with NaNs but not in channel of interest.\"\"\"\n raw = read_raw_nirx(nirsport1_w_sat)\n\n # Test data import\n data = raw.get_data()\n assert data.shape == (26, 176)\n assert raw.info['sfreq'] == 10.416667\n assert np.sum(np.isnan(data)) == 0\n\n raw = read_raw_nirx(nirsport1_w_sat, saturated='nan')\n data = raw.get_data()\n assert data.shape == (26, 176)\n assert np.sum(np.isnan(data)) == 0\n\n raw = read_raw_nirx(nirsport1_w_sat, saturated='annotate')\n data = raw.get_data()\n assert data.shape == (26, 176)\n assert np.sum(np.isnan(data)) == 0\n\n\[email protected]('ignore:.*Extraction of 
measurement.*:')\n@requires_testing_data\[email protected]('preload', (True, False))\ndef test_nirsport_v1_w_bad_sat(preload):\n \"\"\"Test NIRSport1 file with NaNs.\"\"\"\n fname = nirsport1_w_fullsat\n raw = read_raw_nirx(fname, preload=preload)\n data = raw.get_data()\n assert not np.isnan(data).any()\n assert len(raw.annotations) == 5\n # annotated version and ignore should have same data but different annot\n raw_ignore = read_raw_nirx(fname, saturated='ignore', preload=preload)\n assert_allclose(raw_ignore.get_data(), data)\n assert len(raw_ignore.annotations) == 2\n assert not any('NAN' in d for d in raw_ignore.annotations.description)\n # nan version should not have same data, but we can give it the same annot\n raw_nan = read_raw_nirx(fname, saturated='nan', preload=preload)\n data_nan = raw_nan.get_data()\n assert np.isnan(data_nan).any()\n assert not np.allclose(raw_nan.get_data(), data)\n raw_nan_annot = raw_ignore.copy()\n raw_nan_annot.set_annotations(annotate_nan(raw_nan))\n use_mask = np.where(raw.annotations.description == 'BAD_SATURATED')\n for key in ('onset', 'duration'):\n a = getattr(raw_nan_annot.annotations, key)[::2] # one ch in each\n b = getattr(raw.annotations, key)[use_mask] # two chs in each\n assert_allclose(a, b)\n\n\n@requires_testing_data\ndef test_nirx_hdr_load():\n \"\"\"Test reading NIRX files using path to header file.\"\"\"\n fname = fname_nirx_15_2_short + \"/NIRS-2019-08-23_001.hdr\"\n raw = read_raw_nirx(fname, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 145)\n assert raw.info['sfreq'] == 12.5\n\n\n@requires_testing_data\ndef test_nirx_missing_warn():\n \"\"\"Test reading NIRX files when missing data.\"\"\"\n with pytest.raises(FileNotFoundError, match='does not exist'):\n read_raw_nirx(fname_nirx_15_2_short + \"1\", preload=True)\n\n\n@requires_testing_data\ndef test_nirx_missing_evt(tmp_path):\n \"\"\"Test reading NIRX files when missing data.\"\"\"\n shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + \"/data/\")\n os.rename(str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.evt\",\n str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.xxx\")\n fname = str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.hdr\"\n raw = read_raw_nirx(fname, preload=True)\n assert raw.annotations.onset.shape == (0, )\n\n\n@requires_testing_data\ndef test_nirx_dat_warn(tmp_path):\n \"\"\"Test reading NIRX files when missing data.\"\"\"\n shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + \"/data/\")\n os.rename(str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.dat\",\n str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.tmp\")\n fname = str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.hdr\"\n with pytest.raises(RuntimeWarning, match='A single dat'):\n read_raw_nirx(fname, preload=True)\n\n\n@requires_testing_data\ndef test_nirx_15_2_short():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_2_short, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 145)\n assert raw.info['sfreq'] == 12.5\n assert raw.info['meas_date'] == dt.datetime(2019, 8, 23, 7, 37, 4, 540000,\n tzinfo=dt.timezone.utc)\n\n # Test channel naming\n assert raw.info['ch_names'][:4] == [\"S1_D1 760\", \"S1_D1 850\",\n \"S1_D9 760\", \"S1_D9 850\"]\n assert raw.info['ch_names'][24:26] == [\"S5_D13 760\", \"S5_D13 850\"]\n\n # Test frequency encoding\n assert raw.info['chs'][0]['loc'][9] == 760\n assert raw.info['chs'][1]['loc'][9] == 850\n\n # Test info import\n assert raw.info['subject_info'] == dict(sex=1, 
first_name=\"MNE\",\n middle_name=\"Test\",\n last_name=\"Recording\",\n birthday=(2014, 8, 23),\n his_id=\"MNE_Test_Recording\")\n\n # Test distance between optodes matches values from\n # nirsite https://github.com/mne-tools/mne-testing-data/pull/51\n # step 4 figure 2\n allowed_distance_error = 0.0002\n distances = source_detector_distances(raw.info)\n assert_allclose(distances[::2], [\n 0.0304, 0.0078, 0.0310, 0.0086, 0.0416,\n 0.0072, 0.0389, 0.0075, 0.0558, 0.0562,\n 0.0561, 0.0565, 0.0077], atol=allowed_distance_error)\n\n # Test which channels are short\n # These are the ones marked as red at\n # https://github.com/mne-tools/mne-testing-data/pull/51 step 4 figure 2\n is_short = short_channels(raw.info)\n assert_array_equal(is_short[:9:2], [False, True, False, True, False])\n is_short = short_channels(raw.info, threshold=0.003)\n assert_array_equal(is_short[:3:2], [False, False])\n is_short = short_channels(raw.info, threshold=50)\n assert_array_equal(is_short[:3:2], [True, True])\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0'])\n\n # Test location of detectors\n # The locations of detectors can be seen in the first\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/51\n # And have been manually copied below\n # These values were reported in mm, but according to this page...\n # https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html\n # 3d locations should be specified in meters, so that's what's tested below\n # Detector locations are stored in the third three loc values\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][4][3:5] == 'D3'\n assert_allclose(\n mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][8][3:5] == 'D2'\n assert_allclose(\n mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][12][3:5] == 'D4'\n assert_allclose(\n mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][16][3:5] == 'D5'\n assert_allclose(\n mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][19][3:5] == 'D6'\n assert_allclose(\n mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][21][3:5] == 'D7'\n assert_allclose(\n mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error)\n\n\n@requires_testing_data\ndef test_nirx_15_3_short():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 220)\n assert raw.info['sfreq'] == 12.5\n\n # Test channel naming\n assert raw.info['ch_names'][:4] == [\"S1_D2 760\", \"S1_D2 850\",\n \"S1_D9 760\", \"S1_D9 850\"]\n assert raw.info['ch_names'][24:26] == [\"S5_D13 760\", \"S5_D13 850\"]\n\n # Test frequency encoding\n assert raw.info['chs'][0]['loc'][9] == 760\n assert raw.info['chs'][1]['loc'][9] == 850\n\n # Test info import\n assert raw.info['subject_info'] == dict(birthday=(2020, 8, 18),\n sex=0,\n first_name=\"testMontage\\\\0A\"\n \"TestMontage\",\n his_id=\"testMontage\\\\0A\"\n \"TestMontage\")\n\n # Test distance between optodes matches 
values from\n # https://github.com/mne-tools/mne-testing-data/pull/72\n allowed_distance_error = 0.001\n distances = source_detector_distances(raw.info)\n assert_allclose(distances[::2], [\n 0.0304, 0.0078, 0.0310, 0.0086, 0.0416,\n 0.0072, 0.0389, 0.0075, 0.0558, 0.0562,\n 0.0561, 0.0565, 0.0077], atol=allowed_distance_error)\n\n # Test which channels are short\n # These are the ones marked as red at\n # https://github.com/mne-tools/mne-testing-data/pull/72\n is_short = short_channels(raw.info)\n assert_array_equal(is_short[:9:2], [False, True, False, True, False])\n is_short = short_channels(raw.info, threshold=0.003)\n assert_array_equal(is_short[:3:2], [False, False])\n is_short = short_channels(raw.info, threshold=50)\n assert_array_equal(is_short[:3:2], [True, True])\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['4.0', '2.0', '1.0'])\n\n # Test location of detectors\n # The locations of detectors can be seen in the first\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/72\n # And have been manually copied below\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D2'\n assert_allclose(\n mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][4][3:5] == 'D1'\n assert_allclose(\n mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][8][3:5] == 'D3'\n assert_allclose(\n mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][12][3:5] == 'D4'\n assert_allclose(\n mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][16][3:5] == 'D5'\n assert_allclose(\n mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][19][3:5] == 'D6'\n assert_allclose(\n mni_locs[19], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][21][3:5] == 'D7'\n assert_allclose(\n mni_locs[21], [-0.0394, -0.0483, 0.0928], atol=allowed_dist_error)\n\n\n@requires_testing_data\ndef test_encoding(tmp_path):\n \"\"\"Test NIRx encoding.\"\"\"\n fname = tmp_path / 'latin'\n shutil.copytree(fname_nirx_15_2, fname)\n hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr')\n hdr = list()\n with open(hdr_fname, 'rb') as fid:\n hdr.extend(line for line in fid)\n hdr[2] = b'Date=\"jeu. 13 f\\xe9vr. 
2020\"\\r\\n'\n with open(hdr_fname, 'wb') as fid:\n for line in hdr:\n fid.write(line)\n # smoke test\n with pytest.raises(RuntimeWarning, match='Extraction of measurement date'):\n read_raw_nirx(fname)\n\n\n@requires_testing_data\ndef test_nirx_15_2():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_2, preload=True)\n\n # Test data import\n assert raw._data.shape == (64, 67)\n assert raw.info['sfreq'] == 3.90625\n assert raw.info['meas_date'] == dt.datetime(2019, 10, 2, 9, 8, 47, 511000,\n tzinfo=dt.timezone.utc)\n\n # Test channel naming\n assert raw.info['ch_names'][:4] == [\"S1_D1 760\", \"S1_D1 850\",\n \"S1_D10 760\", \"S1_D10 850\"]\n\n # Test info import\n assert raw.info['subject_info'] == dict(sex=1, first_name=\"TestRecording\",\n birthday=(1989, 10, 2),\n his_id=\"TestRecording\")\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0'])\n print(raw.annotations.onset)\n\n # Test location of detectors\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][15][3:5] == 'D4'\n assert_allclose(\n mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error)\n\n # Old name aliases for backward compat\n assert 'fnirs_cw_amplitude' in raw\n with pytest.raises(ValueError, match='Invalid value'):\n 'fnirs_raw' in raw\n assert 'fnirs_od' not in raw\n picks = pick_types(raw.info, fnirs='fnirs_cw_amplitude')\n assert len(picks) > 0\n\n\n@requires_testing_data\ndef test_nirx_15_0():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_0, preload=True)\n\n # Test data import\n assert raw._data.shape == (20, 92)\n assert raw.info['sfreq'] == 6.25\n assert raw.info['meas_date'] == dt.datetime(2019, 10, 27, 13, 53, 34,\n 209000,\n tzinfo=dt.timezone.utc)\n\n # Test channel naming\n assert raw.info['ch_names'][:12] == [\"S1_D1 760\", \"S1_D1 850\",\n \"S2_D2 760\", \"S2_D2 850\",\n \"S3_D3 760\", \"S3_D3 850\",\n \"S4_D4 760\", \"S4_D4 850\",\n \"S5_D5 760\", \"S5_D5 850\",\n \"S6_D6 760\", \"S6_D6 850\"]\n\n # Test info import\n assert raw.info['subject_info'] == {'birthday': (2004, 10, 27),\n 'first_name': 'NIRX',\n 'last_name': 'Test',\n 'sex': FIFF.FIFFV_SUBJ_SEX_UNKNOWN,\n 'his_id': \"NIRX_Test\"}\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0'])\n\n # Test location of detectors\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][15][3:5] == 'D8'\n assert_allclose(\n mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error)\n\n # Test distance between optodes matches values from\n allowed_distance_error = 0.0002\n distances = source_detector_distances(raw.info)\n assert_allclose(distances[::2], [\n 0.0301, 0.0315, 0.0343, 0.0368, 0.0408,\n 0.0399, 0.0393, 0.0367, 0.0336, 0.0447], atol=allowed_distance_error)\n\n\n@requires_testing_data\[email protected]('fname, boundary_decimal', (\n [fname_nirx_15_2_short, 1],\n [fname_nirx_15_2, 0],\n [fname_nirx_15_2, 0],\n 
[nirsport2_2021_9, 0]\n))\ndef test_nirx_standard(fname, boundary_decimal):\n \"\"\"Test standard operations.\"\"\"\n _test_raw_reader(read_raw_nirx, fname=fname,\n boundary_decimal=boundary_decimal) # low fs\n",
"\"\"\"\n=====================================================================\nCompute MNE inverse solution on evoked data with a mixed source space\n=====================================================================\n\nCreate a mixed source space and compute an MNE inverse solution on an\nevoked dataset.\n\"\"\"\n# Author: Annalisa Pascarella <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\nimport matplotlib.pyplot as plt\n\nfrom nilearn import plotting\n\nimport mne\nfrom mne.minimum_norm import make_inverse_operator, apply_inverse\n\n# Set dir\ndata_path = mne.datasets.sample.data_path()\nsubject = 'sample'\ndata_dir = op.join(data_path, 'MEG', subject)\nsubjects_dir = op.join(data_path, 'subjects')\nbem_dir = op.join(subjects_dir, subject, 'bem')\n\n# Set file names\nfname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)\nfname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')\n\nfname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)\nfname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)\n\nfname_evoked = data_dir + '/sample_audvis-ave.fif'\nfname_trans = data_dir + '/sample_audvis_raw-trans.fif'\nfname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'\nfname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'\n\n# %%\n# Set up our source space\n# -----------------------\n# List substructures we are interested in. We select only the\n# sub structures we want to include in the source space:\n\nlabels_vol = ['Left-Amygdala',\n 'Left-Thalamus-Proper',\n 'Left-Cerebellum-Cortex',\n 'Brain-Stem',\n 'Right-Amygdala',\n 'Right-Thalamus-Proper',\n 'Right-Cerebellum-Cortex']\n\n# %%\n# Get a surface-based source space, here with few source points for speed\n# in this demonstration, in general you should use oct6 spacing!\nsrc = mne.setup_source_space(subject, spacing='oct5',\n add_dist=False, subjects_dir=subjects_dir)\n\n# %%\n# Now we create a mixed src space by adding the volume regions specified in the\n# list labels_vol. 
First, read the aseg file and the source space bounds\n# using the inner skull surface (here using 10mm spacing to save time,\n# we recommend something smaller like 5.0 in actual analyses):\n\nvol_src = mne.setup_volume_source_space(\n subject, mri=fname_aseg, pos=10.0, bem=fname_model,\n volume_label=labels_vol, subjects_dir=subjects_dir,\n add_interpolator=False, # just for speed, usually this should be True\n verbose=True)\n\n# Generate the mixed source space\nsrc += vol_src\nprint(f\"The source space contains {len(src)} spaces and \"\n f\"{sum(s['nuse'] for s in src)} vertices\")\n\n# %%\n# View the source space\n# ---------------------\n\nsrc.plot(subjects_dir=subjects_dir)\n\n# %%\n# We could write the mixed source space with::\n#\n# >>> write_source_spaces(fname_mixed_src, src, overwrite=True)\n#\n# We can also export source positions to NIfTI file and visualize it again:\n\nnii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)\nsrc.export_volume(nii_fname, mri_resolution=True, overwrite=True)\nplotting.plot_img(nii_fname, cmap='nipy_spectral')\n\n# %%\n# Compute the fwd matrix\n# ----------------------\nfwd = mne.make_forward_solution(\n fname_evoked, fname_trans, src, fname_bem,\n mindist=5.0, # ignore sources<=5mm from innerskull\n meg=True, eeg=False, n_jobs=1)\ndel src # save memory\n\nleadfield = fwd['sol']['data']\nprint(\"Leadfield size : %d sensors x %d dipoles\" % leadfield.shape)\nprint(f\"The fwd source space contains {len(fwd['src'])} spaces and \"\n f\"{sum(s['nuse'] for s in fwd['src'])} vertices\")\n\n# Load data\ncondition = 'Left Auditory'\nevoked = mne.read_evokeds(fname_evoked, condition=condition,\n baseline=(None, 0))\nnoise_cov = mne.read_cov(fname_cov)\n\n# %%\n# Compute inverse solution\n# ------------------------\nsnr = 3.0 # use smaller SNR for raw data\ninv_method = 'dSPM' # sLORETA, MNE, dSPM\nparc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'\nloose = dict(surface=0.2, volume=1.)\n\nlambda2 = 1.0 / snr ** 2\n\ninverse_operator = make_inverse_operator(\n evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True)\ndel fwd\n\nstc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,\n pick_ori=None)\nsrc = inverse_operator['src']\n\n# %%\n# Plot the mixed source estimate\n# ------------------------------\n\n# sphinx_gallery_thumbnail_number = 3\ninitial_time = 0.1\nstc_vec = apply_inverse(evoked, inverse_operator, lambda2, inv_method,\n pick_ori='vector')\nbrain = stc_vec.plot(\n hemi='both', src=inverse_operator['src'], views='coronal',\n initial_time=initial_time, subjects_dir=subjects_dir,\n brain_kwargs=dict(silhouette=True))\n\n# %%\n# Plot the surface\n# ----------------\nbrain = stc.surface().plot(initial_time=initial_time,\n subjects_dir=subjects_dir)\n# %%\n# Plot the volume\n# ---------------\n\nfig = stc.volume().plot(initial_time=initial_time, src=src,\n subjects_dir=subjects_dir)\n\n# %%\n# Process labels\n# --------------\n# Average the source estimates within each label of the cortical parcellation\n# and each sub structure contained in the src space\n\n# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi\nlabels_parc = mne.read_labels_from_annot(\n subject, parc=parc, subjects_dir=subjects_dir)\n\nlabel_ts = mne.extract_label_time_course(\n [stc], labels_parc, src, mode='mean', allow_empty=True)\n\n# plot the times series of 2 labels\nfig, axes = plt.subplots(1)\naxes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')\naxes.plot(1e3 * stc.times, 
label_ts[0][-1, :].T, 'r', label='Brain-stem')\naxes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')\naxes.legend()\nmne.viz.tight_layout()\n",
"\"\"\"\n=================================================================\nPermutation t-test on source data with spatio-temporal clustering\n=================================================================\n\nThis example tests if the evoked response is significantly different between\ntwo conditions across subjects. Here just for demonstration purposes\nwe simulate data from multiple subjects using one subject's data.\nThe multiple comparisons problem is addressed with a cluster-level\npermutation test across space and time.\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\n\nimport numpy as np\nfrom numpy.random import randn\nfrom scipy import stats as stats\n\nimport mne\nfrom mne.epochs import equalize_epoch_counts\nfrom mne.stats import (spatio_temporal_cluster_1samp_test,\n summarize_clusters_stc)\nfrom mne.minimum_norm import apply_inverse, read_inverse_operator\nfrom mne.datasets import sample\n\nprint(__doc__)\n\n# %%\n# Set parameters\n# --------------\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nsubjects_dir = data_path + '/subjects'\nsrc_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'\n\ntmin = -0.2\ntmax = 0.3 # Use a lower tmax to reduce multiple comparisons\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)\n\n# %%\n# Read epochs for all channels, removing a bad one\n# ------------------------------------------------\nraw.info['bads'] += ['MEG 2443']\npicks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')\nevent_id = 1 # L auditory\nreject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)\nepochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject, preload=True)\n\nevent_id = 3 # L visual\nepochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject, preload=True)\n\n# Equalize trial counts to eliminate bias (which would otherwise be\n# introduced by the abs() performed below)\nequalize_epoch_counts([epochs1, epochs2])\n\n# %%\n# Transform to source space\n# -------------------------\n\nfname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\nmethod = \"dSPM\" # use dSPM method (could also be MNE, sLORETA, or eLORETA)\ninverse_operator = read_inverse_operator(fname_inv)\nsample_vertices = [s['vertno'] for s in inverse_operator['src']]\n\n# Let's average and compute inverse, resampling to speed things up\nevoked1 = epochs1.average()\nevoked1.resample(50, npad='auto')\ncondition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)\nevoked2 = epochs2.average()\nevoked2.resample(50, npad='auto')\ncondition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)\n\n# Let's only deal with t > 0, cropping to reduce multiple comparisons\ncondition1.crop(0, None)\ncondition2.crop(0, None)\ntmin = condition1.tmin\ntstep = condition1.tstep * 1000 # convert to milliseconds\n\n# %%\n# Transform to common cortical space\n# ----------------------------------\n#\n# Normally you would read in estimates across several subjects and morph\n# them to the same cortical space (e.g. fsaverage). 
For example purposes,\n# we will simulate this by just having each \"subject\" have the same\n# response (just noisy in source space) here.\n#\n# .. note::\n# Note that for 7 subjects with a two-sided statistical test, the minimum\n# significance under a permutation test is only p = 1/(2 ** 6) = 0.015,\n# which is large.\nn_vertices_sample, n_times = condition1.data.shape\nn_subjects = 7\nprint('Simulating data for %d subjects.' % n_subjects)\n\n# Let's make sure our results replicate, so set the seed.\nnp.random.seed(0)\nX = randn(n_vertices_sample, n_times, n_subjects, 2) * 10\nX[:, :, :, 0] += condition1.data[:, :, np.newaxis]\nX[:, :, :, 1] += condition2.data[:, :, np.newaxis]\n\n# %%\n# It's a good idea to spatially smooth the data, and for visualization\n# purposes, let's morph these to fsaverage, which is a grade 5 source space\n# with vertices 0:10242 for each hemisphere. Usually you'd have to morph\n# each subject's data separately (and you might want to use morph_data\n# instead), but here since all estimates are on 'sample' we can use one\n# morph matrix for all the heavy lifting.\n\n# Read the source space we are morphing to\nsrc = mne.read_source_spaces(src_fname)\nfsave_vertices = [s['vertno'] for s in src]\nmorph_mat = mne.compute_source_morph(\n src=inverse_operator['src'], subject_to='fsaverage',\n spacing=fsave_vertices, subjects_dir=subjects_dir).morph_mat\n\nn_vertices_fsave = morph_mat.shape[0]\n\n# We have to change the shape for the dot() to work properly\nX = X.reshape(n_vertices_sample, n_times * n_subjects * 2)\nprint('Morphing data.')\nX = morph_mat.dot(X) # morph_mat is a sparse matrix\nX = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)\n\n# %%\n# Finally, we want to compare the overall activity levels in each condition,\n# the diff is taken along the last axis (condition). The negative sign makes\n# it so condition1 > condition2 shows up as \"red blobs\" (instead of blue).\nX = np.abs(X) # only magnitude\nX = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast\n\n\n# %%\n# Compute statistic\n# -----------------\n#\n# To use an algorithm optimized for spatio-temporal clustering, we\n# just pass the spatial adjacency matrix (instead of spatio-temporal)\nprint('Computing adjacency.')\nadjacency = mne.spatial_src_adjacency(src)\n\n# Note that X needs to be a multi-dimensional array of shape\n# samples (subjects) x time x space, so we permute dimensions\nX = np.transpose(X, [2, 1, 0])\n\n# Now let's actually do the clustering. This can take a long time...\n# Here we set the threshold quite high to reduce computation.\np_threshold = 0.001\nt_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)\nprint('Clustering.')\nT_obs, clusters, cluster_p_values, H0 = clu = \\\n spatio_temporal_cluster_1samp_test(X, adjacency=adjacency, n_jobs=1,\n threshold=t_threshold, buffer_size=None,\n verbose=True)\n# Now select the clusters that are sig. 
at p < 0.05 (note that this value\n# is multiple-comparisons corrected).\ngood_cluster_inds = np.where(cluster_p_values < 0.05)[0]\n\n# %%\n# Visualize the clusters\n# ----------------------\nprint('Visualizing clusters.')\n\n# Now let's build a convenient representation of each cluster, where each\n# cluster becomes a \"time point\" in the SourceEstimate\nstc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,\n vertices=fsave_vertices,\n subject='fsaverage')\n\n# Let's actually plot the first \"time point\" in the SourceEstimate, which\n# shows all the clusters, weighted by duration.\nsubjects_dir = op.join(data_path, 'subjects')\n# blue blobs are for condition A < condition B, red for A > B\nbrain = stc_all_cluster_vis.plot(\n hemi='both', views='lateral', subjects_dir=subjects_dir,\n time_label='temporal extent (ms)', size=(800, 800),\n smoothing_steps=5, clim=dict(kind='value', pos_lims=[0, 1, 40]))\n# brain.save_image('clusters.png')\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-ieeg-localize:\n\n========================================\nLocating intracranial electrode contacts\n========================================\n\nAnalysis of intracranial electrophysiology recordings typically involves\nfinding the position of each contact relative to brain structures. In a\ntypical setup, the brain and the electrode locations will be in two places\nand will have to be aligned; the brain is best visualized by a\npre-implantation magnetic resonance (MR) image whereas the electrode contact\nlocations are best visualized in a post-implantation computed tomography (CT)\nimage. The CT image has greater intensity than the background at each of the\nelectrode contacts and for the skull. Using the skull, the CT can be aligned\nto MR-space. This accomplishes our goal of obtaining contact locations in\nMR-space (which is where the brain structures are best determined using the\n:ref:`tut-freesurfer-reconstruction`). Contact locations in MR-space can also\nbe warped to a template space such as ``fsaverage`` for group comparisons.\n\"\"\"\n\n# Authors: Alex Rockhill <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport nibabel as nib\nimport nilearn.plotting\nfrom dipy.align import resample\n\nimport mne\nfrom mne.datasets import fetch_fsaverage\n\n# paths to mne datasets - sample sEEG and FreeSurfer's fsaverage subject\n# which is in MNI space\nmisc_path = mne.datasets.misc.data_path()\nsample_path = mne.datasets.sample.data_path()\nsubjects_dir = op.join(sample_path, 'subjects')\n\n# use mne-python's fsaverage data\nfetch_fsaverage(subjects_dir=subjects_dir, verbose=True) # downloads if needed\n\n###############################################################################\n# Aligning the T1 to ACPC\n# =======================\n#\n# For intracranial electrophysiology recordings, the Brain Imaging Data\n# Structure (BIDS) standard requires that coordinates be aligned to the\n# anterior commissure and posterior commissure (ACPC-aligned). Therefore, it is\n# recommended that you do this alignment before finding the positions of the\n# channels in your recording. Doing this will make the \"mri\" (aka surface RAS)\n# coordinate frame an ACPC coordinate frame. This can be done using\n# Freesurfer's freeview:\n#\n# .. code-block:: bash\n#\n# $ freeview $MISC_PATH/seeg/sample_seeg_T1.mgz\n#\n# And then interact with the graphical user interface:\n#\n# First, it is recommended to change the cursor style to long, this can be done\n# through the menu options like so:\n#\n# ``Freeview -> Preferences -> General -> Cursor style -> Long``\n#\n# Then, the image needs to be aligned to ACPC to look like the image below.\n# This can be done by pulling up the transform popup from the menu like so:\n#\n# ``Tools -> Transform Volume``\n#\n# .. note::\n# Be sure to set the text entry box labeled RAS (not TkReg RAS) to\n# ``0 0 0`` before beginning the transform.\n#\n# Then translate the image until the crosshairs meet on the AC and\n# run through the PC as shown in the plot. 
The eyes should be in\n# the ACPC plane and the image should be rotated until they are symmetrical,\n# and the crosshairs should transect the midline of the brain.\n# Be sure to use both the rotate and the translate menus and save the volume\n# after you're finished using ``Save Volume As`` in the transform popup\n# :footcite:`HamiltonEtAl2017`.\n\nT1 = nib.load(op.join(misc_path, 'seeg', 'sample_seeg', 'mri', 'T1.mgz'))\nviewer = T1.orthoview()\nviewer.set_position(0, 9.9, 5.8)\nviewer.figs[0].axes[0].annotate(\n 'PC', (107, 108), xytext=(10, 75), color='white',\n horizontalalignment='center',\n arrowprops=dict(facecolor='white', lw=0.5, width=2, headwidth=5))\nviewer.figs[0].axes[0].annotate(\n 'AC', (137, 108), xytext=(246, 75), color='white',\n horizontalalignment='center',\n arrowprops=dict(facecolor='white', lw=0.5, width=2, headwidth=5))\n\n# %%\n# Freesurfer recon-all\n# ====================\n#\n# The first step is the most time consuming; the freesurfer reconstruction.\n# This process segments out the brain from the rest of the MR image and\n# determines which voxels correspond to each brain area based on a template\n# deformation. This process takes approximately 8 hours so plan accordingly.\n#\n# .. code-block:: bash\n#\n# $ export SUBJECT=sample_seeg\n# $ export SUBJECTS_DIR=$MY_DATA_DIRECTORY\n# $ recon-all -subjid $SUBJECT -sd $SUBJECTS_DIR \\\n# -i $MISC_PATH/seeg/sample_seeg_T1.mgz -all -deface\n#\n# .. note::\n# You may need to include an additional ``-cw256`` flag which can be added\n# to the end of the recon-all command if your MR scan is not\n# ``256 x 256 x 256`` voxels.\n#\n# .. note::\n# Using the ``-deface`` flag will create a defaced, anonymized T1 image\n# located at ``$MY_DATA_DIRECTORY/$SUBJECT/mri/orig_defaced.mgz``,\n# which is helpful for when you publish your data. You can also use\n# :func:`mne_bids.write_anat` and pass ``deface=True``.\n\n\n# %%\n# Aligning the CT to the MR\n# =========================\n#\n# Let's load our T1 and CT images and visualize them. 
You can hardly\n# see the CT, it's so misaligned that all you can see is part of the\n# stereotactic frame that is anteriolateral to the skull in the middle plot.\n# Clearly, we need to align the CT to the T1 image.\n\ndef plot_overlay(image, compare, title, thresh=None):\n \"\"\"Define a helper function for comparing plots.\"\"\"\n image = nib.orientations.apply_orientation(\n np.asarray(image.dataobj), nib.orientations.axcodes2ornt(\n nib.orientations.aff2axcodes(image.affine))).astype(np.float32)\n compare = nib.orientations.apply_orientation(\n np.asarray(compare.dataobj), nib.orientations.axcodes2ornt(\n nib.orientations.aff2axcodes(compare.affine))).astype(np.float32)\n if thresh is not None:\n compare[compare < np.quantile(compare, thresh)] = np.nan\n fig, axes = plt.subplots(1, 3, figsize=(12, 4))\n fig.suptitle(title)\n for i, ax in enumerate(axes):\n ax.imshow(np.take(image, [image.shape[i] // 2], axis=i).squeeze().T,\n cmap='gray')\n ax.imshow(np.take(compare, [compare.shape[i] // 2],\n axis=i).squeeze().T, cmap='gist_heat', alpha=0.5)\n ax.invert_yaxis()\n ax.axis('off')\n fig.tight_layout()\n\n\nCT_orig = nib.load(op.join(misc_path, 'seeg', 'sample_seeg_CT.mgz'))\n\n# resample to T1's definition of world coordinates\nCT_resampled = resample(moving=np.asarray(CT_orig.dataobj),\n static=np.asarray(T1.dataobj),\n moving_affine=CT_orig.affine,\n static_affine=T1.affine)\nplot_overlay(T1, CT_resampled, 'Unaligned CT Overlaid on T1', thresh=0.95)\ndel CT_resampled\n\n# %%\n# Now we need to align our CT image to the T1 image.\n#\n# We want this to be a rigid transformation (just rotation + translation),\n# so we don't do a full affine registration (that includes shear) here.\n# This takes a while (~10 minutes) to execute so we skip actually running it\n# here::\n#\n# reg_affine, _ = mne.transforms.compute_volume_registration(\n# CT_orig, T1, pipeline='rigids')\n#\n# And instead we just hard-code the resulting 4x4 matrix:\n\nreg_affine = np.array([\n [0.99270756, -0.03243313, 0.11610254, -133.094156],\n [0.04374389, 0.99439665, -0.09623816, -97.58320673],\n [-0.11233068, 0.10061512, 0.98856381, -84.45551601],\n [0., 0., 0., 1.]])\nCT_aligned = mne.transforms.apply_volume_registration(CT_orig, T1, reg_affine)\nplot_overlay(T1, CT_aligned, 'Aligned CT Overlaid on T1', thresh=0.95)\ndel CT_orig\n\n# %%\n# .. note::\n# Alignment failures sometimes occur which requires manual alignment.\n# This can be done using Freesurfer's ``freeview`` to align manually\n#\n# - Load the two scans from the command line using\n# ``freeview $MISC_PATH/seeg/sample_seeg/mri/T1.mgz\n# $MISC_PATH/seeg/sample_seeg_CT.mgz``\n# - Navigate to the upper toolbar, go to ``Tools>>Transform Volume...``\n# - Use the rotation and translation slide bars to align the CT\n# to the MR (be sure to have the CT selected in the upper left menu)\n# - Save the modified volume using the ``Save Volume As...`` button\n# - Resample to the T1 shape and affine using::\n#\n# CT_aligned_pre = nib.load(op.join(misc_path, 'seeg',\n# 'sample_seeg_CT_aligned.mgz'))\n# CT_aligned = resample(\n# moving=np.asarray(CT_aligned_pre.dataobj),\n# static=np.asarray(T1.dataobj),\n# moving_affine=CT_aligned_pre.affine,\n# static_affine=T1.affine)\n#\n# The rest of the tutorial can then be completed using ``CT_aligned``\n# from this point on.\n\n# %%\n# We can now see how the CT image looks properly aligned to the T1 image.\n#\n# .. note::\n# The hyperintense skull is actually aligned to the hypointensity between\n# the brain and the scalp. 
The brighter area surrounding the skull in the\n# MR is actually subcutaneous fat.\n\n# make low intensity parts of the CT transparent for easier visualization\nCT_data = CT_aligned.get_fdata().copy()\nCT_data[CT_data < np.quantile(CT_data, 0.95)] = np.nan\nT1_data = np.asarray(T1.dataobj)\n\nfig, axes = plt.subplots(1, 3, figsize=(12, 6))\nfor ax in axes:\n ax.axis('off')\naxes[0].imshow(T1_data[T1.shape[0] // 2], cmap='gray')\naxes[0].set_title('MR')\naxes[1].imshow(np.asarray(CT_aligned.dataobj)[CT_aligned.shape[0] // 2],\n cmap='gray')\naxes[1].set_title('CT')\naxes[2].imshow(T1_data[T1.shape[0] // 2], cmap='gray')\naxes[2].imshow(CT_data[CT_aligned.shape[0] // 2], cmap='gist_heat', alpha=0.5)\nfor ax in (axes[0], axes[2]):\n ax.annotate('Subcutaneous fat', (110, 52), xytext=(100, 30),\n color='white', horizontalalignment='center',\n arrowprops=dict(facecolor='white'))\nfor ax in axes:\n ax.annotate('Skull (dark in MR, bright in CT)', (40, 175),\n xytext=(120, 246), horizontalalignment='center',\n color='white', arrowprops=dict(facecolor='white'))\naxes[2].set_title('CT aligned to MR')\nfig.tight_layout()\ndel CT_data, T1\n\n# %%\n# Now we need to estimate the \"head\" coordinate transform.\n#\n# MNE stores digitization montages in a coordinate frame called \"head\"\n# defined by fiducial points (origin is halfway between the LPA and RPA\n# see :ref:`tut-source-alignment`). For sEEG, it is convenient to get an\n# estimate of the location of the fiducial points for the subject\n# using the Talairach transform (see :func:`mne.coreg.get_mni_fiducials`)\n# to use to define the coordinate frame so that we don't have to manually\n# identify their location.\n\n# estimate head->mri transform\nsubj_trans = mne.coreg.estimate_head_mri_t(\n 'sample_seeg', op.join(misc_path, 'seeg'))\n\n# %%\n# Marking the Location of Each Electrode Contact\n# ==============================================\n#\n# Now, the CT and the MR are in the same space, so when you are looking at a\n# point in CT space, it is the same point in MR space. So now everything is\n# ready to determine the location of each electrode contact in the\n# individual subject's anatomical space (T1-space). To do this, we can use the\n# MNE intracranial electrode location graphical user interface.\n#\n# .. note: The most useful coordinate frame for intracranial electrodes is\n# generally the ``surface RAS`` coordinate frame because that is\n# the coordinate frame that all the surface and image files that\n# Freesurfer outputs are in, see :ref:`tut-freesurfer-mne`. These are\n# useful for finding the brain structures nearby each contact and\n# plotting the results.\n#\n# To operate the GUI:\n#\n# - Click in each image to navigate to each electrode contact\n# - Select the contact name in the right panel\n# - Press the \"Mark\" button or the \"m\" key to associate that\n# position with that contact\n# - Repeat until each contact is marked, they will both appear as circles\n# in the plots and be colored in the sidebar when marked\n#\n# .. note:: The channel locations are saved to the ``raw`` object every time\n# a location is marked or removed so there is no \"Save\" button.\n#\n# .. note:: Using the scroll or +/- arrow keys you can zoom in and out,\n# and the up/down, left/right and page up/page down keys allow\n# you to move one slice in any direction. This information is\n# available in the help menu, accessible by pressing the \"h\" key.\n#\n# .. 
note:: If \"Snap to Center\" is on, this will use the radius so be\n# sure to set it properly.\n\n# sphinx_gallery_thumbnail_number = 5\n\n# load electrophysiology data to find channel locations for\n# (the channels are already located in the example)\nraw = mne.io.read_raw(op.join(misc_path, 'seeg', 'sample_seeg_ieeg.fif'))\n\ngui = mne.gui.locate_ieeg(raw.info, subj_trans, CT_aligned,\n subject='sample_seeg',\n subjects_dir=op.join(misc_path, 'seeg'))\n# The `raw` object is modified to contain the channel locations\n# after closing the GUI and can now be saved\ngui.close() # close when done\n\n# %%\n# Let's do a quick sidebar and show what this looks like for ECoG as well.\n\nT1_ecog = nib.load(op.join(misc_path, 'ecog', 'sample_ecog', 'mri', 'T1.mgz'))\nCT_orig_ecog = nib.load(op.join(misc_path, 'ecog', 'sample_ecog_CT.mgz'))\n\n# pre-computed affine from `mne.transforms.compute_volume_registration`\nreg_affine = np.array([\n [0.99982382, -0.00414586, -0.01830679, 0.15413965],\n [0.00549597, 0.99721885, 0.07432601, -1.54316131],\n [0.01794773, -0.07441352, 0.99706595, -1.84162514],\n [0., 0., 0., 1.]])\n# align CT\nCT_aligned_ecog = mne.transforms.apply_volume_registration(\n CT_orig_ecog, T1_ecog, reg_affine)\n\nraw_ecog = mne.io.read_raw(op.join(misc_path, 'ecog', 'sample_ecog_ieeg.fif'))\n# use estimated `trans` which was used when the locations were found previously\nsubj_trans_ecog = mne.coreg.estimate_head_mri_t(\n 'sample_ecog', op.join(misc_path, 'ecog'))\ngui = mne.gui.locate_ieeg(raw_ecog.info, subj_trans_ecog, CT_aligned_ecog,\n subject='sample_ecog',\n subjects_dir=op.join(misc_path, 'ecog'))\n\n# %%\n# for ECoG, we typically want to account for \"brain shift\" or shrinking of the\n# brain away from the skull/dura due to changes in pressure during the\n# craniotomy\n# Note: this requires the BEM surfaces to have been computed e.g. using\n# :ref:`mne watershed_bem` or :ref:`mne flash_bem`.\n# First, let's plot the localized sensor positions without modification.\n\n# plot projected sensors\nbrain_kwargs = dict(cortex='low_contrast', alpha=0.2, background='white')\nbrain = mne.viz.Brain('sample_ecog', subjects_dir=op.join(misc_path, 'ecog'),\n title='Before Projection', **brain_kwargs)\nbrain.add_sensors(raw_ecog.info, trans=subj_trans_ecog)\nview_kwargs = dict(azimuth=60, elevation=100, distance=350,\n focalpoint=(0, 0, -15))\nbrain.show_view(**view_kwargs)\n\n# %%\n# Now, let's project the sensors to the brain surface and re-plot them.\n\n# project sensors to the brain surface\nraw_ecog.info = mne.preprocessing.ieeg.project_sensors_onto_brain(\n raw_ecog.info, subj_trans_ecog, 'sample_ecog',\n subjects_dir=op.join(misc_path, 'ecog'))\n\n# plot projected sensors\nbrain = mne.viz.Brain('sample_ecog', subjects_dir=op.join(misc_path, 'ecog'),\n title='After Projection', **brain_kwargs)\nbrain.add_sensors(raw_ecog.info, trans=subj_trans_ecog)\nbrain.show_view(**view_kwargs)\n\n# %%\n# Let's plot the electrode contact locations on the subject's brain.\n#\n# MNE stores digitization montages in a coordinate frame called \"head\"\n# defined by fiducial points (origin is halfway between the LPA and RPA\n# see :ref:`tut-source-alignment`). For sEEG, it is convenient to get an\n# estimate of the location of the fiducial points for the subject\n# using the Talairach transform (see :func:`mne.coreg.get_mni_fiducials`)\n# to use to define the coordinate frame so that we don't have to manually\n# identify their location. 
The estimated head->mri ``trans`` was used\n# when the electrode contacts were localized so we need to use it again here.\n\n# plot the alignment\nbrain = mne.viz.Brain('sample_seeg', subjects_dir=op.join(misc_path, 'seeg'),\n **brain_kwargs)\nbrain.add_sensors(raw.info, trans=subj_trans)\nbrain.show_view(**view_kwargs)\n\n# %%\n# Warping to a Common Atlas\n# =========================\n#\n# Electrode contact locations are often compared across subjects in a template\n# space such as ``fsaverage`` or ``cvs_avg35_inMNI152``. To transform electrode\n# contact locations to that space, we need to determine a function that maps\n# from the subject's brain to the template brain. We will use the symmetric\n# diffeomorphic registration (SDR) implemented by ``Dipy`` to do this.\n#\n# Before we can make a function to account for individual differences in the\n# shape and size of brain areas, we need to fix the alignment of the brains.\n# The plot below shows that they are not yet aligned.\n\n# load the subject's brain and the Freesurfer \"fsaverage\" template brain\nsubject_brain = nib.load(\n op.join(misc_path, 'seeg', 'sample_seeg', 'mri', 'brain.mgz'))\ntemplate_brain = nib.load(\n op.join(subjects_dir, 'fsaverage', 'mri', 'brain.mgz'))\n\nplot_overlay(template_brain, subject_brain,\n 'Alignment with fsaverage before Affine Registration')\n\n# %%\n# Now, we'll register the affine of the subject's brain to the template brain.\n# This aligns the two brains, preparing the subject's brain to be warped\n# to the template.\n#\n# .. warning:: Here we use ``zooms=4`` just for speed, in general we recommend\n# using ``zooms=None`` (default) for highest accuracy!\n\nreg_affine, sdr_morph = mne.transforms.compute_volume_registration(\n subject_brain, template_brain, zooms=4, verbose=True)\nsubject_brain_sdr = mne.transforms.apply_volume_registration(\n subject_brain, template_brain, reg_affine, sdr_morph)\n\n# apply the transform to the subject brain to plot it\nplot_overlay(template_brain, subject_brain_sdr,\n 'Alignment with fsaverage after SDR Registration')\n\ndel subject_brain, template_brain\n\n# %%\n# Finally, we'll apply the registrations to the electrode contact coordinates.\n# The brain image is warped to the template but the goal was to warp the\n# positions of the electrode contacts. To do that, we'll make an image that is\n# a lookup table of the electrode contacts. In this image, the background will\n# be ``0`` s all the bright voxels near the location of the first contact will\n# be ``1`` s, the second ``2`` s and so on. This image can then be warped by\n# the SDR transform. 
We can finally recover a position by averaging the\n# positions of all the voxels that had the contact's lookup number in\n# the warped image.\n\n# first we need our montage but it needs to be converted to \"mri\" coordinates\n# using our ``subj_trans``\nmontage = raw.get_montage()\nmontage.apply_trans(subj_trans)\n\nmontage_warped, elec_image, warped_elec_image = mne.warp_montage_volume(\n montage, CT_aligned, reg_affine, sdr_morph, thresh=0.5,\n subject_from='sample_seeg', subjects_dir_from=op.join(misc_path, 'seeg'),\n subject_to='fsaverage', subjects_dir_to=subjects_dir)\n\nfig, axes = plt.subplots(2, 1, figsize=(8, 8))\nnilearn.plotting.plot_glass_brain(elec_image, axes=axes[0], cmap='Dark2')\nfig.text(0.1, 0.65, 'Subject T1', rotation='vertical')\nnilearn.plotting.plot_glass_brain(warped_elec_image, axes=axes[1],\n cmap='Dark2')\nfig.text(0.1, 0.25, 'fsaverage', rotation='vertical')\nfig.suptitle('Electrodes warped to fsaverage')\n\ndel CT_aligned\n\n# %%\n# We can now plot the result. You can compare this to the plot in\n# :ref:`tut-working-with-seeg` to see the difference between this morph, which\n# is more complex, and the less-complex, linear Talairach transformation.\n# By accounting for the shape of this particular subject's brain using the\n# SDR to warp the positions of the electrode contacts, the position in the\n# template brain is able to be more accurately estimated.\n\n# first we need to add fiducials so that we can define the \"head\" coordinate\n# frame in terms of them (with the origin at the center between LPA and RPA)\nmontage_warped.add_estimated_fiducials('fsaverage', subjects_dir)\n\n# compute the head<->mri ``trans`` now using the fiducials\ntemplate_trans = mne.channels.compute_native_head_t(montage_warped)\n\n# now we can set the montage and, because there are fiducials in the montage,\n# the montage will be properly transformed to \"head\" coordinates when we do\n# (this step uses ``template_trans`` but it is recomputed behind the scenes)\nraw.set_montage(montage_warped)\n\n# plot the resulting alignment\nbrain = mne.viz.Brain('fsaverage', subjects_dir=subjects_dir, **brain_kwargs)\nbrain.add_sensors(raw.info, trans=template_trans)\nbrain.show_view(**view_kwargs)\n\n# %%\n# This pipeline was developed based on previous work\n# :footcite:`HamiltonEtAl2017`.\n\n# %%\n# References\n# ==========\n#\n# .. footbibliography::\n",
"\"\"\"\n===========================================================================\nVisualising statistical significance thresholds on EEG data\n===========================================================================\n\nMNE-Python provides a range of tools for statistical hypothesis testing\nand the visualisation of the results. Here, we show a few options for\nexploratory and confirmatory tests - e.g., targeted t-tests, cluster-based\npermutation approaches (here with Threshold-Free Cluster Enhancement);\nand how to visualise the results.\n\nThe underlying data comes from :footcite:`DufauEtAl2015`; we contrast long vs.\nshort words. TFCE is described in :footcite:`SmithNichols2009`.\n\"\"\"\n\n# %%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ttest_ind\n\nimport mne\nfrom mne.channels import find_ch_adjacency, make_1020_channel_selections\nfrom mne.stats import spatio_temporal_cluster_test\n\nnp.random.seed(0)\n\n# Load the data\npath = mne.datasets.kiloword.data_path() + '/kword_metadata-epo.fif'\nepochs = mne.read_epochs(path)\nname = \"NumberOfLetters\"\n\n# Split up the data by the median length in letters via the attached metadata\nmedian_value = str(epochs.metadata[name].median())\nlong_words = epochs[name + \" > \" + median_value]\nshort_words = epochs[name + \" < \" + median_value]\n\n#############################################################################\n# If we have a specific point in space and time we wish to test, it can be\n# convenient to convert the data into Pandas Dataframe format. In this case,\n# the :class:`mne.Epochs` object has a convenient\n# :meth:`mne.Epochs.to_data_frame` method, which returns a dataframe.\n# This dataframe can then be queried for specific time windows and sensors.\n# The extracted data can be submitted to standard statistical tests. Here,\n# we conduct t-tests on the difference between long and short words.\n\ntime_windows = ((.2, .25), (.35, .45))\nelecs = [\"Fz\", \"Cz\", \"Pz\"]\nindex = ['condition', 'epoch', 'time']\n\n# display the EEG data in Pandas format (first 5 rows)\nprint(epochs.to_data_frame(index=index)[elecs].head())\n\nreport = \"{elec}, time: {tmin}-{tmax} s; t({df})={t_val:.3f}, p={p:.3f}\"\nprint(\"\\nTargeted statistical test results:\")\nfor (tmin, tmax) in time_windows:\n long_df = long_words.copy().crop(tmin, tmax).to_data_frame(index=index)\n short_df = short_words.copy().crop(tmin, tmax).to_data_frame(index=index)\n for elec in elecs:\n # extract data\n A = long_df[elec].groupby(\"condition\").mean()\n B = short_df[elec].groupby(\"condition\").mean()\n\n # conduct t test\n t, p = ttest_ind(A, B)\n\n # display results\n format_dict = dict(elec=elec, tmin=tmin, tmax=tmax,\n df=len(epochs.events) - 2, t_val=t, p=p)\n print(report.format(**format_dict))\n\n##############################################################################\n# Absent specific hypotheses, we can also conduct an exploratory\n# mass-univariate analysis at all sensors and time points. This requires\n# correcting for multiple tests.\n# MNE offers various methods for this; amongst them, cluster-based permutation\n# methods allow deriving power from the spatio-temoral correlation structure\n# of the data. Here, we use TFCE.\n\n# Calculate adjacency matrix between sensors from their locations\nadjacency, _ = find_ch_adjacency(epochs.info, \"eeg\")\n\n# Extract data: transpose because the cluster test requires channels to be last\n# In this case, inference is done over items. 
In the same manner, we could\n# also conduct the test over, e.g., subjects.\nX = [long_words.get_data().transpose(0, 2, 1),\n short_words.get_data().transpose(0, 2, 1)]\ntfce = dict(start=.2, step=.2)\n\n# Calculate statistical thresholds\nt_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(\n X, tfce, adjacency=adjacency,\n n_permutations=100) # a more standard number would be 1000+\nsignificant_points = cluster_pv.reshape(t_obs.shape).T < .05\nprint(str(significant_points.sum()) + \" points selected by TFCE ...\")\n\n##############################################################################\n# The results of these mass univariate analyses can be visualised by plotting\n# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)\n# and masking points for significance.\n# Here, we group channels by Regions of Interest to facilitate localising\n# effects on the head.\n\n# We need an evoked object to plot the image to be masked\nevoked = mne.combine_evoked([long_words.average(), short_words.average()],\n weights=[1, -1]) # calculate difference wave\ntime_unit = dict(time_unit=\"s\")\nevoked.plot_joint(title=\"Long vs. short words\", ts_args=time_unit,\n topomap_args=time_unit) # show difference wave\n\n# Create ROIs by checking channel labels\nselections = make_1020_channel_selections(evoked.info, midline=\"12z\")\n\n# Visualize the results\nfig, axes = plt.subplots(nrows=3, figsize=(8, 8))\naxes = {sel: ax for sel, ax in zip(selections, axes.ravel())}\nevoked.plot_image(axes=axes, group_by=selections, colorbar=False, show=False,\n mask=significant_points, show_names=\"all\", titles=None,\n **time_unit)\nplt.colorbar(axes[\"Left\"].images[-1], ax=list(axes.values()), shrink=.3,\n label=\"µV\")\n\nplt.show()\n\n# %%\n# References\n# ----------\n# .. footbibliography::\n"
] | [
[
"numpy.isnan",
"numpy.testing.assert_array_equal",
"numpy.diff",
"numpy.testing.assert_allclose",
"numpy.where"
],
[
"matplotlib.pyplot.subplots"
],
[
"numpy.abs",
"numpy.random.seed",
"scipy.stats.distributions.t.ppf",
"numpy.random.randn",
"numpy.transpose",
"numpy.where"
],
[
"numpy.take",
"numpy.asarray",
"numpy.quantile",
"matplotlib.pyplot.subplots",
"numpy.array"
],
[
"matplotlib.pyplot.show",
"scipy.stats.ttest_ind",
"matplotlib.pyplot.subplots",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
Miles-Ma/mmclassification | [
"b54acfd5c431bf3a15a964c9d3d9a271c197ac18"
] | [
"mmcls/models/utils/attention.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn.bricks.transformer import build_dropout\nfrom mmcv.cnn.utils.weight_init import trunc_normal_\nfrom mmcv.runner.base_module import BaseModule\n\nfrom ..builder import ATTENTION\nfrom .helpers import to_2tuple\n\n\nclass WindowMSA(BaseModule):\n \"\"\"Window based multi-head self-attention (W-MSA) module with relative\n position bias.\n\n Args:\n embed_dims (int): Number of input channels.\n window_size (tuple[int]): The height and width of the window.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n Defaults to True.\n qk_scale (float | None, optional): Override default qk scale of\n head_dim ** -0.5 if set. Defaults to None.\n attn_drop (float, optional): Dropout ratio of attention weight.\n Defaults to 0.\n proj_drop (float, optional): Dropout ratio of output. Defaults to 0.\n init_cfg (dict, optional): The extra config for initialization.\n Defaults to None.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n window_size,\n num_heads,\n qkv_bias=True,\n qk_scale=None,\n attn_drop=0.,\n proj_drop=0.,\n init_cfg=None):\n\n super().__init__(init_cfg)\n self.embed_dims = embed_dims\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_embed_dims = embed_dims // num_heads\n self.scale = qk_scale or head_embed_dims**-0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),\n num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # About 2x faster than original impl\n Wh, Ww = self.window_size\n rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)\n rel_position_index = rel_index_coords + rel_index_coords.T\n rel_position_index = rel_position_index.flip(1).contiguous()\n self.register_buffer('relative_position_index', rel_position_index)\n\n self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(embed_dims, embed_dims)\n self.proj_drop = nn.Dropout(proj_drop)\n\n self.softmax = nn.Softmax(dim=-1)\n\n def init_weights(self):\n super(WindowMSA, self).init_weights()\n\n trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n def forward(self, x, mask=None):\n \"\"\"\n Args:\n\n x (tensor): input features with shape of (num_windows*B, N, C)\n mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww,\n Wh*Ww), value should be between (-inf, 0].\n \"\"\"\n B_, N, C = x.shape\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,\n C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[\n 2] # make torchscript happy (cannot use tensor as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n relative_position_bias = self.relative_position_bias_table[\n self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1],\n self.window_size[0] * self.window_size[1],\n -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(\n 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.view(B_ // nW, nW, self.num_heads, N,\n N) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.view(-1, self.num_heads, N, N)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = 
self.proj(x)\n x = self.proj_drop(x)\n return x\n\n @staticmethod\n def double_step_seq(step1, len1, step2, len2):\n seq1 = torch.arange(0, step1 * len1, step1)\n seq2 = torch.arange(0, step2 * len2, step2)\n return (seq1[:, None] + seq2[None, :]).reshape(1, -1)\n\n\[email protected]_module()\nclass ShiftWindowMSA(BaseModule):\n \"\"\"Shift Window Multihead Self-Attention Module.\n\n Args:\n embed_dims (int): Number of input channels.\n input_resolution (Tuple[int, int]): The resolution of the input feature\n map.\n num_heads (int): Number of attention heads.\n window_size (int): The height and width of the window.\n shift_size (int, optional): The shift step of each window towards\n right-bottom. If zero, act as regular window-msa. Defaults to 0.\n qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n Default: True\n qk_scale (float | None, optional): Override default qk scale of\n head_dim ** -0.5 if set. Defaults to None.\n attn_drop (float, optional): Dropout ratio of attention weight.\n Defaults to 0.0.\n proj_drop (float, optional): Dropout ratio of output. Defaults to 0.\n dropout_layer (dict, optional): The dropout_layer used before output.\n Defaults to dict(type='DropPath', drop_prob=0.).\n auto_pad (bool, optional): Auto pad the feature map to be divisible by\n window_size, Defaults to False.\n init_cfg (dict, optional): The extra config for initialization.\n Default: None.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n input_resolution,\n num_heads,\n window_size,\n shift_size=0,\n qkv_bias=True,\n qk_scale=None,\n attn_drop=0,\n proj_drop=0,\n dropout_layer=dict(type='DropPath', drop_prob=0.),\n auto_pad=False,\n init_cfg=None):\n super().__init__(init_cfg)\n\n self.embed_dims = embed_dims\n self.input_resolution = input_resolution\n self.shift_size = shift_size\n self.window_size = window_size\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, don't partition\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n\n self.w_msa = WindowMSA(embed_dims, to_2tuple(self.window_size),\n num_heads, qkv_bias, qk_scale, attn_drop,\n proj_drop)\n\n self.drop = build_dropout(dropout_layer)\n\n H, W = self.input_resolution\n # Handle auto padding\n self.auto_pad = auto_pad\n if self.auto_pad:\n self.pad_r = (self.window_size -\n W % self.window_size) % self.window_size\n self.pad_b = (self.window_size -\n H % self.window_size) % self.window_size\n self.H_pad = H + self.pad_b\n self.W_pad = W + self.pad_r\n else:\n H_pad, W_pad = self.input_resolution\n assert H_pad % self.window_size + W_pad % self.window_size == 0,\\\n f'input_resolution({self.input_resolution}) is not divisible '\\\n f'by window_size({self.window_size}). 
Please check feature '\\\n f'map shape or set `auto_pad=True`.'\n self.H_pad, self.W_pad = H_pad, W_pad\n self.pad_r, self.pad_b = 0, 0\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n img_mask = torch.zeros((1, self.H_pad, self.W_pad, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size,\n -self.shift_size), slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size,\n -self.shift_size), slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n # nW, window_size, window_size, 1\n mask_windows = self.window_partition(img_mask)\n mask_windows = mask_windows.view(\n -1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0,\n float(-100.0)).masked_fill(\n attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer('attn_mask', attn_mask)\n\n def forward(self, query):\n H, W = self.input_resolution\n B, L, C = query.shape\n assert L == H * W, 'input feature has wrong size'\n query = query.view(B, H, W, C)\n\n if self.pad_r or self.pad_b:\n query = F.pad(query, (0, 0, 0, self.pad_r, 0, self.pad_b))\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_query = torch.roll(\n query,\n shifts=(-self.shift_size, -self.shift_size),\n dims=(1, 2))\n else:\n shifted_query = query\n\n # nW*B, window_size, window_size, C\n query_windows = self.window_partition(shifted_query)\n # nW*B, window_size*window_size, C\n query_windows = query_windows.view(-1, self.window_size**2, C)\n\n # W-MSA/SW-MSA (nW*B, window_size*window_size, C)\n attn_windows = self.w_msa(query_windows, mask=self.attn_mask)\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size,\n self.window_size, C)\n\n # B H' W' C\n shifted_x = self.window_reverse(attn_windows, self.H_pad, self.W_pad)\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(\n shifted_x,\n shifts=(self.shift_size, self.shift_size),\n dims=(1, 2))\n else:\n x = shifted_x\n\n if self.pad_r or self.pad_b:\n x = x[:, :H, :W, :].contiguous()\n\n x = x.view(B, H * W, C)\n\n x = self.drop(x)\n return x\n\n def window_reverse(self, windows, H, W):\n window_size = self.window_size\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size,\n window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n def window_partition(self, x):\n B, H, W, C = x.shape\n window_size = self.window_size\n x = x.view(B, H // window_size, window_size, W // window_size,\n window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()\n windows = windows.view(-1, window_size, window_size, C)\n return windows\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.zeros",
"torch.nn.Linear",
"torch.arange",
"torch.roll",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xpo0a/SpeechEnhancement | [
"2efd67c24289541e43d3943cc1a3f8989c0afeb4"
] | [
"data_preprocess.py"
] | [
"import os\nimport yaml\n\nimport librosa\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef slice_signal(file, window_size, stride, sample_rate):\n\n wav, sr = librosa.load(file, sr=None)\n\n if sr != sample_rate:\n wav = librosa.resample(wav, sr, sample_rate)\n\n wav = wav / np.max(np.abs(wav))\n\n if np.max(wav) > 1 or np.min(wav) < -1:\n print('need to norm')\n\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n slices.append(slice_sig)\n return slices\n\n\ndef process_and_serialize(data_type):\n\n stride = 0.5\n cfg_path = r'config/config.yaml'\n cfg = yaml.load(open(cfg_path, 'r'), Loader=yaml.FullLoader)\n\n root_dir = cfg['data']['root_path']\n corpus = cfg['data']['corpus']\n window_size = cfg['data']['window_size']\n sample_rate = cfg['data']['sample_rate']\n\n clean_folder = os.path.join(root_dir, corpus, data_type, 'clean')\n noisy_folder = os.path.join(root_dir, corpus, data_type, 'noise')\n serialized_folder = os.path.join(root_dir, corpus, data_type, 'serialized_data')\n\n if not os.path.exists(serialized_folder):\n os.makedirs(serialized_folder)\n\n for root, dirs, files in os.walk(clean_folder):\n if len(files) == 0:\n continue\n for filename in tqdm(files, desc='Serialize and down-sample {} audios'.format(data_type)):\n clean_file = os.path.join(clean_folder, filename)\n noisy_file = os.path.join(noisy_folder, filename)\n # slice both clean signal and noisy signal\n clean_sliced = slice_signal(clean_file, window_size, stride, sample_rate)\n noisy_sliced = slice_signal(noisy_file, window_size, stride, sample_rate)\n for idx, slice_tuple in enumerate(zip(clean_sliced, noisy_sliced)):\n pair = np.array([slice_tuple[0], slice_tuple[1]])\n np.save(os.path.join(serialized_folder, '{}_{}'.format(filename, idx)), arr=pair)\n data_verify(serialized_folder=serialized_folder, window_size=window_size)\n\n\ndef data_verify(serialized_folder, window_size):\n for root, dirs, files in os.walk(serialized_folder):\n for filename in tqdm(files, desc='Verify serialized audios'):\n data_pair = np.load(os.path.join(root, filename), allow_pickle=True)\n if data_pair.shape[1] != window_size:\n print('Snippet length not {} : {} instead'.format(window_size, data_pair.shape[1]))\n break\n\n\nif __name__ == '__main__':\n process_and_serialize('train')\n # process_and_serialize('test')\n\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.abs",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kbrodt/clog-loss | [
"0831b3a01b079609a71490bb921633110927206c"
] | [
"src/models/resnext.py"
] | [
"import math\nfrom functools import partial\nfrom functools import partialmethod\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .resnet import conv1x1x1, Bottleneck, ResNet\n\n\ndef partialclass(cls, *args, **kwargs):\n class PartialClass(cls):\n __init__ = partialmethod(cls.__init__, *args, **kwargs)\n\n return PartialClass\n\n\ndef get_inplanes():\n return [128, 256, 512, 1024]\n\n\nclass ResNeXtBottleneck(Bottleneck):\n expansion = 2\n\n def __init__(self, in_planes, planes, cardinality, stride=1,\n downsample=None):\n super().__init__(in_planes, planes, stride, downsample)\n\n mid_planes = cardinality * planes // 32\n self.conv1 = conv1x1x1(in_planes, mid_planes)\n self.bn1 = nn.BatchNorm3d(mid_planes)\n self.conv2 = nn.Conv3d(mid_planes,\n mid_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=cardinality,\n bias=False)\n self.bn2 = nn.BatchNorm3d(mid_planes)\n self.conv3 = conv1x1x1(mid_planes, planes * self.expansion)\n\n\nclass ResNeXt(ResNet):\n\n def __init__(self,\n block,\n layers,\n block_inplanes,\n n_input_channels=3,\n conv1_t_size=7,\n conv1_t_stride=1,\n no_max_pool=False,\n shortcut_type='B',\n cardinality=32,\n n_classes=400):\n block = partialclass(block, cardinality=cardinality)\n super().__init__(block, layers, block_inplanes, n_input_channels,\n conv1_t_size, conv1_t_stride, no_max_pool,\n shortcut_type, n_classes)\n\n self.fc = nn.Linear(cardinality * 32 * block.expansion, n_classes)\n\n\ndef generate_model(model_depth, **kwargs):\n assert model_depth in [50, 101, 152, 200]\n\n if model_depth == 50:\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], get_inplanes(),\n **kwargs)\n elif model_depth == 101:\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], get_inplanes(),\n **kwargs)\n elif model_depth == 152:\n model = ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], get_inplanes(),\n **kwargs)\n elif model_depth == 200:\n model = ResNeXt(ResNeXtBottleneck, [3, 24, 36, 3], get_inplanes(),\n **kwargs)\n\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Conv3d",
"torch.nn.BatchNorm3d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NVIDIA/cuQuantum | [
"0f00494d4639d760228ac002e83e6d2d3dd97eca",
"0f00494d4639d760228ac002e83e6d2d3dd97eca"
] | [
"python/samples/sampler.py",
"python/samples/coarse/example15.py"
] | [
"# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES\n#\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport numpy as np\nimport cupy as cp\n\nimport cuquantum\nfrom cuquantum import custatevec as cusv\n\n\nnIndexBits = 3\nnSvSize = (1 << nIndexBits)\nnMaxShots = 5\nnShots = 5\n\nbitStringLen = 2\nbitOrdering = np.asarray([0, 1], dtype=np.int32)\n\nbitStrings = np.empty((nShots,), dtype=np.int64)\nbitStrings_expected = np.asarray([0b00, 0b01, 0b10, 0b11, 0b11], dtype=np.int64)\n\nh_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j, \n 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)\n\nd_sv = cp.asarray(h_sv)\n\n# In a real application, random numbers in range [0, 1) will be used.\nrandnums = np.asarray([0.1, 0.8, 0.4, 0.6, 0.2], dtype=np.float64)\n\n########################################################################\n\n# cuStateVec handle initialization\nhandle = cusv.create()\n\n# create sampler and check the size of external workspace\nsampler, extraWorkspaceSizeInBytes = cusv.sampler_create(\n handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, nMaxShots)\n\n# allocate external workspace\nextraWorkspace = cp.cuda.alloc(extraWorkspaceSizeInBytes)\n\n# sample preprocess\ncusv.sampler_preprocess(\n handle, sampler, extraWorkspace.ptr, extraWorkspaceSizeInBytes)\n\n# sample bit strings\ncusv.sampler_sample(\n handle, sampler, bitStrings.ctypes.data, bitOrdering.ctypes.data, bitStringLen,\n randnums.ctypes.data, nShots, cusv.SamplerOutput.ASCENDING_ORDER)\n\n# destroy sampler\ncusv.sampler_destroy(sampler)\n\n# destroy handle\ncusv.destroy(handle)\n\nif not np.allclose(bitStrings, bitStrings_expected):\n raise ValueError(\"results mismatch\")\nprint(\"test passed\")\n",
"# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES\n#\n# SPDX-License-Identifier: BSD-3-Clause\n\n\"\"\"\nExample illustrating a generalized Einstein summation expression.\n\"\"\"\nimport numpy as np\n\nfrom cuquantum import contract\n\n\na = np.random.rand(3,2)\nb = np.random.rand(3,3)\nc = np.random.rand(3,2)\nd = np.random.rand(3,4)\n\n# A hyperedge example.\nexpr = \"ij,ik,ij,kl->l\"\n\nr = contract(expr, a, b, c, d)\ns = np.einsum(expr, a, b, c, d)\nassert np.allclose(r, s), \"Incorrect results.\"\n"
] | [
[
"numpy.asarray",
"numpy.allclose",
"numpy.empty"
],
[
"numpy.random.rand",
"numpy.einsum",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JeromeMutgeert/Detectron-DA-Faster-RCNN | [
"86e4fb06bf3e934c12eb0913ef4210ad61114386"
] | [
"detectron/core/test_engine.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Test a Detectron network on an imdb (image database).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nimport cv2\nimport datetime\nimport logging\nimport numpy as np\nimport os\n\nfrom caffe2.python import workspace\n\nfrom detectron.core.config import cfg\nfrom detectron.core.config import get_output_dir\nfrom detectron.core.rpn_generator import generate_rpn_on_dataset\nfrom detectron.core.rpn_generator import generate_rpn_on_range\nfrom detectron.core.test import im_detect_all\nfrom detectron.datasets import task_evaluation\nfrom detectron.datasets.json_dataset import JsonDataset\nfrom detectron.modeling import model_builder\nfrom detectron.utils.io import save_object\nfrom detectron.utils.timer import Timer\nimport detectron.utils.c2 as c2_utils\nimport detectron.utils.env as envu\nimport detectron.utils.net as net_utils\nimport detectron.utils.subprocess as subprocess_utils\nimport detectron.utils.vis as vis_utils\n\n# for loading detections.pkl if already present\nfrom detectron.utils.io import load_object\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef get_eval_functions():\n # Determine which parent or child function should handle inference\n if cfg.MODEL.RPN_ONLY:\n child_func = generate_rpn_on_range\n parent_func = generate_rpn_on_dataset\n else:\n # Generic case that handles all network types other than RPN-only nets\n # and RetinaNet\n child_func = test_net\n parent_func = test_net_on_dataset\n\n return parent_func, child_func\n\n\ndef get_inference_dataset(index, is_parent=True):\n assert is_parent or len(cfg.TEST.DATASETS) == 1, \\\n 'The child inference process can only work on a single dataset'\n\n dataset_name = cfg.TEST.DATASETS[index]\n\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert is_parent or len(cfg.TEST.PROPOSAL_FILES) == 1, \\\n 'The child inference process can only work on a single proposal file'\n assert len(cfg.TEST.PROPOSAL_FILES) == len(cfg.TEST.DATASETS), \\\n 'If proposals are used, one proposal file must be specified for ' \\\n 'each dataset'\n proposal_file = cfg.TEST.PROPOSAL_FILES[index]\n else:\n proposal_file = None\n\n return dataset_name, proposal_file\n\n\ndef run_inference(\n weights_file, ind_range=None,\n multi_gpu_testing=False, gpu_id=0,\n check_expected_results=False,\n):\n parent_func, child_func = get_eval_functions()\n is_parent = ind_range is None\n\n def result_getter():\n if is_parent:\n # Parent case:\n # In this case we're either running inference on the entire dataset in a\n # single process or (if multi_gpu_testing is True) using this process to\n # launch subprocesses that each run inference on a range of the dataset\n all_results = {}\n \n subset_pointer = None\n if cfg.VOC_SUBSET != '':\n 
subset_pointer = result_getter #any dummy object could be used that is more advanced than 'object()' or similar builtins.\n subset_pointer.subset = np.load(cfg.VOC_SUBSET)\n print('loading subset')\n \n for i in range(len(cfg.TEST.DATASETS)):\n dataset_name, proposal_file = get_inference_dataset(i)\n output_dir = get_output_dir(dataset_name, training=False)\n print('len before',len(subset_pointer.subset))\n results = parent_func(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=multi_gpu_testing,\n subset_pointer=subset_pointer\n )\n all_results.update(results)\n\n return all_results\n else:\n # Subprocess child case:\n # In this case test_net was called via subprocess.Popen to execute on a\n # range of inputs on a single dataset\n dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)\n output_dir = get_output_dir(dataset_name, training=False)\n return child_func(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=ind_range,\n gpu_id=gpu_id\n )\n\n all_results = result_getter()\n if check_expected_results and is_parent:\n task_evaluation.check_expected_results(\n all_results,\n atol=cfg.EXPECTED_RESULTS_ATOL,\n rtol=cfg.EXPECTED_RESULTS_RTOL\n )\n task_evaluation.log_copy_paste_friendly_results(all_results)\n\n return all_results\n\n\ndef coco_detects_to_voc(all_boxes):\n # coco2voc: for each of the 81 coco classes the corresponding voc class index. See coco2voc.ipynb\n coco2voc = np.array([ 0, 15, 2, 7, 14, 1, 6, 19, 0, 4, 0, 0, 0, 0, 0, 3, 8,\n 12, 13, 17, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 9, 18, 16, 0, 11, 0, 20, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32)\n \n voc_boxes = [[] for _ in range(21)]\n for i,cls_dets in enumerate(all_boxes):\n voc_i = coco2voc[i]\n if voc_i != 0:\n voc_boxes[voc_i] = cls_dets\n \n return voc_boxes\n\n\ndef test_net_on_dataset(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=False,\n gpu_id=0,\n subset_pointer=None\n):\n \"\"\"Run inference on a dataset.\"\"\"\n if dataset_name[:5] != 'live_':\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n test_timer.tic()\n if multi_gpu:\n num_images = len(dataset.get_roidb())\n all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(\n weights_file, dataset_name, proposal_file, num_images, output_dir\n )\n else:\n all_boxes, all_segms, all_keyps = test_net(\n weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id,\n subset_pointer=subset_pointer\n )\n test_timer.toc()\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n \n if cfg.TEST.COCO_TO_VOC:\n all_boxes = coco_detects_to_voc(all_boxes)\n \n if dataset_name[:5] == 'live_':\n return None\n \n results = task_evaluation.evaluate_all(\n dataset, all_boxes, all_segms, all_keyps, output_dir,\n subset_pointer=subset_pointer\n )\n \n if subset_pointer is not None:\n # prune the subset for the following datasets:\n subset_pointer.subset = subset_pointer.subset[len(dataset.get_roidb()):]\n print('remains',len(subset_pointer.subset)) # should have 0 remains for the last set, voc_2012_train.\n \n return results\n\n\ndef multi_gpu_test_net_on_dataset(\n weights_file, dataset_name, proposal_file, num_images, output_dir\n):\n \"\"\"Multi-gpu inference on a dataset.\"\"\"\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n binary = os.path.join(binary_dir, 'test_net' + binary_ext)\n assert 
os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Pass the target dataset and proposal file (if any) via the command line\n opts = ['TEST.DATASETS', '(\"{}\",)'.format(dataset_name)]\n opts += ['TEST.WEIGHTS', weights_file]\n if proposal_file:\n opts += ['TEST.PROPOSAL_FILES', '(\"{}\",)'.format(proposal_file)]\n\n # Run inference in parallel in subprocesses\n # Outputs will be a list of outputs from each subprocess, where the output\n # of each subprocess is the dictionary saved by test_net().\n outputs = subprocess_utils.process_in_parallel(\n 'detection', num_images, binary, output_dir, opts\n )\n\n # Collate the results from each subprocess\n all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n for det_data in outputs:\n all_boxes_batch = det_data['all_boxes']\n all_segms_batch = det_data['all_segms']\n all_keyps_batch = det_data['all_keyps']\n for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):\n all_boxes[cls_idx] += all_boxes_batch[cls_idx]\n all_segms[cls_idx] += all_segms_batch[cls_idx]\n all_keyps[cls_idx] += all_keyps_batch[cls_idx]\n det_file = os.path.join(output_dir, 'detections.pkl')\n cfg_yaml = envu.yaml_dump(cfg)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n\n return all_boxes, all_segms, all_keyps\n\n\ndef test_net(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=None,\n gpu_id=0,\n subset_pointer=None\n):\n \"\"\"Run inference on all images in a dataset or over an index range of images\n in a dataset using a single GPU.\n \"\"\"\n assert not cfg.MODEL.RPN_ONLY, \\\n 'Use rpn_generate to generate proposals from RPN-only models'\n \n # determine file name\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n det_file = os.path.join(output_dir, det_name)\n \n # load results if already present\n if os.path.exists(det_file):\n res = load_object(det_file)\n all_boxes, all_segms, all_keyps = res['all_boxes'],res['all_segms'],res['all_keyps']\n else:\n \n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(\n dataset_name, proposal_file, ind_range\n )\n \n if subset_pointer is not None:\n voc_subset = subset_pointer.subset\n this_sub = voc_subset[:len(roidb)]\n # subset_pointer.subset = voc_subset[len(roidb):]\n \n # filter roidb:\n roidb = [roi for taking,roi in zip(this_sub,roidb) if taking]\n \n total_num_images = len(roidb)\n end_ind = total_num_images\n \n model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)\n \n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n \n all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)\n if cfg.TEST.COLLECT_ALL:\n all_feats = []\n all_class_weights = np.empty(shape=(num_images,num_classes),dtype=np.float32)\n \n timers = defaultdict(Timer)\n \n for i, entry in enumerate(roidb):\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n # The roidb may contain ground-truth rois (for example, if the roidb\n # comes from the training or val split). We only want to evaluate\n # detection on the *non*-ground-truth rois. 
We select only the rois\n # that have the gt_classes field set to 0, which means there's no\n # ground truth.\n box_proposals = entry['boxes'][entry['gt_classes'] == 0]\n if len(box_proposals) == 0:\n continue\n else:\n # Faster R-CNN type models generate proposals on-the-fly with an\n # in-network RPN; 1-stage models don't require proposals.\n box_proposals = None\n \n im = cv2.imread(entry['image'])\n with c2_utils.NamedCudaScope(gpu_id):\n cls_boxes_i, cls_segms_i, cls_keyps_i, sum_softmax, topk_feats = im_detect_all(\n model, im, box_proposals, timers, return_feats= cfg.TEST.COLLECT_ALL\n )\n \n # print('nfeats:', topk_feats.shape[0])\n # print(topk_feats)\n \n extend_results(i, all_boxes, cls_boxes_i)\n if cls_segms_i is not None:\n extend_results(i, all_segms, cls_segms_i)\n if cls_keyps_i is not None:\n extend_results(i, all_keyps, cls_keyps_i)\n \n if cfg.TEST.COLLECT_ALL:\n all_class_weights[i] = sum_softmax\n all_feats.append(topk_feats) # will accumulate about 9 Gb of feats on COCO train set (118K imgs)\n \n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n det_time = (\n timers['im_detect_bbox'].average_time +\n timers['im_detect_mask'].average_time +\n timers['im_detect_keypoints'].average_time\n )\n misc_time = (\n timers['misc_bbox'].average_time +\n timers['misc_mask'].average_time +\n timers['misc_keypoints'].average_time\n )\n logger.info(\n (\n 'im_detect: range [{:d}, {:d}] of {:d}: '\n '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'\n ).format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1,\n start_ind + num_images, det_time, misc_time, eta\n )\n )\n \n if cfg.VIS:\n im_name = os.path.splitext(os.path.basename(entry['image']))[0]\n vis_utils.vis_one_image(\n im[:, :, ::-1],\n '{:d}_{:s}'.format(i, im_name),\n os.path.join(output_dir, 'vis'),\n cls_boxes_i,\n segms=cls_segms_i,\n keypoints=cls_keyps_i,\n thresh=cfg.VIS_TH,\n box_alpha=0.8,\n dataset=dataset,\n show_class=True\n )\n \n cfg_yaml = envu.yaml_dump(cfg)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n if cfg.TEST.COLLECT_ALL:\n save_object(all_class_weights,os.path.join(output_dir,'class_weights.pkl'))\n save_object(all_feats,os.path.join(output_dir,'feature_vectors.pkl'))\n logger.info('Wrote class weights and feature vectors to output folder')\n \n return all_boxes, all_segms, all_keyps\n\n\ndef initialize_model_from_cfg(weights_file, gpu_id=0):\n \"\"\"Initialize a model from the global cfg. Loads test-time weights and\n creates the networks in the Caffe2 workspace.\n \"\"\"\n model = model_builder.create(cfg.MODEL.TYPE, train=False, gpu_id=gpu_id)\n net_utils.initialize_gpu_from_weights_file(\n model, weights_file, gpu_id=gpu_id,\n )\n model_builder.add_inference_inputs(model)\n workspace.CreateNet(model.net)\n workspace.CreateNet(model.conv_body_net)\n if cfg.MODEL.MASK_ON:\n workspace.CreateNet(model.mask_net)\n if cfg.MODEL.KEYPOINTS_ON:\n workspace.CreateNet(model.keypoint_net)\n return model\n\n\ndef get_roidb_and_dataset(dataset_name, proposal_file, ind_range):\n \"\"\"Get the roidb for the dataset specified in the global cfg. 
Optionally\n restrict it to a range of indices if ind_range is a pair of integers.\n \"\"\"\n \n if dataset_name == 'live_targets':\n from detectron.datasets.live_dataset import LiveRoidb\n roidb = LiveRoidb()\n import detectron.datasets.dummy_datasets as dummy_datasets\n json_dataset = dummy_datasets.get_coco_dataset()\n if not cfg.TRAIN.USE_FLIPPED:\n logger.info('Live target data set will use flipped examples anyway!')\n logger.info('\"Loaded\" dataset: {:s}'.format('live_targets'))\n return roidb, json_dataset, 0, len(roidb), len(roidb)\n \n dataset = JsonDataset(dataset_name)\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert proposal_file, 'No proposal file given'\n roidb = dataset.get_roidb(\n proposal_file=proposal_file,\n proposal_limit=cfg.TEST.PROPOSAL_LIMIT\n )\n else:\n roidb = dataset.get_roidb()\n\n if ind_range is not None:\n total_num_images = len(roidb)\n start, end = ind_range\n roidb = roidb[start:end]\n else:\n start = 0\n end = len(roidb)\n total_num_images = end\n\n return roidb, dataset, start, end, total_num_images\n\n\ndef empty_results(num_classes, num_images):\n \"\"\"Return empty results lists for boxes, masks, and keypoints.\n Box detections are collected into:\n all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)\n Instance mask predictions are collected into:\n all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in\n 1:1 correspondence with the boxes in all_boxes[cls][image]\n Keypoint predictions are collected into:\n all_keyps[cls][image] = [...] list of keypoints results, each encoded as\n a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to\n [x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints).\n Keypoints are recorded for person (cls = 1); they are in 1:1\n correspondence with the boxes in all_boxes[cls][image].\n \"\"\"\n # Note: do not be tempted to use [[] * N], which gives N references to the\n # *same* empty list.\n all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n return all_boxes, all_segms, all_keyps\n\n\ndef extend_results(index, all_res, im_res):\n \"\"\"Add results for an image to the set of all results at the specified\n index.\n \"\"\"\n # Skip cls_idx 0 (__background__)\n for cls_idx in range(1, len(im_res)):\n all_res[cls_idx][index] = im_res[cls_idx]\n"
] | [
[
"numpy.load",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
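The `empty_results` helper in the detection code above warns against building the per-class result lists by list multiplication. A minimal, standalone sketch (not taken from the repository) of the aliasing pitfall that comment refers to:

```python
# [[]] * N creates N references to the *same* inner list, so an append to one
# slot shows up in every slot; the comprehension form gives independent lists.
aliased = [[]] * 3
aliased[0].append("box")
assert aliased == [["box"], ["box"], ["box"]]    # all three slots alias one list

independent = [[] for _ in range(3)]
independent[0].append("box")
assert independent == [["box"], [], []]          # each slot is its own list
```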
DamonU2/model-factory | [
"494f8b65afcd80fbedca3224e4c29a2e10cd484f"
] | [
"scripts/PSRA_combineSrcLossTable.py"
] | [
"#!/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport csv\nimport glob \nimport os\nimport re\nimport sys\nimport argparse\nimport configparser\nimport logging\n\n'''\npython script to merge source loss tables for Provinces and territories\nwhere PSRA runs have een split up by economic region or sub regions\ncan be run from the command line with mandatory arguments like:\npython3 PSRA_combineSrcLossTable.py --srcLossDir=/usr/src/app/ebRisk/AB/\n'''\n\ndef main():\n args = parse_args()\n os.chdir(args.srcLossDir)\n\n for retrofit in 'b0', 'r2':\n erFileList = glob.glob('*src_loss_table_{}.csv'.format(retrofit))\n erFileList.sort()\n \n with open(erFileList[0], newline='') as f:\n reader = csv.reader(f)\n columns = next(reader)\n\n columns.append('region')\n\n dfFinal = pd.DataFrame(columns=columns)\n\n for erFile in erFileList:\n dfTemp = pd.read_csv(erFile)\n er = erFile.split('_')[1]\n #Remove the split econmic region identifiers \n #handle subregions and combined regions differently \n # For example 'QC2445-55' should remain the same\n # NB1330 should remain the same \n # BC5920A2 should be changed to BC5920 \n if len(re.split('(\\d+)',er)) == 1 or re.split('(\\d+)',er)[2] == '-':\n er = ''.join(re.split('(\\d+)',er)[0:4])\n else :\n er = ''.join(re.split('(\\d+)',er)[0:2])\n\n dfTemp['region'] = er\n dfFinal = dfFinal.append(dfTemp)\n outFileName = 'ebR_{er}_src_loss_table_{retrofit}.csv'.format(**{'er':er[0:2], 'retrofit':retrofit})\n\n if not os.path.isfile(outFileName): \n #Check if the file already exists, it should for \n #Provs/Territories that were process with a single \n #Economic region\n dfFinal.to_csv(outFileName, index=False)\n else: # else it exists, do nothing\n print('File ({}) already exists renaming original file'.format(outFileName))\n os.rename(outFileName, '{}_orginal.csv'.format(os.path.splitext(outFileName)[0]))\n dfFinal.to_csv(outFileName, index=False)\n return\n\ndef get_config_params(args):\n \"\"\"\n Parse Input/Output columns from supplied *.ini file\n \"\"\"\n configParseObj = configparser.ConfigParser()\n configParseObj.read(args)\n return configParseObj\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Combine Source Loss Tables across Economic Regions')\n parser.add_argument('--srcLossDir', type=str, help='', required=True)\n args = parser.parse_args()\n \n return args\n\nif __name__ == '__main__':\n main() "
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
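The region-normalisation comments in `PSRA_combineSrcLossTable.py` above describe three cases ('QC2445-55' and 'NB1330' stay as they are, 'BC5920A2' becomes 'BC5920'). A small sketch (hypothetical helper name, same `re.split` logic as the script) that reproduces those cases:

```python
import re

def normalise_region(er):
    # re.split with a capturing group keeps the digit runs in the result,
    # e.g. 'BC5920A2' -> ['BC', '5920', 'A', '2', ''].
    parts = re.split(r'(\d+)', er)
    if len(parts) == 1 or parts[2] == '-':
        return ''.join(parts[0:4])    # combined regions such as 'QC2445-55'
    return ''.join(parts[0:2])        # drop sub-region suffixes: 'BC5920A2' -> 'BC5920'

assert normalise_region('QC2445-55') == 'QC2445-55'
assert normalise_region('NB1330') == 'NB1330'
assert normalise_region('BC5920A2') == 'BC5920'
```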
bclarkson-code/search-query-classification | [
"8928faad459ef97934a6dbcf38a9347da5662415"
] | [
"gpt2_model/generate_gpt2_embeddings.py"
] | [
"import pickle\nimport torch\nfrom tqdm.auto import tqdm\nfrom gpt2_predictor import GPT2Predictor, GPT2TestSearchQueryDataModule\n\nif __name__ == '__main__':\n encoding = {\n 'Arts': 0,\n 'Business': 11,\n 'Computers': 10,\n 'Games': 12,\n 'Health': 9,\n 'Home': 6,\n 'News': 14,\n 'Recreation': 1,\n 'Reference': 13,\n 'Regional': 4,\n 'Science': 8,\n 'Shopping': 3,\n 'Society': 2,\n 'Sports': 5,\n 'World': 7\n }\n queries = GPT2TestSearchQueryDataModule(\n 'open_source.feather',\n batch_size=128,\n num_workers=0,\n tokeniser_string='gpt2',\n debug=False,\n encoding=encoding,\n )\n queries.prepare_data()\n queries.setup()\n\n model = GPT2Predictor.load_from_checkpoint(\n 'gpt2-checkpoints/model-epoch=00-valid/loss=1.86.ckpt',\n strict=False\n )\n test_data = queries.test_dataloader()\n preds = []\n with torch.no_grad():\n for batch in tqdm(test_data, desc='Predicting'):\n (input_ids, attention_mask), _ = batch\n pred = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n preds.append(pred)\n\n with open('test_preds.pkl', 'wb') as f:\n pickle.dump(preds)\n\n\n\n"
] | [
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
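As a rough usage sketch of the batched `torch.no_grad()` prediction-and-pickle pattern in the script above (a dummy model and dummy batches stand in for `GPT2Predictor` and the data module; note that `pickle.dump` also takes the open file handle):

```python
import pickle
import torch

model = torch.nn.Linear(4, 2)                      # placeholder for the real predictor
model.eval()
batches = [torch.randn(8, 4) for _ in range(3)]    # placeholder test batches

preds = []
with torch.no_grad():                              # no autograd state kept during inference
    for batch in batches:
        preds.append(model(batch))

with open('test_preds.pkl', 'wb') as f:
    pickle.dump(preds, f)                          # file object passed explicitly
```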
cpaxton/costar_plan | [
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040",
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040",
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040",
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040",
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040",
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040",
"be5c12f9d0e9d7078e6a5c283d3be059e7f3d040"
] | [
"costar_models/python/costar_models/conditional_image_costar.py",
"costar_models/python/costar_models/datasets/image.py",
"costar_task_plan/python/costar_task_plan/robotics/representation/cartesian.py",
"costar_task_plan/scripts/keras/lstm_nietzche_test.py",
"costar_task_plan/python/costar_task_plan/simulation/tasks/trays.py",
"costar_models/python/costar_models/pretrain_image_husky.py",
"costar_task_plan/python/costar_task_plan/tools/evaluate_mcts.py"
] | [
"from __future__ import print_function\n\nimport keras.backend as K\nimport keras.losses as losses\nimport keras.optimizers as optimizers\nimport numpy as np\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers import Input, RepeatVector, Reshape\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.merge import Concatenate, Multiply\nfrom keras.losses import binary_crossentropy\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam\nfrom matplotlib import pyplot as plt\n\nfrom .robot_multi_models import *\nfrom .mhp_loss import *\nfrom .loss import *\nfrom .conditional_image import ConditionalImage\nfrom .multi import *\nfrom .costar import *\nfrom .callbacks import *\n\nclass ConditionalImageCostar(ConditionalImage):\n\n def __init__(self, *args, **kwargs):\n super(ConditionalImageCostar, self).__init__(*args, **kwargs)\n self.PredictorCb = ImageWithFirstCb\n\n def _makeModel(self, image, *args, **kwargs):\n\n img_shape = image.shape[1:]\n img_size = 1.\n for dim in img_shape:\n img_size *= dim\n gripper_size = 1\n arm_size = 6\n\n # =====================================================================\n # Load the image decoders\n img_in = Input(img_shape, name=\"predictor_img_in\")\n img0_in = Input(img_shape, name=\"predictor_img0_in\")\n #arm_in = Input((arm_size,))\n #gripper_in = Input((gripper_size,))\n #arm_gripper = Concatenate()([arm_in, gripper_in])\n label_in = Input((1,))\n ins = [img0_in, img_in]\n\n encoder = MakeImageEncoder(self, img_shape)\n decoder = MakeImageDecoder(self, self.hidden_shape)\n\n LoadEncoderWeights(self, encoder, decoder)\n\n # =====================================================================\n # Load the arm and gripper representation\n h = encoder([img0_in, img_in])\n\n if self.validate:\n self.loadValidationModels(arm_size, gripper_size, h0, h)\n\n next_option_in = Input((1,), name=\"next_option_in\")\n next_option_in2 = Input((1,), name=\"next_option_in2\")\n ins += [next_option_in, next_option_in2]\n\n # =====================================================================\n # Apply transforms\n y = Flatten()(OneHot(self.num_options)(next_option_in))\n y2 = Flatten()(OneHot(self.num_options)(next_option_in2))\n\n tform = self._makeTransform() if not self.dense_transform else self._makeDenseTransform()\n tform.summary()\n x = tform([h,y])\n x2 = tform([x,y2])\n\n image_out, image_out2 = decoder([x]), decoder([x2])\n\n # Compute classifier on the last transform\n if not self.no_disc:\n image_discriminator = LoadGoalClassifierWeights(self,\n make_classifier_fn=MakeCostarImageClassifier,\n img_shape=img_shape)\n #disc_out1 = image_discriminator([img0_in, image_out])\n disc_out2 = image_discriminator([img0_in, image_out2])\n\n # Create custom encoder loss\n if self.enc_loss:\n loss = EncoderLoss(self.image_encoder, self.loss)\n enc_losses = [loss, loss]\n enc_outs = [x, x2]\n enc_wts = [1e-2, 1e-2]\n img_loss_wt = 1.\n else:\n enc_losses = []\n enc_outs = []\n enc_wts = []\n img_loss_wt = 1.\n\n # Create models to train\n if self.no_disc:\n disc_wt = 0.\n else:\n disc_wt = 1e-3\n if self.no_disc:\n train_predictor = Model(ins + [label_in],\n [image_out, image_out2] + enc_outs)\n train_predictor.compile(\n loss=[self.loss, self.loss,] + enc_losses,\n loss_weights=[img_loss_wt, img_loss_wt] + enc_wts,\n optimizer=self.getOptimizer())\n else:\n train_predictor = Model(ins + [label_in],\n #[image_out, image_out2, disc_out1, disc_out2] + enc_outs)\n 
[image_out, image_out2, disc_out2] + enc_outs)\n train_predictor.compile(\n loss=[self.loss, self.loss, \"categorical_crossentropy\"] + enc_losses,\n #loss_weights=[img_loss_wt, img_loss_wt, 0.9*disc_wt, disc_wt] + enc_wts,\n loss_weights=[img_loss_wt, img_loss_wt, disc_wt] + enc_wts,\n optimizer=self.getOptimizer())\n train_predictor.summary()\n\n # Set variables\n self.predictor = None\n self.model = train_predictor\n\n\n def _getData(self, image, label, goal_idx, q, gripper, labels_to_name, *args, **kwargs):\n '''\n Parameters:\n -----------\n image: jpeg encoding of image\n label: integer code for which action is being performed\n goal_idx: index of the start of the next action\n q: joint states\n gripper: floating point gripper openness\n labels_to_name: list of high level actions (AKA options)\n '''\n\n # Null option to be set as the first option\n # Verify this to make sure we aren't loading things with different\n # numbers of available options/high-level actions\n if len(labels_to_name) != self.null_option:\n raise ValueError('labels_to_name must match the number of values in self.null_option. '\n 'self.null_option: ' + str(self.null_option) + ' ' +\n 'labels_to_name len: ' + str(len(labels_to_name)) + ' ' +\n 'labels_to_name values: ' + str(labels_to_name) + ' ' +\n 'If this is expected because you collected a dataset with new actions '\n 'or are using an old dataset, go to '\n 'costar_models/python/costar_models/util.py '\n 'and change model_instance.null_option and model_instance.num_options '\n 'accordingly in the \"costar\" features case.')\n self.null_option = len(labels_to_name)\n # Total number of options incl. null\n self.num_options = len(labels_to_name) + 1\n\n length = label.shape[0]\n prev_label = np.zeros_like(label)\n prev_label[1:] = label[:(length-1)]\n prev_label[0] = self.null_option\n\n goal_idx = np.min((goal_idx, np.ones_like(goal_idx)*(length-1)), axis=0)\n\n if not (image.shape[0] == goal_idx.shape[0]):\n print(\"Image shape:\", image.shape)\n print(\"Goal idxs:\", goal_idx.shape)\n print(label)\n print(goal_idx)\n raise RuntimeError('data type shapes did not match')\n goal_label = label[goal_idx]\n goal_image = image[goal_idx]\n goal_image2, goal_label2 = GetNextGoal(goal_image, label)\n\n # Extend image_0 to full length of sequence\n image0 = image[0]\n image0 = np.tile(np.expand_dims(image0,axis=0),[length,1,1,1])\n\n lbls_1h = np.squeeze(ToOneHot2D(label, self.num_options))\n lbls2_1h = np.squeeze(ToOneHot2D(goal_label2, self.num_options))\n if self.no_disc:\n return ([image0, image, label, goal_label, prev_label],\n [goal_image,\n goal_image2,])\n else:\n return ([image0, image, label, goal_label, prev_label],\n [goal_image,\n goal_image2,\n lbls2_1h,])\n\n",
"\"\"\"\nCopyright (c) 2018, Johns Hopkins University\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of the Johns Hopkins University nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL JOHNS HOPKINS UNIVERSITY BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport numpy as np\nimport io\nfrom PIL import Image\ntry:\n # don't require tensorflow for reading\n #import tensorflow as tf\n tf = None\nexcept ImportError:\n tf = None\n\n\ndef GetJpeg(img):\n '''\n Save a numpy array as a Jpeg, then get it out as a binary blob\n '''\n im = Image.fromarray(np.uint8(img))\n output = io.BytesIO()\n im.save(output, format=\"JPEG\", quality=80)\n return output.getvalue()\n\n\ndef GetPng(img):\n '''\n Save a numpy array as a Jpeg, then get it out as a binary blob\n '''\n im = Image.fromarray(img)\n output = io.BytesIO()\n # enabling optimized file size\n # increases saving time to ~0.4 seconds per image.\n #im.save(output, format=\"PNG\", optimize=True)\n im.save(output, format=\"PNG\")\n return output.getvalue()\n\n\ndef JpegToNumpy(jpeg):\n if tf is not None:\n # make sure to call tf.enable_eager_execution() at the start of your program\n image = tf.image.decode_jpeg(jpeg)\n else:\n stream = io.BytesIO(jpeg)\n image = Image.open(stream)\n return np.asarray(image, dtype=np.uint8)\n\n\ndef ConvertImageListToNumpy(data, format='numpy', data_format='NHWC', dtype=np.uint8):\n \"\"\" Convert a list of binary jpeg or png files to numpy format.\n\n # Arguments\n\n data: a list of binary jpeg images to convert\n format: default 'numpy' returns a 4d numpy array,\n 'list' returns a list of 3d numpy arrays\n \"\"\"\n length = len(data)\n images = []\n for raw in data:\n img = JpegToNumpy(raw)\n if data_format == 'NCHW':\n img = np.transpose(img, [2, 0, 1])\n images.append(img)\n if format == 'numpy':\n images = np.array(images, dtype=dtype)\n return images\n",
"\nfrom dmp.msg import DMPData\nfrom dmp_utils import RequestDMP, PlanDMP\nfrom geometry_msgs.msg import PoseArray\nfrom sensor_msgs.msg import JointState\nfrom tf_conversions import posemath as pm\n\nimport numpy as np\nimport PyKDL as kdl\nimport yaml\n\ntry:\n from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n\n\nclass CartesianSkillInstance(yaml.YAMLObject):\n '''\n Model an instance of a skill as a cartesian DMP. We use this to create all\n of the different skills we may need.\n '''\n\n yaml_tag = u'!CartesianSkillInstance'\n\n def __init__(self, config, params=None, objs=[], dt=0.1):\n '''\n Needs:\n - a vector of end effector poses\n - a vector of world state observations (dictionaries)\n Assume that end effector and worlds are in the same coordinate system,\n which is supposed to be the base link.\n '''\n self.config = config\n self.dt = dt\n self.objs = [obj for obj in objs if obj not in ['time', 'gripper']]\n if params is not None:\n self._fromParams(params)\n\n def fit(self, ee_frames, worlds):\n '''\n call to create the dmp based on this observation\n '''\n\n traj_length = len(ee_frames)\n for k, v in worlds.items():\n assert len(v) == traj_length\n\n k_gain = self.config['dmp_k']\n d_gain = self.config['dmp_d']\n num_basis = self.config['dmp_basis']\n\n if len(self.objs) > 1:\n raise RuntimeError(\n 'CartesianSkillInstance does not handle multiple object goals!')\n elif len(self.objs) is 0:\n # goal is missing -- just relative\n goal_frame = [pm.fromMatrix(np.eye(4))] * traj_length\n else:\n #goal_frame = [world[self.objs[0]] for world in worlds]\n goal_frame = worlds[self.objs[0]]\n\n u = np.zeros((len(goal_frame), 6))\n last_rpy = None\n\n for i, (ee, goal) in enumerate(zip(ee_frames, goal_frame)):\n pose = goal.Inverse() * ee\n u[i, 0] = pose.p[0]\n u[i, 1] = pose.p[1]\n u[i, 2] = pose.p[2]\n\n # make sure all our motions are nice and continuous -- or strange things will happen\n adj_rpy = [0, 0, 0]\n rpy = pose.M.GetRPY()\n if last_rpy is not None:\n for j, (lvar, var) in enumerate(zip(last_rpy, rpy)):\n if lvar < 0 and var > lvar + np.pi:\n adj_rpy[j] = var - 2 * np.pi\n elif lvar > 0 and var < lvar - np.pi:\n adj_rpy[j] = var + 2 * np.pi\n else:\n adj_rpy[j] = var\n else:\n adj_rpy = rpy\n\n u[i, 3] = adj_rpy[0]\n u[i, 4] = adj_rpy[1]\n u[i, 5] = adj_rpy[2]\n\n # Sanity check!\n if last_rpy is not None:\n for lvar, var in zip(last_rpy, adj_rpy):\n if abs(lvar - var) > np.pi:\n raise RuntimeError(\n 'big jump when computing angle! %f, %f' % (lvar, var))\n\n last_rpy = adj_rpy\n\n resp = RequestDMP(u, self.dt, k_gain, d_gain, num_basis)\n\n self.goal_pose = pose\n self.goal_object_position = goal\n self.dmp_list = resp.dmp_list\n self.tau = resp.tau\n\n def params(self):\n params = [self.tau, ] + list(self.goal_pose.p) + \\\n list(self.goal_pose.M.GetQuaternion())\n for dmp in self.dmp_list:\n params += dmp.weights\n return params\n\n def _fromParams(self, params):\n '''\n Parse in the cartesian skill from a set of parameters and a config. 
Saves\n rotations as a quaternion instead of as RPY.\n '''\n k_gain = self.config['dmp_k']\n d_gain = self.config['dmp_d']\n num_basis = self.config['dmp_basis']\n num_dmps = 6\n\n self.dmp_list = []\n\n self.tau = params[0]\n x, y, z, qx, qy, qz, qw = params[1:8]\n self.goal_pose = kdl.Frame(kdl.Rotation.Quaternion(qx, qy, qz, qw))\n self.goal_pose.p[0] = x\n self.goal_pose.p[1] = y\n self.goal_pose.p[2] = z\n\n idx = 8\n for i in xrange(num_dmps):\n weights = params[idx:(idx + num_basis + 1)]\n self.dmp_list.append(DMPData(\n k_gain=k_gain,\n d_gain=d_gain,\n weights=weights))\n idx += num_basis + 1\n\n",
"#!/usr/bin/env python\n\n'''\nfrom: https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py\n'''\nfrom __future__ import print_function\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport random\nimport sys\n\npath = get_file('nietzsche.txt', origin=\"https://s3.amazonaws.com/text-datasets/nietzsche.txt\")\ntext = open(path).read().lower()\nprint('corpus length:', len(text))\n\nchars = sorted(list(set(text)))\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\n# cut the text in semi-redundant sequences of maxlen characters\nmaxlen = 40\nstep = 3\nsentences = []\nnext_chars = []\nfor i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_chars.append(text[i + maxlen])\nprint('nb sequences:', len(sentences))\n\nprint('Vectorization...')\nX = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n X[i, t, char_indices[char]] = 1\n y[i, char_indices[next_chars[i]]] = 1\n\n\n# build the model: a single LSTM\nprint('Build model...')\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(maxlen, len(chars))))\nmodel.add(Dense(len(chars)))\nmodel.add(Activation('softmax'))\n\noptimizer = RMSprop(lr=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n# train the model, output generated text after each iteration\nfor iteration in range(1, 60):\n print()\n print('-' * 50)\n print('Iteration', iteration)\n model.fit(X, y, batch_size=128, nb_epoch=1)\n\n start_index = random.randint(0, len(text) - maxlen - 1)\n\n for diversity in [0.2, 0.5, 1.0, 1.2]:\n print()\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(400):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\nprint()\n",
"from abstract import AbstractTaskDefinition\nfrom default import DefaultTaskDefinition\nfrom costar_task_plan.simulation.world import *\nfrom costar_task_plan.simulation.option import *\nfrom costar_task_plan.simulation.reward import *\nfrom costar_task_plan.simulation.condition import *\n\nimport numpy as np\nimport os\nimport pybullet as pb\nimport rospkg\n\n\nclass TraysTaskDefinition(DefaultTaskDefinition):\n\n '''\n Define a simple task. The robot needs to pick up and stack blocks of\n different colors in a particular order.\n '''\n\n # define object filenames\n tray_dir = \"tray\"\n tray_urdf = \"traybox.urdf\"\n spawn_pos_min = np.array([-0.4, -0.25, 0.10])\n spawn_pos_max = np.array([-0.65, 0.25, 0.155])\n spawn_pos_delta = spawn_pos_max - spawn_pos_min\n\n tray_poses = [np.array([-0.5, 0., 0.0]),\n np.array([0., +0.6, 0.0]),\n np.array([-1.0, -0.6, 0.0])]\n \n block_urdf = \"%s.urdf\"\n model = \"block\"\n blocks = [\"red\", \"blue\", \"yellow\", \"green\"]\n\n # Objects are placed into a random stack.\n stack_pos = [\n # np.array([-0.5, 0., 0.]),\n np.array([-0.5, 0.1, 0.]),\n np.array([-0.5, 0.2, 0.]),\n np.array([-0.5, -0.1, 0.]),\n np.array([-0.5, -0.2, 0.]),\n ]\n\n over_final_stack_pos = np.array([-0.5, 0., 0.5])\n final_stack_pos = np.array([-0.5, 0., 0.05])\n grasp_q = (-0.27, 0.65, 0.65, 0.27)\n\n def __init__(self, stage, *args, **kwargs):\n '''\n Read in arguments defining how many blocks to create, where to create\n them, and the size of the blocks. Size is given as mean and covariance,\n blocks are placed at random.\n '''\n super(TrayTaskDefinition, self).__init__(*args, **kwargs)\n self.stage = stage\n self.block_ids = []\n\n def _makeTask(self):\n AlignOption = lambda goal: GoalDirectedMotionOption(\n self.world,\n goal,\n pose=((0.05, 0, 0.05), self.grasp_q),\n pose_tolerance=(0.025, 0.025),\n joint_velocity_tolerance=0.05,)\n align_args = {\n \"constructor\": AlignOption,\n \"args\": [\"block\"],\n \"remap\": {\"block\": \"goal\"},\n }\n GraspOption = lambda goal: GoalDirectedMotionOption(\n self.world,\n goal,\n pose=((0.0, 0, 0.0), self.grasp_q),\n pose_tolerance=(0.025, 0.025),\n joint_velocity_tolerance=0.05,)\n grasp_args = {\n \"constructor\": GraspOption,\n \"args\": [\"block\"],\n \"remap\": {\"block\": \"goal\"},\n }\n LiftOption = lambda: GeneralMotionOption(\n pose=(self.over_final_stack_pos, self.grasp_q),\n pose_tolerance=(0.025, 0.025),\n joint_velocity_tolerance=0.05,)\n lift_args = {\n \"constructor\": LiftOption,\n \"args\": []\n }\n PlaceOption = lambda: GeneralMotionOption(\n pose=(self.final_stack_pos, self.grasp_q),\n pose_tolerance=(0.025, 0.025),\n joint_velocity_tolerance=0.05,)\n place_args = {\n \"constructor\": PlaceOption,\n \"args\": []\n }\n close_gripper_args = {\n \"constructor\": CloseGripperOption,\n \"args\": []\n }\n open_gripper_args = {\n \"constructor\": OpenGripperOption,\n \"args\": []\n }\n\n # Create a task model\n task = Task()\n task.add(\"align\", None, align_args)\n task.add(\"grasp\", \"align\", grasp_args)\n task.add(\"close_gripper\", \"grasp\", close_gripper_args)\n task.add(\"lift\", \"close_gripper\", lift_args)\n task.add(\"place\", \"lift\", place_args)\n task.add(\"open_gripper\", \"place\", open_gripper_args)\n task.add(\"done\", \"open_gripper\", lift_args)\n\n return task\n\n def _addTower(self, pos, blocks, urdf_dir):\n '''\n Helper function that generats a tower containing listed blocks at the\n specific position\n '''\n z = 0.025\n ids = []\n for block in blocks:\n urdf_filename = os.path.join(\n 
urdf_dir, self.model, self.block_urdf % block)\n obj_id = pb.loadURDF(urdf_filename)\n pb.resetBasePositionAndOrientation(\n obj_id,\n (pos[0], pos[1], z),\n (0, 0, 0, 1))\n self.addObject(\"block\", \"%s_block\" % block, obj_id)\n z += 0.05\n ids.append(obj_id)\n return ids\n\n def _setup(self):\n '''\n Create task by adding objects to the scene\n '''\n\n rospack = rospkg.RosPack()\n path = rospack.get_path('costar_simulation')\n urdf_dir = os.path.join(path, self.urdf_dir)\n \n \n tray_filename = os.path.join(urdf_dir, self.tray_dir, self.tray_urdf)\n\n for position in self.tray_poses:\n obj_id = pb.loadURDF(tray_filename)\n pb.resetBasePositionAndOrientation(obj_id, position, (0, 0, 0, 1))\n # placement =\n # np.random.randint(0,len(self.stack_pos),(len(self.blocks),))\n \n placement = np.array(range(len(self.stack_pos)))\n np.random.shuffle(placement)\n for i, pos in enumerate(self.stack_pos):\n blocks = []\n for idx, block in zip(placement, self.blocks):\n if idx == i:\n blocks.append(block)\n ids = self._addTower(pos, blocks, urdf_dir)\n self.block_ids += ids\n\n self.world.addCondition(JointLimitViolationCondition(), -100,\n \"joints must stay in limits\")\n self.world.addCondition(TimeCondition(10.), -100, \"time limit reached\")\n self.world.reward = EuclideanReward(\"red_block\")\n\n # =====================================================================\n # Set up the \"first stage\" of the tower -- so that we only need to\n # correctly place a single block.\n # NOTE: switching to give positive rewards for all to make it easier to\n # distinguish good training data from bad.\n if self.stage == 0:\n threshold = 0.035\n self.world.addCondition(\n ObjectAtPositionCondition(\"red_block\",\n self.final_stack_pos, threshold),\n 100,\n \"block in right position\")\n self.world.addCondition(\n ObjectAtPositionCondition(\"blue_block\",\n self.final_stack_pos,\n threshold),\n 50,\n \"wrong block\")\n self.world.addCondition(\n ObjectAtPositionCondition(\"green_block\",\n self.final_stack_pos,\n threshold),\n 50,\n \"wrong block\")\n self.world.addCondition(\n ObjectAtPositionCondition(\"yellow_block\",\n self.final_stack_pos,\n threshold),\n 50,\n \"wrong block\")\n\n def reset(self):\n '''\n Reset blocks to new random towers. Also resets the world and the\n configuration for all of the new objects, including the robot.\n '''\n\n # placement = np.random.randint(\n # 0,\n # len(self.stack_pos),\n # (len(self.blocks),))\n placement = np.array(range(len(self.stack_pos)))\n np.random.shuffle(placement)\n self.world.done = False\n self.world.ticks = 0\n\n # loop over all stacks\n # pull out ids now associated with a stack\n for i, pos in enumerate(self.stack_pos):\n blocks = []\n for idx, block in zip(placement, self.block_ids):\n if idx == i:\n blocks.append(block)\n\n # add blocks to tower\n z = 0.025\n for block_id in blocks:\n pb.resetBasePositionAndOrientation(\n block_id,\n (pos[0], pos[1], z),\n (0, 0, 0, 1))\n z += 0.05\n\n self._setupRobot(self.robot.handle)\n\n def getName(self):\n return \"trays\"\n",
"from __future__ import print_function\n\nimport keras.backend as K\nimport keras.losses as losses\nimport keras.optimizers as optimizers\nimport numpy as np\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers import Input, RepeatVector, Reshape\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.merge import Concatenate, Multiply\nfrom keras.losses import binary_crossentropy\nfrom keras.models import Model, Sequential\n\nfrom .husky_sampler import *\nfrom .husky import *\n\nclass PretrainImageAutoencoderHusky(HuskyRobotMultiPredictionSampler):\n\n def __init__(self, taskdef, *args, **kwargs):\n '''\n As in the other models, we call super() to parse arguments from the\n command line and set things like our optimizer and learning rate.\n '''\n super(PretrainImageAutoencoderHusky, self).__init__(taskdef, *args, **kwargs)\n self.PredictorCb = ImageCb\n self.num_options = HuskyNumOptions()\n self.null_option = HuskyNumOptions()\n self.save_encoder_decoder = True\n\n def _makeModel(self, image, *args, **kwargs):\n '''\n Create model to predict possible manipulation goals.\n '''\n img_shape = image.shape[1:]\n\n img_in = Input(img_shape,name=\"predictor_img_in\")\n img0_in = Input(img_shape,name=\"predictor_img0_in\")\n option_in = Input((1,), name=\"predictor_option_in\")\n encoder = self._makeImageEncoder(img_shape)\n ins = [img0_in, img_in]\n\n enc = encoder([img_in])\n decoder = self._makeImageDecoder(\n self.hidden_shape,\n self.skip_shape,)\n out = decoder(enc)\n\n if self.no_disc:\n ae = Model(ins, [out])\n ae.compile(\n loss=[\"mae\"],\n loss_weights=[1.],\n optimizer=self.getOptimizer())\n else:\n # Discriminate on distinctive features like heading we hope\n image_discriminator = LoadClassifierWeights(self,\n MakeImageClassifier,\n img_shape)\n o2 = image_discriminator([img0_in, out])\n\n ae = Model(ins, [out, o2])\n ae.compile(\n loss=[\"mae\", \"categorical_crossentropy\"],\n loss_weights=[1.,1e-3],\n optimizer=self.getOptimizer())\n\n ae.summary()\n self.predictor = None\n self.model = ae\n self.actor = None\n\n def _getData(self, image, label, *args, **kwargs):\n I = np.array(image) / 255.\n o1 = np.array(label)\n I0 = I[0,:,:,:]\n length = I.shape[0]\n I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1])\n if self.no_disc:\n return [I0, I], [I]\n else:\n o1_1h = np.squeeze(ToOneHot2D(o1, self.num_options))\n return [I0, I], [I, o1_1h]\n\n",
"import os\nimport numpy as np\n# TODO(cpaxton): remove pygame from this\n#import pygame as pg\n\nfrom costar_task_plan.mcts import Node\n\n'''\nloop over all MCTS scenarios\n- generate the scenarios you need to collect the data\n- create \n'''\n\n\ndef mctsLoop(env, policies, seed, save, animate, **kwargs):\n\n if seed is not None:\n world_id = int(seed)\n else:\n world_id = np.random.randint(10000)\n np.random.seed(world_id)\n\n env.reset()\n world = env._world\n current_root = Node(world=world)\n done = current_root.terminal\n\n if policies._rollout is None:\n rollout = \"norollout\"\n else:\n rollout = \"rollout\"\n if policies._dfs:\n dfs = \"_dfs\"\n else:\n dfs = \"\"\n if policies._sample is not None:\n sample = policies._sample.getName()\n else:\n sample = \"none\"\n\n dirname = \"world%d_%s_%s%s\" % (world_id, sample, rollout, dfs)\n\n if save or animate:\n window = world._getScreen()\n os.mkdir(dirname)\n\n while not done:\n\n # planning loop: determine the set of policies\n for i in xrange(kwargs['iter']):\n # do whatever you want here\n policies.explore(current_root)\n path = policies.extract(current_root)\n\n # execute loop: follow these policies for however long we are supposed\n # to follow them according to their conditions\n\n while current_root.state.t < 1.0:\n # compute the next action according to the current policy\n # if a policy is finished, pop it off of the stack\n pass\n done = current_root.terminal\n if animate:\n # show the current window\n pass\n # if save:\n # # Save pygame image to disk\n # pg.image.save(window, \"%s/iter%d.png\"%(dirname,iter))\n if done:\n break\n\n # update current root\n"
] | [
[
"numpy.ones_like",
"numpy.expand_dims",
"numpy.zeros_like"
],
[
"numpy.asarray",
"numpy.uint8",
"numpy.array",
"numpy.transpose"
],
[
"numpy.eye"
],
[
"numpy.log",
"numpy.asarray",
"numpy.random.multinomial",
"numpy.argmax",
"numpy.exp",
"numpy.sum"
],
[
"numpy.array",
"numpy.random.shuffle"
],
[
"numpy.array",
"numpy.expand_dims"
],
[
"numpy.random.seed",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
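The `image.py` helpers in the costar_plan row above encode numpy images to JPEG blobs and decode them back with PIL. A self-contained round-trip sketch (synthetic 8x8 image, not repository code):

```python
import io
import numpy as np
from PIL import Image

img = np.zeros((8, 8, 3), dtype=np.uint8)
img[:, :4] = (255, 0, 0)                            # left half red

buf = io.BytesIO()
Image.fromarray(img).save(buf, format='JPEG', quality=80)
jpeg_bytes = buf.getvalue()                         # binary blob, as in GetJpeg

decoded = np.asarray(Image.open(io.BytesIO(jpeg_bytes)), dtype=np.uint8)
print(decoded.shape)                                # (8, 8, 3); pixel values are only
                                                    # approximate because JPEG is lossy
```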
Jackwaterveg/Parakeet | [
"e75a07076ba5766206a6cd1fb2e5f82b0ba3842c"
] | [
"utils/gen_duration_from_textgrid.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport os\nfrom pathlib import Path\n\nimport librosa\nimport numpy as np\nfrom praatio import tgio\n\n\ndef readtg(tg_path, sample_rate=24000, n_shift=300):\n alignment = tgio.openTextgrid(tg_path, readRaw=True)\n phones = []\n ends = []\n for interval in alignment.tierDict[\"phones\"].entryList:\n phone = interval.label\n phones.append(phone)\n ends.append(interval.end)\n frame_pos = librosa.time_to_frames(ends, sr=sample_rate, hop_length=n_shift)\n durations = np.diff(frame_pos, prepend=0)\n assert len(durations) == len(phones)\n # merge \"\" and sp in the end\n if phones[-1] == \"\" and len(phones) > 1 and phones[-2] == \"sp\":\n phones = phones[:-1]\n durations[-2] += durations[-1]\n durations = durations[:-1]\n # replace the last \"sp\" with \"sil\" in MFA1.x\n phones[-1] = \"sil\" if phones[-1] == \"sp\" else phones[-1]\n # replace the edge \"\" with \"sil\", replace the inner \"\" with \"sp\"\n new_phones = []\n for i, phn in enumerate(phones):\n if phn == \"\":\n if i in {0, len(phones) - 1}:\n new_phones.append(\"sil\")\n else:\n new_phones.append(\"sp\")\n else:\n new_phones.append(phn)\n phones = new_phones\n results = \"\"\n for (p, d) in zip(phones, durations):\n results += p + \" \" + str(d) + \" \"\n return results.strip()\n\n\n# assume that the directory structure of inputdir is inputdir/speaker/*.TextGrid\n# in MFA1.x, there are blank labels(\"\") in the end, and maybe \"sp\" before it\n# in MFA2.x, there are blank labels(\"\") in the begin and the end, while no \"sp\" and \"sil\" anymore\n# we replace it with \"sil\"\ndef gen_duration_from_textgrid(inputdir, output, sample_rate=24000,\n n_shift=300):\n # key: utt_id, value: (speaker, phn_durs)\n durations_dict = {}\n list_dir = os.listdir(inputdir)\n speakers = [dir for dir in list_dir if os.path.isdir(inputdir / dir)]\n for speaker in speakers:\n subdir = inputdir / speaker\n for file in os.listdir(subdir):\n if file.endswith(\".TextGrid\"):\n tg_path = subdir / file\n name = file.split(\".\")[0]\n durations_dict[name] = (speaker, readtg(\n tg_path, sample_rate=sample_rate, n_shift=n_shift))\n with open(output, \"w\") as wf:\n for name in sorted(durations_dict.keys()):\n wf.write(name + \"|\" + durations_dict[name][0] + \"|\" +\n durations_dict[name][1] + \"\\n\")\n\n\ndef main():\n # parse config and args\n parser = argparse.ArgumentParser(\n description=\"Preprocess audio and then extract features.\")\n parser.add_argument(\n \"--inputdir\",\n default=None,\n type=str,\n help=\"directory to alignment files.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"output duration file.\")\n parser.add_argument(\"--sample-rate\", type=int, help=\"the sample of wavs.\")\n parser.add_argument(\n \"--n-shift\",\n type=int,\n help=\"the n_shift of time_to_freames, also called hop_length.\")\n\n args = parser.parse_args()\n\n inputdir = Path(args.inputdir).expanduser()\n output = 
Path(args.output).expanduser()\n gen_duration_from_textgrid(inputdir, output, args.sample_rate, args.n_shift)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
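The `readtg` function above turns per-phone end times into per-phone frame counts. A minimal numeric sketch with made-up times, using an explicit floor in place of `librosa.time_to_frames` (which yields the same frame indices for these parameters):

```python
import numpy as np

sample_rate, n_shift = 24000, 300
ends = np.array([0.10, 0.25, 0.40])                    # phone end times in seconds
frame_pos = np.floor(ends * sample_rate / n_shift).astype(int)
durations = np.diff(frame_pos, prepend=0)              # frames occupied by each phone
print(frame_pos, durations)                            # [ 8 20 32] [ 8 12 12]
```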
ufukhurriyetoglu/allennlp | [
"3f431799776dbf2a42091ba114fc3b6f38b268c8"
] | [
"allennlp/models/coreference_resolution/coref.py"
] | [
"import logging\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom overrides import overrides\n\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.modules import FeedForward\nfrom allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\nfrom allennlp.nn import util, InitializerApplicator, RegularizerApplicator\nfrom allennlp.training.metrics import MentionRecall, ConllCorefScores\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\[email protected](\"coref\")\nclass CoreferenceResolver(Model):\n \"\"\"\n This ``Model`` implements the coreference resolution model described \"End-to-end Neural\n Coreference Resolution\"\n <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>\n by Lee et al., 2017.\n The basic outline of this model is to get an embedded representation of each span in the\n document. These span representations are scored and used to prune away spans that are unlikely\n to occur in a coreference cluster. For the remaining spans, the model decides which antecedent\n span (if any) they are coreferent with. The resulting coreference links, after applying\n transitivity, imply a clustering of the spans in the document.\n\n Parameters\n ----------\n vocab : ``Vocabulary``\n text_field_embedder : ``TextFieldEmbedder``\n Used to embed the ``text`` ``TextField`` we get as input to the model.\n context_layer : ``Seq2SeqEncoder``\n This layer incorporates contextual information for each word in the document.\n mention_feedforward : ``FeedForward``\n This feedforward network is applied to the span representations which is then scored\n by a linear layer.\n antecedent_feedforward: ``FeedForward``\n This feedforward network is applied to pairs of span representation, along with any\n pairwise features, which is then scored by a linear layer.\n feature_size: ``int``\n The embedding size for all the embedded features, such as distances or span widths.\n max_span_width: ``int``\n The maximum width of candidate spans.\n spans_per_word: float, required.\n A multiplier between zero and one which controls what percentage of candidate mention\n spans we retain with respect to the number of words in the document.\n max_antecedents: int, required.\n For each mention which survives the pruning stage, we consider this many antecedents.\n lexical_dropout: ``int``\n The probability of dropping out dimensions of the embedded text.\n initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n Used to initialize the model parameters.\n regularizer : ``RegularizerApplicator``, optional (default=``None``)\n If provided, will be used to calculate the regularization penalty during training.\n \"\"\"\n def __init__(self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n context_layer: Seq2SeqEncoder,\n mention_feedforward: FeedForward,\n antecedent_feedforward: FeedForward,\n feature_size: int,\n max_span_width: int,\n spans_per_word: float,\n max_antecedents: int,\n lexical_dropout: float = 0.2,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None) -> None:\n super(CoreferenceResolver, self).__init__(vocab, regularizer)\n\n self._text_field_embedder = 
text_field_embedder\n self._context_layer = context_layer\n self._mention_feedforward = TimeDistributed(mention_feedforward)\n self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)\n self._mention_scorer = TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))\n self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))\n self._head_scorer = TimeDistributed(torch.nn.Linear(context_layer.get_output_dim(), 1))\n\n # 10 possible distance buckets.\n self._num_distance_buckets = 10\n self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)\n self._span_width_embedding = Embedding(max_span_width, feature_size)\n\n self._max_span_width = max_span_width\n self._spans_per_word = spans_per_word\n self._max_antecedents = max_antecedents\n\n self._mention_recall = MentionRecall()\n self._conll_coref_scores = ConllCorefScores()\n if lexical_dropout > 0:\n self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)\n else:\n self._lexical_dropout = lambda x: x\n initializer(self)\n\n @overrides\n def forward(self, # type: ignore\n text: Dict[str, torch.LongTensor],\n span_starts: torch.IntTensor,\n span_ends: torch.IntTensor,\n span_labels: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n Parameters\n ----------\n text : ``Dict[str, torch.LongTensor]``, required.\n The output of a ``TextField`` representing the text of\n the document.\n span_starts : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1), representing the start indices of\n candidate spans for mentions. Comes from a ``ListField[IndexField]`` of indices into\n the text of the document.\n span_ends : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1), representing the end indices of\n candidate spans for mentions. Comes from a ``ListField[IndexField]`` of indices into\n the text of the document.\n span_labels : ``torch.IntTensor``, optional (default = None)\n A tensor of shape (batch_size, num_spans), representing the cluster ids\n of each span, or -1 for those which do not appear in any clusters.\n\n Returns\n -------\n An output dictionary consisting of:\n top_spans : ``torch.IntTensor``\n A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing\n the start and end word indices of the top spans that survived the pruning stage.\n antecedent_indices : ``torch.IntTensor``\n A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span\n the index (with respect to top_spans) of the possible antecedents the model considered.\n predicted_antecedents : ``torch.IntTensor``\n A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the\n index (with respect to antecedent_indices) of the most likely antecedent. -1 means there\n was no predicted link.\n loss : ``torch.FloatTensor``, optional\n A scalar loss to be optimised.\n \"\"\"\n # Shape: (batch_size, document_length, embedding_size)\n text_embeddings = self._lexical_dropout(self._text_field_embedder(text))\n\n document_length = text_embeddings.size(1)\n num_spans = span_starts.size(1)\n\n # Shape: (batch_size, document_length)\n text_mask = util.get_text_field_mask(text).float()\n\n # Shape: (batch_size, num_spans, 1)\n span_mask = (span_starts >= 0).float()\n # IndexFields return -1 when they are used as padding. 
As we do\n # some comparisons based on span widths when we attend over the\n # span representations that we generate from these indices, we\n # need them to be <= 0. This is only relevant in edge cases where\n # the number of spans we consider after the pruning stage is >= the\n # total number of spans, because in this case, it is possible we might\n # consider a masked span.\n span_starts = F.relu(span_starts.float()).long()\n span_ends = F.relu(span_ends.float()).long()\n\n # Shape: (batch_size, num_spans, embedding_size)\n span_embeddings = self._compute_span_representations(text_embeddings,\n text_mask,\n span_starts,\n span_ends)\n # Compute a score for whether each span is a mention,\n # making sure that masked spans have very low scores.\n # Shape: (batch_size, num_spans, 1)\n mention_scores = self._mention_scorer(self._mention_feedforward(span_embeddings))\n mention_scores += span_mask.log()\n\n # Prune based on mention scores.\n num_spans_to_keep = int(math.floor(self._spans_per_word * document_length))\n\n # Shape: (batch_size, num_spans_to_keep)\n # These are indices (with values between 0 and num_spans) into\n # the span_embeddings tensor.\n top_span_indices = self._prune_and_sort_spans(mention_scores, num_spans_to_keep)\n\n # Now that we've decided which spans are actually mentions the next\n # few steps are reformatting all of our variables to be in terms of\n # num_spans_to_keep instead of num_spans, so we don't waste computation\n # on spans that we've already discarded.\n\n # Shape: (batch_size * num_spans_to_keep)\n # torch.index_select only accepts 1D indices, but here\n # we need to select spans for each element in the batch.\n # This reformats the indices to take into account their\n # index into the batch. We precompute this here to make\n # the multiple calls to util.batched_index_select below more efficient.\n flat_top_span_indices = util.flatten_and_batch_shift_indices(top_span_indices, num_spans)\n\n # Select the span embeddings corresponding to the\n # top spans based on the mention scorer.\n # Shape: (batch_size, num_spans_to_keep, embedding_size)\n top_span_embeddings = util.batched_index_select(span_embeddings,\n top_span_indices,\n flat_top_span_indices)\n # Shape: (batch_size, num_spans_to_keep, 1)\n # TODO(Mark): If we parameterised the mention scorer to score things in (0, inf)\n # I think we could get rid of the need for this mask entirely.\n top_span_mask = util.batched_index_select(span_mask,\n top_span_indices,\n flat_top_span_indices)\n top_span_mention_scores = util.batched_index_select(mention_scores,\n top_span_indices,\n flat_top_span_indices)\n top_span_starts = util.batched_index_select(span_starts,\n top_span_indices,\n flat_top_span_indices)\n top_span_ends = util.batched_index_select(span_ends,\n top_span_indices,\n flat_top_span_indices)\n\n # Compute indices for antecedent spans to consider.\n max_antecedents = min(self._max_antecedents, num_spans_to_keep)\n\n # Now that we have our variables in terms of num_spans_to_keep, we need to\n # compare span pairs to decide each span's antecedent. Each span can only\n # have prior spans as antecedents, and we only consider up to max_antecedents\n # prior spans. So the first thing we do is construct a matrix mapping a span's\n # index to the indices of its allowed antecedents. Note that this is independent\n # of the batch dimension - it's just a function of the span's position in\n # top_spans. 
The spans are in document order, so we can just use the relative\n # index of the spans to know which other spans are allowed antecedents.\n\n # Once we have this matrix, we reformat our variables again to get embeddings\n # for all valid antecedents for each span. This gives us variables with shapes\n # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which\n # we can use to make coreference decisions between valid span pairs.\n\n # Shapes:\n # (num_spans_to_keep, max_antecedents),\n # (1, max_antecedents),\n # (1, num_spans_to_keep, max_antecedents)\n valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\\n self._generate_valid_antecedents(num_spans_to_keep, max_antecedents, text_mask.is_cuda)\n # Select tensors relating to the antecedent spans.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n candidate_antecedent_embeddings = util.flattened_index_select(top_span_embeddings,\n valid_antecedent_indices)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n candidate_antecedent_mention_scores = util.flattened_index_select(top_span_mention_scores,\n valid_antecedent_indices).squeeze(-1)\n # Compute antecedent scores.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n span_pair_embeddings = self._compute_span_pair_embeddings(top_span_embeddings,\n candidate_antecedent_embeddings,\n valid_antecedent_offsets)\n\n # Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)\n coreference_scores = self._compute_coreference_scores(span_pair_embeddings,\n top_span_mention_scores,\n candidate_antecedent_mention_scores,\n valid_antecedent_log_mask)\n # Compute final predictions.\n # Shape: (batch_size, num_spans_to_keep, 2)\n top_spans = torch.cat([top_span_starts, top_span_ends], -1)\n\n # We now have, for each span which survived the pruning stage,\n # a predicted antecedent. This implies a clustering if we group\n # mentions which refer to each other in a chain.\n # Shape: (batch_size, num_spans_to_keep)\n _, predicted_antecedents = coreference_scores.max(2)\n # Subtract one here because index 0 is the \"no antecedent\" class,\n # so this makes the indices line up with actual spans if the prediction\n # is greater than -1.\n predicted_antecedents -= 1\n\n output_dict = {\"top_spans\": top_spans,\n \"antecedent_indices\": valid_antecedent_indices,\n \"predicted_antecedents\": predicted_antecedents}\n if span_labels is not None:\n # Find the gold labels for the spans which we kept.\n pruned_gold_labels = util.batched_index_select(span_labels.unsqueeze(-1),\n top_span_indices,\n flat_top_span_indices)\n\n antecedent_labels = util.flattened_index_select(pruned_gold_labels,\n valid_antecedent_indices).squeeze(-1)\n antecedent_labels += valid_antecedent_log_mask.long()\n\n # Compute labels.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,\n antecedent_labels)\n # Now, compute the loss using the negative marginal log-likelihood.\n # This is equal to the log of the sum of the probabilities of all antecedent predictions\n # that would be consistent with the data, in the sense that we are minimising, for a\n # given span, the negative marginal log likelihood of all antecedents which are in the\n # same gold cluster as the span we are currently considering. 
Each span i predicts a\n # single antecedent j, but there might be several prior mentions k in the same\n # coreference cluster that would be valid antecedents. Our loss is the sum of the\n # probability assigned to all valid antecedents. This is a valid objective for\n # clustering as we don't mind which antecedent is predicted, so long as they are in\n # the same coreference cluster.\n coreference_log_probs = util.last_dim_log_softmax(coreference_scores, top_span_mask)\n correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()\n negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()\n\n self._mention_recall(top_spans, metadata)\n self._conll_coref_scores(top_spans, valid_antecedent_indices, predicted_antecedents, metadata)\n\n output_dict[\"loss\"] = negative_marginal_log_likelihood\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]):\n \"\"\"\n Converts the list of spans and predicted antecedent indices into clusters\n of spans for each element in the batch.\n\n Parameters\n ----------\n output_dict : ``Dict[str, torch.Tensor]``, required.\n The result of calling :func:`forward` on an instance or batch of instances.\n\n Returns\n -------\n The same output dictionary, but with an additional ``clusters`` key:\n\n clusters : ``List[List[List[Tuple[int, int]]]]``\n A nested list, representing, for each instance in the batch, the list of clusters,\n which are in turn comprised of a list of (start, end) inclusive spans into the\n original document.\n \"\"\"\n\n # A tensor of shape (batch_size, num_spans_to_keep, 2), representing\n # the start and end indices of each span.\n batch_top_spans = output_dict[\"top_spans\"].data.cpu()\n\n # A tensor of shape (batch_size, num_spans_to_keep) representing, for each span,\n # the index into ``antecedent_indices`` which specifies the antecedent span. Additionally,\n # the index can be -1, specifying that the span has no predicted antecedent.\n batch_predicted_antecedents = output_dict[\"predicted_antecedents\"].data.cpu()\n\n # A tensor of shape (num_spans_to_keep, max_antecedents), representing the indices\n # of the predicted antecedents with respect to the 2nd dimension of ``batch_top_spans``\n # for each antecedent we considered.\n antecedent_indices = output_dict[\"antecedent_indices\"].data.cpu()\n batch_clusters: List[List[List[Tuple[int, int]]]] = []\n\n # Calling zip() on two tensors results in an iterator over their\n # first dimension. 
This is iterating over instances in the batch.\n for top_spans, predicted_antecedents in zip(batch_top_spans, batch_predicted_antecedents):\n spans_to_cluster_ids: Dict[Tuple[int, int], int] = {}\n clusters: List[List[Tuple[int, int]]] = []\n\n for i, (span, predicted_antecedent) in enumerate(zip(top_spans, predicted_antecedents)):\n if predicted_antecedent < 0:\n # We don't care about spans which are\n # not co-referent with anything.\n continue\n\n # Find the right cluster to update with this span.\n # To do this, we find the row in ``antecedent_indices``\n # corresponding to this span we are considering.\n # The predicted antecedent is then an index into this list\n # of indices, denoting the span from ``top_spans`` which is the\n # most likely antecedent.\n predicted_index = antecedent_indices[i, predicted_antecedent]\n\n antecedent_span = (top_spans[predicted_index, 0],\n top_spans[predicted_index, 1])\n # Check if we've seen the span before.\n if antecedent_span in spans_to_cluster_ids.keys():\n predicted_cluster_id: int = spans_to_cluster_ids[antecedent_span]\n else:\n # We start a new cluster.\n predicted_cluster_id = len(clusters)\n # Append a new cluster containing only this span.\n clusters.append([antecedent_span])\n # Record the new id of this span.\n spans_to_cluster_ids[antecedent_span] = predicted_cluster_id\n\n # Now add the span we are currently considering.\n span_start, span_end = span\n clusters[predicted_cluster_id].append((span_start, span_end))\n spans_to_cluster_ids[(span_start, span_end)] = predicted_cluster_id\n batch_clusters.append(clusters)\n\n output_dict[\"clusters\"] = batch_clusters\n return output_dict\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n mention_recall = self._mention_recall.get_metric(reset)\n coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)\n\n return {\"coref_precision\": coref_precision,\n \"coref_recall\": coref_recall,\n \"coref_f1\": coref_f1,\n \"mention_recall\": mention_recall}\n\n def _create_attended_span_representations(self,\n head_scores: torch.FloatTensor,\n text_embeddings: torch.FloatTensor,\n span_ends: torch.IntTensor,\n span_widths: torch.IntTensor) -> torch.FloatTensor:\n \"\"\"\n Given a tensor of unnormalized attention scores for each word in the document, compute\n distributions over every span with respect to these scores by normalising the headedness\n scores for words inside the span.\n\n Given these headedness distributions over every span, weight the corresponding vector\n representations of the words in the span by this distribution, returning a weighted\n representation of each span.\n\n Parameters\n ----------\n head_scores : ``torch.FloatTensor``, required.\n Unnormalized headedness scores for every word. This score is shared for every\n candidate. 
The only way in which the headedness scores differ over different\n spans is in the set of words over which they are normalized.\n text_embeddings: ``torch.FloatTensor``, required.\n The embeddings with shape (batch_size, document_length, embedding_size)\n over which we are computing a weighted sum.\n span_ends: ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1), representing the end indices\n of each span.\n span_widths : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1) representing the width of each\n span candidates.\n Returns\n -------\n attended_text_embeddings : ``torch.FloatTensor``\n A tensor of shape (batch_size, num_spans, embedding_dim) - the result of\n applying attention over all words within each candidate span.\n \"\"\"\n # Shape: (1, 1, max_span_width)\n max_span_range_indices = util.get_range_vector(self._max_span_width,\n text_embeddings.is_cuda).view(1, 1, -1)\n\n # Shape: (batch_size, num_spans, max_span_width)\n # This is a broadcasted comparison - for each span we are considering,\n # we are creating a range vector of size max_span_width, but masking values\n # which are greater than the actual length of the span.\n span_mask = (max_span_range_indices <= span_widths).float()\n raw_span_indices = span_ends - max_span_range_indices\n # We also don't want to include span indices which are less than zero,\n # which happens because some spans near the beginning of the document\n # are of a smaller width than max_span_width, so we add this to the mask here.\n span_mask = span_mask * (raw_span_indices >= 0).float()\n # Spans\n span_indices = F.relu(raw_span_indices.float()).long()\n\n # Shape: (batch_size * num_spans * max_span_width)\n flat_span_indices = util.flatten_and_batch_shift_indices(span_indices, text_embeddings.size(1))\n\n # Shape: (batch_size, num_spans, max_span_width, embedding_dim)\n span_text_embeddings = util.batched_index_select(text_embeddings, span_indices, flat_span_indices)\n\n # Shape: (batch_size, num_spans, max_span_width)\n span_head_scores = util.batched_index_select(head_scores, span_indices, flat_span_indices).squeeze(-1)\n\n # Shape: (batch_size, num_spans, max_span_width)\n span_head_weights = util.last_dim_softmax(span_head_scores, span_mask)\n\n # Do a weighted sum of the embedded spans with\n # respect to the normalised head score distributions.\n # Shape: (batch_size, num_spans, embedding_dim)\n attended_text_embeddings = util.weighted_sum(span_text_embeddings, span_head_weights)\n\n return attended_text_embeddings\n\n def _compute_span_representations(self,\n text_embeddings: torch.FloatTensor,\n text_mask: torch.FloatTensor,\n span_starts: torch.IntTensor,\n span_ends: torch.IntTensor) -> torch.FloatTensor:\n \"\"\"\n Computes an embedded representation of every candidate span. 
This is a concatenation\n of the contextualized endpoints of the span, an embedded representation of the width of\n the span and a representation of the span's predicted head.\n\n Parameters\n ----------\n text_embeddings : ``torch.FloatTensor``, required.\n The embedded document of shape (batch_size, document_length, embedding_dim)\n over which we are computing a weighted sum.\n text_mask : ``torch.FloatTensor``, required.\n A mask of shape (batch_size, document_length) representing non-padding entries of\n ``text_embeddings``.\n span_starts : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans) representing the start of each span candidate.\n span_ends : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans) representing the end of each span candidate.\n Returns\n -------\n span_embeddings : ``torch.FloatTensor``\n An embedded representation of every candidate span with shape:\n (batch_size, num_spans, context_layer.get_output_dim() * 2 + embedding_size + feature_size)\n \"\"\"\n # Shape: (batch_size, document_length, encoding_dim)\n contextualized_embeddings = self._context_layer(text_embeddings, text_mask)\n\n # Shape: (batch_size, num_spans, encoding_dim)\n start_embeddings = util.batched_index_select(contextualized_embeddings, span_starts.squeeze(-1))\n end_embeddings = util.batched_index_select(contextualized_embeddings, span_ends.squeeze(-1))\n\n # Compute and embed the span_widths (strictly speaking the span_widths - 1)\n # Shape: (batch_size, num_spans, 1)\n span_widths = span_ends - span_starts\n # Shape: (batch_size, num_spans, encoding_dim)\n span_width_embeddings = self._span_width_embedding(span_widths.squeeze(-1))\n\n # Shape: (batch_size, document_length, 1)\n head_scores = self._head_scorer(contextualized_embeddings)\n\n # Shape: (batch_size, num_spans, embedding_dim)\n # Note that we used the original text embeddings, not the contextual ones here.\n attended_text_embeddings = self._create_attended_span_representations(head_scores,\n text_embeddings,\n span_ends,\n span_widths)\n # (batch_size, num_spans, context_layer.get_output_dim() * 2 + embedding_dim + feature_dim)\n span_embeddings = torch.cat([start_embeddings,\n end_embeddings,\n span_width_embeddings,\n attended_text_embeddings], -1)\n return span_embeddings\n\n @staticmethod\n def _prune_and_sort_spans(mention_scores: torch.FloatTensor,\n num_spans_to_keep: int) -> torch.IntTensor:\n \"\"\"\n The indices of the top-k scoring spans according to span_scores. We return the\n indices in their original order, not ordered by score, so that we can rely on\n the ordering to consider the previous k spans as antecedents for each span later.\n\n Parameters\n ----------\n mention_scores : ``torch.FloatTensor``, required.\n The mention score for every candidate, with shape (batch_size, num_spans, 1).\n num_spans_to_keep : ``int``, required.\n The number of spans to keep when pruning.\n Returns\n -------\n top_span_indices : ``torch.IntTensor``, required.\n The indices of the top-k scoring spans. 
Has shape (batch_size, num_spans_to_keep).\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, 1)\n _, top_span_indices = mention_scores.topk(num_spans_to_keep, 1)\n top_span_indices, _ = torch.sort(top_span_indices, 1)\n\n # Shape: (batch_size, num_spans_to_keep)\n top_span_indices = top_span_indices.squeeze(-1)\n return top_span_indices\n\n @staticmethod\n def _generate_valid_antecedents(num_spans_to_keep: int,\n max_antecedents: int,\n is_cuda: bool) -> Tuple[torch.IntTensor,\n torch.IntTensor,\n torch.FloatTensor]:\n \"\"\"\n This method generates possible antecedents per span which survived the pruning\n stage. This procedure is `generic across the batch`. The reason this is the case is\n that each span in a batch can be coreferent with any previous span, but here we\n are computing the possible `indices` of these spans. So, regardless of the batch,\n the 1st span _cannot_ have any antecedents, because there are none to select from.\n Similarly, each element can only predict previous spans, so this returns a matrix\n of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to\n (i - 1) - j if j <= i, or zero otherwise.\n\n Parameters\n ----------\n num_spans_to_keep : ``int``, required.\n The number of spans that were kept while pruning.\n max_antecedents : ``int``, required.\n The maximum number of antecedent spans to consider for every span.\n is_cuda : ``bool``, required.\n Whether the computation is being done on the GPU or not.\n\n Returns\n -------\n valid_antecedent_indices : ``torch.IntTensor``\n The indices of every antecedent to consider with respect to the top k spans.\n Has shape ``(num_spans_to_keep, max_antecedents)``.\n valid_antecedent_offsets : ``torch.IntTensor``\n The distance between the span and each of its antecedents in terms of the number\n of considered spans (i.e not the word distance between the spans).\n Has shape ``(1, max_antecedents)``.\n valid_antecedent_log_mask : ``torch.FloatTensor``\n The logged mask representing whether each antecedent span is valid. Required since\n different spans have different numbers of valid antecedents. For example, the first\n span in the document should have no valid antecedents.\n Has shape ``(1, num_spans_to_keep, max_antecedents)``.\n \"\"\"\n # Shape: (num_spans_to_keep, 1)\n target_indices = util.get_range_vector(num_spans_to_keep, is_cuda).unsqueeze(1)\n\n # Shape: (1, max_antecedents)\n valid_antecedent_offsets = (util.get_range_vector(max_antecedents, is_cuda) + 1).unsqueeze(0)\n\n # This is a broadcasted subtraction.\n # Shape: (num_spans_to_keep, max_antecedents)\n raw_antecedent_indices = target_indices - valid_antecedent_offsets\n\n # In our matrix of indices, the upper triangular part will be negative\n # because the offsets will be > the target indices. 
We want to mask these,\n # because these are exactly the indices which we don't want to predict, per span.\n # We're generating a logspace mask here because we will eventually create a\n # distribution over these indices, so we need the 0 elements of the mask to be -inf\n # in order to not mess up the normalisation of the distribution.\n # Shape: (1, num_spans_to_keep, max_antecedents)\n valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()\n\n # Shape: (num_spans_to_keep, max_antecedents)\n valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()\n return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask\n\n def _compute_span_pair_embeddings(self,\n top_span_embeddings: torch.FloatTensor,\n antecedent_embeddings: torch.FloatTensor,\n antecedent_offsets: torch.FloatTensor):\n \"\"\"\n Computes an embedding representation of pairs of spans for the pairwise scoring function\n to consider. This includes both the original span representations, the element-wise\n similarity of the span representations, and an embedding representation of the distance\n between the two spans.\n\n Parameters\n ----------\n top_span_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the top spans. Has shape\n (batch_size, num_spans_to_keep, embedding_size).\n antecedent_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the antecedent spans we are considering\n for each top span. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, embedding_size).\n antecedent_offsets : ``torch.IntTensor``, required.\n The offsets between each top span and its antecedent spans in terms\n of spans we are considering. Has shape (1, max_antecedents).\n\n Returns\n -------\n span_pair_embeddings : ``torch.FloatTensor``\n Embedding representation of the pair of spans to consider. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)\n\n # Shape: (1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = self._distance_embedding(\n util.bucket_values(antecedent_offsets,\n num_total_buckets=self._num_distance_buckets))\n\n # Shape: (1, 1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)\n\n expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),\n antecedent_embeddings.size(1),\n antecedent_embeddings.size(2),\n antecedent_distance_embeddings.size(-1))\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n span_pair_embeddings = torch.cat([target_embeddings,\n antecedent_embeddings,\n antecedent_embeddings * target_embeddings,\n antecedent_distance_embeddings], -1)\n return span_pair_embeddings\n\n @staticmethod\n def _compute_antecedent_gold_labels(top_span_labels: torch.IntTensor,\n antecedent_labels: torch.IntTensor):\n \"\"\"\n Generates a binary indicator for every pair of spans. This label is one if and\n only if the pair of spans belong to the same cluster. 
The labels are augmented\n with a dummy antecedent at the zeroth position, which represents the prediction\n that a span does not have any antecedent.\n\n Parameters\n ----------\n top_span_labels : ``torch.IntTensor``, required.\n The cluster id label for every span. The id is arbitrary,\n as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).\n antecedent_labels : ``torch.IntTensor``, required.\n The cluster id label for every antecedent span. The id is arbitrary,\n as we just care about the clustering. Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n\n Returns\n -------\n pairwise_labels_with_dummy_label : ``torch.FloatTensor``\n A binary tensor representing whether a given pair of spans belong to\n the same cluster in the gold clustering.\n Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).\n\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n target_labels = top_span_labels.expand_as(antecedent_labels)\n same_cluster_indicator = (target_labels == antecedent_labels).float()\n non_dummy_indicator = (target_labels >= 0).float()\n pairwise_labels = same_cluster_indicator * non_dummy_indicator\n\n # Shape: (batch_size, num_spans_to_keep, 1)\n dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n pairwise_labels_with_dummy_label = torch.cat([dummy_labels, pairwise_labels], -1)\n return pairwise_labels_with_dummy_label\n\n def _compute_coreference_scores(self,\n pairwise_embeddings: torch.FloatTensor,\n top_span_mention_scores: torch.FloatTensor,\n antecedent_mention_scores: torch.FloatTensor,\n antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Computes scores for every pair of spans. Additionally, a dummy label is included,\n representing the decision that the span is not coreferent with anything. For the dummy\n label, the score is always zero. For the true antecedent spans, the score consists of\n the pairwise antecedent score and the unary mention scores for the span and its\n antecedent. The factoring allows the model to blame many of the absent links on bad\n spans, enabling the pruning strategy used in the forward pass.\n\n Parameters\n ----------\n pairwise_embeddings: ``torch.FloatTensor``, required.\n Embedding representations of pairs of spans. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, encoding_dim)\n top_span_mention_scores: ``torch.FloatTensor``, required.\n Mention scores for every span. Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n antecedent_mention_scores: ``torch.FloatTensor``, required.\n Mention scores for every antecedent. 
Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n antecedent_log_mask: ``torch.FloatTensor``, required.\n The log of the mask for valid antecedents.\n\n Returns\n -------\n coreference_scores: ``torch.FloatTensor``\n A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),\n representing the unormalised score for each (span, antecedent) pair\n we considered.\n\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n antecedent_scores = self._antecedent_scorer(\n self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)\n antecedent_scores += top_span_mention_scores + antecedent_mention_scores\n antecedent_scores += antecedent_log_mask\n\n # Shape: (batch_size, num_spans_to_keep, 1)\n shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]\n dummy_scores = Variable(antecedent_scores.data.new(*shape).fill_(0), requires_grad=False)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)\n return coreference_scores\n\n @classmethod\n def from_params(cls, vocab: Vocabulary, params: Params) -> \"CoreferenceResolver\":\n embedder_params = params.pop(\"text_field_embedder\")\n text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)\n context_layer = Seq2SeqEncoder.from_params(params.pop(\"context_layer\"))\n mention_feedforward = FeedForward.from_params(params.pop(\"mention_feedforward\"))\n antecedent_feedforward = FeedForward.from_params(params.pop(\"antecedent_feedforward\"))\n\n feature_size = params.pop(\"feature_size\")\n max_span_width = params.pop(\"max_span_width\")\n spans_per_word = params.pop(\"spans_per_word\")\n max_antecedents = params.pop(\"max_antecedents\")\n lexical_dropout = params.pop(\"lexical_dropout\", 0.2)\n\n init_params = params.pop(\"initializer\", None)\n reg_params = params.pop(\"regularizer\", None)\n initializer = (InitializerApplicator.from_params(init_params)\n if init_params is not None\n else InitializerApplicator())\n regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None\n\n params.assert_empty(cls.__name__)\n return cls(vocab=vocab,\n text_field_embedder=text_field_embedder,\n context_layer=context_layer,\n mention_feedforward=mention_feedforward,\n antecedent_feedforward=antecedent_feedforward,\n feature_size=feature_size,\n max_span_width=max_span_width,\n spans_per_word=spans_per_word,\n max_antecedents=max_antecedents,\n lexical_dropout=lexical_dropout,\n initializer=initializer,\n regularizer=regularizer)\n"
] | [
[
"torch.nn.Dropout",
"torch.sort",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
civic-jabber/data-ingest | [
"bf44c6041ad947547ceede535124c5db004d2f43"
] | [
"civic_jabber_ingest/cli.py"
] | [
"import click\nimport pandas as pd\n\n\nfrom civic_jabber_ingest.external_services.newspaper import load_news\nfrom civic_jabber_ingest.external_services.open_states import get_all_people\nfrom civic_jabber_ingest.regs.va import load_va_regulations\nfrom civic_jabber_ingest.utils.config import read_config\n\n\[email protected]()\ndef main():\n pass\n\n\[email protected](\"run-ingest\")\ndef run_ingest():\n \"\"\"Runs all of the ingest commands that are current implemented. Currently, this\n includes:\n\n 1. Regulations for VA\n \"\"\"\n print(\"Loading VA regs ...\")\n load_va_regulations()\n\n\nmain.add_command(run_ingest)\n\n\[email protected](\"ingest-news\")\[email protected](\"--start\")\[email protected](\"--end\")\ndef ingest_news(start, end):\n \"\"\"Ingests news for states in the specified range. States are in alphabetical\n order.\"\"\"\n states = list(read_config(\"states\").keys())\n states = [state.lower() for state in states]\n load_news(states)\n\n\nmain.add_command(ingest_news)\n\n\[email protected](\"people-to-csv\")\[email protected](\"--state\")\[email protected](\"--outfile\")\ndef people_to_csv(state, outfile):\n \"\"\"Finds a list of legislators for a state and saves the results as a CSV file.\"\"\"\n people = get_all_people(state, per_page=25, links=True)\n\n data = {\"name\": [], \"party\": [], \"role\": [], \"district\": [], \"link\": []}\n for person in people:\n data[\"name\"].append(person[\"name\"])\n data[\"party\"].append(person[\"party\"])\n data[\"role\"].append(person[\"current_role\"][\"title\"])\n data[\"district\"].append(person[\"current_role\"][\"district\"])\n\n if person[\"links\"]:\n data[\"link\"].append(person[\"links\"][0][\"url\"])\n else:\n data[\"link\"].append(None)\n\n people_data = pd.DataFrame(data)\n people_data.to_csv(outfile, index=False)\n\n\nmain.add_command(people_to_csv)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
beaulian/fpn.pytorch | [
"49fe36711cff71e26d9bef838613577dabd02336"
] | [
"lib/datasets/pascal_voc.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport xml.dom.minidom as minidom\n\nimport os\n# import PIL\nimport numpy as np\nimport scipy.sparse\nimport subprocess\nimport six.moves.cPickle as cPickle\nimport math\nimport glob\nimport uuid\nimport scipy.io as sio\nimport xml.etree.ElementTree as ET\n\nfrom .imdb import imdb\nfrom .imdb import ROOT_DIR\nfrom datasets import ds_utils\nfrom .voc_eval import voc_eval\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom model.utils.config import cfg\n\n\n# <<<< obsolete\n\n\nclass pascal_voc(imdb):\n def __init__(self, image_set, year, devkit_path=None):\n imdb.__init__(self, 'voc_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)\n self._classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.jpg'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n # self._roidb_handler = self.selective_search_roidb\n self._roidb_handler = self.gt_roidb\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp4'\n\n # PASCAL specific config options\n self.config = {'cleanup': True,\n 'use_salt': True,\n 'use_diff': False,\n 'matlab_eval': False,\n 'rpn_file': None,\n 'min_size': 2}\n\n assert os.path.exists(self._devkit_path), \\\n 'VOCdevkit path does not exist: {}'.format(self._devkit_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_id_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return i\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function 
loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = cPickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_selective_search_roidb(self, gt_roidb):\n filename = os.path.abspath(os.path.join(cfg.DATA_DIR,\n 'selective_search_data',\n self.name + '.mat'))\n assert os.path.exists(filename), \\\n 'Selective search data not found at: {}'.format(filename)\n raw_data = sio.loadmat(filename)['boxes'].ravel()\n\n box_list = []\n for i in range(raw_data.shape[0]):\n boxes = raw_data[i][:, (1, 0, 3, 2)] - 1\n keep = ds_utils.unique_boxes(boxes)\n boxes = boxes[keep, :]\n keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])\n boxes = boxes[keep, :]\n box_list.append(boxes)\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n # if not self.config['use_diff']:\n # # Exclude the samples labeled as difficult\n # non_diff_objs = [\n # obj for obj in objs if int(obj.find('difficult').text) == 0]\n # # if len(non_diff_objs) != len(objs):\n # # print 'Removed {} difficult objects'.format(\n # # len(objs) - len(non_diff_objs))\n # objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # 
\"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n ishards = np.zeros((num_objs), dtype=np.int32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n\n diffc = obj.find('difficult')\n difficult = 0 if diffc == None else int(diffc.text)\n ishards[ix] = difficult\n\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_ishard': ishards,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(self._year) < 2010 else False\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n rec, prec, ap = voc_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:\n cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print('Running:\\n{}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(all_boxes)\n self._do_python_eval(output_dir)\n if self.config['matlab_eval']:\n self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n d = pascal_voc('trainval', '2007')\n res = d.roidb\n from IPython import embed;\n\n embed()\n"
] | [
[
"scipy.io.loadmat",
"numpy.zeros",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
supunab/Lantern | [
"f453de532da638c1f467953b32bbe49a3dedfa45",
"f453de532da638c1f467953b32bbe49a3dedfa45"
] | [
"src/out/NIPS18evaluation/evaluationRNN/min-char-rnn-pytorch.py",
"src/out/PLDI19evaluation/deepspeech2/ds2-tensorflow/src/model.py"
] | [
"\"\"\"\nMinimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)\nBSD License\n\"\"\"\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\ndef run(write_to):\n\n torch.set_num_threads(1)\n\n start = time.time()\n data = open('graham.txt', 'r').read() # should be simple plain text file\n chars = list(set(data))\n data_size, vocab_size = len(data), len(chars)\n print('data has %d characters, %d unique.' % (data_size, vocab_size))\n char_to_ix = { ch:i for i,ch in enumerate(chars) }\n ix_to_char = { i:ch for i,ch in enumerate(chars) }\n\n # hyper-parameters\n hidden_size = 50 # size of hidden layer of neurons\n seq_length = 20 # number of steps to unroll the RNN for\n batch_size = 20\n learning_rate = 1e-1\n n_iter = 5000\n iter_step = 100\n\n torch.manual_seed(1)\n\n def lineToTensor(line):\n tensor = torch.zeros(seq_length, batch_size, vocab_size)\n for i in range(seq_length):\n for j in range(batch_size):\n tensor[i][j][char_to_ix[line[j * seq_length + i]]] = 1\n return tensor\n\n def lineToLongTensor(line):\n tensor = torch.LongTensor(seq_length, batch_size).zero_()\n for i in range(seq_length):\n for j in range(batch_size):\n tensor[i][j] = char_to_ix[line[j * seq_length + i]]\n return tensor\n\n class RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(RNN, self).__init__()\n\n self.hidden_size = hidden_size\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n self.i2o = nn.Linear(hidden_size, output_size)\n\n def forward(self, input, hidden):\n combined = torch.cat((input, hidden), 1)\n hidden = F.tanh(self.i2h(combined))\n output = self.i2o(hidden)\n return output, hidden\n\n def initHidden(self):\n return Variable(torch.zeros(batch_size, self.hidden_size))\n\n rnn = RNN(vocab_size, hidden_size, vocab_size)\n optimizer = torch.optim.Adagrad(rnn.parameters(), lr = learning_rate)\n criterion = nn.CrossEntropyLoss()\n\n def train(output_tensor, input_tensor):\n hidden = rnn.initHidden()\n\n optimizer.zero_grad()\n\n loss = 0\n\n for i in range(input_tensor.size()[0]):\n output, hidden = rnn(input_tensor[i], hidden)\n loss += criterion(output, output_tensor[i])\n\n loss.backward()\n\n # grad clipping and stepping\n torch.nn.utils.clip_grad_norm(rnn.parameters(), 5.0, norm_type=1)\n optimizer.step()\n\n return loss.data[0]\n\n end = time.time()\n prepareTime = end-start\n\n loss_save = []\n p = -seq_length * batch_size\n start = time.time()\n for iter in range(n_iter + 1):\n p += seq_length * batch_size\n if p+seq_length * batch_size+1 >= len(data): p = 0\n\n inputs = Variable(lineToTensor(data[p:p+seq_length * batch_size]))\n targets = Variable(lineToLongTensor(data[p+1:p+seq_length * batch_size +1]))\n loss = train(targets, inputs)\n if iter % iter_step == 0:\n print('iter %d, loss: %f' % (iter, loss))\n loss_save.append(loss)\n\n end = time.time()\n loopTime = end -start\n\n with open(write_to, \"w\") as f:\n f.write(\"unit: \" + \"100 iteration\\n\")\n for loss in loss_save:\n f.write(\"{}\\n\".format(loss))\n f.write(\"run time: \" + str(prepareTime) + \" \" + str(loopTime) + \"\\n\")\n\nif __name__ == '__main__':\n import sys\n if (len(sys.argv) != 2):\n print(\"should have a file to write results to\")\n exit(0)\n run(sys.argv[1])\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import conv2d, avg_pool2d, max_pool2d\nfrom tensorflow.contrib.layers import batch_norm, l2_regularizer\nfrom tensorflow.contrib.framework import add_arg_scope\nfrom tensorflow.contrib.framework import arg_scope\nfrom tensorflow.python.ops import control_flow_ops\nimport math\nimport numpy as np\n\ndef relux(x, capping=None):\n \"\"\"Clipped ReLU\"\"\"\n x = tf.nn.relu(x)\n if capping is not None:\n y = tf.minimum(x, capping)\n return y\n\ndef _variable_on_cpu(name, shape, initializer=None, use_fp16=False, trainable=True):\n with tf.device('/cpu'):\n dtype = tf.float16 if use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)\n return var\n\ndef _variable_on_gpu(name, shape, initializer=None, use_fp16=False, trainable=True):\n with tf.device('/device:GPU:0'):\n dtype = tf.float16 if use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)\n return var\n\ndef getInputSize(sample_rate, window_size):\n rnn_input_size = int(math.floor((sample_rate * window_size) / 2) + 1)\n rnn_input_size = int(math.floor(rnn_input_size - 41) / 2 + 1)\n rnn_input_size = int(math.floor(rnn_input_size - 21) / 2 + 1)\n rnn_input_size *= 32\n return run_input_size\n\ndef getSeqLength(raw_length):\n seqL = tf.math.floor((raw_length - 11) / 2)\n seqL = seqL + 1 # tf.constant(1, dtype=tf.int32)\n seqL = seqL - 10 # tf.constant(10)\n return seqL\n\ndef batchNorm2D(input1, paramSize, variance_epsilon=0.00001):\n batch_mean, batch_var = tf.nn.moments(input1, [0, 2, 3], name='moments', keep_dims=True)\n ema = tf.train.ExponentialMovingAverage(decay=0.9997)\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n mean, var = control_flow_ops.cond(tf.constant(True), mean_var_with_update, lambda:(ema.average(batch_mean), ema.average(batch_var)))\n\n offset = _variable_on_gpu('offset', [1, paramSize, 1, 1], initializer=tf.zeros_initializer(), trainable=True)\n scale = _variable_on_gpu('scale', [1, paramSize, 1, 1], initializer=tf.ones_initializer(), trainable=True)\n return tf.nn.batch_normalization(input1, mean, var, offset, scale, variance_epsilon)\n\n\ndef batchNorm1D(input1, paramSize, variance_epsilon=0.00001):\n batch_mean, batch_var = tf.nn.moments(input1, [0], name='moments', keep_dims=True)\n ema = tf.train.ExponentialMovingAverage(decay=0.9997)\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n mean, var = control_flow_ops.cond(tf.constant(True), mean_var_with_update, lambda:(ema.average(batch_mean), ema.average(batch_var)))\n\n offset = _variable_on_gpu('offset', [paramSize], initializer=tf.zeros_initializer(), trainable=True)\n scale = _variable_on_gpu('scale', [paramSize], initializer=tf.ones_initializer(), trainable=True)\n\n return tf.nn.batch_normalization(input1, mean, var, offset, scale, variance_epsilon)\n\n\ndef fully_connected(inputs, batchSize, inputSize, num_classes):\n # 1D batchNorm\n with tf.variable_scope(\"batchNorm1D\"):\n inputs = tf.reshape(inputs, [-1, inputSize])\n batch_norm = batchNorm1D(inputs, inputSize)\n 
batch_norm.set_shape([None, inputSize])\n with tf.variable_scope(\"fully_connected\"):\n linear = tf.contrib.layers.fully_connected(batch_norm, num_classes, activation_fn=None)\n outputs = tf.reshape(linear, [-1, batchSize, num_classes])\n return outputs\n\n\ndef BatchRNN(inputs, batchSize, inputSize, hiddenSize):\n\n with tf.variable_scope(\"batchNorm1D\"):\n inputs = tf.reshape(inputs, [-1, inputSize])\n batch_norm = batchNorm1D(inputs, inputSize)\n inputs = tf.reshape(batch_norm, [-1, batchSize, inputSize])\n inputs.set_shape([None, batchSize, inputSize])\n\n # bidirectional RNN\n # rnn_cell = tf.contrib.cudnn_rnn.CudnnRNNTanh(1, hidden_size, direction='bidirectional')\n with tf.variable_scope(\"bidirectionalRNN\"):\n# cell_fw = tf.nn.rnn_cell.BasicRNNCell(hiddenSize)\n cell_fw = tf.keras.layers.SimpleRNNCell(hiddenSize)\n# cell_bw = tf.nn.rnn_cell.BasicRNNCell(hiddenSize)\n cell_bw = tf.keras.layers.SimpleRNNCell(hiddenSize)\n # initial_state = rnn_cell._zero_state(inputShape[1])\n # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n # 'state' is a tensor of shape [batch_size, cell_state_size]\n ((outputs_fw, outputs_bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw,\n cell_bw, inputs, dtype=tf.float32,\n time_major=True)\n\n #outputs, _ = tf.nn.dynamic_rnn(rnn_cell, inputs, sequence_length=None,\n # initial_state=initial_state, time_major=True,\n # dtype=tf.float32)\n # shape = tf.shape(outputs)\n # outputs = tf.reshape(outputs, [shape[0], shape[1], 2, -1])\n # outputs = tf.math.reduce_sum(outputs, axis=2, keepdims=None)\n outputs = outputs_fw + outputs_bw\n return outputs # checkshape\n\n\ndef inference(feats, sample_rate, window_size, rnn_hidden_size, num_classes):\n\n with tf.variable_scope(\"convolution1\"):\n step1 = conv2d(feats, 32, [41, 11], stride=[2, 2], padding='VALID',\n data_format='NCHW', activation_fn=None)\n # do batchNorm separately, otherwise it errors (if added in conv2d)\n step1 = batchNorm2D(step1, 32)\n step2 = relux(step1, capping = 20)\n\n with tf.variable_scope(\"convolution2\"):\n step3 = conv2d(step2, 32, [21, 11], stride=[2, 1], padding='VALID',\n data_format='NCHW', activation_fn=None)\n step3 = batchNorm2D(step3, 32)\n step4 = relux(step3, capping = 20)\n\n # reshape and transpose\n with tf.variable_scope(\"reshape\"):\n step5 = tf.reshape(step4, [32, 32 * 21, -1])\n step6 = tf.transpose(step5, perm=[2, 0, 1])\n step6.set_shape([None, 32, 32 * 21])\n \n # RNN layers\n # rnn_input_size = getInputSize(sample_rate, window_size)\n with tf.variable_scope(\"BatchRNN1\"):\n step7 = BatchRNN(step6, 32, 32 * 21, rnn_hidden_size)\n with tf.variable_scope(\"BatchRNN2\"):\n step8 = BatchRNN(step7, 32, rnn_hidden_size, rnn_hidden_size)\n with tf.variable_scope(\"BatchRNN3\"):\n step9 = BatchRNN(step8, 32, rnn_hidden_size, rnn_hidden_size)\n\n # fc layer\n with tf.variable_scope(\"fc\"):\n step10 = fully_connected(step9, 32, rnn_hidden_size, num_classes)\n return step10\n\n\ndef loss(feats, sample_rate, window_size, rnn_hidden_size, labels, percent, raw_length, num_classes):\n logits = inference(feats, sample_rate, window_size, rnn_hidden_size, num_classes)\n # Calculate the average ctc loss across the batch.\n # labels: An int32 SparseTensor. labels.indices[i, :] == [b, t] means labels.values[i] stores the id for (batch b, time t).\n # labels.values[i] must take on values in [0, num_labels). 
See core/ops/ctc_ops.cc for more details.\n reducedLength = getSeqLength(raw_length)\n seqLength = tf.math.floor(reducedLength * percent)\n seqLength = tf.cast(seqLength, dtype=tf.int32)\n # pp = tf.print(seqLength)\n\n with tf.variable_scope(\"ctc_loss\"):\n# with tf.control_dependencies([pp]):\n ctc_loss = tf.nn.ctc_loss(labels=labels, inputs=logits,\n sequence_length=seqLength,\n preprocess_collapse_repeated=True,\n time_major=True)\n # ctc_loss = tf.Print(ctc_loss, [ctc_loss], \"CTC loss: \", summarize=32)\n ctc_loss_mean = tf.reduce_mean(ctc_loss, name='ctc_loss')\n return ctc_loss_mean\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.zeros",
"torch.cat",
"torch.manual_seed",
"torch.nn.Linear",
"torch.set_num_threads"
],
[
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.nn.moments",
"tensorflow.ones_initializer",
"tensorflow.nn.batch_normalization",
"tensorflow.zeros_initializer",
"tensorflow.identity",
"tensorflow.contrib.layers.conv2d",
"tensorflow.keras.layers.SimpleRNNCell",
"tensorflow.math.floor",
"tensorflow.nn.ctc_loss",
"tensorflow.nn.relu",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
mcflugen/terrainbento | [
"1b756477b8a8ab6a8f1275b1b30ec84855c840ea",
"1b756477b8a8ab6a8f1275b1b30ec84855c840ea",
"1b756477b8a8ab6a8f1275b1b30ec84855c840ea"
] | [
"terrainbento/derived_models/model_440_basicChSa/model_440_basicChSa.py",
"terrainbento/base_class/stochastic_erosion_model.py",
"terrainbento/derived_models/model_108_basicDdSt/model_108_basicDdSt.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodel_440_basicChSa.py: erosion model using depth-dependent cubic diffusion\nwith a soil layer, basic stream power, and discharge proportional to drainage\narea.\n\nModel 440 BasicChSa\n\nLandlab components used: FlowRouter, DepressionFinderAndRouter,\n FastscapeStreamPower, DepthDependentCubicDiffuser,\n ExponentialWeatherer\n\n@author: gtucker\n@author: Katherine Barnhart\n\"\"\"\n\nfrom terrainbento.base_class import ErosionModel\nfrom landlab.components import (FlowAccumulator, DepressionFinderAndRouter,\n FastscapeEroder, DepthDependentTaylorDiffuser,\n ExponentialWeatherer)\nimport numpy as np\n\n\nclass BasicChSa(ErosionModel):\n \"\"\"\n A BasicChSa model computes erosion using depth-dependent cubic diffusion\n with a soil layer, basic stream power, and Q~A.\n \"\"\"\n\n def __init__(self, input_file=None, params=None,\n BaselevelHandlerClass=None):\n \"\"\"Initialize the BasicChSa model.\"\"\"\n\n # Call ErosionModel's init\n super(BasicChSa, self).__init__(input_file=input_file,\n params=params,\n BaselevelHandlerClass=BaselevelHandlerClass)\n\n self.K_sp = self.get_parameter_from_exponent('K_sp')\n linear_diffusivity = (self._length_factor**2.)*self.get_parameter_from_exponent('linear_diffusivity') # has units length^2/time\n try:\n initial_soil_thickness = (self._length_factor)*self.params['initial_soil_thickness'] # has units length\n except KeyError:\n initial_soil_thickness = 1.0 # default value\n soil_transport_decay_depth = (self._length_factor)*self.params['soil_transport_decay_depth'] # has units length\n max_soil_production_rate = (self._length_factor)*self.params['max_soil_production_rate'] # has units length per time\n soil_production_decay_depth = (self._length_factor)*self.params['soil_production_decay_depth'] # has units length\n\n # Create soil thickness (a.k.a. 
depth) field\n if 'soil__depth' in self.grid.at_node:\n soil_thickness = self.grid.at_node['soil__depth']\n else:\n soil_thickness = self.grid.add_zeros('node', 'soil__depth')\n\n # Create bedrock elevation field\n if 'bedrock__elevation' in self.grid.at_node:\n bedrock_elev = self.grid.at_node['bedrock__elevation']\n else:\n bedrock_elev = self.grid.add_zeros('node', 'bedrock__elevation')\n\n soil_thickness[:] = initial_soil_thickness\n bedrock_elev[:] = self.z - initial_soil_thickness\n\n # Instantiate a FlowAccumulator with DepressionFinderAndRouter using D8 method\n self.flow_router = FlowAccumulator(self.grid,\n flow_director='D8',\n depression_finder = DepressionFinderAndRouter)\n\n # Instantiate a FastscapeEroder component\n self.eroder = FastscapeEroder(self.grid,\n K_sp=self.K_sp,\n m_sp=self.params['m_sp'],\n n_sp=self.params['n_sp'])\n\n # Instantiate a weathering component\n self.weatherer = ExponentialWeatherer(self.grid,\n max_soil_production_rate=max_soil_production_rate,\n soil_production_decay_depth=soil_production_decay_depth)\n\n # Instantiate a soil-transport component\n self.diffuser = DepthDependentTaylorDiffuser(self.grid,\n linear_diffusivity=linear_diffusivity,\n slope_crit=self.params['slope_crit'],\n soil_transport_decay_depth=soil_transport_decay_depth,\n nterms=11)\n\n def run_one_step(self, dt):\n \"\"\"\n Advance model for one time-step of duration dt.\n \"\"\"\n\n # Route flow\n self.flow_router.run_one_step()\n\n # Get IDs of flooded nodes, if any\n flooded = np.where(self.flow_router.depression_finder.flood_status==3)[0]\n\n # Do some erosion (but not on the flooded nodes)\n # (if we're varying K through time, update that first)\n if self.opt_var_precip:\n self.eroder.K = (self.K_sp\n * self.pc.get_erodibility_adjustment_factor(self.model_time))\n\n self.eroder.run_one_step(dt, flooded_nodes=flooded)\n\n # We must also now erode the bedrock where relevant. If water erosion\n # into bedrock has occurred, the bedrock elevation will be higher than\n # the actual elevation, so we simply re-set bedrock elevation to the\n # lower of itself or the current elevation.\n b = self.grid.at_node['bedrock__elevation']\n b[:] = np.minimum(b, self.grid.at_node['topographic__elevation'])\n\n # Calculate regolith-production rate\n self.weatherer.calc_soil_prod_rate()\n\n # Do some soil creep\n self.diffuser.run_one_step(dt,\n dynamic_dt=True,\n if_unstable='raise',\n courant_factor=0.1)\n\n # calculate model time\n self.model_time += dt\n\n # Lower outlet\n self.update_outlet(dt)\n\n # Check walltime\n self.check_walltime()\n\ndef main():\n \"\"\"Executes model.\"\"\"\n import sys\n\n try:\n infile = sys.argv[1]\n except IndexError:\n print('Must include input file name on command line')\n sys.exit(1)\n\n cdsp = BasicChSa(input_file=infile)\n cdsp.run()\n\n\nif __name__ == '__main__':\n main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nBase class for terrainbento models using stochastic hydrology.\n\nTEXT here.\n\"\"\"\n\nfrom terrainbento.base_class import ErosionModel\n\nfrom landlab.components import (PrecipitationDistribution)\n\nimport numpy as np\nimport scipy.stats as stats\nimport textwrap\n\n_STRING_LENGTH = 80\n\nclass StochasticErosionModel(ErosionModel):\n \"\"\"\n An StochasticErosionModel is a basic model for erosion and landscape\n evolution in a watershed, as represented by an input DEM.\n\n This is a base class that handles only processes used by all Stochastic\n Hydrology based modeles.\n \"\"\"\n\n def __init__(self, input_file=None, params=None,\n BaselevelHandlerClass=None):\n \"\"\"Initialize the _BaseSt base class.\"\"\"\n\n # Call _StochasticErosionModel init\n super(_StochasticErosionModel, self).__init__(input_file=input_file,\n params=params,\n BaselevelHandlerClass=BaselevelHandlerClass)\n\n self.opt_stochastic_duration = (self.params['opt_stochastic_duration'])\n # initialize record for storms. Depending on how this model is run\n # (stochastic time, number_time_steps>1, more manually) the dt may\n # change. Thus, rather than writing routines to reconstruct the time\n # series of precipitation from the dt could change based on users use,\n # we'll record this with the model run instead of re-running.\n\n # make this the non-default option.\n\n # First test for consistency between filenames and boolean parameters\n if (((self.params.get('storm_sequence_filename') is not None) or\n (self.params.get('frequency_filename') is not None)) and\n (self.params.get('record_rain') != True)):\n print('A storm sequence or frequency filename was specified but '\n 'record_rain was not set or set to False. Overriding '\n 'record_rain and recording rain so that the file can be '\n 'written')\n self.params['record_rain'] = True\n\n # Second, test that\n if self.params.get('record_rain'):\n self.record_rain = True\n self.rain_record = {'event_start_time': [],\n 'event_duration': [],\n 'rainfall_rate': [],\n 'runoff_rate': []}\n else:\n self.record_rain = False\n self.rain_record = None\n\n # check that if (self.opt_stochastic_duration==True) that\n # frequency_filename does not exist. For stochastic time, computing\n # exceedance frequencies is not super sensible. So make a warning that\n # it won't be done.\n if ((self.opt_stochastic_duration==True) and\n (self.params.get('frequency_filename'))):\n print('opt_stochastic_duration is set to True and a '\n 'frequency_filename was specified. 
Frequency calculations '\n 'are not done with stochastic time so the filename is being '\n 'ignored.')\n\n def run_for_stochastic(self, dt, runtime):\n \"\"\"\n Run model without interruption for a specified time period, using\n random storm/interstorm sequence.\n \"\"\"\n self.rain_generator.delta_t = dt\n self.rain_generator.run_time = runtime\n for (tr, p) in self.rain_generator.yield_storm_interstorm_duration_intensity():\n self.rain_rate = p\n self.run_one_step(tr)\n\n def instantiate_rain_generator(self):\n \"\"\"Instantiate RainGenerator.\"\"\"\n # Handle option for duration.\n self.opt_stochastic_duration = (self.params['opt_stochastic_duration'])\n if self.opt_stochastic_duration:\n self.rain_generator = \\\n PrecipitationDistribution(mean_storm_duration=self.params['mean_storm_duration'],\n mean_interstorm_duration=self.params['mean_interstorm_duration'],\n mean_storm_depth=self.params['mean_storm_depth'],\n total_t=self.params['run_duration'],\n delta_t=self.params['dt'],\n random_seed=int(self.params['random_seed']))\n self.run_for = self.run_for_stochastic # override base method\n else:\n from scipy.special import gamma\n mean_storm__intensity = (self._length_factor)*self.params['mean_storm__intensity']# has units length per time\n intermittency_factor = self.params['intermittency_factor']\n\n self.rain_generator = \\\n PrecipitationDistribution(mean_storm_duration=1.0,\n mean_interstorm_duration=1.0,\n mean_storm_depth=1.0,\n random_seed=int(self.params['random_seed']))\n self.intermittency_factor = intermittency_factor\n self.mean_storm__intensity = mean_storm__intensity\n self.shape_factor = self.params['precip_shape_factor']\n self.scale_factor = (self.mean_storm__intensity\n / gamma(1.0 + (1.0 / self.shape_factor)))\n self.n_sub_steps = int(self.params['number_of_sub_time_steps'])\n\n def reset_random_seed(self):\n \"\"\"Re-set the random number generation sequence.\"\"\"\n try:\n seed = int(self.params['random_seed'])\n except KeyError:\n seed = 0\n self.rain_generator.seed_generator(seedval=seed)\n\n def handle_water_erosion(self, dt, flooded):\n \"\"\"Handle water erosion.\n\n If we are running stochastic duration, then self.rain_rate will\n have been calculated already. It might be zero, in which case we\n are between storms, so we don't do water erosion.\n\n If we're NOT doing stochastic duration, then we'll run water\n erosion for one or more sub-time steps, each with its own\n randomly drawn precipitation intensity.\n\n This routine assumes that a model-specific method\n\n **calc_runoff_and_discharge()**\n\n will have been defined.\n\n For example, BasicStVs calculated runoff and discharge in a different\n way than the other models.\n\n If the model has a function **update_threshold_field**, this\n function will test for it and run it. 
This is presently done in\n BasicDdSt.\n\n \"\"\"\n # (if we're varying precipitation parameters through time, update them)\n if self.opt_var_precip:\n self.intermittency_factor, self.mean_storm__intensity = self.pc.get_current_precip_params(self.model_time)\n\n if self.opt_stochastic_duration and self.rain_rate > 0.0:\n\n runoff = self.calc_runoff_and_discharge()\n\n self.eroder.run_one_step(dt, flooded_nodes=flooded,\n rainfall_intensity_if_used=runoff)\n if self.record_rain:\n #save record into the rain record\n self.record_rain_event(self.model_time, dt, self.rain_rate, runoff)\n\n elif self.opt_stochastic_duration and self.rain_rate <= 0.0:\n # calculate and record the time with no rain:\n if self.record_rain:\n self.record_rain_event(self.model_time, dt, 0, 0)\n\n elif not self.opt_stochastic_duration:\n\n dt_water = ((dt * self.intermittency_factor)\n / float(self.n_sub_steps))\n for i in range(self.n_sub_steps):\n self.rain_rate = \\\n self.rain_generator.generate_from_stretched_exponential(\n self.scale_factor, self.shape_factor)\n\n runoff = self.calc_runoff_and_discharge()\n self.eroder.run_one_step(dt_water, flooded_nodes=flooded,\n rainfall_intensity_if_used=runoff)\n #save record into the rain record\n if self.record_rain:\n event_start_time = self.model_time + (i * dt_water)\n self.record_rain_event(event_start_time, dt_water, self.rain_rate, runoff)\n\n # once all the rain time_steps are complete,\n # calculate and record the time with no rain:\n if self.record_rain:\n\n # calculate dry time\n dt_dry = dt * (1 - self.intermittency_factor)\n\n # if dry time is greater than zero, record.\n if dt_dry > 0:\n event_start_time = self.model_time + ((i + 1) * dt_water)\n self.record_rain_event(event_start_time, dt_dry, 0.0, 0.0)\n\n def finalize(self):\n\n # if rain was recorded, write it out.\n if self.record_rain:\n filename = self.params.get('storm_sequence_filename')\n self.write_storm_sequence_to_file(filename)\n\n if self.record_rain and (self.opt_stochastic_duration==False):\n # if opt_stochastic_duration = False, calculate exceedance\n # frequencies and write out.\n frequency_filename = self.params.get('frequency_filename')\n self.write_exceedance_frequency_file(frequency_filename)\n\n def record_rain_event(self, event_start_time, event_duration, rainfall_rate, runoff_rate):\n \"\"\"Record rain events.\n\n Create a record of event start time, event duration, rainfall rate, and\n runoff rate.\n\n \"\"\"\n self.rain_record['event_start_time'].append(event_start_time)\n self.rain_record['event_duration'].append(event_duration)\n self.rain_record['rainfall_rate'].append(rainfall_rate)\n self.rain_record['runoff_rate'].append(runoff_rate)\n\n def write_storm_sequence_to_file(self, filename=None):\n \"\"\"\n Write event duration and intensity to a formatted text file.\n \"\"\"\n\n # Open a file for writing\n if self.record_rain == False:\n raise ValueError('Rain was not recorded when the model run. 
To '\n 'record rain, set the parameter \"record_rain\"'\n 'to True.')\n if filename is None:\n filename = 'event_sequence.txt'\n stormfile = open(filename, 'w')\n stormfile.write('event_start_time' + ',' +\n 'event_duration' + ',' +\n 'rainfall_rate' + ',' +\n 'runoff_rate' + '\\n')\n\n n_events = len(self.rain_record['event_start_time'])\n for i in range(n_events):\n stormfile.write(str(self.rain_record['event_start_time'][i]) + ',' +\n str(self.rain_record['event_duration'][i]) + ',' +\n str(self.rain_record['rainfall_rate'][i]) + ',' +\n str(self.rain_record['runoff_rate'][i])+ '\\n')\n\n # Close the file\n stormfile.close()\n\n def write_exceedance_frequency_file(self, filename=None):\n \"\"\"\n \"\"\"\n if filename is None:\n filename = 'exceedance_summary.txt'\n exceedance_file = open(filename, 'w')\n\n # calculate the number of wet days per year.\n number_of_days_per_year = 365\n nwet = int(np.ceil(self.intermittency_factor * number_of_days_per_year))\n #ndry = int(number_of_days_per_year - nwet)\n\n # Write some basic information about the distribution to the file.\n exceedance_file.write('Section 1: Distribution Description\\n')\n exceedance_file.write('Scale Factor: ' + str(self.scale_factor) + '\\n')\n exceedance_file.write('Shape Factor: ' + str(self.shape_factor) + '\\n')\n exceedance_file.write(('Intermittency Factor: ' +\n str(self.intermittency_factor) + '\\n'))\n exceedance_file.write(('Number of wet days per year: ' +\n str(nwet) + '\\n\\n'))\n message_text = ('The scale factor that describes this distribution is ' +\n 'calculated based on a provided value for the mean wet day rainfall.')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n\n exceedance_file.write(('This provided value was:\\n' +\n str(self.mean_storm__intensity) + '\\n'))\n\n # calculate the predictions for 10, 25, and 100 year event based on\n # the analytical form of the exceedance function.\n event_intervals = np.array([10., 25, 100.])\n\n # calculate the probability of each event based on the number of years\n # and the number of wet days per year.\n daily_distribution_exceedance_probabilities = (1./(nwet * event_intervals))\n\n # exceedance probability is given as\n # Probability of daily rainfall of p exceeding a value of po is given as:\n #\n # P(p>po) = e^(-(po/P)^c)\n # P = scale\n # c = shape\n #\n # this can be re-arranged to\n #\n # po = P * (- ln (P(p>po))) ^ (1 / c)\n\n expected_rainfall = self.scale_factor * (-1. * np.log(daily_distribution_exceedance_probabilities)) ** (1. 
/ self.shape_factor)\n\n exceedance_file.write('\\n\\nSection 2: Theoretical Predictions\\n')\n\n message_text = ('Based on the analytical form of the wet day rainfall ' +\n 'distribution, we can calculate theoretical predictions ' +\n 'of the daily rainfall amounts associated with N-year events.')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n\n for i in range(len(daily_distribution_exceedance_probabilities)):\n exceedance_file.write(('Expected value for the wet day total of the ' +\n str(event_intervals[i]) +\n ' year event is: ' +\n str(np.round(expected_rainfall[i], decimals=3)) + '\\n'))\n\n # get rainfall record and filter out time without any rain\n all_precipitation = np.array(self.rain_record['rainfall_rate'])\n rainy_day_inds = np.where(all_precipitation>0)\n if len(rainy_day_inds[0])>0:\n wet_day_totals = all_precipitation[rainy_day_inds]\n else:\n raise ValueError('No rain fell, which makes calculating exceedance '\n 'frequencies problematic. We recommend that you '\n 'check the valude of intermittency_factor.')\n\n # construct the distribution of yearly maxima.\n # here an effective year is represented by the number of draws implied\n # by the intermittency factor\n\n # first calculate the number of effective years.\n num_days = len(wet_day_totals)\n num_effective_years = int(np.floor(wet_day_totals.size/nwet))\n\n\n # write out the calculated event only if the duration\n exceedance_file.write('\\n\\n')\n message_text = ('Section 3: Predicted 95% confidence bounds on the ' +\n 'exceedance values based on number of samples drawn.')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n\n message_text = ('The ability to empirically estimate the rainfall ' +\n 'associated with an N-year event depends on the ' +\n 'probability of that event occurring and the number of ' +\n 'draws from the probability distribution. The ability ' +\n 'to estimate increases with an increasing number of samples ' +\n 'and decreases with decreasing probability of event ' +\n 'occurrence.')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n\n message_text = ('Exceedance values calculated from ' + str(len(wet_day_totals)) +\n ' draws from the daily-rainfall probability distribution. '+\n 'This corresponds to ' + str(num_effective_years) +\n ' effective years.')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n\n # For a general probability distribution, f, with a continuous not zero\n # quantile function at F-1(p), the order statistic associated with the\n # p percentile given n draws from the distribution is given as:\n\n # X[np] ~ AN ( F-1(p), (p * (p - 1 ))/ (n * [f (F-1 (p)) ]**2))\n\n # where AN is the asymptotic normal. The value for the variance is more\n # intuitive once you consider that [f (F-1 (p)) ] is the probability\n # that an event of percentile p will occur. 
Thus the variance increases\n # non-linearly with decreasing event probability and decreases linearly\n # with increaseing observations.\n\n # we've already calculated F-1(p) for our events, and it is represented\n # by the variable expected_rainfall\n\n daily_distribution_event_percentile = 1.0 - daily_distribution_exceedance_probabilities\n\n event_probability = ((self.shape_factor/self.scale_factor) *\n ((expected_rainfall/self.scale_factor) ** (self.shape_factor - 1.0)) *\n (np.exp(-1. * (expected_rainfall/self.scale_factor) ** self.shape_factor)))\n\n event_variance = ((daily_distribution_event_percentile * (1.0 - daily_distribution_event_percentile)) /\n (num_days * (event_probability ** 2)))\n\n event_std = event_variance ** 0.5\n\n t_statistic = stats.t.ppf(0.975, num_effective_years, loc=0, scale=1)\n\n exceedance_file.write('\\n')\n message_text = ('For the given number of samples, the 95% ' +\n 'confidence bounds for the following event ' +\n 'return intervals are as follows: ')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n for i in range(len(event_intervals)):\n\n min_expected_val = expected_rainfall[i] - t_statistic * event_std[i]\n max_expected_val = expected_rainfall[i] + t_statistic * event_std[i]\n\n exceedance_file.write(('Expected range for the wet day total of the ' +\n str(event_intervals[i]) +\n ' year event is: (' +\n str(np.round(min_expected_val, decimals=3)) + ', ' +\n str(np.round(max_expected_val, decimals=3)) + ')\\n'))\n # next, calculate the emperical exceedance values, if a sufficient record\n # exists.\n\n # inititialize a container for the maximum yearly precipitation.\n maximum_yearly_precipitation = np.nan * np.zeros((num_effective_years))\n for yi in range(num_effective_years):\n\n # identify the starting and ending index coorisponding to the\n # year\n starting_index = yi*nwet\n ending_index = starting_index+nwet\n\n # select the years portion of the wet_day_totals\n selected_wet_day_totals = wet_day_totals[starting_index:ending_index]\n\n # record the yearly maximum precipitation\n maximum_yearly_precipitation[yi] = selected_wet_day_totals.max()\n\n\n # calculate the distribution percentiles associated with each interval\n event_percentiles = (1. - (1./event_intervals)) * 100.\n\n # calculated the event magnitudes associated with the percentiles.\n event_magnitudes = np.percentile(maximum_yearly_precipitation, event_percentiles)\n\n # write out the calculated event only if the duration\n exceedance_file.write('\\n\\nSection 4: Empirical Values\\n')\n message_text = ('These empirical values should be interpreted in the ' +\n 'context of the expected ranges printed in Section 3. ' +\n 'If the expected range is large, consider using a longer ' +\n 'record of rainfall. 
The empirical values should fall ' +\n 'within the expected range at a 95% confidence level.')\n exceedance_file.write('\\n'.join(textwrap.wrap(message_text, _STRING_LENGTH)))\n exceedance_file.write('\\n')\n\n for i in range(len(event_percentiles)):\n\n exceedance_file.write(('Estimated value for the wet day total of the ' +\n str(np.round(event_intervals[i], decimals=3)) +\n ' year event is: ' +\n str(np.round(event_magnitudes[i], decimals=3)) + '\\n'))\n\n exceedance_file.close()\n\n\ndef main():\n \"\"\"Executes model.\"\"\"\n import sys\n\n try:\n infile = sys.argv[1]\n except IndexError:\n print('Must include input file name on command line')\n sys.exit(1)\n\n em = _StochasticErosionModel(input_file=infile)\n em.run()\n\n\nif __name__ == '__main__':\n main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nmodel_108_basicDdSt.py: erosion model with stochastic\nrainfall, and water erosion proportional to stream power in excess of a\nthreshold that increases progressively with incision depth.\n\nModel 108 BasicDdSt\n\nThe hydrology aspect models discharge and erosion across a topographic\nsurface assuming (1) stochastic Poisson storm arrivals, (2) single-direction\nflow routing, and (3) Hortonian infiltration model. Includes stream-power\nerosion plus linear diffusion.\n\nThe hydrology uses calculation of drainage area using the standard \"D8\"\napproach (assuming the input grid is a raster; \"DN\" if not), then modifies it\nby running a lake-filling component. It then iterates through a sequence of\nstorm and interstorm periods. Storm depth is drawn at random from a gamma\ndistribution, and storm duration from an exponential distribution; storm\nintensity is then depth divided by duration. Given a storm precipitation\nintensity $P$, the runoff production rate $R$ [L/T] is calculated using:\n\n$R = P - I (1 - \\exp ( -P / I ))$\n\nwhere $I$ is the soil infiltration capacity. At the sub-grid scale, soil\ninfiltration capacity is assumed to have an exponential distribution of which\n$I$ is the mean. Hence, there are always some spots within any given grid cell\nthat will generate runoff. This approach yields a smooth transition from\nnear-zero runoff (when $I>>P$) to $R \\approx P$ (when $P>>I$), without a\n\"hard threshold.\"\n\nLandlab components used: FlowRouter, DepressionFinderAndRouter,\nPrecipitationDistribution, LinearDiffuser, StreamPowerSmoothThresholdEroder\n\n@author: gtucker\n@author: Katherine Barnhart\n\"\"\"\n\nfrom terrainbento.base_class import StochasticErosionModel\nfrom landlab.components import (FlowAccumulator, DepressionFinderAndRouter,\n LinearDiffuser, StreamPowerSmoothThresholdEroder)\n\nimport numpy as np\n\n\nclass BasicDdSt(StochasticErosionModel):\n \"\"\"\n A BasicDdSt computes erosion using (1) unit\n stream power with a threshold, (2) linear nhillslope diffusion, and\n (3) generation of a random sequence of runoff events across a topographic\n surface.\n\n Examples\n --------\n >>> from erosion_model import StochasticRainDepthDepThresholdModel\n >>> my_pars = {}\n >>> my_pars['dt'] = 1.0\n >>> my_pars['run_duration'] = 1.0\n >>> my_pars['infiltration_capacity'] = 1.0\n >>> my_pars['K_sp'] = 1.0\n >>> my_pars['threshold_sp'] = 1.0\n >>> my_pars['linear_diffusivity'] = 0.01\n >>> my_pars['mean_storm_duration'] = 0.002\n >>> my_pars['mean_interstorm_duration'] = 0.008\n >>> my_pars['mean_storm_depth'] = 0.025\n >>> srt = StochasticRainDepthDepThresholdModel(params=my_pars)\n Warning: no DEM specified; creating 4x5 raster grid\n \"\"\"\n\n def __init__(self, input_file=None, params=None,\n BaselevelHandlerClass=None):\n \"\"\"Initialize the BasicDdSt.\"\"\"\n\n # Call ErosionModel's init\n super(BasicDdSt, self).__init__(input_file=input_file,\n params=params,\n BaselevelHandlerClass=BaselevelHandlerClass)\n\n # Get Parameters:\n K_sp = self.get_parameter_from_exponent('K_stochastic_sp')\n linear_diffusivity = (self._length_factor**2.)*self.get_parameter_from_exponent('linear_diffusivity') # has units length^2/time\n\n # threshold has units of Length per Time which is what\n # StreamPowerSmoothThresholdEroder expects\n self.threshold_value = self._length_factor*self.get_parameter_from_exponent('erosion__threshold') # has units length/time\n\n # Get the parameter for rate of threshold increase with erosion depth\n 
self.thresh_change_per_depth = self.params['thresh_change_per_depth']\n\n # Instantiate a FlowAccumulator with DepressionFinderAndRouter using D8 method\n self.flow_router = FlowAccumulator(self.grid,\n flow_director='D8',\n depression_finder = DepressionFinderAndRouter)\n\n # instantiate rain generator\n self.instantiate_rain_generator()\n\n # Add a field for discharge\n if 'surface_water__discharge' not in self.grid.at_node:\n self.grid.add_zeros('node', 'surface_water__discharge')\n self.discharge = self.grid.at_node['surface_water__discharge']\n\n # Get the infiltration-capacity parameter\n infiltration_capacity = (self._length_factor)*self.params['infiltration_capacity']# has units length per time\n self.infilt = infiltration_capacity\n\n # Keep a reference to drainage area\n self.area = self.grid.at_node['drainage_area']\n\n # Run flow routing and lake filler\n self.flow_router.run_one_step()\n\n # Create a field for the (initial) erosion threshold\n self.threshold = self.grid.add_zeros('node', 'erosion__threshold')\n self.threshold[:] = self.threshold_value\n\n # Get the parameter for rate of threshold increase with erosion depth\n self.thresh_change_per_depth = self.params['thresh_change_per_depth']\n\n # Instantiate a FastscapeEroder component\n self.eroder = StreamPowerSmoothThresholdEroder(self.grid,\n m_sp=self.params['m_sp'],\n n_sp=self.params['n_sp'],\n K_sp=K_sp,\n use_Q=self.discharge,\n threshold_sp=self.threshold)\n\n # Instantiate a LinearDiffuser component\n self.diffuser = LinearDiffuser(self.grid,\n linear_diffusivity = linear_diffusivity)\n\n def calc_runoff_and_discharge(self):\n \"\"\"Calculate runoff rate and discharge; return runoff.\"\"\"\n if self.rain_rate > 0.0 and self.infilt > 0.0:\n runoff = self.rain_rate - (self.infilt *\n (1.0 -\n np.exp(-self.rain_rate / self.infilt)))\n if runoff < 0:\n runoff = 0\n else:\n runoff = self.rain_rate\n self.discharge[:] = runoff * self.area\n return runoff\n\n def update_threshold_field(self):\n \"\"\"Update the threshold based on cumulative erosion depth.\"\"\"\n cum_ero = self.grid.at_node['cumulative_erosion__depth']\n cum_ero[:] = (self.z\n - self.grid.at_node['initial_topographic__elevation'])\n self.threshold[:] = (self.threshold_value\n - (self.thresh_change_per_depth * cum_ero))\n self.threshold[self.threshold < self.threshold_value] = \\\n self.threshold_value\n\n def run_one_step(self, dt):\n \"\"\"\n Advance model for one time-step of duration dt.\n \"\"\"\n\n # Route flow\n self.flow_router.run_one_step()\n\n # Get IDs of flooded nodes, if any\n flooded = np.where(self.flow_router.depression_finder.flood_status==3)[0]\n\n # Handle water erosion\n self.handle_water_erosion_with_threshold(dt, flooded)\n\n # Do some soil creep\n self.diffuser.run_one_step(dt)\n\n # calculate model time\n self.model_time += dt\n\n # Lower outlet\n self.update_outlet(dt)\n\n # Check walltime\n self.check_walltime()\n\n def handle_water_erosion_with_threshold(self, dt, flooded):\n \"\"\"Handle water erosion.\n\n This function takes the place of the _BaseSt function of the name\n handle_water_erosion_with_threshold in order to handle water erosion\n correctly for model BasicDdSt.\n \"\"\"\n # (if we're varying precipitation parameters through time, update them)\n if self.opt_var_precip:\n self.intermittency_factor, self.mean_storm__intensity = self.pc.get_current_precip_params(self.model_time)\n\n # If we're handling duration deterministically, as a set fraction of\n # time step duration, calculate a rainfall intensity. 
Otherwise,\n # assume it's already been calculated.\n if not self.opt_stochastic_duration:\n self.rain_rate = np.random.exponential(self.mean_storm__intensity)\n dt_water = dt * self.intermittency_factor\n else:\n dt_water = dt\n\n # Calculate discharge field\n area = self.grid.at_node['drainage_area']\n if self.rain_rate > 0.0 and self.infilt > 0.0:\n runoff = self.rain_rate - (self.infilt *\n (1.0 -\n np.exp(-self.rain_rate / self.infilt)))\n else:\n runoff = self.rain_rate\n\n self.discharge[:] = runoff * area\n\n # Handle water erosion:\n #\n # If we are running stochastic duration, then self.rain_rate will\n # have been calculated already. It might be zero, in which case we\n # are between storms, so we don't do water erosion.\n #\n # If we're NOT doing stochastic duration, then we'll run water\n # erosion for one or more sub-time steps, each with its own\n # randomly drawn precipitation intensity.\n #\n if self.opt_stochastic_duration and self.rain_rate > 0.0:\n self.update_threshold_field()\n runoff = self.calc_runoff_and_discharge()\n self.eroder.run_one_step(dt, flooded_nodes=flooded)\n elif not self.opt_stochastic_duration:\n dt_water = ((dt * self.intermittency_factor)\n / float(self.n_sub_steps))\n for i in range(self.n_sub_steps):\n self.rain_rate = \\\n self.rain_generator.generate_from_stretched_exponential(\n self.scale_factor, self.shape_factor)\n self.update_threshold_field()\n runoff = self.calc_runoff_and_discharge()\n self.eroder.run_one_step(dt_water, flooded_nodes=flooded)\n\n\ndef main():\n \"\"\"Executes model.\"\"\"\n import sys\n\n try:\n infile = sys.argv[1]\n except IndexError:\n print('Must include input file name on command line')\n sys.exit(1)\n\n em = BasicDdSt(input_file=infile)\n em.run()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.minimum",
"numpy.where"
],
[
"numpy.log",
"scipy.special.gamma",
"numpy.percentile",
"scipy.stats.t.ppf",
"numpy.ceil",
"numpy.round",
"numpy.floor",
"numpy.exp",
"numpy.array",
"numpy.where",
"numpy.zeros"
],
[
"numpy.random.exponential",
"numpy.exp",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
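The terrainbento entry above spells out, in its docstrings and inline comments, the analytical exceedance relation used to predict N-year wet-day totals from the stretched-exponential rainfall distribution, P(p > po) = exp(-(po/P)^c), rearranged to po = P * (-ln P(p>po))^(1/c). As a standalone illustration, the short sketch below recomputes those "Section 2" theoretical values outside the model class; the scale, shape, and intermittency values are placeholder assumptions chosen for the example, not parameters stored in this dataset.

# Minimal sketch (not part of the stored files): reproduce the "Section 2"
# theoretical N-year wet-day totals that write_exceedance_frequency_file derives
# from the stretched-exponential wet-day distribution,
#   P(p > po) = exp(-(po / scale)**shape)  =>  po = scale * (-ln(prob))**(1/shape).
# All parameter values below are assumed placeholders.
import numpy as np

scale_factor = 0.65          # assumed scale of the wet-day distribution [depth/day]
shape_factor = 0.70          # assumed shape exponent
intermittency_factor = 0.3   # assumed fraction of wet days per year

nwet = int(np.ceil(intermittency_factor * 365))        # wet days per year
event_intervals = np.array([10.0, 25.0, 100.0])        # return periods in years
exceedance_probabilities = 1.0 / (nwet * event_intervals)

# Invert the exceedance function to get the expected wet-day total per event.
expected_rainfall = scale_factor * (
    -np.log(exceedance_probabilities)) ** (1.0 / shape_factor)

for interval, depth in zip(event_intervals, expected_rainfall):
    print(f"{interval:5.0f}-year wet-day total: {depth:.3f}")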
jdvelasq/techminer2 | [
"ad64a49402749755798a18417c38a7ad10e83bad"
] | [
"techminer2/co_occurrence_matrix_cluster_mds_map.py"
] | [
"\"\"\"\nCo-occurrence Matrix / Cluster MDS Map\n===============================================================================\n\n>>> from techminer2 import *\n>>> directory = \"data/\"\n>>> file_name = \"sphinx/images/co_occurrence_matrix_cluster_mds_map.png\"\n>>> co_occurrence_matrix_cluster_mds_map(\n... 'author_keywords',\n... min_occ=2, \n... directory=directory,\n... ).savefig(file_name)\n\n.. image:: images/co_occurrence_matrix_cluster_mds_map.png\n :width: 700px\n :align: center\n\n\n\"\"\"\nfrom sklearn.manifold import MDS\n\nfrom .co_occurrence_matrix import co_occurrence_matrix\nfrom .network import network\nfrom .network_map import network_map\n\n\ndef co_occurrence_matrix_cluster_mds_map(\n column,\n min_occ=2,\n max_occ=None,\n normalization=None,\n clustering_method=\"louvain\",\n directory=\"./\",\n color_scheme=\"clusters\",\n figsize=(7, 7),\n):\n\n coc_matrix = co_occurrence_matrix(\n column=column,\n min_occ=min_occ,\n max_occ=max_occ,\n normalization=normalization,\n directory=directory,\n )\n\n manifold_method = MDS(n_components=2)\n\n network_ = network(\n matrix=coc_matrix,\n clustering_method=clustering_method,\n manifold_method=manifold_method,\n )\n\n return network_map(\n network_,\n color_scheme=color_scheme,\n figsize=figsize,\n )\n"
] | [
[
"sklearn.manifold.MDS"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
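The techminer2 entry composes a co-occurrence matrix, an sklearn MDS manifold, a clustering step, and a map plot. Since co_occurrence_matrix(), network(), and network_map() are package internals not shown in this entry, the sketch below only illustrates the manifold step on an invented toy matrix: it converts co-occurrence counts into dissimilarities and projects the keywords to 2D with MDS. The keywords and counts are made up for the example.

# Hypothetical standalone sketch of the manifold step used by
# co_occurrence_matrix_cluster_mds_map: embed a small symmetric co-occurrence
# matrix in 2D with sklearn's MDS. Toy data only; the real pipeline feeds the
# MDS object into the package's network()/network_map() helpers instead.
import numpy as np
from sklearn.manifold import MDS

keywords = ["fintech", "blockchain", "regtech", "crowdfunding"]
cooc = np.array([
    [0, 5, 3, 1],
    [5, 0, 2, 0],
    [3, 2, 0, 1],
    [1, 0, 1, 0],
], dtype=float)

# MDS expects dissimilarities: frequently co-occurring terms should end up close.
dissimilarity = cooc.max() - cooc
np.fill_diagonal(dissimilarity, 0.0)

coords = MDS(n_components=2, dissimilarity="precomputed",
             random_state=0).fit_transform(dissimilarity)

for kw, (x, y) in zip(keywords, coords):
    print(f"{kw:>12}: ({x:6.2f}, {y:6.2f})")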
Telcrome/ai-trainer | [
"54bca3252e194c054bdd3af2b94d6dde940a2a86",
"54bca3252e194c054bdd3af2b94d6dde940a2a86"
] | [
"trainer/ml/utils.py",
"trainer/lib/gen_utils.py"
] | [
"from enum import Enum\nfrom typing import Generator, Tuple, Iterable, Dict, List\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy.ndimage import label, generate_binary_structure\nfrom scipy.ndimage.morphology import distance_transform_edt as dist_trans\n\nimport trainer.lib as lib\n\n\nclass ImageNormalizations(Enum):\n UnitRange = 1\n\n\ndef duplicate_columns(data, minoccur=2):\n ind = np.lexsort(data)\n diff = np.any(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)\n edges = np.where(diff)[0] + 1\n result = np.split(ind, edges)\n result = [group for group in result if len(group) >= minoccur]\n return result\n\n\ndef pad(small_arr: np.ndarray, size=(30, 30)) -> np.ndarray:\n # if small_arr.shape[0] < size[0] or small_arr.shape[1] < size[1]:\n size = max(small_arr.shape[0], size[0]), max(small_arr.shape[1], size[1])\n res = np.zeros(size, dtype=np.int32)\n res[:small_arr.shape[0], :small_arr.shape[1]] = small_arr\n return res\n # else:\n # return small_arr # There is no need for padding\n\n\ndef split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:\n \"\"\"\n Splits an array into its coherent regions.\n\n :param mode: 0 for orthogonal connection, 1 for full connection\n :param arr: Numpy array with shape [W, H]\n :return: A list with length #NumberOfRegions of arrays with shape [W, H]\n \"\"\"\n res = []\n if mode == 0:\n rs, num_regions = label(arr)\n elif mode == 1:\n rs, num_regions = label(arr, structure=generate_binary_structure(2, 2))\n else:\n raise Exception(\"Please specify a valid Neighborhood mode for split_into_regions\")\n\n for i in range(1, num_regions + 1):\n res.append(rs == i)\n return res\n\n\ndef normalize_im(im: np.ndarray, norm_type=ImageNormalizations.UnitRange) -> np.ndarray:\n \"\"\"\n Currently just normalizes an image with pixel intensities in range [0, 255] to [-1, 1]\n :return: The normalized image\n \"\"\"\n if norm_type == ImageNormalizations.UnitRange:\n return (im.astype(np.float32) / 127.5) - 1\n else:\n raise Exception(\"Unknown Normalization type\")\n\n\ndef distance_transformed(mask: np.ndarray) -> np.ndarray:\n if mask.dtype != np.bool:\n mask = mask.astype(np.bool)\n return dist_trans(np.invert(mask).astype(np.float32))\n\n\ndef one_hot_to_cont(x: np.ndarray) -> np.ndarray:\n \"\"\"\n Convert a one hot encoded image into the same image with integer representations.\n\n :param x: np.ndarray with (C, W, H)\n :return: np.ndarray with (W, H)\n \"\"\"\n return np.argmax(x, axis=len(x.shape) - 3)\n\n\ndef cont_to_ont_hot(arr: np.ndarray, n_values=-1) -> np.ndarray:\n if n_values == -1:\n n_values = np.max(arr) + 1\n res = np.zeros((n_values,) + arr.shape)\n for v in np.unique(arr):\n res[v, :, :][arr == v] = 1\n return res\n\n\ndef reduce_by_attention(arr: np.ndarray, att: np.ndarray):\n \"\"\"\n Reduce an array by a field of attention, such that the result is a rectangle with the empty borders cropped.\n\n :param arr: Target array. 
The last two dimensions need to be of the same shape as the attention field\n :param att: field of attention\n :return: cropped array\n \"\"\"\n assert arr.shape[-2] == att.shape[0] and arr.shape[-1] == att.shape[1]\n ones = np.argwhere(att)\n lmost, rmost = np.min(ones[:, 0]), np.max(ones[:, 0]) + 1\n bmost, tmost = np.min(ones[:, 1]), np.max(ones[:, 1]) + 1\n grid_slice = [slice(None) for _ in range(len(arr.shape) - 2)]\n grid_slice.extend([slice(lmost, rmost), slice(bmost, tmost)])\n return arr[tuple(grid_slice)], att[lmost:rmost, bmost:tmost], (lmost, rmost, bmost, tmost)\n\n\ndef pair_augmentation(g: Iterable[Tuple[np.ndarray, np.ndarray]], aug_ls) -> Iterable[Tuple[np.ndarray, np.ndarray]]:\n import imgaug.augmenters as iaa\n seq = iaa.Sequential(aug_ls)\n for im, gt, frame_number in g:\n im_prep = im[frame_number] if im.shape[3] > 1 else im.squeeze()\n gt_prep = np.expand_dims(gt, len(gt.shape))\n images_aug = seq(images=[im_prep], segmentation_maps=[gt_prep])\n yield images_aug[0][0].astype(np.float32), images_aug[1][0][:, :, 0].astype(np.float32), frame_number\n\n\ndef insert_np_at(a1: np.ndarray, a2: np.ndarray, pos: Tuple[int, int], filter_arr=None) -> np.ndarray:\n assert len(a1.shape) == 2 and len(a2.shape) == 2\n if filter_arr is None:\n filter_arr = np.ones_like(a2).astype(np.bool)\n x, y = pos\n res = np.copy(a1)\n a1_x = slice(x, min(x + a2.shape[0], a1.shape[0]))\n a1_y = slice(y, min(y + a2.shape[1], a1.shape[1]))\n\n if x + a2.shape[0] <= a1.shape[0]:\n a2_x = slice(0, a2.shape[0])\n else:\n a2_x = slice(0, a1.shape[0] - (x + a2.shape[0]))\n\n if y + a2.shape[1] <= a1.shape[1]:\n a2_y = slice(0, a2.shape[1])\n else:\n a2_y = slice(0, a1.shape[1] - (y + a2.shape[1]))\n item_filter = filter_arr[(a2_x, a2_y)]\n assert res[(a1_x, a1_y)].shape == a2[(a2_x, a2_y)].shape\n res[(a1_x, a1_y)][item_filter] = a2[(a2_x, a2_y)][item_filter]\n return res\n\n\nif __name__ == '__main__':\n fit = insert_np_at(np.ones((10, 10)), np.ones((3, 3)) * 2, (2, 3))\n too_big1 = insert_np_at(np.ones((10, 10)), np.ones((3, 10)) * 2, (2, 3))\n too_big = insert_np_at(np.ones((10, 10)), np.ones((10, 10)) * 2, (2, 3))\n\n# def put_array(big_arr: np.ndarray, small_arr: np.ndarray, offset=(0, 0)) -> np.ndarray:\n# \"\"\"\n# Puts the small array into the big array. Ignores problems and does its best to fulfill the task\n# \"\"\"\n# b, t =\n# big_arr[]\n# big_arr = np.putmask(big_arr, )\n\n\n# if __name__ == '__main__':\n# # a = np.zeros((10, 10))\n# # b = np.random.random((4, 4))\n# # c = put_array(a, b)\n# # lib.logger.debug_var(c)\n",
"from typing import Generator, TypeVar, Generic, Tuple, List, Iterator, Union\nimport itertools\nimport time\nimport random\n\nimport numpy as np\nfrom scipy.special import softmax\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\n\nV = TypeVar('V')\n\n\nclass GenCacher(Generic[V]):\n \"\"\"\n Wrapper around a generator that stores the already yielded values and therefore allows indexing.\n \"\"\"\n\n def __init__(self, generator: Generator[V, None, None]):\n self._g = generator\n self._cache = []\n self._is_exhausted = False\n\n def is_exhausted(self) -> bool:\n return self._is_exhausted\n\n def get_cache_len(self) -> int:\n return len(self._cache)\n\n def fill_cache(self, idx: int):\n while not self.is_exhausted() and idx >= self.get_cache_len():\n try:\n self._cache.append(next(self._g))\n except StopIteration as _:\n self._is_exhausted = True\n\n def __getitem__(self, idx: int) -> V:\n self.fill_cache(idx)\n return self._cache[idx]\n\n\ndef summations(sum_to: int, ls: List[int]) -> Generator[Tuple, None, None]:\n if len(ls) == 1:\n if sum_to < ls[0]:\n yield sum_to,\n else:\n for head in range(min(sum_to + 1, ls[0])):\n for tail in summations(sum_to - head, ls[1:]):\n yield (head,) + tail\n\n\ndef product(gens: List[Generator]) -> Generator:\n \"\"\"\n Utility to compute the cartesian product between an arbitrary number of generators.\n Developed to handle the case of a possible mix of finite and infinite generators.\n The built-in itertools.product can only compute the cartesian product between finite generators.\n\n The exploration strategy can be visualized using the following code block:\n\n .. code-block:: python\n :linenos:\n\n import matplotlib.pyplot as plt\n import trainer.demo_data as dd\n import trainer.lib as lib\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlim3d([0.0, 10.0])\n ax.set_xlabel('X')\n ax.set_ylim3d([0.0, 10.0])\n ax.set_ylabel('Y')\n ax.set_zlim3d([0.0, 10.0])\n ax.set_zlabel('Z')\n xs, ys, zs = [], [], []\n\n gens = [\n dd.finite_test_gen(start=0, end=3),\n dd.infinite_test_gen(first=0),\n dd.finite_test_gen(start=0, end=3)\n ]\n\n for c in lib.product(gens):\n xs.append(c[0])\n ys.append(c[1])\n zs.append(c[2])\n ax.plot(xs=xs[-2:], ys=ys[-2:], zs=zs[-2:])\n fig.show()\n plt.pause(0.01)\n\n The result looks as following:\n\n .. 
image:: ../media/gen_product_exploration.gif\n\n :param gens: Between 1 and N generators\n :return: One generator that returns all N-tuples, built from the input generators\n \"\"\"\n gens = list(map(GenCacher, gens))\n\n for distance in itertools.count(0):\n changed = False\n for gen in gens:\n gen.fill_cache(distance)\n for idxs in summations(distance, [gen.get_cache_len() for gen in gens]):\n res = tuple(gen[idx] for gen, idx in zip(gens, idxs))\n yield res\n changed = True\n if not changed:\n return\n\n\ndef sample_randomly(gens: Union[List[Generator], List[Iterator]], probas: List[float], use_softmax=False):\n \"\"\"\n Draw from one generator in a list according to uniformly distributed probabilities.\n\n :param gens: A list of generators\n :param probas: List of generator probabilities, must correspond to the list of generators\n :param use_softmax: Use softmax to press priorities to one\n :return: Randomly drawn value from one of the generators\n \"\"\"\n assert len(gens) == len(probas)\n\n while gens:\n if use_softmax:\n i = np.random.choice(range(len(gens)), 1, p=softmax(probas))[0]\n else:\n i = np.random.choice(range(len(gens)), 1, p=probas/np.sum(probas))[0]\n if (not isinstance(gens[i], Generator)) and (not isinstance(gens[i], Iterator)):\n yield gens[i]\n gens.pop(i)\n probas.pop(i)\n else:\n try:\n yield next(gens[i])\n except StopIteration as e:\n gens.pop(i)\n probas.pop(i)\n\n\nif __name__ == '__main__':\n gens = [\n itertools.islice(itertools.count(), 0, 5),\n itertools.islice(itertools.count(), 10, 15)\n ]\n\n for x in sample_randomly(gens, [0.5, 0.5]):\n print(x)\n"
] | [
[
"numpy.split",
"numpy.ones_like",
"numpy.invert",
"numpy.unique",
"numpy.min",
"scipy.ndimage.generate_binary_structure",
"numpy.lexsort",
"numpy.argwhere",
"numpy.ones",
"scipy.ndimage.label",
"numpy.copy",
"numpy.max",
"numpy.any",
"numpy.zeros",
"numpy.where"
],
[
"scipy.special.softmax",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
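The gen_utils.py file in the ai-trainer entry builds its cartesian product over possibly-infinite generators from two pieces: a GenCacher that memoizes yielded values, and summations(), which enumerates all index tuples at a given total "distance" subject to per-generator cache lengths. The snippet below copies that small helper from the entry and runs it on a toy case to show the enumeration order; only the final call and the comments are new.

# Illustration of the index-tuple enumeration that product() builds on:
# summations(d, limits) yields every tuple of non-negative indices summing to d
# while each index stays below the matching per-generator cache length.
# Function body copied from the gen_utils.py source above.
from typing import Generator, List, Tuple

def summations(sum_to: int, ls: List[int]) -> Generator[Tuple, None, None]:
    if len(ls) == 1:
        if sum_to < ls[0]:
            yield sum_to,
    else:
        for head in range(min(sum_to + 1, ls[0])):
            for tail in summations(sum_to - head, ls[1:]):
                yield (head,) + tail

# With cache lengths (3, 2), distance 2 can be reached as (1, 1) or (2, 0);
# (0, 2) is dropped because the second cache only holds indices 0 and 1.
print(list(summations(2, [3, 2])))   # -> [(1, 1), (2, 0)]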
sxontheway/BalanceFL | [
"43bb7539c932b7b6f7ad03f94a724452ae3855a3",
"43bb7539c932b7b6f7ad03f94a724452ae3855a3"
] | [
"IMU/fed.py",
"CIFAR10/data/dataloader.py"
] | [
"import copy\nimport time\nfrom collections import OrderedDict\n\nimport torch\nfrom data.dataloader import local_client_dataset, test_dataset\nfrom models.utils import *\nfrom utils.train_helper import validate_one_model\nfrom utils.sampling import *\n\n\nimport numpy as np\nfrom multiprocessing import Process\nimport time\n\n\ndef return_state_dict(network):\n \"\"\"\n save model to state_dict\n \"\"\"\n feat_model = {k: v.cpu() for k, v in network[\"feat_model\"].state_dict().items()}\n classifier = {k: v.cpu() for k, v in network[\"classifier\"].state_dict().items()}\n return {\"feat_model\": feat_model, \"classifier\": classifier}\n\n\ndef load_state_dict(network, state_dict):\n \"\"\"\n restore model from state_dict\n \"\"\"\n network[\"feat_model\"].load_state_dict(state_dict[\"feat_model\"])\n network[\"classifier\"].load_state_dict(state_dict[\"classifier\"])\n\n # for name, param in state_dict[\"feat_model\"].items():\n # print(name, \"\\t\", param.size())\n return network\n\n\ndef check_status(status_list, selected_idx, target_status):\n \"\"\"\n 0. original status (1st FL round)\n 1. server finished sending: server_network --> mp_list\n 2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list\n 3. server received: mp_list --> networks[i]\n --> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status change to 1\n ---\n Return True: when all clients meet conditions, else False\n \"\"\"\n tmp = np.array(status_list)\n if (tmp[selected_idx] == target_status).all() == True:\n return True\n else:\n return False\n\n\ndef set_status(status_list, selected_idx, target_status):\n \"\"\"\n see function: check_status\n \"\"\"\n if type(selected_idx) is int:\n selected_idx = [selected_idx]\n for i in selected_idx:\n status_list[i] = target_status\n # print(f\"set_status {target_status}\")\n\n\ndef difference_models_norm_2(model_1, model_2):\n \"\"\"\n Return the norm 2 difference between the two model parameters. Used in FedProx. 
\n \"\"\"\n tensor_1_backbone = list(model_1[\"feat_model\"].parameters())\n tensor_1_classifier = list(model_1[\"classifier\"].parameters())\n tensor_2_backbone = list(model_2[\"feat_model\"].parameters())\n tensor_2_classifier = list(model_2[\"classifier\"].parameters())\n\n diff_list = [\n torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i]) ** 2)\n for i in range(len(tensor_1_backbone))\n ]\n diff_list.extend(\n [\n torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i]) ** 2)\n for i in range(len(tensor_1_classifier))\n ]\n )\n\n norm = sum(diff_list)\n return norm\n\n\nclass Fed_server(Process):\n \"\"\"\n Class for client updating and model aggregation\n \"\"\"\n\n def __init__(\n self,\n init_network,\n criterion,\n config,\n per_client_data,\n per_client_label,\n idx_per_client_train,\n test_data,\n test_label,\n state_list=None,\n state_dict_list=None,\n idx=None,\n ):\n\n super(Fed_server, self).__init__()\n\n self.local_bs = config[\"fl_opt\"][\"local_bs\"]\n self.local_ep = config[\"fl_opt\"][\"local_ep\"]\n self.num_clients = config[\"fl_opt\"][\"num_clients\"]\n self.criterion = criterion\n self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (\n [],\n [],\n [],\n [],\n )\n self.train_loaders = [] # include dataloader or pre-loaded dataset\n self.train_loader_balanced = [] # balanced-sampling dataloader\n self.local_num_per_cls = [] # list to store local data number per class\n self.test_loaders = []\n self.status_list = state_list\n self.state_dict_list = state_dict_list\n self.client_idx = idx # physical idx of clients (hardcoded)\n\n self.config = config\n self.prefetch = False\n self.feat_aug = config[\"fl_opt\"][\"feat_aug\"]\n self.crt = config[\"fl_opt\"][\"crt\"]\n\n self.client_weights = np.array([i for i in idx_per_client_train])\n self.client_weights = self.client_weights / self.client_weights.sum()\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.server_network = copy.deepcopy(init_network)\n self.server_network[\"feat_model\"].to(self.device)\n self.server_network[\"classifier\"].to(self.device)\n\n # per-client accuracy and loss\n self.acc = [0 for i in range(self.num_clients)]\n self.losses_cls = [-1 for i in range(self.num_clients)]\n self.losses_kd = [-1 for i in range(self.num_clients)]\n\n print(f'=====> {config[\"metainfo\"][\"optimizer\"]}, Server (fed.py)\\n ')\n\n ######## init backbone, classifier, optimizer and dataloader ########\n for client_i in range(self.num_clients):\n\n backbone = copy.deepcopy(self.server_network[\"feat_model\"])\n classifier = copy.deepcopy(self.server_network[\"classifier\"])\n self.networks.append({\"feat_model\": backbone, \"classifier\": classifier})\n\n \"\"\" Server does not need\n # list of optimizer_dict. 
One optimizer for one network\n self.optimizers.append(init_optimizers(self.networks[client_i], config)) \n optim_params_dict = {'params': self.networks[client_i][\"classifier\"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0} \n self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))\n\n # dataloader\n num_workers = 0\n local_dataset = \\\n local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)\n self.train_loaders.append(\n torch.utils.data.DataLoader(\n local_dataset, batch_size=self.local_bs, shuffle=True, \n num_workers=num_workers, pin_memory=False)\n )\n self.train_loader_balanced.append(\n torch.utils.data.DataLoader(\n local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(), \n num_workers=num_workers, pin_memory=False)\n )\n self.local_num_per_cls.append(local_dataset.class_sample_count)\n \"\"\"\n\n # centralized train dataset\n train_data_all, train_label_all = [], []\n for client_i in range(len(per_client_label)):\n train_data_all = train_data_all + per_client_data[client_i]\n train_label_all = train_label_all + per_client_label[client_i]\n self.train_dataset = local_client_dataset(\n train_data_all, train_label_all, config\n )\n self.test_dataset = test_dataset(test_data, test_label, config)\n\n def local_train(self, selected_idx):\n \"\"\"\n server-side code\n \"\"\"\n # self.server_network --> mp_list\n for i in selected_idx:\n self.state_dict_list[i] = return_state_dict(\n self.server_network\n ) # model transfer\n set_status(self.status_list, selected_idx, 1)\n\n # wait until all clients returning the model\n while check_status(self.status_list, selected_idx, 2) is False:\n time.sleep(0.1)\n\n # mp_list --> self.networks (copys of client models on the server). 
Prepare for aggregation.\n for i in selected_idx:\n load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer\n print(\"===> Local training finished\")\n\n def aggregation(self, selected_idx, mode):\n \"\"\"\n server-side code: aggregation\n \"\"\"\n if mode in [\"fedavg\", \"fedavgm\", \"fedbn\", \"fedprox\"]:\n self.aggregate_layers(selected_idx, mode, backbone_only=False)\n elif mode == \"fedavg_fs\":\n opt = self.config[\"fl_opt\"]\n backbone_only, imprint, spread_out = (\n opt[\"backbone_only\"],\n opt[\"imprint\"],\n opt[\"spread_out\"],\n )\n self.aggregate_layers(selected_idx, \"fedavg\", backbone_only=backbone_only)\n if imprint:\n self.imprint(selected_idx)\n if spread_out:\n self.spread_out()\n\n # model: self.server_network --> mp_list\n for i in selected_idx:\n self.state_dict_list[i] = return_state_dict(\n self.server_network\n ) # model transfer\n set_status(self.status_list, selected_idx, 0) # back to original\n\n print(\"===> Aggregation finished\")\n\n def aggregate_layers(self, selected_idx, mode, backbone_only):\n \"\"\"\n backbone_only: choose to only aggregate backbone\n \"\"\"\n weights_sum = self.client_weights[selected_idx].sum()\n with torch.no_grad():\n if mode in [\"fedavg\", \"fedprox\"]:\n for net_name, net in self.server_network.items():\n if net_name == \"classifier\" and backbone_only:\n pass\n else:\n for key, layer in net.state_dict().items():\n if \"num_batches_tracked\" in key:\n # num_batches_tracked is a non trainable LongTensor\n # and num_batches_tracked are the same for\n # all clients for the given datasets\n layer.data.copy_(\n self.networks[0][net_name].state_dict()[key]\n )\n else:\n temp = torch.zeros_like(layer)\n # Fedavg\n for idx in selected_idx:\n weight = self.client_weights[idx] / weights_sum\n temp += (\n weight\n * self.networks[idx][net_name].state_dict()[key]\n )\n layer.data.copy_(temp)\n # update client models\n # for idx in selected_idx:\n # self.networks[idx][net_name].state_dict()[key].data.copy_(layer)\n\n elif mode == \"fedbn\": # https://openreview.net/pdf?id=6YEQUn0QICG\n for net_name, net in self.server_network.items():\n if net_name == \"classifier\" and backbone_only:\n pass\n else:\n for key, layer in net.state_dict().items():\n if \"bn\" not in key:\n temp = torch.zeros_like(layer)\n # Fedavg\n for idx in selected_idx:\n weight = self.client_weights[idx] / weights_sum\n temp += (\n weight\n * self.networks[idx][net_name].state_dict()[key]\n )\n layer.data.copy_(temp)\n # update client models\n # for idx in selected_idx:\n # self.networks[idx][net_name].state_dict()[key].data.copy_(layer)\n elif mode == \"fedavgm\":\n raise NotImplementedError\n\n def evaluate_global(self, train_dataset=None, test_dataset=None):\n \"\"\"\n Accuracy of the global model and all classes\n \"\"\"\n # evaluate on training set\n if train_dataset is None:\n train_dataset = self.train_dataset\n if test_dataset is None:\n test_dataset = self.test_dataset\n train_loss_per_cls, train_acc_per_cls = validate_one_model(\n self.server_network, train_dataset, self.device, per_cls_acc=True\n )\n\n # evaluate on test set: per-class loss/acc\n test_loss_per_cls, test_acc_per_cls = validate_one_model(\n self.server_network, test_dataset, self.device, per_cls_acc=True\n )\n print(\"===> Evaluation finished\\n\")\n\n return (\n train_loss_per_cls,\n train_acc_per_cls,\n test_loss_per_cls,\n test_acc_per_cls,\n )\n\n def evaluate_global_all(self, train_dataset=None, test_dataset=None):\n \"\"\"\n Accuracy of models of all nodes and all 
classes\n\n Return: all_results\n shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)\n \"\"\"\n # evaluate on training set\n if train_dataset is None:\n train_dataset = self.train_dataset\n if test_dataset is None:\n test_dataset = self.test_dataset\n\n all_results = [None for i in range(self.num_clients)]\n for idx in range(self.num_clients):\n # evaluate on test set: per-class loss/acc\n train_loss_per_cls, train_acc_per_cls = validate_one_model(\n self.networks[idx], train_dataset, self.device, per_cls_acc=True\n )\n # evaluate on test set: per-class loss/acc\n test_loss_per_cls, test_acc_per_cls = validate_one_model(\n self.networks[idx], test_dataset, self.device, per_cls_acc=True\n )\n all_results[idx] = (\n train_loss_per_cls,\n train_acc_per_cls,\n test_loss_per_cls,\n test_acc_per_cls,\n )\n\n print(f\"===> Evaluation finished{idx}\\n\")\n\n all_results = np.array(all_results).transpose(1, 0, 2)\n return all_results\n\n\nclass Fed_client(Process):\n \"\"\"\n Class for client updating and model aggregation\n \"\"\"\n\n def __init__(\n self,\n init_network,\n criterion,\n config,\n per_client_data,\n per_client_label,\n idx_per_client_train,\n test_data,\n test_label,\n state_list=None,\n state_dict_list=None,\n idx=None,\n ):\n\n super(Fed_client, self).__init__()\n\n self.local_bs = config[\"fl_opt\"][\"local_bs\"]\n self.local_ep = config[\"fl_opt\"][\"local_ep\"]\n self.num_clients = config[\"fl_opt\"][\"num_clients\"]\n self.criterion = criterion\n self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (\n [],\n [],\n [],\n [],\n )\n self.train_loaders = [] # include dataloader or pre-loaded dataset\n self.train_loader_balanced = [] # balanced-sampling dataloader\n self.local_num_per_cls = [] # list to store local data number per class\n self.test_loaders = []\n self.status_list = state_list\n self.state_dict_list = state_dict_list\n self.client_idx = idx # physical idx of clients (hardcoded)\n\n self.config = config\n self.device = config[\"device_client\"][idx]\n self.server_network = copy.deepcopy(init_network)\n self.balanced_loader = config[\"fl_opt\"][\"balanced_loader\"]\n\n self.prefetch = False\n self.feat_aug = config[\"fl_opt\"][\"feat_aug\"]\n self.crt = config[\"fl_opt\"][\"crt\"]\n\n if config[\"fl_opt\"][\"aggregation\"] == \"fedprox\":\n self.fedprox = True\n else:\n self.fedprox = False\n self.mu = 0.05\n\n self.client_weights = np.array([i for i in idx_per_client_train])\n self.client_weights = self.client_weights / self.client_weights.sum()\n\n # per-client accuracy and loss\n self.acc = [0 for i in range(self.num_clients)]\n self.losses_cls = [-1 for i in range(self.num_clients)]\n self.losses_kd = [-1 for i in range(self.num_clients)]\n\n print(f'=====> {config[\"metainfo\"][\"optimizer\"]}, Client {idx} (fed.py)\\n ')\n\n ######## init backbone, classifier, optimizer and dataloader ########\n for client_i in range(self.num_clients):\n # list of network and optimizer_dict. 
One optimizer for one network.\n if client_i != self.client_idx:\n self.networks.append(None)\n self.optimizers.append(None)\n self.optimizers_stage2.append(None)\n else:\n backbone = copy.deepcopy(self.server_network[\"feat_model\"])\n classifier = copy.deepcopy(self.server_network[\"classifier\"])\n self.networks.append({\"feat_model\": backbone, \"classifier\": classifier})\n self.optimizers.append(init_optimizers(self.networks[client_i], config))\n optim_params_dict = {\n \"params\": self.networks[client_i][\"classifier\"].parameters(),\n \"lr\": 0.001,\n \"momentum\": 0.9,\n \"weight_decay\": 0,\n }\n self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))\n\n # dataloader\n num_workers = 0\n local_dataset = local_client_dataset(\n per_client_data[client_i], per_client_label[client_i], config\n )\n self.train_loaders.append(\n torch.utils.data.DataLoader(\n local_dataset,\n batch_size=self.local_bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=False,\n )\n )\n self.train_loader_balanced.append(\n torch.utils.data.DataLoader(\n local_dataset,\n batch_size=self.local_bs,\n sampler=local_dataset.get_balanced_sampler(),\n num_workers=num_workers,\n pin_memory=False,\n )\n )\n self.local_num_per_cls.append(local_dataset.class_sample_count)\n\n \"\"\" clients do not need\n # centralized train dataset\n train_data_all, train_label_all = [], []\n for client_i in range(len(per_client_label)):\n train_data_all = train_data_all + per_client_data[client_i]\n train_label_all = train_label_all + per_client_label[client_i]\n self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)\n self.test_dataset = test_dataset(test_data, test_label, config)\n \"\"\"\n\n def run(self):\n \"\"\"\n client-side code\n \"\"\"\n self.server_network[\"feat_model\"].to(self.device)\n self.server_network[\"classifier\"].to(self.device)\n self.networks[self.client_idx][\"feat_model\"].to(self.device)\n self.networks[self.client_idx][\"classifier\"].to(self.device)\n\n while 1:\n while check_status(self.status_list, self.client_idx, 1) is False:\n time.sleep(0.1)\n\n # model: mp_list --> server_network\n load_state_dict(\n self.server_network, self.state_dict_list[self.client_idx]\n ) # model transfer\n self.train_lt(self.client_idx) # local model updating\n\n # self.networks[i] --> mp_list\n self.state_dict_list[self.client_idx] = return_state_dict(\n self.networks[self.client_idx]\n ) # model transfer\n set_status(self.status_list, self.client_idx, 2)\n\n def train_lt(self, idx):\n \"\"\"\n client-side code\n ---\n Argus:\n - idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).\n If self.prefetch is true: the index in selected clients,\n If self.prefetch is true: the index in all clients\n \"\"\"\n idx_in_all = idx\n\n # server broadcast the model to clients\n \"\"\"\n # optimizer will not work if use this, because optimizer needs the params from the model\n # self.networks[idx_in_all] = copy.deepcopy(self.server_network) \n \"\"\"\n for net_name, net in self.server_network.items(): # feat_model, classifier\n state_dict = self.networks[idx_in_all][net_name].state_dict()\n for key, layer in net.state_dict().items():\n state_dict[key].data.copy_(layer.data)\n\n for net in self.networks[idx_in_all].values():\n net.train()\n for net in self.server_network.values():\n net.train()\n teacher = self.server_network\n\n # torch.cuda.empty_cache()\n\n \"\"\"\n (Per-cls) Covariance Calculation\n \"\"\"\n if self.feat_aug:\n # probability for augmentation for 
every class\n max_num = max(self.local_num_per_cls[idx])\n prob = torch.tensor(\n [1.0 - i / max_num for i in self.local_num_per_cls[idx]]\n )\n\n # obtain features and labels under eval mode\n feat_list, label_list = [], []\n\n # self.networks[idx_in_all]['feat_model'].eval()\n\n for (imgs, labels, indexs) in self.train_loaders[idx]:\n with torch.no_grad():\n imgs = imgs.to(self.device)\n feat_list.append(teacher[\"feat_model\"](imgs).cpu())\n label_list.append(labels)\n feat_list = torch.cat(feat_list, 0)\n\n # self.networks[idx_in_all]['feat_model'].train()\n\n label_list = torch.cat(label_list, 0)\n unique_labels = list(np.unique(label_list)) # e.g., size (6, )\n transformed_label_list = torch.tensor(\n [unique_labels.index(i) for i in label_list]\n ) # e.g., size (n, )\n\n # per-cls features\n feats_per_cls = [[] for i in range(len(unique_labels))]\n for feats, label in zip(feat_list, transformed_label_list):\n feats_per_cls[label].append(feats)\n\n # calculate the variance\n sampled_data, sample_label = [], []\n per_cls_cov = []\n for feats in feats_per_cls:\n if len(feats) > 1:\n per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))\n else:\n per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))\n per_cls_cov = np.array(per_cls_cov)\n # per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])\n cov = np.average(\n per_cls_cov, axis=0, weights=self.local_num_per_cls[idx]\n ) # covariance for feature dimension, shape: e.g., (128, 128)\n\n # pre-generate deviation\n divider = 500\n pointer = 0\n augs = (\n torch.from_numpy(\n np.random.multivariate_normal(\n mean=np.zeros(cov.shape[0]),\n cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)\n size=divider,\n )\n )\n .float()\n .to(self.device)\n )\n\n with torch.set_grad_enabled(True):\n losses_cls = 0\n losses_kd = 0\n\n ##########################\n #### stage 1 training ####\n ##########################\n for epoch in range(self.local_ep):\n\n \"\"\"\n model update\n \"\"\"\n if self.local_ep > 10: # locla training mode\n print(epoch, end=\" \")\n\n if self.balanced_loader:\n tmp_loader = self.train_loader_balanced[idx]\n else:\n tmp_loader = self.train_loaders[idx]\n for (imgs, labels, indexs) in tmp_loader:\n # to device\n imgs = imgs.to(self.device)\n\n # forward\n feat = self.networks[idx_in_all][\"feat_model\"](imgs)\n logits = self.networks[idx_in_all][\"classifier\"](feat)\n\n # do feature space augmentation with a likelihood\n if self.feat_aug:\n # prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]])\n rand_list = torch.rand(len(labels))\n mask = (\n rand_list\n < prob[\n torch.tensor([unique_labels.index(i) for i in labels])\n ]\n )\n degree = 1\n aug_num = sum(mask).item()\n if aug_num > 0:\n if pointer + aug_num >= divider:\n pointer = 0\n feat_aug = feat.clone()\n feat_aug[mask] = (\n feat[mask] + augs[pointer : pointer + aug_num] * degree\n )\n logits_aug = self.networks[idx_in_all][\"classifier\"](\n feat_aug\n )\n pointer = pointer + aug_num\n\n # teacher\n with torch.no_grad():\n feat_teacher = teacher[\"feat_model\"](imgs)\n pred_teacher = teacher[\"classifier\"](feat_teacher)\n\n # loss\n labels = labels.to(self.device)\n if self.config[\"criterions\"][\"def_file\"].find(\"LwF\") > 0:\n if self.feat_aug:\n if len(labels) != len(logits_aug):\n continue\n loss, loss_cls, loss_kd = self.criterion(\n labels, pred_teacher, logits, logits_aug\n )\n else:\n loss, loss_cls, loss_kd = self.criterion(\n labels, pred_teacher, logits\n )\n elif 
self.config[\"criterions\"][\"def_file\"].find(\"KDLoss\") > 0:\n loss, loss_cls, loss_kd = self.criterion(\n logits,\n labels,\n feat,\n feat_teacher,\n classfier_weight=self.networks[idx_in_all][\n \"classifier\"\n ].fc.weight,\n )\n\n # fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox\n if self.fedprox:\n prox_loss = difference_models_norm_2(\n self.networks[idx_in_all], teacher\n )\n # print(\"FedProx Loss: \", prox_loss, loss)\n loss += self.mu / 2 * prox_loss\n\n # backward\n for optimizer in self.optimizers[idx_in_all].values():\n optimizer.zero_grad()\n loss.backward()\n for optimizer in self.optimizers[idx_in_all].values():\n optimizer.step()\n\n # classifier L2-norm\n if self.networks[idx_in_all][\"classifier\"].l2_norm:\n self.networks[idx_in_all][\"classifier\"].weight_norm()\n losses_cls += loss_cls.item()\n losses_kd += loss_kd.item()\n\n self.losses_cls[idx_in_all] = (\n losses_cls / len(self.train_loaders[idx]) / self.local_ep\n )\n self.losses_kd[idx_in_all] = (\n losses_kd / len(self.train_loaders[idx]) / self.local_ep\n )\n\n ##########################\n #### stage 2 training ####\n ##########################\n if self.crt:\n self.networks[idx_in_all][\"feat_model\"].eval()\n\n if self.feat_aug:\n # obtain features and labels\n feat_list = []\n label_list = []\n for (imgs, labels, indexs) in self.train_loaders[idx]:\n imgs = imgs.to(self.device)\n with torch.no_grad():\n feat_list.append(\n self.networks[idx_in_all][\"feat_model\"](imgs).cpu()\n )\n label_list.append(labels)\n feat_list = torch.cat(feat_list, 0)\n label_list = torch.cat(label_list, 0)\n unique_labels = list(np.unique(label_list)) # e.g., size (6, )\n transformed_label_list = torch.tensor(\n [unique_labels.index(i) for i in label_list]\n ) # e.g., size (n, )\n\n # per-cls features\n feats_per_cls = [[] for i in range(len(unique_labels))]\n for feat, label in zip(feat_list, transformed_label_list):\n feats_per_cls[label].append(feat)\n\n # determine the extra sample number for every existing samples\n num_per_cls = np.array(\n [len(np.where(label_list == t)[0]) for t in unique_labels]\n ) # e.g., size (6, )\n max_num = max(num_per_cls)\n gen_nums = [\n np.array(\n [max_num // num_per_cls[i] - 1 for _ in feats_per_cls[i]]\n )\n for i in range(len(unique_labels))\n ]\n for cls_i, nums in enumerate(gen_nums):\n nums[: max_num % num_per_cls[cls_i]] = (\n nums[: max_num % num_per_cls[cls_i]] + 1\n )\n\n # generate samples\n sampled_data, sample_label = [], []\n per_cls_cov = np.array(\n [\n np.cov(torch.stack(feats, 1).numpy())\n for feats in feats_per_cls\n ]\n )\n cov = np.average(per_cls_cov, axis=0, weights=num_per_cls)\n # print([np.mean(i) for i in per_cls_cov])\n for cls_i, nums in enumerate(gen_nums):\n for sample_i, num in enumerate(nums):\n if num > 0:\n sampled_data.append(\n torch.from_numpy(\n np.random.multivariate_normal(\n mean=feats_per_cls[cls_i][sample_i],\n cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)\n size=num,\n )\n ).float()\n )\n sample_label.append(torch.full((num,), cls_i).long())\n\n # add generated fetaures to training data\n feat_list = torch.cat([feat_list, *sampled_data], 0)\n label_list = torch.cat([transformed_label_list, *sample_label], 0)\n\n # build new dataloader\n feats_dataset = local_client_dataset(\n feat_list, label_list, self.config\n )\n feats_loader = torch.utils.data.DataLoader(\n feats_dataset,\n batch_size=self.local_bs,\n shuffle=True,\n 
num_workers=0,\n pin_memory=False,\n )\n\n # train classifier\n for epoch in range(5):\n for (feats, labels, indexs) in feats_loader:\n feats = feats.to(self.device)\n labels = labels.to(self.device)\n logits = self.networks[idx_in_all][\"classifier\"](feats)\n loss = torch.nn.CrossEntropyLoss()(\n logits[:, unique_labels], labels\n )\n\n self.optimizers_stage2[idx_in_all].zero_grad()\n loss.backward()\n self.optimizers_stage2[idx_in_all].step()\n # print(loss)\n\n # re-sampling without feature augmentation\n else:\n for epoch in range(5):\n for (imgs, labels, indexs) in self.train_loader_balanced[idx]:\n # to device\n imgs = imgs.to(self.device)\n # forward\n with torch.no_grad():\n feat = self.networks[idx_in_all][\"feat_model\"](imgs)\n logits = self.networks[idx_in_all][\"classifier\"](feat)\n\n pos_cls = torch.unique(labels).tolist()\n transformed_labels = torch.tensor(\n [pos_cls.index(i) for i in labels]\n ).to(self.device)\n loss = torch.nn.CrossEntropyLoss()(\n logits[:, pos_cls], transformed_labels\n )\n\n self.optimizers_stage2[idx_in_all].zero_grad()\n loss.backward()\n self.optimizers_stage2[idx_in_all].step()\n # print(loss)\n\n print(\"=> \", end=\"\")\n\n\ndef fedavg(w):\n w_avg = copy.deepcopy(w[0])\n for k in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[k] += w[i][k]\n w_avg[k] = torch.div(w_avg[k] * 1.0, len(w))\n return w_avg\n\n\n# See: https://arxiv.org/abs/1909.06335\ndef fedavgm(new_ws, old_w, vel, args):\n \"\"\"\n fedavg + momentum\n - new_ws (list of OrderedDict): The new calculated global model\n - old_w (OrderedDict) : Initial state of the global model (which needs to be updated here) \n \"\"\"\n global_lr = 1\n beta1 = 0\n\n new_w = fedavg(new_ws)\n\n # For the first round: initialize old_w, create an Orderdict to store velocity\n if old_w is None:\n old_w = new_w\n new_v = OrderedDict()\n for key in old_w.keys():\n new_v[key] = torch.zeros(old_w[key].shape, dtype=old_w[key].dtype).to(\n args.device\n )\n else:\n new_v = copy.deepcopy(vel)\n\n for key in new_w.keys():\n delta_w_tmp = old_w[key] - new_w[key]\n new_v[key] = beta1 * new_v[key] + torch.mul(delta_w_tmp, global_lr)\n old_w[key] -= new_v[key]\n\n return old_w, new_v\n\n\ndef fedavgw(new_ws, old_w, args, round_i):\n \"\"\"\n fedavg + adaptive updating parameter\n - new_ws (list of OrderedDict): The new calculated global model\n - old_w (OrderedDict) : Initial state of the global model (which needs to be updated here) \n \"\"\"\n\n new_w = fedavg(new_ws)\n\n # For the first round: initialize old_w\n if old_w is None:\n old_w = new_w\n\n for key in new_w.keys():\n old_w[key] = new_w[key] * (1 / (round_i + 1)) + old_w[key] * (\n round_i / (round_i + 1)\n )\n\n # for key in new_w.keys():\n # if key == \"classifier.fc.weight\":\n # old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))\n # else:\n # old_w[key] = new_w[key]\n\n return old_w\n",
"import torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom torchvision import transforms\nimport os, random\nfrom PIL import Image\n\n\n# Data transformation with augmentation\ndef get_data_transform(split, rgb_mean, rbg_std, key='default'):\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(rgb_mean, rbg_std)\n ]) if key == 'iNaturalist18' else transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),\n transforms.ToTensor(),\n transforms.Normalize(rgb_mean, rbg_std)\n ]),\n 'val': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(rgb_mean, rbg_std)\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(rgb_mean, rbg_std)\n ])\n }\n return data_transforms[split]\n\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef non_iidness_cal(labels, idx_per_client, img_per_client):\n \"\"\"\n Argu:\n labels: list with length of n, where n is the dataset size.\n idx_per_client: list. idx_per_client[i] is the img idx in the dataset for client i\n img_per_client: list. Number of images per client.\n Return:\n - non_iidness\n \"\"\"\n client_num = len(idx_per_client)\n class_num = max(labels)+1\n label_per_client_count = np.zeros((client_num, class_num))\n\n # generate per client label counting\n labels = np.array(labels)\n for i in range(client_num):\n count = np.bincount(labels[idx_per_client[i]])\n count_pad = np.pad(count, (0, class_num-len(count)), 'constant', constant_values=(0,0))\n label_per_client_count[i] += count_pad\n\n # obtain non_iidness \n summation = 0\n label_per_client_count /= np.array([img_per_client]).T # broadcast\n for i, client_i in enumerate(label_per_client_count):\n for client_j in label_per_client_count[i:]:\n summation += np.linalg.norm(client_i-client_j, ord=1)\n \n non_iidness = summation/(client_num*(client_num-1))\n\n return non_iidness\n\n\ndef tao_sampling(img_per_client, tao):\n \"\"\"\n Do non-iid or iid sampling, according to \"tao\". \n We will sample number of \"tao\" images for every client in turn. \n --- \n Argu:\n - img_per_client: list. Number of images per client.\n - tao: number of sampled image for each client in each round. \n We use tao to control the non-iidness. When tao==1, nearly iid; \n when tao is large, it becomes non-iid. \n \"tao <= min(img_per_client/2)\" to let each client has at least 2 classes\n Return:\n - idx_per_client: list. 
idx_per_client[i] is the img idx in the dataset for client i\n \"\"\"\n # prepare parameters\n total_img_num = sum(img_per_client)\n client_num = len(img_per_client)\n idx_per_client = [[] for i in range(client_num)]\n # assert tao <= min(img_per_client)/2 \n \n available_per_client = img_per_client\n tao_count = 0\n client_k = 0\n idx = 0\n client_count = 0\n client_order = [*range(client_num)]\n\n while idx < total_img_num: # assign every samples to a client\n\n client_k = client_order[client_count]\n if available_per_client[client_k] > 0 and tao_count < tao:\n idx_per_client[client_k].append(total_img_num-idx-1) # reverse the head and tail\n tao_count += 1\n idx += 1\n available_per_client[client_k] -= 1\n \n # the client is already full, or tao samples are already assigned\n else:\n client_count = client_count + 1\n # shuffle the order of clients if a round is finished\n if client_count >= client_num:\n random.shuffle(client_order)\n client_count = 0\n tao_count = 0\n continue\n\n return idx_per_client\n\n\ndef gen_fl_data(train_label_all, num_per_cls, config):\n \"\"\"\n Generate distributed data for FL training.\n ---\n Argu:\n - train_label_all: object of a class inheriting from torch.utils.data.Dataset \n Or a list pre-stored in the RAM.\n - config: configuration dictionary\n Return:\n - idx_per_client: list. The i^th item is the img idx of the training set for client i\n - tao: int\n - non_iidness: the calculated non_iidness\n \"\"\" \n # generate img_per_client\n client_num = config[\"fl_opt\"][\"num_clients\"]\n img_per_client_dist = config[\"dataset\"][\"img_per_client_dist\"]\n total_img_num = len(train_label_all)\n if img_per_client_dist == \"uniform\":\n img_per_client = np.full(client_num, total_img_num//client_num)\n img_per_client[:total_img_num % client_num] += 1\n else: # use other img_per_client distributions: normal, LT, reverse LT\n pass\n\n # iid: tao=1; non_iid: tao=max(img_per_client)\n non_iidness_degree = config[\"dataset\"][\"non_iidness\"]\n # tao_max = min(img_per_client)#//2\n # tao = round(1 + non_iidness_degree*(tao_max-1))\n tao = int(config[\"dataset\"][\"tao_ratio\"] * num_per_cls[-1])\n idx_per_client = tao_sampling(img_per_client.copy(), tao)\n\n # calculate the real non_iidness on training set\n non_iidness = non_iidness_cal(train_label_all, idx_per_client, img_per_client)\n \n # classes per client\n cls_per_client = []\n num_per_cls_per_client = []\n for idxs in idx_per_client:\n cls, tmp = np.unique(np.array(train_label_all)[idxs], return_counts=True)\n num_per_cls = np.zeros(config[\"dataset\"][\"num_classes\"], dtype=np.int)\n np.put_along_axis(num_per_cls, cls, tmp, axis=0)\n cls_per_client.append(cls)\n num_per_cls_per_client.append(num_per_cls)\n\n return idx_per_client, tao, non_iidness, cls_per_client, num_per_cls_per_client\n \n\nfrom data.ImbalanceCIFAR import IMBALANCECIFAR10, IMBALANCECIFAR100\ndef load_CIFAR(root, cifar_select, train, num_classes, shot_num):\n \"\"\"\n Load dataset CIFAR into memory. 
Shot version.\n \"\"\"\n if num_classes > 10 and cifar_select == \"CIFAR10\":\n raise RuntimeError\n\n if train:\n if cifar_select == \"CIFAR100\":\n dataset = IMBALANCECIFAR100(\n \"train\", imbalance_ratio=1, root=root, test_imb_ratio=None, reverse=None)\n elif cifar_select == \"CIFAR10\":\n dataset = IMBALANCECIFAR10(\n \"train\", imbalance_ratio=1, root=root, test_imb_ratio=None, reverse=None)\n else:\n raise RuntimeError\n else:\n if cifar_select == \"CIFAR100\":\n dataset = IMBALANCECIFAR100(\n \"test\", imbalance_ratio=1, root=root, test_imb_ratio=None, reverse=None)\n elif cifar_select == \"CIFAR10\":\n dataset = IMBALANCECIFAR10(\n \"test\", imbalance_ratio=1, root=root, test_imb_ratio=None, reverse=None)\n else:\n raise RuntimeError\n \n ###############################################\n ####### load the whole dataset into RAM #######\n ###############################################\n # without transformation\n if cifar_select == \"CIFAR10\": # 5000*10+1000*10\n num_per_cls = 5000\n if not train:\n num_per_cls = 1000\n shot_num = 1000\n elif cifar_select == \"CIFAR100\": # 500*100+100*100\n num_per_cls = 500 \n if not train:\n num_per_cls = 100\n shot_num = 100\n\n # transformation: data are pre-loaded and do not support augmentation\n train_transform = transforms.Compose([ \n # transforms.RandomCrop(32, padding=4), \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n all_imgs = []\n all_targets = []\n cnt = 0\n for img, label in zip(dataset.data, dataset.labels):\n cnt += 1\n if train and cnt % num_per_cls >= shot_num:\n continue\n if train:\n all_imgs.append(train_transform(Image.fromarray(img)))\n else:\n all_imgs.append(test_transform(Image.fromarray(img)))\n all_targets.append(label)\n\n return all_imgs, all_targets\n\n\ndef load_CIFAR_imb(root, cifar_select, train, num_classes, imb_ratio):\n \"\"\"\n Load CIFAR into memory. 
Imbalance version.\n \"\"\"\n if num_classes > 10 and cifar_select == \"CIFAR10\":\n raise RuntimeError\n\n if train:\n if cifar_select == \"CIFAR100\":\n dataset = IMBALANCECIFAR100(\n \"train\", imbalance_ratio=imb_ratio, root=root, test_imb_ratio=None, reverse=None,\n )\n elif cifar_select == \"CIFAR10\":\n dataset = IMBALANCECIFAR10(\n \"train\", imbalance_ratio=imb_ratio, root=root, test_imb_ratio=None, reverse=None,\n )\n else:\n raise RuntimeError\n else:\n if cifar_select == \"CIFAR100\":\n dataset = IMBALANCECIFAR100(\n \"test\", imbalance_ratio=1, root=root, test_imb_ratio=None, reverse=None\n )\n elif cifar_select == \"CIFAR10\":\n dataset = IMBALANCECIFAR10(\n \"test\", imbalance_ratio=1, root=root, test_imb_ratio=None, reverse=None\n )\n else:\n raise RuntimeError\n\n print(\"Number of items per class: \", dataset.get_cls_num_list())\n\n ###############################################\n ####### load the whole dataset into RAM #######\n ###############################################\n\n # transformation: data are pre-loaded and do not support augmentation\n train_transform = transforms.Compose(\n [\n # transforms.RandomCrop(32, padding=4),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ]\n )\n test_transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ]\n )\n\n all_imgs = []\n all_targets = []\n cnt = 0\n for img, label in zip(dataset.data, dataset.labels):\n cnt += 1\n if train:\n all_imgs.append(train_transform(Image.fromarray(img)))\n else:\n all_imgs.append(test_transform(Image.fromarray(img)))\n all_targets.append(label)\n return all_imgs, all_targets, dataset.get_cls_num_list()\n\n\ndef CIFAR_FL(root, config):\n \"\"\"\n Divide CIFAR dataset into small ones for FL. \n mode: base, novel or all classes\n shot_num: the number of n (n-shot)\n Return: \n ---\n per_client_data, per_client_label: list of lists \n test_data, test_label: both are lists \n \"\"\" \n # shot_num = config['dataset']['shot']\n # assert shot_num != 0\n\n num_classes = config[\"networks\"][\"classifier\"][\"params\"][\"num_classes\"]\n imb_ratio = config[\"dataset\"][\"imb_ratio\"]\n\n # training\n cifar_select = config[\"dataset\"][\"name\"]\n train_data_all, train_label_all, train_num_per_cls = load_CIFAR_imb( \n root, cifar_select, train=True, num_classes=num_classes, imb_ratio=imb_ratio\n )\n # test\n test_data, test_label, test_num_per_cls = load_CIFAR_imb(\n root, cifar_select, train=False, num_classes=num_classes, imb_ratio=imb_ratio\n )\n\n # generate per-client FL data\n idx_per_client, tao, non_iidness, cls_per_client, num_per_cls_per_client \\\n = gen_fl_data(train_label_all, train_num_per_cls, config)\n\n client_num = config[\"fl_opt\"][\"num_clients\"]\n per_client_data = [[] for i in range(client_num)]\n per_client_label = [[] for i in range(client_num)]\n for client_i in range(client_num):\n for j in idx_per_client[client_i]:\n per_client_data[client_i].append(train_data_all[j]) \n per_client_label[client_i].append(train_label_all[j]) \n\n print(\"tao:\", tao, \"non-iidness:\", non_iidness)\n return per_client_data, per_client_label, test_data, test_label, cls_per_client, num_per_cls_per_client, train_num_per_cls\n\n\ndef CIFAR_FL_mixed(root, config):\n \"\"\"\n Divide CIFAR dataset into small ones for FL. 
\n (iid + many shot) for half of all classes; (non-iid + few shot) for remaining half classes.\n mode: base, novel or all classes\n shot_num: the number of n (n-shot)\n Return: \n ---\n per_client_data, per_client_label: list of lists \n test_data, test_label: both are lists \n \"\"\" \n shot_num = config['dataset']['shot']\n few_shot_num = config['dataset']['shot_few']\n assert (shot_num != 0 and few_shot_num != 0)\n\n num_classes = config[\"networks\"][\"classifier\"][\"params\"][\"num_classes\"]\n\n # training\n cifar_select = config[\"dataset\"][\"name\"]\n train_data_all, train_label_all = load_CIFAR(\n root, cifar_select, train=True, num_classes=num_classes, shot_num=shot_num\n )\n # test\n test_data, test_label = load_CIFAR(\n root, cifar_select, train=False, num_classes=num_classes, shot_num=shot_num\n )\n\n # per-client FL data for the first half (iid + many shot) classes\n half_data_len = int(len(train_label_all)/2)\n iid_train_data_all, iid_train_label_all = train_data_all[:half_data_len], train_label_all[:half_data_len]\n config[\"dataset\"][\"non_iidness\"] = 0\n iid_idx_per_client, tao, non_iidness, iid_cls_per_client = gen_fl_data(iid_train_label_all, config)\n print(\"IID, tao:\", tao, \"non-iidness:\", non_iidness)\n\n # per-client FL data for the remaining half (non-iid + few shot) classes\n noniid_train_data_all = []\n noniid_train_label_all = []\n cnt = 0\n for img, label in zip(train_data_all[half_data_len:], train_label_all[half_data_len:]):\n cnt += 1\n if cnt % shot_num >= few_shot_num:\n continue\n noniid_train_data_all.append(img)\n noniid_train_label_all.append(label)\n config[\"dataset\"][\"non_iidness\"] = 1\n noniid_idx_per_client, tao, non_iidness, noniid_cls_per_client = gen_fl_data(noniid_train_label_all, config)\n print(\"Non-IID, tao:\", tao, \"non-iidness:\", non_iidness)\n\n # iid + non-iid combination\n client_num = config[\"fl_opt\"][\"num_clients\"]\n per_client_data = [[] for i in range(client_num)]\n per_client_label = [[] for i in range(client_num)]\n for client_i in range(client_num):\n for j in iid_idx_per_client[client_i]:\n per_client_data[client_i].append(iid_train_data_all[j]) \n per_client_label[client_i].append(iid_train_label_all[j]) \n for j in noniid_idx_per_client[client_i]:\n per_client_data[client_i].append(noniid_train_data_all[j]) \n per_client_label[client_i].append(noniid_train_label_all[j]) \n\n cls_per_client = []\n for iid_cls, noniid_cls in zip(iid_cls_per_client, noniid_cls_per_client):\n cls_per_client.append(np.concatenate((iid_cls, noniid_cls)))\n\n return per_client_data, per_client_label, test_data, test_label, cls_per_client\n\n\nfrom matplotlib import pyplot as plt \nclass local_client_dataset(Dataset):\n def __init__(self, per_client_data, per_client_label, config, aug=False):\n self.data = per_client_data\n self.label = per_client_label\n self.dataset_name = config[\"dataset\"][\"name\"]\n self.aug = aug\n if self.dataset_name == \"CUB\":\n self.train_transform = transforms.Compose(\n [transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.4856, 0.4994, 0.4324], std=[0.2321, 0.2277, 0.2665])\n ])\n\n elif self.dataset_name in [\"CIFAR10\", \"CIFAR100\"]:\n self.train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n \n def __len__(self):\n return 
len(self.label)\n\n def __getitem__(self, index): \n data = self.data[index]\n # plt.imshow(data); plt.savefig(\"./a.jpg\")\n\n if self.dataset_name == \"CUB\":\n return self.val_transform(data), self.label[index], index\n else:\n if self.aug:\n return self.train_transform(data), self.label[index], index\n else:\n return data, self.label[index], index\n\n def get_balanced_sampler(self):\n labels = np.array(self.label) # e.g., size (n, )\n unique_labels = list(np.unique(labels)) # e.g., size (6, )\n transformed_labels = torch.tensor([unique_labels.index(i) for i in labels]) # e.g., size (n, )\n class_sample_count = np.array([len(np.where(labels==t)[0]) for t in unique_labels]) # e.g., size (6, )\n weight = 1. / class_sample_count # make every class to have balanced chance to be chosen\n samples_weight = torch.tensor([weight[t] for t in transformed_labels])\n self.class_sample_count = class_sample_count\n sampler = WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), len(samples_weight))\n return sampler\n\n\nclass test_dataset(Dataset):\n def __init__(self, per_client_data, per_client_label, config):\n self.data = per_client_data\n self.label = per_client_label\n self.dataset = config[\"dataset\"][\"name\"]\n if self.dataset == \"CUB\":\n self.val_transform = transforms.Compose(\n [transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.4856, 0.4994, 0.4324], std=[0.2321, 0.2277, 0.2665])\n ])\n\n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, index):\n \n data = self.data[index]\n # plt.imshow(data)\n # plt.savefig(\"./a.jpg\")\n\n if self.dataset == \"CUB\":\n return self.val_transform(data), self.label[index], index\n else:\n return data, self.label[index], index"
] | [
[
"torch.cat",
"torch.zeros",
"numpy.random.multivariate_normal",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.unique",
"torch.cuda.is_available",
"numpy.where",
"torch.nn.CrossEntropyLoss",
"numpy.unique",
"torch.tensor",
"torch.mul",
"torch.optim.SGD",
"numpy.zeros",
"torch.full",
"torch.zeros_like",
"torch.stack",
"numpy.array",
"numpy.average"
],
[
"numpy.unique",
"numpy.put_along_axis",
"numpy.linalg.norm",
"numpy.full",
"torch.tensor",
"numpy.concatenate",
"numpy.bincount",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alphardex/looter | [
"2be094576e31fd13123719ca94e42cb31475dffa"
] | [
"examples/baidu_index.py"
] | [
"\"\"\"\n爬取百度指数的某一时间段内的特定关键词的所有指数\n\"\"\"\nimport time\nimport looter as lt\nimport requests\nimport pandas as pd\nimport arrow\nfrom loguru import logger\n\nwords = [] # 关键词列表\nstart_date = '2018-01-29'\nend_date = '2018-12-31'\nkinds = ['all', 'pc', 'wise']\ndomain = 'http://index.baidu.com'\nheaders = {\n 'Host':\n 'index.baidu.com',\n 'Connection':\n 'keep-alive',\n 'X-Requested-With':\n 'XMLHttpRequest',\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n 'Cookie':\n 'BD_UPN=12314753; ORIGIN=2; ISSW=1; ISSW=1; BAIDUID=F0F664464891FF22022016FEED575109:FG=1; PSTM=1558524896; BIDUPSID=C9733DAACC84E56AF9FED0BDDAADA245; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDUSS=lZaZ3I2RzZnN2QtN3doRjlOcnpKMDRYOUJvVDFxVFl-WmFZODVwYTlKLW5MQ0JkSVFBQUFBJCQAAAAAAAAAAAEAAABBGFGnsOvU2MH39fwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKef-Fynn~hcQ; bdindexid=2cka9urn2rk1o4dmnsueadarc7; H_PS_PSSID=1468_21103_29237_28519_29098_29368_28832_29220; BD_HOME=1; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; delPer=0; BD_CK_SAM=1; PSINO=2; H_PS_645EC=22aaZNHp8tp6Pqs1f3AIplUyT%2F67VGrp%2B2iogcH66TNgP6TYyCWal3%2BTHPaWCW6LDeS3'\n}\ntotal = []\nname = f'popularity({start_date}-{end_date})'\nlogger.add(f'{name}.log')\n\n\ndef decrypt(key, data):\n m = list(key)\n v = data\n d = dict(zip(m[:len(m) // 2:], m[len(m) // 2::]))\n return ''.join(map(lambda x: d[x], v))\n\n\ndef crawl(word):\n try:\n url = f'{domain}/api/SearchApi/index'\n params = {\n 'word': word,\n 'startDate': arrow.get(start_date).naive,\n 'endDate': arrow.get(end_date).naive,\n 'area': 0\n }\n data = requests.get(url, params=params, headers=headers).json()\n uniqid = data['data']['uniqid']\n user_indexes = data['data']['userIndexes'][0]\n key = requests.get(f'{domain}/Interface/api/ptbk?uniqid={uniqid}', headers=headers).json()['data']\n encrypted_data = {kind: user_indexes[kind]['data'] for kind in kinds}\n decrypted_data = {kind: decrypt(key, d).split(',') for kind, d in encrypted_data.items()}\n date_range = pd.date_range(start_date, end_date).to_native_types()\n result = []\n for kind, indexes in decrypted_data.items():\n rows = [{\n 'kind': kind,\n 'date': date,\n 'index': index,\n 'keyword': word\n } for date, index in zip(date_range, indexes)]\n result.extend(rows)\n logger.info((rows[0], rows[-1]))\n total.extend(result)\n time.sleep(5)\n except Exception as e:\n logger.error(f'{word}抓取失败')\n\n\nif __name__ == '__main__':\n [crawl(word) for word in words]\n lt.save(total, name=f'{name}.csv')\n"
] | [
[
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
aiyasin/X2Paddle | [
"b37959f2ecdc09fdec7a38c01272126a7f3800e4"
] | [
"x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport numbers\nimport numpy as np\nfrom x2paddle.core.op_mapper import OpMapper\nfrom x2paddle.core.util import *\nfrom x2paddle.core.program import PaddleGraph \nfrom x2paddle.decoder.caffe_decoder import CaffeGraphNode\n\n\ndef _adjust_parameters(node):\n data = node.data\n # When using the protobuf-backend, each parameter initially has four dimensions.\n # In certain cases (like FC layers), we want to eliminate the singleton dimensions.\n # This implementation takes care of the common cases. However, it does leave the\n # potential for future issues.\n # The Caffe-backend does not suffer from this problem.\n data = list(data)\n\n squeeze_indices = [1] # Squeeze biases.\n if node.layer_type == 'InnerProduct':\n squeeze_indices.append(0) # Squeeze FC.\n\n for idx in squeeze_indices:\n if idx >= len(data):\n continue\n\n d = data[idx]\n assert len(\n d.shape\n ) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % (\n str(d.shape))\n\n shape_old = d.shape\n sq_axis = None\n if idx == 0:\n sq_axis = (0, 1)\n elif idx == 1:\n sq_axis = (0, 1, 2)\n else:\n continue\n\n data[idx] = np.squeeze(d, axis=sq_axis)\n shape_new = data[idx].shape\n return data\n\ndef _get_kernel_parameters(kind, params):\n assert kind in [\"Convolution\", \"Pooling\", \"Deconvolution\", \"ConvolutionDepthwise\"]\n [k_h, k_w] = [1, 1]\n if isinstance(params.kernel_size, numbers.Number):\n [k_h, k_w] = [params.kernel_size] * 2\n elif len(params.kernel_size) > 0:\n k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[\n 0]\n k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[\n len(params.kernel_size) - 1]\n elif params.kernel_h > 0 or params.kernel_w > 0:\n k_h = params.kernel_h\n k_w = params.kernel_w\n [s_h, s_w] = [1, 1]\n if isinstance(params.stride, numbers.Number):\n [s_h, s_w] = [params.stride] * 2\n elif len(params.stride) > 0:\n s_h = params.stride_h if params.stride_h > 0 else params.stride[0]\n s_w = params.stride_w if params.stride_w > 0 else params.stride[len(\n params.stride) - 1]\n elif params.stride_h > 0 or params.stride_w > 0:\n s_h = params.stride_h\n s_w = params.stride_w\n [p_h, p_w] = [0, 0]\n if isinstance(params.pad, numbers.Number):\n [p_h, p_w] = [params.pad] * 2\n elif len(params.pad) > 0:\n p_h = params.pad_h if params.pad_h > 0 else params.pad[0]\n p_w = params.pad_w if params.pad_w > 0 else params.pad[len(\n params.pad) - 1]\n elif params.pad_h > 0 or params.pad_w > 0:\n p_h = params.pad_h\n p_w = params.pad_w\n dila_h = dila_w = 1\n group = 1\n c_o = 1\n if kind in [\"Convolution\", \"Deconvolution\", \"ConvolutionDepthwise\"]:\n if kind in [\"Convolution\", \"Deconvolution\"]:\n c_o = params.num_output\n dila_len = len(params.dilation)\n if dila_len == 2:\n dila_h = params.dilation[0]\n dila_w = params.dilation[1]\n elif dila_len == 1:\n dila_h = dila_w = params.dilation[0]\n else:\n assert dila_len == 0, \"invalid 
length[%s] of dilation in convolution\" % (\n dila_len)\n if kind in ['Convolution', 'Deconvolution']:\n group = params.group\n kernel = [k_h, k_w]\n stride = [s_h, s_w]\n pad = [p_h, p_w]\n dilation = [dila_h, dila_w]\n return c_o, kernel, stride, pad, dilation, group\n\n\nclass CaffeOpMapper(OpMapper):\n directly_map_ops = {\n 'Sigmoid': ['paddle.nn.layer.Sigmoid'],\n 'TanH': ['paddle.nn.Tanh'],\n }\n\n def __init__(self, decoder):\n super(CaffeOpMapper, self).__init__()\n self.graph = decoder.caffe_graph\n if not self.op_checker():\n raise Exception(\"Model is not supported yet.\")\n self.params = dict()\n self.paddle_graph = PaddleGraph(parent_layer=None, graph_type=\"dygraph\", source_type=\"caffe\")\n self.paddle_graph.outputs = self.graph.output_nodes\n self.input_index = 0 \n self.inputs_info = {}\n self.nn_name2id = {}\n print(\"Total nodes: {}\".format(\n sum([\n isinstance(node, CaffeGraphNode)\n for name, node in self.graph.node_map.items()\n ])))\n print(\"Nodes converting ...\")\n for i, node_name in enumerate(self.graph.topo_sort):\n sys.stderr.write(\"\\rConverting node {} ... \".format(i + 1))\n node = self.graph.get_node(node_name)\n op = node.layer_type\n if hasattr(self, op):\n func = getattr(self, op)\n func(node)\n elif op in self.directly_map_ops:\n self.directly_map(node)\n print(\"\\nNodes converted.\")\n self.paddle_graph.set_name(self.graph.graph_name)\n self.paddle_graph.set_parameters(self.params)\n self.paddle_graph.set_inputs_info(self.inputs_info)\n \n def op_checker(self):\n unsupported_ops = set()\n for node_name in self.graph.topo_sort:\n node = self.graph.get_node(node_name)\n op = node.layer_type\n if not hasattr(self, op) and op not in self.directly_map_ops:\n unsupported_ops.add(op)\n if len(unsupported_ops) == 0:\n return True\n else:\n if len(unsupported_ops) > 0:\n print(\"\\n========= {} OPs are not supported yet ===========\".format(\n len(unsupported_ops)))\n for op in unsupported_ops:\n print(\"========== {} ============\".format(op))\n return False\n \n def directly_map(self, node):\n inputs = node.layer.input\n assert len(inputs) == 1, 'directly_map error with multi inputs'\n op_info = self.directly_map_ops[node.layer_type]\n input = self.graph.get_input_node(node, 0)\n paddle_op = op_info[0]\n if paddle_op.startswith(\"paddle.nn\"):\n op_name = paddle_op[10:].lower()\n op_name = name_generator(op_name, self.nn_name2id)\n output_name = node.name\n layer_outputs = [op_name, output_name]\n self.paddle_graph.add_layer(\n kernel=paddle_op,\n inputs={\"x\": input.name},\n outputs=layer_outputs)\n else:\n self.paddle_graph.add_layer(\n kernel=paddle_op,\n inputs={\"x\": input.name},\n outputs=[node.name])\n\n def Input(self, node):\n self.paddle_graph.add_layer(\n \"paddle.to_tensor\",\n inputs={},\n outputs=[node.layer_name],\n data=\"x{}\".format(self.input_index))\n shape = list(node.layer.input_param.shape[0].dim)[1:]\n self.inputs_info[\"x{}\".format(self.input_index)] = [[-1] + shape, \"float32\"]\n self.input_index += 1\n \n def MemoryData(self, node):\n params = node.layer.memory_data_param\n transform_params = node.layer.transform_param\n self.paddle_graph.add_layer(\n \"paddle.to_tensor\",\n inputs={},\n outputs=[node.layer_name],\n data=\"x{}\".format(self.input_index))\n shape = list()\n shape.append(params.batch_size)\n shape.append(params.channels)\n if hasattr(transform_params, \"crop_size\"):\n shape.append(transform_params.crop_size)\n shape.append(transform_params.crop_size)\n else:\n shape.append(params.width)\n 
shape.append(params.height)\n self.inputs_info[\"x{}\".format(self.input_index)] = [shape, \"float32\"]\n self.input_index += 1\n\n def Convolution(self, node):\n conv2d_name = name_generator(\"conv\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [conv2d_name, output_name]\n data = node.data\n params = node.layer.convolution_param\n out_channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n if data is None:\n data = []\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n data.append(\n np.zeros([out_channel, node.in_shapes[0][1], kernel[0], kernel[1]]).astype(\n 'float32'))\n data.append(np.zeros([out_channel, ]).astype('float32'))\n else:\n data = _adjust_parameters(node)\n self.params[conv2d_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[conv2d_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of Convolution node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n layer_attrs = {\n \"in_channels\": node.in_shapes[0][1],\n \"out_channels\": out_channel,\n \"kernel_size\": kernel,\n \"stride\": stride,\n \"padding\": pad,\n \"dilation\": dilation,\n \"groups\": group\n }\n if len(data) == 1:\n layer_attrs[\"bias_attr\"] = False\n self.paddle_graph.add_layer(\n \"paddle.nn.Conv2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def DepthwiseConvolution(self, node):\n node.layer_type = \"ConvolutionDepthwise\"\n self.ConvolutionDepthwise(node)\n\n def Deconvolution(self, node):\n conv2d_name = name_generator(\"conv\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [conv2d_name, output_name]\n data = node.data\n params = node.layer.convolution_param\n out_channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n if data is None:\n data = []\n print(\n \"The parameter of {} (type is {}) is not set. 
So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n data.append(\n np.zeros([out_channel, node.in_shapes[0][1], kernel[0], kernel[1]]).astype(\n 'float32'))\n data.append(np.zeros([out_channel, ]).astype('float32'))\n else:\n data = _adjust_parameters(node)\n self.params[conv2d_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[conv2d_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of Deconvolution node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n layer_attrs = {\n \"in_channels\": node.in_shapes[0][1],\n \"out_channels\": out_channel,\n \"kernel_size\": kernel,\n \"stride\": stride,\n \"padding\": pad,\n \"dilation\": dilation,\n \"groups\": group\n }\n if len(data) == 1:\n layer_attrs[\"bias_attr\"] = False\n self.paddle_graph.add_layer(\n \"paddle.nn.Conv2DTranspose\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def ConvolutionDepthwise(self, node):\n conv2d_name = name_generator(\"conv\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [conv2d_name, output_name]\n data = node.data\n params = node.layer.convolution_param\n out_channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n out_channel = params.num_output if params.num_output is not None else node.in_shapes[0][1]\n in_channel = node.in_shapes[0][1]\n group = int(in_channel / (in_channel / out_channel)) if in_channel > out_channel else int(in_channel /\n (out_channel / in_channel))\n if data is None:\n data = []\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n data.append(\n np.zeros([out_channel, node.in_shapes[0][1], kernel[0], kernel[1]]).astype(\n 'float32'))\n data.append(np.zeros([out_channel, ]).astype('float32'))\n else:\n data = _adjust_parameters(node)\n self.params[conv2d_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[conv2d_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of Deconvolution node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n layer_attrs = {\n \"in_channels\": in_channel,\n \"out_channels\": out_channel,\n \"kernel_size\": kernel,\n \"stride\": stride,\n \"padding\": pad,\n \"dilation\": dilation,\n \"groups\": group\n }\n if len(data) == 1:\n layer_attrs[\"bias_attr\"] = False\n self.paddle_graph.add_layer(\n \"paddle.nn.Conv2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n\n def Pooling(self, node):\n pool2d_name = name_generator(\"pool\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [pool2d_name, output_name]\n params = node.layer.pooling_param\n ceil_mode = getattr(params, \"ceil_mode\", True)\n if not hasattr(params, 'ceil_mode'):\n ceil_mode = True if getattr(params, \"round_mode\", 0) == 0 else False\n global_pool = getattr(params, \"global_pooling\", False)\n kernel_default = [1, 1]\n channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n if params.pool == 0:\n pool_type = \"max\"\n else:\n pool_type = \"avg\"\n assert len(\n node.inputs) == 1, \"The count of Pooling node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n if global_pool:\n if kernel[0] == 0:\n kernel = [1, 1]\n if params.pool == 0:\n self.paddle_graph.add_layer(\n \"paddle.nn.AdaptiveMaxPool2D\",\n inputs={\"input\": 
input.name},\n outputs=layer_outputs,\n output_size=kernel)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.AdaptiveAvgPool2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n output_size=kernel)\n else:\n layer_attrs = {\n 'kernel_size': kernel,\n 'stride': stride,\n 'padding': pad,\n 'ceil_mode': ceil_mode,\n }\n if params.pool == 0:\n self.paddle_graph.add_layer(\n \"paddle.nn.MaxPool2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.AvgPool2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n\n def LRN(self, node):\n lrn_name = name_generator(\"lrn\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [lrn_name, output_name]\n assert len(node.inputs) == 1, \"The count of LRN node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.lrn_param\n assert params.local_size % 2 == 1\n alpha = params.alpha / float(params.local_size)\n layer_attrs = {\n \"n\": params.local_size,\n \"k\": params.k,\n \"alpha\": alpha,\n \"beta\": params.beta,\n }\n self.paddle_graph.add_layer(\n \"paddle.fluid.layers.lrn\", \n inputs={\"input\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n\n\n def InnerProduct(self, node):\n linear_name = name_generator(\"linear\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [linear_name, output_name]\n data = node.data\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.inner_product_param\n if data is None:\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0.\"\n .format(node.layer_name, node.layer_type))\n data = []\n data.append(\n np.zeros([node.in_shapes[0][1], params.num_output]).astype(\"float32\").astype(\n \"float32\"))\n data.append(\n np.zeros([params.num_output]).astype(\"float32\").astype(\"float32\"))\n else:\n data = _adjust_parameters(node)\n # Reshape the parameters to Paddle's ordering\n transpose_order = (1, 0)\n w = data[0]\n fc_shape = w.shape\n output_channels = fc_shape[0]\n w = w.reshape((output_channels, -1))\n w = w.transpose(transpose_order)\n data[0] = w\n\n self.params[linear_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[linear_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of InnerProduct node\\'s input is not 1.\"\n assert params.axis == 1\n assert params.bias_term == True\n layer_attrs = {\n \"in_features\": data[0].shape[0],\n \"out_features\": params.num_output \n }\n if len(data) == 1:\n layer_attrs[\"bias\"] = False\n if node.in_shapes[0][-1] != data[0].shape[0]:\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": input.name},\n outputs=[output_name],\n shape=[-1, data[0].shape[0]])\n self.paddle_graph.add_layer(\n \"paddle.nn.Linear\",\n inputs={\"input\": output_name},\n outputs=layer_outputs,\n **layer_attrs)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.Linear\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def AbsVal(self, node):\n assert len(\n node.inputs\n ) >= 1, \"The count of AbsVal node\\'s input is not more than 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n self.paddle_graph.add_layer(\n \"paddle.abs\",\n inputs={\"input\": input.name},\n outputs=[node.layer_name])\n\n def Softmax(self, node):\n softmax_name = name_generator(\"softmax\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = 
[softmax_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of Softmax node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.softmax_param\n axis = params.axis\n shape = node.in_shapes[0]\n dims = len(shape)\n axis = axis + dims if axis < 0 else axis\n layer_attrs = {'axis': axis}\n self.paddle_graph.add_layer(\n \"paddle.nn.Softmax\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n\n def Slice(self, node):\n assert len(\n node.inputs) == 1, \"The count of Slice node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n top_len = len(node.layer.top)\n params = node.layer.slice_param\n axis = params.axis\n slice_dim = params.slice_dim\n if slice_dim != 1 and axis == 1:\n axis = slice_dim\n output_shape = node.out_shapes\n sections_list = list()\n outputs_list = list()\n for i, s in enumerate(output_shape):\n sections_list.append(s[axis])\n outputs_list.append(\"{}_p{}\".format(node.layer_name, i))\n layer_attrs = {\n 'num_or_sections': sections_list,\n 'axis': axis,\n }\n self.paddle_graph.add_layer(\n \"paddle.split\",\n inputs={\"x\": input.name},\n outputs=outputs_list,\n **layer_attrs)\n\n def Concat(self, node):\n assert len(\n node.inputs\n ) >= 1, \"The count of Concat node\\'s input is not more than 1.\"\n inputs_list = list()\n for i in range(len(node.inputs)):\n input = self.graph.get_input_node(node, idx=i, copy=True)\n inputs_list.append(input.name)\n params = node.layer.concat_param\n axis = params.axis\n layer_attrs = {'axis': axis}\n self.paddle_graph.add_layer(\n \"paddle.concat\",\n inputs={\"x\": inputs_list},\n outputs=[node.layer_name],\n **layer_attrs)\n\n def ReLU(self, node):\n relu_name = name_generator(\"relu\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [relu_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of RelU node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.relu_param\n if params.HasField('negative_slope') and params.negative_slope != 0:\n negative_slope = float(params.negative_slope)\n\n layer_attrs = {'negative_slope': negative_slope}\n self.paddle_graph.add_layer(\n \"paddle.nn.LeakyReLU\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.ReLU\",\n inputs={\"input\": input.name},\n outputs=layer_outputs)\n\n def PReLU(self, node):\n prelu_name = name_generator(\"prelu\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [prelu_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of PReLU node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.prelu_param\n mode_bool = params.channel_shared\n output_shape = node.out_shapes[0]\n if mode_bool:\n num_parameters = 1\n else:\n num_parameters = output_shape[1]\n data = node.data\n self.params[prelu_name + '._weight'] = np.squeeze(data[0])\n assert data is not None, \"The parameter of {} (type is {}) is not set. 
You need to use python package of caffe to set the default value.\".format(\n node.layer_name, node.layer_type)\n self.paddle_graph.add_layer(\n \"paddle.nn.PReLU\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n num_parameters=num_parameters)\n\n def Eltwise(self, node):\n assert len(\n node.inputs) == 2, \"The count of Eltwise node\\'s input is not 2.\"\n params = node.layer.eltwise_param\n mode = params.operation\n inputs = []\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n input0_name = input0.name\n input1_name = input1.name\n if mode == 0:\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n elif mode == 1:\n if hasattr(params, 'coeff') and len(params.coeff) == 2:\n coeff = params.coeff\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": input0_name},\n outputs=[node.layer_name + '_mul0'],\n scale=coeff[0])\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": input1_name},\n outputs=[node.layer_name + '_mul1'],\n scale=coeff[1])\n inputs_dict = {}\n inputs_dict['x'] = node.layer_name + '_mul0'\n inputs_dict['y'] = node.layer_name + '_mul1'\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n else:\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n else:\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.max\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n\n def BatchNorm(self, node):\n batchnorm_name = name_generator(\"batchnorm\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [batchnorm_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of BatchNorm node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.batch_norm_param\n if hasattr(params, \"eps\"):\n eps = params.eps\n else:\n eps = 1e-5\n if node.data is None or len(node.data) != 3:\n print(\n \"The parameter of {} (type is {}) is not set. 
So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n mean = np.zeros([node.in_shapes[0][1], ]).astype(\"float32\")\n variance = np.zeros([node.in_shapes[0][1], ]).astype(\"float32\")\n scale = 0\n else:\n\n node.data = [np.squeeze(i).astype(\"float32\") for i in node.data]\n mean, variance, scale = node.data\n # Prescale the stats\n scaling_factor = 1.0 / scale if scale != 0 else 0\n mean *= scaling_factor\n variance *= scaling_factor\n self.params[batchnorm_name + \"._mean\"] = mean\n self.params[batchnorm_name + '._variance'] = variance\n layer_attrs = {\n \"num_features\": node.in_shapes[0][1],\n \"epsilon\": eps,\n \"weight_attr\": False,\n \"bias_attr\": False,\n }\n if len(node.in_shapes[0]) == 2:\n self.paddle_graph.add_layer(\n \"paddle.unsqueeze\",\n inputs={\"x\": input.name},\n outputs=[input.name],\n axis=[2,3])\n self.paddle_graph.add_layer(\n \"paddle.nn.BatchNorm2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n if len(node.in_shapes[0]) == 2:\n self.paddle_graph.add_layer(\n \"paddle.squeeze\",\n inputs={\"x\": node.layer_name},\n outputs=[node.layer_name],\n axis=[2,3])\n \n def Scale(self, node):\n if node.data is None:\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n self.params[node.layer_name + \"_cparam1\"] = np.zeros([\n node.in_shapes[0][1],\n ]).astype(\"float32\")\n self.params[node.layer_name + \"_cparam2\"] = np.zeros([\n node.in_shapes[0][1],\n ]).astype(\"float32\")\n else:\n self.params[node.layer_name + \"_cparam1\"] = np.squeeze(node.data[\n 0]).astype(\"float32\")\n if not node.layer.scale_param.bias_term:\n self.params[node.layer_name + \"_cparam2\"] = np.zeros([\n node.in_shapes[0][1],\n ]).astype(\"float32\")\n else:\n self.params[node.layer_name + \"_cparam2\"] = np.squeeze(node.data[\n 1]).astype(\"float32\")\n params = node.layer.scale_param\n axis = params.axis\n inputs = []\n if len(node.inputs) == 2:\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n input0_name = input0.name\n input1_name = input1.name\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"],\n axis=1)\n else:\n self.paddle_graph.add_layer(\n \"self.create_parameter\",\n inputs={},\n outputs=[node.layer_name + \"_cparam1\"],\n shape=self.params[node.layer_name + \"_cparam1\"].shape,\n attr=string(node.layer_name + \"_cparam1\"))\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input0_name = input0.name\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = node.layer_name + \"_cparam1\"\n if len(node.in_shapes[0]) == 2:\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"])\n else:\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"],\n axis=axis)\n self.paddle_graph.add_layer(\n \"self.create_parameter\",\n inputs={},\n outputs=[node.layer_name + \"_cparam2\"],\n shape=self.params[node.layer_name + \"_cparam2\"].shape,\n attr=string(node.layer_name + \"_cparam2\"))\n inputs_dict = {}\n inputs_dict['x'] = node.layer_name + \"_mul\"\n inputs_dict['y'] = node.layer_name + \"_cparam2\"\n output_shape = node.out_shapes[0]\n if axis == -1:\n self.paddle_graph.add_layer(\n 
\"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n else:\n if axis < 0:\n axis = axis + len(output_shape)\n param2_shape = self.params[node.layer_name + \"_cparam2\"].shape\n param2_shape_len = len(param2_shape)\n diff_len = len(output_shape) - axis - param2_shape_len\n new_shape = list(param2_shape) + [1] * diff_len\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": node.layer_name + \"_cparam2\"},\n outputs=[node.layer_name + \"_cparam2\"],\n shape=new_shape)\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n \n def Reshape(self, node):\n input = self.graph.get_input_node(node, idx=0, copy=True)\n output_shape = node.out_shapes[0]\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n shape=output_shape)\n\n\n def ArgMax(self, node):\n assert len(node.inputs) == 1 and len(\n node.outputs\n ) == 1, \"The count of ArgMax node\\'s input and output is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n input_shape = node.in_shapes[0]\n params = node.layer.argmax_param\n out_max_val = params.out_max_val if hasattr(params,\n out_max_val) else False\n top_k = params.top_k if hasattr(params, top_k) else 1\n axis = params.axis if hasattr(params, axis) else -1\n if axis < 0:\n axis += len(input_shape)\n if out_max_val is True:\n self.paddle_graph.add_layer(\n \"paddle.topk\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name + \"_topk_var\", node.layer_name + \"_index_var\"],\n k=top_k)\n self.paddle_graph.add_layer(\n \"paddle.cast\",\n inputs={\"x\": node.layer_name + \"_index_var\"},\n outputs=[node.layer_name + \"_index_var\"],\n dtype=\"{}_topk_var.dtype\".format(node.layer_name))\n self.paddle_graph.add_layer(\n \"paddle.concat\",\n inputs={\"x\": [node.layer_name + \"_topk_var\", node.layer_name + \"_index_var\"]},\n outputs=[node.layer_name],\n axis=axis)\n else:\n self.paddle_graph.add_layer(\n \"paddle.topk\",\n inputs={\"x\": input.name},\n outputs=[\"_\", node.layer_name],\n k=top_k)\n \n def Axpy(self, node):\n assert len(node.inputs) == 1 and len(\n node.outputs\n ) == 1, \"The count of Axpy node\\'s input and output is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.axpy_param\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n input2 = self.graph.get_input_node(node, idx=2, copy=True)\n input0_name = input0.name\n input1_name = input1.name\n input2_name = input2.name\n inputs_dict = {}\n inputs_dict['x'] = input1_name\n inputs_dict['y'] = input0_name\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"],\n axis=0)\n inputs_dict = {}\n inputs_dict['x'] = node.layer_name + \"_mul\"\n inputs_dict['y'] = input2_name\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"])\n \n\n def Crop(self, node):\n assert len(\n node.inputs) == 2, \"The count of Crop node\\'s input is not 2.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n example = self.graph.get_input_node(node, idx=1, copy=True)\n params = node.layer.crop_param\n axis = params.axis\n input_shape = node.in_shapes[0]\n if axis < 0:\n axis += len(input_shape)\n offset_real = [0] * len(input_shape)\n if hasattr(params, \"offset\") and len(params.offset) > 0:\n offset = list(params.offset)\n assert (len(input_shape) - 
axis\n ) == len(offset), \"invalid offset[%s] in crop layer\" % (\n str(offset))\n offset_real = [0] * axis + offset\n self.paddle_graph.add_layer(\n \"paddle.crop\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n shape=node.in_shapes[1],\n offsets=list(offset_real))\n\n def Flatten(self, node):\n assert len(\n node.\n inputs) == 1, \"The count of DetectionOutput node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n shape=node.out_shapes[0])\n\n def Power(self, node):\n assert len(\n node.inputs) == 1, \"The count of Permute node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.power_param\n layer_attrs = {\n 'scale': params.scale,\n 'bias': params.shift,\n 'bias_after_scale': True\n }\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n self.paddle_graph.add_layer(\n \"paddle.pow\",\n inputs={\"x\": node.layer_name},\n outputs=[node.layer_name],\n exponent=params.power)\n\n def Reduction(self, node):\n assert len(\n node.inputs) == 1, \"The count of Reduction node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.reduction_param\n operation = params.operation\n axis = params.axis\n coeff = params.coeff\n assert operation >= 1 and operation <= 4, \"reduction reduction [%s] error\" % (\n operation)\n input_len = len(node.in_shapes[0])\n if axis < 0:\n axis += input_len + 1\n dim = list(range(input_len))\n # operation = SUM\n if operation == 1: \n layer_attrs = {\n \"dim\": dim[axis:],\n \"keep_dim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.sum\",\n inputs={\"input\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n # operation = ASUM\n elif operation == 2: \n self.paddle_graph.add_layer(\n \"paddle.abs\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name])\n layer_attrs = {\n \"dim\": dim[axis:],\n \"keep_dim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.sum\",\n inputs={\"input\": node.layer_name},\n outputs=[node.layer_name],\n **layer_attrs)\n # operation = SUMSQ\n elif operation == 3: \n self.paddle_graph.add_layer(\n \"paddle.pow\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n exponent=2.0)\n layer_attrs = {\n \"dim\": dim[axis:],\n \"keep_dim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.sum\",\n inputs={\"input\": node.layer_name},\n outputs=[node.layer_name],\n **layer_attrs)\n # operation = MEAN\n else: \n layer_attrs = {\n \"axis\": dim[axis:],\n \"keepdim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.mean\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": node.layer_name},\n outputs=[node.layer_name],\n scale=coeff)\n \n def DetectionOutput(self, node):\n detection_output_name = name_generator(\"detection_output\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [detection_output_name, output_name]\n assert len(\n node.inputs) == 3, \"The count of DetectionOutput node\\'s input is not 3.\"\n inputs_dict = dict()\n for i in range(len(node.inputs)):\n input = self.graph.get_input_node(node, idx=i, copy=True)\n if i == 1:\n input = self.graph.get_input_node(node, idx=i, copy=True)\n while input is not None \\\n and input.layer_type != 'Softmax' \\\n and 
input.layer_type != 'Sigmoid':\n input = self.graph.get_input_node(input, idx=0, copy=True)\n assert input is not None, 'This kind of DetectionOutput is not supported!'\n input = self.graph.get_input_node(input, idx=0, copy=True)\n inputs_dict[\"x{}\".format(i)] = input.name\n params = node.layer.detection_output_param\n nms_param = params.nms_param\n nms_param_dict = dict()\n nms_param_dict[\"nms_threshold\"] = nms_param.nms_threshold\n nms_param_dict[\"top_k\"] = nms_param.top_k\n nms_param_dict[\"eta\"] = nms_param.eta\n if nms_param is None:\n nms_param_dict = {\"nms_threshold\": 0.3, \"top_k\": 10, \"eta\": 1.0}\n default = {\"nms_threshold\": 0.3, \"top_k\": 10, \"eta\": 1.0}\n fields = [\"eta\", \"top_k\", \"nms_threshold\"]\n for f in default.keys():\n if f not in nms_param_dict:\n nms_param_dict[f] = default[f]\n layer_attrs = {\n \"background_label\": params.background_label_id,\n \"nms_threshold\": nms_param_dict[\"nms_threshold\"],\n \"nms_top_k\": nms_param_dict[\"top_k\"],\n \"keep_top_k\": params.keep_top_k,\n \"score_threshold\": params.confidence_threshold,\n \"nms_eta\": nms_param_dict[\"eta\"]}\n self.paddle_graph.add_layer(\n kernel=\"custom_layer:DetectionOutput\",\n inputs=inputs_dict,\n outputs=layer_outputs,\n **layer_attrs)\n \n def Normalize(self, node):\n normalize_name = name_generator(\"normalize\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [normalize_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of Normalize node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.norm_param\n param_name = node.layer_name + \"_scale\"\n if node.data is None or len(node.data) != 1:\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n self.params[param_name] = \\\n np.zeros([1] if params.channel_shared else [node.in_shapes[0][1]]).astype(\"float32\")\n else:\n self.params[param_name] = _adjust_parameters(node)[0]\n \n \n self.paddle_graph.add_layer(\n \"self.create_parameter\",\n inputs={},\n outputs=[param_name],\n shape=self.params[param_name].shape,\n attr=string(param_name))\n inputs_dict = {}\n layer_attrs = {\n \"axis\": -1 if params.channel_shared else 1}\n self.paddle_graph.add_layer(\n \"custom_layer:Normalize\",\n inputs={\"x\": input.name,\n \"param\": param_name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def Permute(self, node):\n assert len(\n node.inputs) == 1, \"The count of Permute node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.permute_param\n order = list(params.order) \n self.paddle_graph.add_layer(\n \"paddle.transpose\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n perm=order)\n \n def PriorBox(self, node):\n priorbox_name = name_generator(\"priorbox\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [priorbox_name, output_name]\n assert len(\n node.inputs) == 2, \"The count of PriorBox node\\'s input is not 2.\"\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n inputs_dict = {}\n inputs_dict[\"x0\"] = input0.name\n inputs_dict[\"x1\"] = input1.name\n params = node.layer.prior_box_param\n steps = tuple(params.step) if type(params.step) \\\n is list or type(params.step) is tuple \\\n else (params.step, params.step)\n layer_attrs = {\n \"min_sizes\": params.min_size,\n \"max_sizes\": params.max_size,\n 
\"aspect_ratios\": params.aspect_ratio,\n \"variance\": params.variance,\n \"flip\": params.flip,\n \"clip\": params.clip,\n \"steps\": steps,\n \"offset\": params.offset,\n \"min_max_aspect_ratios_order\": True}\n self.paddle_graph.add_layer(\n \"custom_layer:PriorBox\",\n inputs=inputs_dict,\n outputs=layer_outputs,\n **layer_attrs)\n \n def ReLU6(self, node):\n relu6_name = name_generator(\"relu6\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [relu6_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of RelU6 node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n self.paddle_graph.add_layer(\n \"paddle.nn.ReLU6\",\n inputs={\"input\": input.name},\n outputs=layer_outputs)\n \n def ROIPooling(self, node):\n roipooling_name = name_generator(\"roipooling\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [roipooling_name, output_name]\n assert len(\n node.inputs) == 2, \"The count of ROIPooling node\\'s input is not 2.\"\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n inputs_dict = {}\n inputs_dict[\"x0\"] = input0.name\n inputs_dict[\"x1\"] = input1.name\n params = node.layer.roi_pooling_param\n layer_attrs = {\n \"pooled_height\": params.pooled_h,\n \"pooled_width\": params.pooled_w,\n \"spatial_scale\": params.spatial_scale}\n self.paddle_graph.add_layer(\n \"custom_layer:ROIPooling\",\n inputs=inputs_dict,\n outputs=layer_outputs,\n **layer_attrs)\n \n def ShuffleChannel(self, node):\n assert len(\n node.inputs) == 1, \"The count of ShuffleChannel node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.shuffle_channel_param\n self.paddle_graph.add_layer(\n \"paddle.fluid.layers.shuffle_channel\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n group=params.group)\n \n def Upsample(self, node):\n assert len(\n node.inputs) == 1, \"The count of Upsample node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.upsample_param\n layer_attrs = {\n \"align_corners\": False,\n \"scale_factor\": params.scale,\n \"mode\": \"nearest\"}\n self.paddle_graph.add_layer(\n \"paddle.nn.functional.interpolate\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n \n def Select(self, node):\n select_name = name_generator(\"select\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [select_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of Select node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n input_shape = node.in_shapes[0]\n params = node.layer.select_param\n layer_attrs = {\n \"input_shape\": input_shape,\n \"point\": params.slice_point,\n \"axis\": params.axis}\n self.paddle_graph.add_layer(\n \"custom_layer:Select\",\n inputs={\"x\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n\n \n\n"
] | [
[
"numpy.squeeze",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ahillbs/minimum_scan_cover | [
"e41718e5a8e0e3039d161800da70e56bd50a1b97",
"e41718e5a8e0e3039d161800da70e56bd50a1b97"
] | [
"code/instance_evolver.py",
"code/solver/mip/angular_dependency.py"
] | [
"import os\nimport subprocess\nfrom inspect import isclass\n\nimport configargparse\nimport numpy as np\nimport sqlalchemy\nimport yaml\nfrom IPython import embed\n\n\nfrom angular_solver import solve\nfrom database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\nfrom genetic_algorithm import (GeneticAlgorithm, Genome,\n IterationTerminationConditionMet, SaveCallback,\n k_point_crossover, linear_rank_selection,\n one_point_crossover, uniform_crossover,\n uniform_wheel_selection)\nfrom instance_generation import (create_circle, create_circle_n_k,\n create_random_circle)\nfrom solver import MscColoringSolver, AngularMinSumGreedySolver\nfrom solver.min_sum_simple_solver import solve_min_sum_simple_n_gon\nfrom solver.mip import (AngularGraphScanMakespanAbsolute,\n AngularGraphScanMakespanAbsoluteReduced,\n AngularGraphScanMakespanHamilton,\n AngularGraphScanMinSumHamilton,\n AngularDependencySolver,\n AngularDependencyLocalMinSumSolver,\n AngularGraphScanLocalMinSumHamilton)\nfrom solver.cp import (ConstraintAbsSolver,\n ConstraintDependencySolver)\nfrom utils import (Multidict, visualize_graph_2d, visualize_min_sum_sol_2d,\n visualize_solution_2d)\nfrom angular_evolver import (AngularSolverFitness, CompleteGraphGenome, GraphGenome, GraphGenomeCreator,\n CompleteGraphGenomeCreator, mutate_2d_points, mutate_vertex_edge_genomes)\nfrom solver import ALL_SOLVER\n\nclass GroupedAction(configargparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n group, dest = self.dest.split('.', 2)\n groupspace = getattr(namespace, group, configargparse.Namespace())\n setattr(groupspace, dest, values)\n setattr(namespace, group, groupspace)\n\ndef string_to_callable(function_name):\n assert function_name != 'eval', \"Eval is not allowed!\"\n warning_displayed_once = getattr(StringToCallableAction, \"warning_displayed\", False)\n if not warning_displayed_once:\n print(\"WARNING: Do not use StringToCallableAction in production code! This is just a hack for faster development!\")\n setattr(StringToCallableAction, \"warning_displayed\", True)\n try:\n call = ALL_SOLVER[function_name]\n except KeyError:\n call = globals()[function_name]\n return call\n\nclass StringToCallableAction(configargparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n \n warning_displayed_once = getattr(StringToCallableAction, \"warning_displayed\", False)\n if not warning_displayed_once:\n print(\"WARNING: Do not use StringToCallableAction in production code! This is just a hack for faster development!\")\n setattr(StringToCallableAction, \"warning_displayed\", True)\n call = globals()[values]\n if callable(call):\n setattr(namespace, self.dest, call)\n else:\n raise TypeError(f\"{values} is not callable\")\n\ndef _instantiate_callables(func_name, obj_args):\n callable_obj = string_to_callable(func_name)\n if not callable_obj:\n raise AttributeError(f\"{func_name} function is not set.\".capitalize())\n if not isclass(callable_obj):\n return callable_obj\n if not obj_args:\n obj_args = {}\n return callable_obj(**obj_args)\n\ndef _get_task_and_config(session, arg_config):\n task = None\n config = None\n if arg_config.url_path:\n if hasattr(arg_config, \"task\") and arg_config.task is not None:\n task = session.query(Task).filter(Task.id == arg_config.task).one()\n if arg_config.override_config and \\\n input(f\"Are you sure to override the configs for {task.id}? 
(y/N)\").lower() in [\"y\", \"yes\"]:\n print(f\"Override config from task {task.id})\")\n for task_config in task.configs:\n session.delete(task_config)\n arg_config.override_config = False\n config = ConfigHolder.fromNamespace(arg_config, task, [\"override_config\", \"url_path\", \"PreEvolveInteractive\", \"create_only\"])\n session.add(config)\n session.commit()\n else:\n print(\"Using config from database\")\n config = ConfigHolder(task)\n else:\n if input(\"New Task will be created (Y/n)?\").lower() in [\"\", \"yes\", \"y\"]:\n print(\"Will create a new task.\")\n task = Task(task_type=\"instance_evolver\", status=Task.STATUS_OPTIONS.CREATED, name=arg_config.name)\n session.add(task)\n session.commit()\n arg_config.task = task.id\n config = ConfigHolder.fromNamespace(arg_config, task, ignored_attributes=[\"url_path\", \"create_only\", \"name\", \"override_config\"])\n session.add_all(config.database_configs)\n session.commit()\n savepath = input(f\"Task ID is {task.id}. Type a filepath to save the ID in a config file (default: Skip save): \")\n if savepath:\n _save_task_file(savepath, config, task)\n else:\n config = arg_config\n return task, config\n\ndef _save_task_file(savepath, config, task):\n n_s = configargparse.Namespace()\n n_s.task = task.id\n parser = configargparse.Parser()\n parser.add_argument(\"--task\")\n parser.add_argument(\"--database\")\n parsed = parser.parse_args(args=[f\"--task={task.id}\", f\"--database={config.url_path}\"])\n parser.write_config_file(n_s, [savepath])\n\ndef _evolve_instances(arg_config):\n session = get_session(arg_config.url_path)\n task, config = _get_task_and_config(session, arg_config)\n\n if not arg_config.create_only:\n process_task(config, task, session)\n\ndef process_task(config, task, session):\n # First init all callable classes\n try:\n mutation = _instantiate_callables(config.mutation_func, None)\n selection = _instantiate_callables(config.selection_func, None)\n crossover = _instantiate_callables(config.crossover_func, None)\n fitness = _instantiate_callables(config.fitness_func, config.fitness_func_initargs)\n if config.term_condition == 'IterationTerminationConditionMet' and not config.term_condition_initargs:\n term_con = IterationTerminationConditionMet(max_iter=config.generations)\n else:\n term_con = _instantiate_callables(config.term_condition, config.term_condition_initargs)\n if config.callback == 'SaveCallback' and config.callback_initargs is None:\n callback = SaveCallback(config.generations, config.population_amount, task, session)\n else:\n callback = _instantiate_callables(config.callback, config.callback_initargs)\n task.status = Task.STATUS_OPTIONS.PROCESSING\n if session:\n session.commit()\n # Now load population if provided, else generate it\n starting_generation, population = _load_population(config, task, session)\n\n if config.PreEvolveInteractive:\n print(\"Config set up. 
To change the population just change the 'population' variable.\")\n print(\"For other variables just refer to the locals.\")\n embed()\n\n gen_algo = GeneticAlgorithm(\n genomes=population,\n selection=selection,\n mutation=mutation,\n fitness=fitness,\n crossover=crossover,\n callback=callback,\n termCon=term_con,\n elitism=config.elitism,\n mutationChance=config.mutation_chance_genome,\n mutationChanceGene=config.mutation_chance_gene\n )\n gen_algo.evolve(generation=starting_generation)\n task.status = Task.STATUS_OPTIONS.FINISHED\n if session:\n session.commit()\n except InterruptedError as e:\n task.status = task.STATUS_OPTIONS.INTERRUPTED\n if session:\n session.commit()\n except Exception as e:\n if session:\n task.status = Task.STATUS_OPTIONS.ERROR\n task.error_message = str(e)\n session.commit()\n print(e)\n raise e\n\ndef _load_population(config, task, session: 'Session'):\n population = []\n curr_generation = 0\n if session is not None:\n try:\n last_gen = session.query(DatabaseGraphGenome)\\\n .filter(DatabaseGraphGenome.task_id == task.id)\\\n .order_by(DatabaseGraphGenome.generation.desc())\\\n .limit(1)\\\n .one()\n curr_generation = last_gen.generation\n queue = session.query(DatabaseGraphGenome)\\\n .filter(DatabaseGraphGenome.task_id == task.id, DatabaseGraphGenome.generation == curr_generation)\\\n .order_by(DatabaseGraphGenome.generation.desc())\\\n .limit(config.population_amount)\n population = np.zeros(config.population_amount, dtype=object)\n population[:] = [genome for genome in queue]\n assert isinstance(population[0], Genome), \"Loaded data does not contain valid genomes\"\n except sqlalchemy.orm.exc.NoResultFound as e:\n pass\n\n if len(population) < config.population_amount:\n if population:\n print(\"Given population smaller than wanted. Fill with random instances\")\n temp_pop = np.zeros(config.population_amount - len(population), dtype=object)\n create_instances = _instantiate_callables(config.instance_creation_func, config.instance_creation_initargs)\n temp_pop[:] = [\n create_instances(task, generation=curr_generation)\n for i in range(config.population_amount - len(population))\n ]\n session.add_all(temp_pop.tolist())\n session.commit()\n population = np.hstack([population[:len(population)],\n temp_pop]) # ToDo: This call needs to be reworked\n elif len(population) > config.population_amount:\n print(\"Given population too large. Will slice off the end\")\n population = population[:config.population_amount]\n\n return curr_generation, population\n\n\ndef _argument_parser():\n parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\")\n parser.add_argument(\n '--config',\n type=str,\n help='Path to config file (default: inst_evo_settings.yaml)',\n default=\"inst_evo_settings.yaml\",\n is_config_file_arg=True)\n parser.add_argument(\n '--PreEvolveInteractive',\n action='store_true',\n help='Ipython interactive for instance creation (default: False)',\n default=False)\n parser.add_argument('--override-config', action=\"store_true\", default=False, help=\"Set this flag to override configuration with passed arguments\")\n parser.add_argument('--url-path', type=str, default=\"angular.db\", help=\"Path to database. Creates Database if it does not exist (Default: angular.db)\")\n parser.add_argument('--task', type=int, help=\"Id of the task that shall be continued\")\n parser.add_argument('--generations', type=int, default=200, help=\"Amount of generations evolved. 
If a save is loaded, it will only evolve the difference for the generations (default: 200)\")\n parser.add_argument('--elitism', type=float, default=0.01, help=\"Elitism rate (default: 0.01)\")\n #parser.add_argument('--genome-creator',\n parser.add_argument('--instance-creation-func', type=str, help=\"Function for initial creation of instances\")\n parser.add_argument('--instance-creation-initargs', type=yaml.safe_load, help=\"Parameter for instance creation\")\n parser.add_argument('--population-amount', type=int, default=200, help=\"Amont of genomes per generation (default: 200)\")\n parser.add_argument('--mutation-chance-genome', type=float, default=0.03, help=\"Chance a genome will be selected for mutation (default: 0.03)\")\n parser.add_argument('--mutation-chance-gene', type=float, default=0.03, help=\"Chance a gene is changed (default: 0.03)\")\n parser.add_argument('--mutation-func', type=str, help=\"Mutation callable used. Required if no safefile config is used\")\n parser.add_argument('--selection-func', type=str, help=\"Selection callable used. Required if no safefile is used\")\n parser.add_argument('--crossover-func', type=str, help=\"Crossover callable used. Required if no safefile is used\")\n parser.add_argument('--fitness-func', type=str, help=\"Fitness callable used. Required if no safefile is used\")\n parser.add_argument('--fitness-func-initargs', type=yaml.safe_load, default=None, help=\"Fitness callable init keyword arguments. Omitted when emtpy.\")\n parser.add_argument('--term-condition', type=str, default='IterationTerminationConditionMet', help=\"Termination callable used. (default: IterationTerminationConditionMet)\")\n parser.add_argument('--term-condition-initargs', type=yaml.safe_load, default=None, help=\"Keyword arguments dict for termination condition callable init. Not needed for standard term-condition.\")\n parser.add_argument('--callback', type=str, default='SaveCallback', help=\"Callback used in genetic_algorithm (default: SaveCallback)\")\n parser.add_argument('--callback-initargs', type=yaml.safe_load, default=None, help=\"Callback keyword arguments dict for init. Not needed for standard SaveCallback else omitted if not provided\")\n parser.add_argument('--create-only', action=\"store_true\", help=\"Only create task instead of also solving it\")\n parser.add_argument('--name', type=str, default=\"\", help=\"Optional name description of the task\")\n parsed = parser.parse_args()\n #parser.write_config_file()\n #print(vars(parsed))\n return parsed\n\nif __name__ == \"__main__\":\n CONFIG = _argument_parser()\n _evolve_instances(CONFIG)\n",
"import math\nfrom typing import Union, Optional, List, Tuple\nimport gurobipy as grb\nimport numpy as np\n\nfrom utils import Multidict, callback_rerouter, convert_graph_to_angular_abstract_graph, calculate_times, is_debug_env\nfrom utils.dependency_graph import DependencyNode, calculate_order, DisconnectedDependencyGraphException, CircularDependencyException, calculate_cycle\nfrom solver import Solver\nfrom database import AngularGraphSolution, Graph\n\nclass AngularDependencySolver(Solver):\n solution_type = \"min_sum\"\n\n def __init__(self, time_limit=900, with_vertex_subtour_constr=False, **kwargs):\n self.with_vertex_subtour_constr = with_vertex_subtour_constr\n self.graph = None\n self.abstract_graph = None\n self.model = None\n self.edges = None\n self.v_incident_edges = dict()\n #self.vertex_edges = None\n super().__init__(kwargs.pop(\"params\", {\"TimeLimit\": time_limit}))\n\n def is_multicore(self):\n return True\n\n def solve(self, graph: Graph, **kwargs):\n error_message = None\n returned_order = None\n is_optimal = False\n runtime = 0\n try:\n self.build_model(graph)\n if \"time_limit\" in kwargs: \n self.model.setParam(\"TimeLimit\", kwargs.pop(\"time_limit\"))\n self.add_start_solution(graph, kwargs.pop(\"start_solution\", None))\n self._add_callbacks(kwargs.pop(\"callbacks\", None))\n if kwargs.pop(\"relax\", False):\n old_edges = self.edges\n used_edges = None\n rel_model = self.model.relax()\n keys, self.edges = grb.multidict({key: rel_model.getVarByName(self.edges[key].VarName) for key in self.edges})\n rel_model.optimize(callback_rerouter)\n runtime = self.model.Runtime\n \n else:\n circle_found = True\n max_runtime = self.params[\"TimeLimit\"]\n while(circle_found and max_runtime > 0):\n self.model.optimize(callback_rerouter)\n max_runtime -= self.model.Runtime\n runtime = abs(self.params[\"TimeLimit\"] - max_runtime)\n try:\n used_edges = grb.tupledict({key: self.edges[key] for key in self.edges if not math.isclose(0, self.edges[key].x, abs_tol=10**-6)})\n circle_found = self._check_for_cycle(used_edges, self.model, lazy=False)\n if circle_found and max_runtime > 0:\n self.model.setParam(\"TimeLimit\", max_runtime)\n except AttributeError as e:\n # Can happen if no solution was found in the time limit\n # If not, raise error\n if runtime < self.params[\"TimeLimit\"]:\n raise e\n \n\n is_optimal = self.model.Status == grb.GRB.OPTIMAL\n if is_debug_env():\n local_subtours = 0\n for circle in self.found_circles:\n verts = [key[1] for key in circle]\n for v_i in self.v_incident_edges:\n if self.v_incident_edges[v_i].issuperset(verts):\n local_subtours += 1\n break\n print(\"Overall subtours:\", len(self.found_circles), \"local subtours:\", local_subtours,\\\n \"in percent:\", local_subtours*100/len(self.found_circles))\n \n try:\n used_edges = {key: self.edges[key] for key in self.edges if not math.isclose(0, self.edges[key].x, abs_tol=10**-6)}\n dep_graph = self._get_dep_graph(used_edges)\n order = calculate_order(dep_graph, calculate_circle_dep=True)\n returned_order = [tuple(self.abstract_graph.vertices[i]) for i in order]\n except (CircularDependencyException, AttributeError) as e:\n # If we have a circular dependency after the time limit we just didnt managed to get a feasable solution in time\n # Else something went wrong and the error should be raised\n if runtime < self.params[\"TimeLimit\"]:\n raise e\n except Exception as e:\n error_message = str(e)\n if is_debug_env():\n raise e\n #times = calculate_times(returned_order, self.graph)\n sol = 
AngularGraphSolution(self.graph,\n runtime,\n solution_type=self.solution_type,\n solver=self.__class__.__name__,\n is_optimal=is_optimal,\n order=returned_order,\n error_message=error_message)\n return sol\n\n def build_model(self, graph: Graph):\n self.found_circles = []\n self.graph = graph\n self.abstract_graph = convert_graph_to_angular_abstract_graph(graph, simple_graph=False)\n self.model = grb.Model()\n self.model.setParam(\"LazyConstraints\", 1)\n for param in self.params:\n self.model.setParam(param, self.params[param])\n\n #self.vertex_edges = grb.tupledict()\n costs = self._add_variables(self.abstract_graph)\n self._add_objective(costs)\n self._add_constraints()\n \n self.model.update()\n \n def add_start_solution(self, graph: Graph, solution: Union[AngularGraphSolution, List[Tuple[int, int]]]):\n if solution is None:\n return\n if isinstance(solution, AngularGraphSolution):\n assert graph == solution.graph, \"Solution does not match the graph\"\n edge_indices = {(v1, v2): i for i, (v1, v2) in enumerate(graph.edges)}\n heads = [[] for i in range(graph.vert_amount)]\n order = solution.order\n else:\n order = solution\n for edge in order:\n for vert in edge:\n other = set(edge).difference([vert]).pop()\n heads[vert].append(other)\n cost = 0\n for i, head in enumerate(heads):\n prev = None\n for vertex in head:\n if prev is not None:\n sorted_edge_prev = tuple(sorted([i, prev]))\n sorted_edge = tuple(sorted([i, vertex]))\n cost += self.abstract_graph.costs[edge_indices[sorted_edge_prev], edge_indices[sorted_edge]]\n abs_edge = self.edges[edge_indices[sorted_edge_prev], edge_indices[sorted_edge]]\n abs_edge.Start = 1\n prev = vertex \n \n def set_output(self, get_output: bool):\n try:\n self.model.setParam(\"OutputFlag\", int(get_output))\n except Exception as e:\n raise e\n def set_max_threads(self, max_threads: int):\n self.model.setParam(grb.GRB.Param.Threads, max_threads)\n\n def _add_variables(self, abs_graph: Graph):\n edges, costs = grb.multidict(abs_graph.costs)\n self.edges = self.model.addVars(edges, vtype=grb.GRB.BINARY, name=\"Abs_graph_edges\")\n return costs\n\n def _add_constraints(self):\n l = len(self.abstract_graph.vertices)\n for v_i in range(l):\n v = self.abstract_graph.vertices[v_i]\n for v_j in v:\n incident_vertices = [\n i for i in range(l)\n if np.intersect1d(self.abstract_graph.vertices[i], v_j).size > 0\n ]\n sub_out = self.edges.subset(v_i, incident_vertices)\n sub_in = self.edges.subset(incident_vertices, v_i)\n if len(sub_out) > 1:\n self.model.addConstr(sub_out.sum() <= 1)\n if len(sub_in) > 1:\n self.model.addConstr(sub_in.sum() <= 1)\n\n for v_i in range(len(self.graph.vertices)):\n # Constraint over all vertices: least 2k-1 connections between incident edges\n incident_vertices = [\n i for i in range(l)\n if np.intersect1d(self.abstract_graph.vertices[i], v_i).size > 0\n ]\n self.v_incident_edges[v_i] = set(incident_vertices)\n if len(incident_vertices) > 1:\n self.model.addConstr(\n self.edges.sum(incident_vertices, incident_vertices) == len(incident_vertices)-1,\n name=\"IncidentEdgeNumConstr\")\n \n # No self circle\n for t in self.edges:\n sub = self.edges.subset(t, t)\n self.model.addConstr(sub.sum() <= 1, name=\"SelfCycleConstr\")\n \n \n\n def _add_objective(self, costs):\n self.model.setObjective(sum([costs[edge] * self.edges[edge] for edge in self.edges]), grb.GRB.MINIMIZE)\n\n def _general_circle_elimination(self, model: grb.Model):\n edges_solution = model.cbGetSolution(self.edges)\n used_edges = grb.tupledict({key: edges_solution[key] 
for key in edges_solution if not math.isclose(0, edges_solution[key], abs_tol=10**-5)})\n for edge in used_edges:\n if used_edges[edge] < 0.7:\n print(\"Found an edge with value less than 0.7\")\n \n self._check_for_cycle(used_edges, model)\n return\n # Turn used_edges into a dependency graph\n dep_graph = self._get_dep_graph(used_edges)\n\n try:\n calculate_order(dep_graph, calculate_circle_dep=True)\n except CircularDependencyException as dep_exception:\n self._add_cycle_constr(dep_exception.circle_nodes, model)\n # For now we try to ignore this disconnected graphs\n except DisconnectedDependencyGraphException as disc_exception:\n cycle = calculate_cycle(disc_exception.disconnected_nodes)\n self._add_cycle_constr(cycle, model)\n #print(\"WARNING: DISCONNECTED DEPENDENCY GRAPH DETECTED!\")\n \n def _check_for_cycle(self, used_edges: grb.tupledict, model: grb.Model, lazy=True):\n unseen = {i for i in range(len(self.abstract_graph.vertices))}\n queued = set()\n while unseen:\n queued.add((None, unseen.pop()))\n prev = {}\n seen = set()\n while queued:\n edge = queued.pop()\n sub = used_edges.subset(edge[1], '*')\n seen.add(edge[1])\n for key in sub:\n destination = key[1]\n if destination in unseen:\n try:\n pass#unseen.remove(destination)\n except KeyError:\n print(\"Double unseen event!\")\n pass # It can happen that some nodes will be seen multiple times\n queued.add(key)\n prev[key] = edge\n if destination in seen:\n path = [key, edge]\n while path[-1][0] and path[-1][0] != destination:\n path.append(prev[path[-1]])\n if path[-1][0] == destination:\n self.found_circles.append(path)\n expr = None\n if self.with_vertex_subtour_constr:\n abs_vert_on_path = [key[1] for key in path]\n for v_i in range(self.graph.vert_amount):\n if self.v_incident_edges[v_i].issuperset(abs_vert_on_path):\n diff = self.v_incident_edges[v_i].difference(abs_vert_on_path)\n incoming = self.edges.subset(diff, abs_vert_on_path)\n outgoing = self.edges.subset(abs_vert_on_path, diff)\n expr = incoming.sum() + outgoing.sum() >= 1\n break\n \n if expr is None:\n expr = sum(self.edges[i] for i in path) <= len(path)-1\n \n if lazy:\n model.cbLazy(expr)\n else:\n model.addConstr(expr)\n #print(\"Found cycle\", path)\n return True\n #print(\"No cycle found for:\", prev)\n return False\n \n\n def _add_cycle_constr(self, cycle_nodes, model: grb.Model):\n cycle = [nodes.value for nodes in cycle_nodes]\n l = len(cycle)\n cycle_edges = [\n (cycle[i], cycle[((i+1) % l)])\n for i in range(l)\n ]\n cycle_edges_rev = [\n (cycle[((i+1) % l)], cycle[i])\n for i in range(l)\n ]\n #print(\"CYCLE EDGES:\", cycle_edges)\n l = len(cycle_edges)-1\n try:\n cycle_edges_vars = grb.tupledict({i: self.edges[i] for i in cycle_edges})\n cycle_edges_rev_vars = grb.tupledict({i: self.edges[i] for i in cycle_edges_rev})\n exp = grb.LinExpr(cycle_edges_vars.sum())\n exp_rev = grb.LinExpr(cycle_edges_rev_vars.sum())\n model.cbLazy(exp <= l)\n model.cbLazy(exp_rev <= l)\n #model.addConstr(exp <= l)\n except grb.GurobiError as err:\n print(\"ERROR: Gurobi error with number:\", err.errno)\n\n def _get_dep_graph(self, used_edges):\n dep_graph = {key: DependencyNode(key) for key in range(len(self.abstract_graph.vertices))}\n for come, to in used_edges:\n dep_graph[come].add_dependency(dep_graph[to])\n return dep_graph\n\n def _add_callbacks(self, callbacks: Optional[Union[Multidict, dict]] = None):\n # Add callbacks\n own_callbacks = Multidict({grb.GRB.Callback.MIPSOL: self._general_circle_elimination})\n if callbacks:\n own_callbacks.update(callbacks)\n 
callback_rerouter.inner_callbacks = own_callbacks\n\n def _cleanup(self, **kwargs):\n callback_rerouter.inner_callbacks = None\n self.abstract_graph = None\n self.graph = None\n self.model = None\n self.edges = None\n self.add_path_at_start = self._overridden_path_at_start\n \n\nclass AngularDependencyLocalMinSumSolver(AngularDependencySolver):\n solution_type = \"local_min_sum\"\n\n def __init__(self, **kwargs):\n self.local_sum = None\n super().__init__(**kwargs)\n\n def _add_variables(self, abs_graph: Graph):\n super()._add_variables(abs_graph)\n self.local_sum = self.model.addVar(lb=0, name=\"local_sum\")\n\n def _add_constraints(self):\n super()._add_constraints()\n l = len(self.abstract_graph.vertices)\n for v_i in range(len(self.graph.vertices)):\n # Constraint over all vertices: least 2k-1 connections between incident edges\n incident_vertices = [\n i for i in range(l)\n if np.intersect1d(self.abstract_graph.vertices[i], v_i).size > 0\n ]\n edges = self.edges.subset(incident_vertices, incident_vertices)\n self.model.addConstr(sum(self.abstract_graph.costs[key] * edges[key] for key in edges)\n <= self.local_sum,\n name=\"local_sum_constr\")\n \n def _add_objective(self, costs):\n self.model.setObjective(self.local_sum, grb.GRB.MINIMIZE)\n "
] | [
[
"numpy.zeros"
],
[
"numpy.intersect1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
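The `AngularDependencySolver` source in the row above repeatedly collects the abstract-graph vertices (edges of the original graph) incident to an original vertex by testing `np.intersect1d(...).size > 0`. A toy sketch of that incidence test, where `incident_indices` is a hypothetical helper name and the four edges are a made-up example:

```python
import numpy as np

# Each abstract vertex is an edge of the original graph (a pair of vertex ids).
abstract_vertices = [(0, 1), (1, 2), (2, 3), (0, 3)]

def incident_indices(abstract_vertices, original_vertex):
    # Same membership test as the constraint-building loops above.
    return [i for i, pair in enumerate(abstract_vertices)
            if np.intersect1d(pair, original_vertex).size > 0]

print(incident_indices(abstract_vertices, 0))  # [0, 3]
print(incident_indices(abstract_vertices, 2))  # [1, 2]
```

In the solver these index sets feed the degree constraints: at most one selected outgoing and one selected incoming transition per incident pair, and exactly `k - 1` selected transitions around an original vertex with `k` incident edges.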
aounleonardo/Spread-Classification | [
"22c643252e31df367dfeb55fd1a5397dabd7f2b4"
] | [
"modeling/src/nlp_ignite_engines.py"
] | [
"import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, attention_mask, y = batch\n return (\n convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(attention_mask, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking).float(),\n )\n\n\ndef create_nlp_trainer(\n model,\n optimizer,\n loss_fn,\n device=None,\n non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred, loss: loss.item(),\n):\n \"\"\"\n Factory function for creating a trainer for nlp models.\n The only difference with the ignite create_supervised_trainer is the attention to attention_mask (pun intented).\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n of the processed batch by default.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, attention_mask, y = prepare_batch(\n batch, device=device, non_blocking=non_blocking\n )\n y_pred = model(x, attention_mask=attention_mask)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return output_transform(x, y, y_pred, loss)\n\n return Engine(_update)\n\n\ndef create_nlp_evaluator(\n model,\n metrics=None,\n device=None,\n non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred: (y_pred, y,),\n):\n \"\"\"\n Factory function for creating an evaluator for nlp models.\n The only difference with the ignite create_supervised_evaluator is the attention to attention_mask (pun intented).\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n metrics = metrics or {}\n\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, attention_mask, y = prepare_batch(\n batch, device=device, non_blocking=non_blocking\n )\n y_pred = model(x, attention_mask=attention_mask)\n return output_transform(x, y, y_pred)\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n"
] | [
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
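The ignite engine factories in the row above expect a model whose forward pass accepts token ids plus an `attention_mask` keyword, since `_prepare_batch` unpacks `(x, attention_mask, y)`. Below is a minimal stand-in model with that signature, purely for illustration; `TinyMaskedClassifier` is not part of the repository.

```python
import torch
from torch import nn

class TinyMaskedClassifier(nn.Module):
    """Toy model with the forward(x, attention_mask=...) signature
    the trainer/evaluator factories above assume."""

    def __init__(self, vocab=100, dim=16):
        super().__init__()
        self.emb = nn.Embedding(vocab, dim)
        self.head = nn.Linear(dim, 1)

    def forward(self, x, attention_mask=None):
        h = self.emb(x)                               # (B, T, dim)
        if attention_mask is not None:
            h = h * attention_mask.unsqueeze(-1)      # zero out padded tokens
        return self.head(h.mean(dim=1)).squeeze(-1)   # (B,)

model = TinyMaskedClassifier()
x = torch.randint(0, 100, (4, 10))
mask = torch.ones(4, 10)
print(model(x, attention_mask=mask).shape)  # torch.Size([4])
```

A model like this could then be handed to `create_nlp_trainer(model, optimizer, loss_fn)` from the cell above; per its own docstring, the factory only differs from ignite's stock supervised trainer in threading the attention mask through.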
janfreyberg/ipyannotate | [
"b1c30fe73bfda107d4ef75945338d42bfe8e3b64"
] | [
"tests/images/test_abstract_canvas.py"
] | [
"import pathlib\nimport tempfile\nfrom typing import Tuple, Union\nfrom unittest.mock import patch\n\nimport ipywidgets as widgets\nimport numpy as np\nfrom hypothesis import assume, given, infer, settings, strategies\nfrom PIL import Image\n\nfrom ipyannotations.images.canvases.abstract_canvas import (\n AbstractAnnotationCanvas,\n)\nimport ipyannotations.images.canvases.image_utils\nfrom ipyannotations.images.canvases.image_utils import fit_image\n\n\nclass TestCanvas(AbstractAnnotationCanvas):\n \"\"\"Test canvas to test the abstract canvas.\"\"\"\n\n def init_empty_data(self):\n self.data = []\n\n\n@settings(deadline=None)\n@given(img=infer)\ndef test_that_loading_image_clears_data(\n img: Union[widgets.Image, np.ndarray, Image.Image]\n):\n\n with patch.object(\n AbstractAnnotationCanvas, \"init_empty_data\"\n ) as mock_init_empty_data:\n canvas = AbstractAnnotationCanvas()\n mock_init_empty_data.reset_mock()\n canvas.load_image(img)\n\n mock_init_empty_data.assert_called_once()\n\n\n@settings(deadline=None)\n@given(img=infer)\ndef test_that_loading_image_from_path_succeeds(img: Image.Image):\n\n with tempfile.TemporaryDirectory(dir=\".\") as tmp:\n tmp = pathlib.Path(tmp)\n tmp = tmp / \"testfile.jpg\"\n img.save(tmp)\n\n with patch.object(\n AbstractAnnotationCanvas, \"init_empty_data\"\n ) as mock_init_empty_data:\n canvas = AbstractAnnotationCanvas()\n mock_init_empty_data.reset_mock()\n canvas.load_image(tmp)\n\n mock_init_empty_data.assert_called_once()\n\n\n@given(img=infer)\ndef test_that_fit_image_always_fits_image(img: widgets.Image):\n\n with patch.object(AbstractAnnotationCanvas, \"init_empty_data\"):\n canvas = AbstractAnnotationCanvas()\n\n x0, y0, x1, y1, _, _ = fit_image(img, canvas)\n\n assert (x1, y1) < canvas.size\n\n\n@given(\n img=infer, click_x=strategies.floats(0, 1), click_y=strategies.floats(0, 1)\n)\ndef test_that_points_clicked_get_translated_correctly(\n img: widgets.Image, click_x: float, click_y: float\n):\n with patch.object(AbstractAnnotationCanvas, \"init_empty_data\"):\n canvas = AbstractAnnotationCanvas()\n canvas.load_image(img)\n\n x0, y0, width, height, img_width, img_height = fit_image(img, canvas)\n assume((img_width, img_height) > (20, 20))\n\n click_x = round(x0 + click_x * width)\n click_y = round(y0 + click_y * height)\n\n assert (\n (0, 0)\n <= canvas.canvas_to_image_coordinates((click_x, click_y))\n <= (img_width, img_height)\n )\n\n round_trip_x, round_trip_y = canvas.image_to_canvas_coordinates(\n canvas.canvas_to_image_coordinates((click_x, click_y))\n )\n assert np.isclose(round_trip_x, click_x) and np.isclose(\n round_trip_y, click_y, atol=1\n )\n\n\n@settings(deadline=None)\n@given(img=infer)\ndef test_that_images_are_adjusted(img: widgets.Image):\n with patch(\n \"ipyannotations.images.canvases.abstract_canvas.adjust\", autospec=True\n ) as mock_adjust:\n mock_adjust.return_value = img\n canvas = TestCanvas()\n canvas.image_brightness = 1.1\n canvas.image_contrast = 1.1\n canvas.load_image(img)\n\n mock_adjust.assert_called_once_with(\n img,\n contrast_factor=1.1,\n brightness_factor=1.1,\n )\n"
] | [
[
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
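The canvas test in the row above asserts a round-trip property: a canvas click converted to image coordinates and back must land (numerically) on the original point, checked with `np.isclose`. A hedged sketch of that property with toy offset/scale values standing in for `fit_image`'s output; the two helpers here are illustrative, not the widget's API.

```python
import numpy as np

def canvas_to_image(p, offset, scale):
    return ((p[0] - offset[0]) / scale, (p[1] - offset[1]) / scale)

def image_to_canvas(p, offset, scale):
    return (p[0] * scale + offset[0], p[1] * scale + offset[1])

click = (137.0, 42.0)           # a point clicked on the canvas
offset, scale = (20.0, 10.0), 0.5

round_trip = image_to_canvas(canvas_to_image(click, offset, scale), offset, scale)
assert np.isclose(round_trip[0], click[0]) and np.isclose(round_trip[1], click[1], atol=1)
```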
thomashopf/EVcouplings-1 | [
"d3e4947d29b62537bd79215ce72b6eea18134850",
"a3780dec6570ebec4facd62e9f968423f68c971d",
"a3780dec6570ebec4facd62e9f968423f68c971d"
] | [
"evcouplings/compare/protocol.py",
"evcouplings/visualize/mutations.py",
"evcouplings/compare/sifts.py"
] | [
"\"\"\"\nEC to 3D structure comparison protocols/workflows.\n\nAuthors:\n Thomas A. Hopf\n Anna G. Green (complex and _make_complex_contact_maps)\n\"\"\"\n\nfrom copy import deepcopy\nfrom math import ceil\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom evcouplings.align.alignment import (\n read_fasta, parse_header\n)\nfrom evcouplings.utils.config import (\n check_required, InvalidParameterError\n)\n\nfrom evcouplings.utils.system import (\n create_prefix_folders, insert_dir, verify_resources,\n)\nfrom evcouplings.couplings import Segment\nfrom evcouplings.compare.pdb import load_structures\nfrom evcouplings.compare.distances import (\n intra_dists, multimer_dists, remap_chains,\n inter_dists, remap_complex_chains\n)\nfrom evcouplings.compare.sifts import SIFTS, SIFTSResult\nfrom evcouplings.compare.ecs import (\n coupling_scores_compared, add_precision\n)\nfrom evcouplings.visualize import pairs, misc\n\n\ndef _identify_structures(**kwargs):\n \"\"\"\n Identify set of 3D structures for comparison\n\n Parameters\n ----------\n **kwargs\n See check_required in code below\n\n Returns\n -------\n SIFTSResult\n Identified structures and residue index mappings\n \"\"\"\n\n def _filter_by_id(x, id_list):\n x = deepcopy(x)\n x.hits = x.hits.loc[\n x.hits.pdb_id.isin(id_list)\n ]\n return x\n\n check_required(\n kwargs,\n [\n \"prefix\", \"pdb_ids\", \"compare_multimer\",\n \"max_num_hits\", \"max_num_structures\",\n \"pdb_mmtf_dir\",\n \"sifts_mapping_table\", \"sifts_sequence_db\",\n \"by_alignment\", \"pdb_alignment_method\",\n \"alignment_min_overlap\",\n \"sequence_id\", \"sequence_file\", \"region\",\n \"use_bitscores\", \"domain_threshold\",\n \"sequence_threshold\"\n ]\n )\n # get SIFTS mapping object/sequence DB\n s = SIFTS(\n kwargs[\"sifts_mapping_table\"],\n kwargs[\"sifts_sequence_db\"]\n )\n\n reduce_chains = not kwargs[\"compare_multimer\"]\n\n # determine if we need to find structures\n # by sequence search or just fetching\n # based on Uniprot/PDB identifier\n if kwargs[\"by_alignment\"]:\n\n # if searching by alignment, verify that\n # user selected jackhmmer or hmmsearch\n SEARCH_METHODS = [\"jackhmmer\", \"hmmsearch\"]\n\n if kwargs[\"pdb_alignment_method\"] not in SEARCH_METHODS:\n raise InvalidParameterError(\n \"Invalid pdb search method: \" +\n \"{}. 
Valid selections are: {}\".format(\n \", \".join(SEARCH_METHODS.keys())\n )\n )\n\n sifts_map = s.by_alignment(\n reduce_chains=reduce_chains,\n min_overlap=kwargs[\"alignment_min_overlap\"],\n **kwargs\n )\n else:\n sifts_map = s.by_uniprot_id(\n kwargs[\"sequence_id\"], reduce_chains=reduce_chains\n )\n\n sifts_map_full = deepcopy(sifts_map)\n\n # filter ID list down to manually selected PDB entries\n if kwargs[\"pdb_ids\"] is not None:\n pdb_ids = kwargs[\"pdb_ids\"]\n\n # make sure we have a list of PDB IDs\n if not isinstance(pdb_ids, list):\n pdb_ids = [pdb_ids]\n\n pdb_ids = [x.lower() for x in pdb_ids]\n\n sifts_map = _filter_by_id(sifts_map, pdb_ids)\n\n # limit number of hits and structures\n if kwargs[\"max_num_hits\"] is not None:\n sifts_map.hits = sifts_map.hits.iloc[:kwargs[\"max_num_hits\"]]\n\n if kwargs[\"max_num_structures\"] is not None:\n keep_ids = sifts_map.hits.pdb_id.unique()\n keep_ids = keep_ids[:kwargs[\"max_num_structures\"]]\n sifts_map = _filter_by_id(sifts_map, keep_ids)\n\n return sifts_map, sifts_map_full\n\n\ndef _make_contact_maps(ec_table, d_intra, d_multimer, **kwargs):\n \"\"\"\n Plot contact maps with all ECs above a certain probability threshold,\n or a given count of ECs\n\n Parameters\n ----------\n ec_table : pandas.DataFrame\n Full set of evolutionary couplings (all pairs)\n d_intra : DistanceMap\n Computed residue-residue distances inside chain\n d_multimer : DistanceMap\n Computed residue-residue distances between homomultimeric\n chains\n **kwargs\n Further plotting parameters, see check_required in code\n for necessary values.\n\n Returns\n -------\n cm_files : list(str)\n Paths of generated contact map files\n \"\"\"\n\n def plot_cm(ecs, output_file=None):\n \"\"\"\n Simple wrapper for contact map plotting\n \"\"\"\n with misc.plot_context(\"Arial\"):\n fig = plt.figure(figsize=(8, 8))\n if kwargs[\"scale_sizes\"]:\n ecs = ecs.copy()\n ecs.loc[:, \"size\"] = ecs.cn.values / ecs.cn.max()\n\n pairs.plot_contact_map(\n ecs, d_intra, d_multimer,\n distance_cutoff=kwargs[\"distance_cutoff\"],\n show_secstruct=kwargs[\"draw_secondary_structure\"],\n margin=5,\n boundaries=kwargs[\"boundaries\"]\n )\n\n plt.suptitle(\"{} evolutionary couplings\".format(len(ecs)), fontsize=14)\n\n if output_file is not None:\n plt.savefig(output_file, bbox_inches=\"tight\")\n plt.close(fig)\n\n check_required(\n kwargs,\n [\n \"prefix\", \"min_sequence_distance\",\n \"plot_probability_cutoffs\",\n \"boundaries\", \"plot_lowest_count\",\n \"plot_highest_count\", \"plot_increase\",\n \"draw_secondary_structure\"\n ]\n )\n prefix = kwargs[\"prefix\"]\n\n cm_files = []\n\n ecs_longrange = ec_table.query(\n \"abs(i - j) >= {}\".format(kwargs[\"min_sequence_distance\"])\n )\n\n # based on significance cutoff\n if kwargs[\"plot_probability_cutoffs\"]:\n cutoffs = kwargs[\"plot_probability_cutoffs\"]\n if not isinstance(cutoffs, list):\n cutoffs = [cutoffs]\n\n for c in cutoffs:\n ec_set = ecs_longrange.query(\"probability >= @c\")\n # only can plot if we have any significant ECs above threshold\n if len(ec_set) > 0:\n output_file = prefix + \"_significant_ECs_{}.pdf\".format(c)\n plot_cm(ec_set, output_file=output_file)\n cm_files.append(output_file)\n\n # based on number of long-range ECs\n\n # identify number of sites in EC model\n num_sites = len(\n set.union(set(ec_table.i.unique()), set(ec_table.j.unique()))\n )\n\n # transform fraction of number of sites into discrete number of ECs\n def _discrete_count(x):\n if isinstance(x, float):\n x = ceil(x * num_sites)\n 
return int(x)\n\n # range of plots to make\n lowest = _discrete_count(kwargs[\"plot_lowest_count\"])\n highest = _discrete_count(kwargs[\"plot_highest_count\"])\n step = _discrete_count(kwargs[\"plot_increase\"])\n\n # create individual plots\n for c in range(lowest, highest + 1, step):\n ec_set = ecs_longrange.iloc[:c]\n output_file = prefix + \"_{}_ECs.pdf\".format(c)\n plot_cm(ec_set, output_file=output_file)\n cm_files.append(output_file)\n\n # give back list of all contact map file names\n return cm_files\n\n\ndef _make_complex_contact_maps(ec_table, d_intra_i, d_multimer_i,\n d_intra_j, d_multimer_j,\n d_inter, first_segment_name,\n second_segment_name, **kwargs):\n \"\"\"\n Plot contact maps with all ECs above a certain probability threshold,\n or a given count of ECs\n\n Parameters\n ----------\n ec_table : pandas.DataFrame\n Full set of evolutionary couplings (all pairs)\n d_intra_i, d_intra_j: DistanceMap\n Computed residue-residue distances within chains for\n monomers i and j\n d_multimer_i, d_multimer_j : DistanceMap\n Computed residue-residue distances between homomultimeric\n chains for monomers i and j\n d_inter: DistanceMap\n Computed residue-residue distances between heteromultimeric\n chains i and j\n first_segment_name, second_segment_name: str\n Name of segment i and segment j in the ec_table\n **kwargs\n Further plotting parameters, see check_required in code\n for necessary values.\n\n Returns\n -------\n cm_files : list(str)\n Paths of generated contact map files\n \"\"\"\n\n def plot_complex_cm(ecs_i, ecs_j, ecs_inter, \n first_segment_name,\n second_segment_name, output_file=None):\n \"\"\"\n Simple wrapper for contact map plotting\n \"\"\"\n with misc.plot_context(\"Arial\"):\n if kwargs[\"scale_sizes\"]:\n # to scale sizes, combine all ecs to rescale together\n ecs = pd.concat([ecs_i, ecs_j, ecs_inter])\n ecs.loc[:, \"size\"] = ecs.cn.values / ecs.cn.max()\n\n # split back into three separate DataFrames\n ecs_i = ecs.query(\"segment_i == segment_j == @first_segment_name\")\n ecs_j = ecs.query(\"segment_i == segment_j == @second_segment_name\")\n ecs_inter = ecs.query(\"segment_i != segment_j\")\n\n # if any of these groups are entry, replace with None\n if len(ecs_i) == 0:\n ecs_i = None\n if len(ecs_j) == 0:\n ecs_j = None\n if len(ecs_inter) == 0:\n ecs_inter = None\n\n # Currently, we require at least one of the monomer \n # to have either ECs or distances in order to make a plot\n if ((ecs_i is None or ecs_i.empty) and d_intra_i is None and d_multimer_i is None) \\\n or ((ecs_j is None or ecs_j.empty) and d_intra_j is None and d_multimer_i is None):\n return False\n\n fig = plt.figure(figsize=(8, 8))\n\n # create the contact map\n pairs.complex_contact_map(\n ecs_i, ecs_j, ecs_inter,\n d_intra_i, d_multimer_i,\n d_intra_j, d_multimer_j,\n d_inter,\n margin=5,\n boundaries=kwargs[\"boundaries\"],\n scale_sizes=kwargs[\"scale_sizes\"]\n )\n\n # Add title to the plot\n if ecs_inter is None:\n ec_len = '0'\n else:\n ec_len = len(ecs_inter)\n plt.suptitle(\n \"{} inter-molecule evolutionary couplings\".format(ec_len), \n fontsize=14\n )\n\n # save to output\n if output_file is not None:\n plt.savefig(output_file, bbox_inches=\"tight\")\n plt.close(fig)\n\n return True\n\n check_required(\n kwargs,\n [\n \"prefix\", \"min_sequence_distance\",\n \"plot_probability_cutoffs\",\n \"boundaries\",\n \"draw_secondary_structure\", \"plot_lowest_count\",\n \"plot_highest_count\", \"plot_increase\",\n \"scale_sizes\"\n ]\n )\n\n prefix = kwargs[\"prefix\"]\n\n cm_files = 
[]\n\n ecs_longrange = ec_table.query(\n \"abs(i - j) >= {} or segment_i != segment_j\".format(kwargs[\"min_sequence_distance\"])\n )\n\n # create plots based on significance cutoff\n if kwargs[\"plot_probability_cutoffs\"]:\n cutoffs = kwargs[\"plot_probability_cutoffs\"]\n if not isinstance(cutoffs, list):\n cutoffs = [cutoffs]\n\n for c in cutoffs:\n ec_set = ecs_longrange.query(\"probability >= @c\")\n\n # only can plot if we have any significant ECs above threshold\n if len(ec_set) > 0:\n ec_set_i = ec_set.query(\"segment_i == segment_j == @first_segment_name\")\n ec_set_j = ec_set.query(\"segment_i == segment_j == @second_segment_name\")\n ec_set_inter = ec_set.query(\"segment_i != segment_j\")\n\n output_file = prefix + \"_significant_ECs_{}.pdf\".format(c)\n plot_completed = plot_complex_cm(\n ec_set_i, ec_set_j, ec_set_inter,\n first_segment_name, second_segment_name,\n output_file=output_file\n )\n if plot_completed:\n cm_files.append(output_file)\n\n # transform fraction of number of sites into discrete number of ECs\n def _discrete_count(x):\n if isinstance(x, float):\n num_sites = 0\n for seg_name in [first_segment_name, second_segment_name]:\n num_sites += len(\n set.union(\n set(ec_table.query(\"segment_i == @seg_name\").i.unique()),\n set(ec_table.query(\"segment_j == @seg_name\").j.unique())\n )\n )\n\n x = ceil(x * num_sites)\n\n return int(x)\n\n # range of plots to make\n lowest = _discrete_count(kwargs[\"plot_lowest_count\"])\n highest = _discrete_count(kwargs[\"plot_highest_count\"])\n step = _discrete_count(kwargs[\"plot_increase\"])\n\n for c in range(lowest, highest + 1, step):\n # get the inter ECs to plot\n ec_set_inter = ecs_longrange.query(\"segment_i != segment_j\")[0:c]\n\n # if there are no inter ecs to be plotted, continue\n if ec_set_inter.empty:\n continue\n\n # get the index of the lowest inter EC\n last_inter_index = ec_set_inter.index[-1]\n\n # take all intra-protein ECs that score higher than the lowest plotted inter-protein EC\n ec_set_i = ecs_longrange.iloc[0:last_inter_index].query(\n \"segment_i == segment_j == @first_segment_name\"\n )\n ec_set_j = ecs_longrange.iloc[0:last_inter_index].query(\n \"segment_i == segment_j == @second_segment_name\"\n )\n\n output_file = prefix + \"_{}_ECs.pdf\".format(c)\n plot_completed = plot_complex_cm(\n ec_set_i, ec_set_j, ec_set_inter,\n first_segment_name, second_segment_name,\n output_file=output_file\n )\n if plot_completed:\n cm_files.append(output_file)\n\n # give back list of all contact map file names\n return cm_files\n\n\ndef standard(**kwargs):\n \"\"\"\n Protocol:\n Compare ECs for single proteins (or domains)\n to 3D structure information\n\n Parameters\n ----------\n Mandatory kwargs arguments:\n See list below in code where calling check_required\n\n Returns\n -------\n outcfg : dict\n Output configuration of the pipeline, including\n the following fields:\n\n * ec_file_compared_all\n * ec_file_compared_all_longrange\n * pdb_structure_hits\n * distmap_monomer\n * distmap_multimer\n * contact_map_files\n * remapped_pdb_files\n \"\"\"\n check_required(\n kwargs,\n [\n \"prefix\", \"ec_file\", \"min_sequence_distance\",\n \"pdb_mmtf_dir\", \"atom_filter\", \"compare_multimer\",\n \"distance_cutoff\", \"target_sequence_file\",\n \"scale_sizes\",\n ]\n )\n\n prefix = kwargs[\"prefix\"]\n\n outcfg = {\n \"ec_compared_all_file\": prefix + \"_CouplingScoresCompared_all.csv\",\n \"ec_compared_longrange_file\": prefix + \"_CouplingScoresCompared_longrange.csv\",\n \"pdb_structure_hits_file\": prefix + 
\"_structure_hits.csv\",\n \"pdb_structure_hits_unfiltered_file\": prefix + \"_structure_hits_unfiltered.csv\",\n # cannot have the distmap files end with \"_file\" because there are\n # two files (.npy and .csv), which would cause problems with automatic\n # checking if those files exist\n \"distmap_monomer\": prefix + \"_distance_map_monomer\",\n \"distmap_multimer\": prefix + \"_distance_map_multimer\",\n }\n\n # make sure EC file exists\n verify_resources(\n \"EC file does not exist\",\n kwargs[\"ec_file\"]\n )\n\n # make sure output directory exists\n create_prefix_folders(prefix)\n\n # store auxiliary files here (too much for average user)\n aux_prefix = insert_dir(prefix, \"aux\", rootname_subdir=False)\n create_prefix_folders(aux_prefix)\n\n # Step 1: Identify 3D structures for comparison\n sifts_map, sifts_map_full = _identify_structures(**{\n **kwargs,\n \"prefix\": aux_prefix,\n })\n\n # save selected PDB hits\n sifts_map.hits.to_csv(\n outcfg[\"pdb_structure_hits_file\"], index=False\n )\n\n # also save full list of hits\n sifts_map_full.hits.to_csv(\n outcfg[\"pdb_structure_hits_unfiltered_file\"], index=False\n )\n\n # Step 2: Compute distance maps\n\n # load all structures at once\n structures = load_structures(\n sifts_map.hits.pdb_id,\n kwargs[\"pdb_mmtf_dir\"],\n raise_missing=False\n )\n\n # compute distance maps and save\n # (but only if we found some structure)\n if len(sifts_map.hits) > 0:\n d_intra = intra_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_distmap_intra\"\n )\n d_intra.to_file(outcfg[\"distmap_monomer\"])\n\n # save contacts to separate file\n outcfg[\"monomer_contacts_file\"] = prefix + \"_contacts_monomer.csv\"\n d_intra.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[\"monomer_contacts_file\"], index=False\n )\n\n # compute multimer distances, if requested;\n # note that d_multimer can be None if there\n # are no structures with multiple chains\n if kwargs[\"compare_multimer\"]:\n d_multimer = multimer_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_distmap_multimer\"\n )\n else:\n d_multimer = None\n\n # if we have a multimer contact mapin the end, save it\n if d_multimer is not None:\n d_multimer.to_file(outcfg[\"distmap_multimer\"])\n outcfg[\"multimer_contacts_file\"] = prefix + \"_contacts_multimer.csv\"\n\n # save contacts to separate file\n d_multimer.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[\"multimer_contacts_file\"], index=False\n )\n else:\n outcfg[\"distmap_multimer\"] = None\n\n # at this point, also create remapped structures (e.g. 
for\n # later comparison of folding results)\n verify_resources(\n \"Target sequence file does not exist\",\n kwargs[\"target_sequence_file\"]\n )\n\n # create target sequence map for remapping structure\n with open(kwargs[\"target_sequence_file\"]) as f:\n header, seq = next(read_fasta(f))\n\n seq_id, seq_start, seq_end = parse_header(header)\n seqmap = dict(zip(range(seq_start, seq_end + 1), seq))\n\n # remap structures, swap mapping index and filename in\n # dictionary so we have a list of files in the dict keys\n outcfg[\"remapped_pdb_files\"] = {\n filename: mapping_index for mapping_index, filename in\n remap_chains(sifts_map, aux_prefix, seqmap).items()\n }\n else:\n # if no structures, can not compute distance maps\n d_intra = None\n d_multimer = None\n outcfg[\"distmap_monomer\"] = None\n outcfg[\"distmap_multimer\"] = None\n outcfg[\"remapped_pdb_files\"] = None\n\n # Step 3: Compare ECs to distance maps\n\n ec_table = pd.read_csv(kwargs[\"ec_file\"])\n\n # identify number of sites in EC model\n num_sites = len(\n set.union(set(ec_table.i.unique()), set(ec_table.j.unique()))\n )\n\n for out_file, min_seq_dist in [\n (\"ec_compared_longrange_file\", kwargs[\"min_sequence_distance\"]),\n (\"ec_compared_all_file\", 0),\n ]:\n # compare ECs only if we minimally have intra distance map\n if d_intra is not None:\n coupling_scores_compared(\n ec_table, d_intra, d_multimer,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=outcfg[out_file],\n min_sequence_dist=min_seq_dist\n )\n else:\n outcfg[out_file] = None\n\n # also create line-drawing script if we made the csv\n if outcfg[\"ec_compared_longrange_file\"] is not None:\n ecs_longrange = pd.read_csv(outcfg[\"ec_compared_longrange_file\"])\n\n outcfg[\"ec_lines_compared_pml_file\"] = prefix + \"_draw_ec_lines_compared.pml\"\n pairs.ec_lines_pymol_script(\n ecs_longrange.iloc[:num_sites, :],\n outcfg[\"ec_lines_compared_pml_file\"],\n distance_cutoff=kwargs[\"distance_cutoff\"]\n )\n\n # Step 4: Make contact map plots\n # if no structures available, defaults to EC-only plot\n\n outcfg[\"contact_map_files\"] = _make_contact_maps(\n ec_table, d_intra, d_multimer, **kwargs\n )\n\n return outcfg\n\n\ndef complex(**kwargs):\n \"\"\"\n Protocol:\n Compare ECs for a complex to\n 3D structure\n\n Parameters\n ----------\n Mandatory kwargs arguments:\n See list below in code where calling check_required\n\n Returns\n -------\n outcfg : dict\n Output configuration of the pipeline, including\n the following fields:\n\n * ec_file_compared_all\n * ec_file_compared_all_longrange\n * pdb_structure_hits\n * distmap_monomer\n * distmap_multimer\n * contact_map_files\n * remapped_pdb_files\n \"\"\"\n check_required(\n kwargs,\n [\n \"prefix\", \"ec_file\", \"min_sequence_distance\",\n \"pdb_mmtf_dir\", \"atom_filter\",\n \"first_compare_multimer\", \"second_compare_multimer\",\n \"distance_cutoff\", \"segments\",\n \"first_sequence_id\", \"second_sequence_id\",\n \"first_sequence_file\", \"second_sequence_file\",\n \"first_target_sequence_file\", \"second_target_sequence_file\",\n \"scale_sizes\"\n ]\n )\n\n prefix = kwargs[\"prefix\"]\n\n outcfg = {\n # initialize output EC files\n \"ec_compared_all_file\": prefix + \"_CouplingScoresCompared_all.csv\",\n \"ec_compared_longrange_file\": prefix + \"_CouplingScoresCompared_longrange.csv\",\n \"ec_compared_inter_file\": prefix + \"_CouplingScoresCompared_inter.csv\",\n\n # initialize output inter distancemap files\n \"distmap_inter\": prefix + \"_distmap_inter\",\n \"inter_contacts_file\": prefix + 
\"_inter_contacts_file\"\n }\n\n # Add PDB comparison files for first and second monomer\n for monomer_prefix in [\"first\", \"second\"]:\n outcfg = {\n **outcfg,\n monomer_prefix + \"_pdb_structure_hits_file\":\n \"{}_{}_structure_hits.csv\".format(prefix, monomer_prefix),\n monomer_prefix + \"_pdb_structure_hits_unfiltered_file\":\n \"{}_{}_structure_hits_unfitered.csv\".format(prefix, monomer_prefix),\n monomer_prefix + \"_distmap_monomer\":\n \"{}_{}_distance_map_monomer\".format(prefix, monomer_prefix),\n monomer_prefix + \"_distmap_multimer\":\n \"{}_{}_distance_map_multimer\".format(prefix, monomer_prefix),\n }\n\n # make sure EC file exists\n verify_resources(\n \"EC file does not exist\",\n kwargs[\"ec_file\"]\n )\n\n # make sure output directory exists\n create_prefix_folders(prefix)\n\n # store auxiliary files here (too much for average user)\n aux_prefix = insert_dir(prefix, \"aux\", rootname_subdir=False)\n create_prefix_folders(aux_prefix)\n\n # store auxiliary files here (too much for average user)\n first_aux_prefix = insert_dir(aux_prefix, \"first_monomer\", rootname_subdir=False)\n create_prefix_folders(first_aux_prefix)\n\n # store auxiliary files here (too much for average user)\n second_aux_prefix = insert_dir(aux_prefix, \"second_monomer\", rootname_subdir=False)\n create_prefix_folders(second_aux_prefix)\n\n # Step 1: Identify 3D structures for comparison\n def _identify_monomer_structures(name_prefix, outcfg, aux_prefix):\n # create a dictionary with kwargs for just the current monomer\n # remove the \"prefix\" kwargs so that we can replace with the \n # aux prefix when calling _identify_structures\n # only replace first occurrence of name_prefix\n monomer_kwargs = {\n k.replace(name_prefix + \"_\", \"\", 1): v for k, v in kwargs.items() if \"prefix\" not in k\n }\n\n # this field needs to be set explicitly else it gets overwritten by concatenated file\n monomer_kwargs[\"alignment_file\"] = kwargs[name_prefix + \"_alignment_file\"]\n monomer_kwargs[\"raw_focus_alignment_file\"] = kwargs[name_prefix + \"_raw_focus_alignment_file\"]\n\n # identify structures for that monomer\n sifts_map, sifts_map_full = _identify_structures(\n **monomer_kwargs,\n prefix=aux_prefix\n )\n\n # save selected PDB hits\n sifts_map.hits.to_csv(\n outcfg[name_prefix + \"_pdb_structure_hits_file\"], index=False\n )\n\n # also save full list of hits\n sifts_map_full.hits.to_csv(\n outcfg[name_prefix + \"_pdb_structure_hits_unfiltered_file\"], index=False\n )\n return outcfg, sifts_map\n\n outcfg, first_sifts_map = _identify_monomer_structures(\"first\", outcfg, first_aux_prefix)\n outcfg, second_sifts_map = _identify_monomer_structures(\"second\", outcfg, second_aux_prefix)\n\n # get the segment names from the kwargs\n segment_list = kwargs[\"segments\"]\n\n # Make sure user provided exactly two segments\n if len(segment_list) != 2:\n raise InvalidParameterError(\n \"Compare stage for protein complexes requires exactly two segments\"\n )\n\n first_segment_name = Segment.from_list(kwargs[\"segments\"][0]).segment_id\n second_segment_name = Segment.from_list(kwargs[\"segments\"][1]).segment_id\n\n first_chain_name = Segment.from_list(kwargs[\"segments\"][0]).default_chain_name()\n second_chain_name = Segment.from_list(kwargs[\"segments\"][1]).default_chain_name()\n\n # Step 2: Compute distance maps\n def _compute_monomer_distance_maps(sifts_map, name_prefix, chain_name):\n\n # prepare a sequence map to remap the structures we have found\n verify_resources(\n \"Target sequence file does not 
exist\",\n kwargs[name_prefix + \"_target_sequence_file\"]\n )\n\n # create target sequence map for remapping structure\n with open(kwargs[name_prefix + \"_target_sequence_file\"]) as f:\n header, seq = next(read_fasta(f))\n\n # create target sequence map for remapping structure\n seq_id, seq_start, seq_end = parse_header(header)\n seqmap = dict(zip(range(seq_start, seq_end + 1), seq))\n\n # compute distance maps and save\n # (but only if we found some structure)\n if len(sifts_map.hits) > 0:\n d_intra = intra_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_\" + name_prefix + \"_distmap_intra\"\n )\n d_intra.to_file(outcfg[name_prefix + \"_distmap_monomer\"])\n\n # save contacts to separate file\n outcfg[name_prefix + \"_monomer_contacts_file\"] = prefix + \"_\" + name_prefix + \"_contacts_monomer.csv\"\n d_intra.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[name_prefix + \"_monomer_contacts_file\"], index=False\n )\n\n # compute multimer distances, if requested;\n # note that d_multimer can be None if there\n # are no structures with multiple chains\n if kwargs[name_prefix + \"_compare_multimer\"]:\n d_multimer = multimer_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_\" + name_prefix + \"_distmap_multimer\"\n )\n else:\n d_multimer = None\n\n # if we have a multimer contact map, save it\n if d_multimer is not None:\n d_multimer.to_file(outcfg[name_prefix + \"_distmap_multimer\"])\n outcfg[name_prefix + \"_multimer_contacts_file\"] = prefix + name_prefix + \"_contacts_multimer.csv\"\n\n # save contacts to separate file\n d_multimer.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[name_prefix + \"_multimer_contacts_file\"], index=False\n )\n else:\n outcfg[name_prefix + \"_distmap_multimer\"] = None\n\n # create remapped structures (e.g. 
for\n # later comparison of folding results)\n # remap structures, swap mapping index and filename in\n # dictionary so we have a list of files in the dict keys\n outcfg[name_prefix + \"_remapped_pdb_files\"] = {\n filename: mapping_index for mapping_index, filename in\n remap_chains(\n sifts_map, aux_prefix, seqmap, chain_name=chain_name,\n raise_missing=kwargs[\"raise_missing\"]\n ).items()\n }\n\n else:\n # if no structures, cannot compute distance maps\n d_intra = None\n d_multimer = None\n outcfg[name_prefix + \"_distmap_monomer\"] = None\n outcfg[name_prefix + \"_distmap_multimer\"] = None\n outcfg[name_prefix + \"remapped_pdb_files\"] = None\n\n return d_intra, d_multimer, seqmap\n\n # load all structures for both monomers\n all_structures = set(first_sifts_map.hits.pdb_id).union(\n set(second_sifts_map.hits.pdb_id)\n )\n structures = load_structures(\n all_structures,\n kwargs[\"pdb_mmtf_dir\"],\n raise_missing=False\n )\n\n d_intra_i, d_multimer_i, seqmap_i = _compute_monomer_distance_maps(\n first_sifts_map, \"first\", first_chain_name\n )\n d_intra_j, d_multimer_j, seqmap_j = _compute_monomer_distance_maps(\n second_sifts_map, \"second\", second_chain_name\n )\n\n # compute inter distance map if sifts map for each monomer exists\n if len(first_sifts_map.hits) > 0 and len(second_sifts_map.hits) > 0:\n d_inter = inter_dists(\n first_sifts_map, second_sifts_map,\n raise_missing=kwargs[\"raise_missing\"]\n )\n # if there were overlapping PDBs, save the results\n if d_inter is not None:\n d_inter.to_file(outcfg[\"distmap_inter\"])\n\n # save contacts to separate file\n d_inter.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[\"inter_contacts_file\"], index=False\n )\n\n else:\n outcfg[\"inter_contacts_file\"] = None\n d_inter = None\n\n # # Step 3: Compare ECs to distance maps\n ec_table = pd.read_csv(kwargs[\"ec_file\"])\n\n for out_file, min_seq_dist in [\n (\"ec_compared_longrange_file\", kwargs[\"min_sequence_distance\"]),\n (\"ec_compared_all_file\", 0),\n ]:\n\n # compare ECs only if we have an intra distance map\n # for at least one monomer - inter can't exist unless\n # we have both monomers\n if (d_intra_i is not None) or (d_intra_j is not None):\n # compare distances individually for each segment pair\n ecs_intra_i = ec_table.query(\"segment_i == segment_j == @first_segment_name\")\n if d_intra_i is not None:\n ecs_intra_i_compared = coupling_scores_compared(\n ecs_intra_i, d_intra_i, d_multimer_i,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=None,\n min_sequence_dist=min_seq_dist\n )\n else:\n # If no distance map, the distance is saved as np.nan\n ecs_intra_i_compared = ecs_intra_i.assign(dist=np.nan)\n\n ecs_intra_j = ec_table.query(\"segment_i == segment_j == @second_segment_name\")\n if d_intra_j is not None:\n ecs_intra_j_compared = coupling_scores_compared(\n ecs_intra_j, d_intra_j, d_multimer_j,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=None,\n min_sequence_dist=min_seq_dist\n )\n else:\n ecs_intra_j_compared = ecs_intra_j.assign(dist=np.nan)\n\n ecs_inter = ec_table.query(\"segment_i != segment_j\")\n if d_inter is not None:\n ecs_inter_compared = coupling_scores_compared(\n ecs_inter, d_inter, dist_map_multimer=None,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=None,\n min_sequence_dist=None # does not apply for inter-protein ECs\n )\n else:\n ecs_inter_compared = ecs_inter.assign(dist=np.nan)\n\n # combine the tables\n ec_table_compared = pd.concat([\n ecs_inter_compared,\n ecs_intra_i_compared,\n 
ecs_intra_j_compared\n ])\n\n # rename the precision column to \"segmentwise_precision\"\n # because we calculated precision for each segment independently\n ec_table_compared = ec_table_compared.rename(\n columns={\"precision\": \"segmentwise_precision\"}\n )\n # TODO: change \"cn\" to \"score\" eventually\n ec_table_compared = ec_table_compared.sort_values(\"cn\", ascending=False)\n\n # add the total precision\n # TODO: implement different cutoffs for intra vs inter contacts\n ec_table_compared = add_precision(\n ec_table_compared,\n dist_cutoff=kwargs[\"distance_cutoff\"]\n )\n\n # save to file\n # all ecs\n ec_table_compared.to_csv(outcfg[out_file])\n\n # save the inter ECs to a file\n ecs_inter_compared.to_csv(outcfg[\"ec_compared_inter_file\"])\n\n # create the inter-ecs line drawing script\n if outcfg[\"ec_compared_inter_file\"] is not None and kwargs[\"plot_highest_count\"] is not None:\n inter_ecs = ec_table.query(\"segment_i != segment_j\")\n\n outcfg[\"ec_lines_compared_pml_file\"] = prefix + \"_draw_ec_lines_compared.pml\"\n\n pairs.ec_lines_pymol_script(\n inter_ecs.iloc[:kwargs[\"plot_highest_count\"], :],\n outcfg[\"ec_lines_compared_pml_file\"],\n distance_cutoff=kwargs[\"distance_cutoff\"],\n chain={\n first_segment_name: first_chain_name,\n second_segment_name: second_chain_name\n }\n )\n\n # Remap the complex crystal structures, if available\n if len(first_sifts_map.hits) > 0 and len(second_sifts_map.hits) > 0:\n outcfg[\"complex_remapped_pdb_files\"] = {\n filename: mapping_index for mapping_index, filename in\n remap_complex_chains(\n first_sifts_map, second_sifts_map,\n seqmap_i, seqmap_j, output_prefix=aux_prefix,\n raise_missing=kwargs[\"raise_missing\"]\n ).items()\n }\n\n # Step 4: Make contact map plots\n # if no structures available, defaults to EC-only plot\n outcfg[\"contact_map_files\"] = _make_complex_contact_maps(\n ec_table, d_intra_i, d_multimer_i,\n d_intra_j, d_multimer_j,\n d_inter, first_segment_name,\n second_segment_name, **kwargs\n )\n\n return outcfg\n\n\n# list of available EC comparison protocols\nPROTOCOLS = {\n # standard monomer comparison protocol\n \"standard\": standard,\n\n # comparison for protein complexes\n \"complex\": complex\n}\n\n\ndef run(**kwargs):\n \"\"\"\n Run inference protocol to calculate ECs from\n input sequence alignment.\n\n Parameters\n ----------\n Mandatory kwargs arguments:\n protocol: EC protocol to run\n prefix: Output prefix for all generated files\n\n Returns\n -------\n outcfg : dict\n Output configuration of stage\n (see individual protocol for fields)\n \"\"\"\n check_required(kwargs, [\"protocol\"])\n\n if kwargs[\"protocol\"] not in PROTOCOLS:\n raise InvalidParameterError(\n \"Invalid protocol selection: \" +\n \"{}. Valid protocols are: {}\".format(\n kwargs[\"protocol\"], \", \".join(PROTOCOLS.keys())\n )\n )\n\n return PROTOCOLS[kwargs[\"protocol\"]](**kwargs)\n",
"\"\"\"\nVisualization of mutation effects\n\nAuthors:\n Thomas A. Hopf\n Anna G. Green (mutation_pymol_script generalization)\n\"\"\"\n\nfrom math import isnan\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom bokeh import plotting as bp\nfrom bokeh.core.properties import value as bokeh_value\nfrom bokeh.models import HoverTool\n\nfrom evcouplings.couplings.model import CouplingsModel\nfrom evcouplings.visualize.pairs import (\n secondary_structure_cartoon, find_secondary_structure_segments\n)\nfrom evcouplings.visualize.pymol import pymol_mapping\nfrom evcouplings.mutate.calculations import split_mutants\nfrom evcouplings.visualize.misc import rgb2hex, colormap\nfrom evcouplings.utils.calculations import entropy_vector\n\nAA_LIST_PROPERTY = \"WFYPMILVAGCSTQNDEHRK\"\n\n\ndef plot_mutation_matrix(source, mutant_column=\"mutant\",\n effect_column=\"prediction_epistatic\",\n conservation_column=\"column_conservation\",\n order=AA_LIST_PROPERTY,\n min_value=None, max_value=None,\n min_percentile=None, max_percentile=None,\n show_conservation=False,\n secondary_structure=None, engine=\"mpl\",\n **matrix_style):\n \"\"\"\n Plot a single-substitution mutation matrix\n\n Parameters\n ----------\n source : evcouplings.couplings.CouplingsModel or pandas.DataFrame\n Plot single mutation matrix predicted using CouplingsModel,\n or effect data for single mutations DataFrame\n mutant_column : str, optional (default: \"mutant\")\n If using source dataframe, extract single mutations from this column.\n Mutations have to be in format A100V.\n effect_column : str, optional (default: \"prediction_epistatic\")\n If using source dataframe, extract mutation effect from this column.\n Effects must be numeric.\n conservation_column : str, optional (default: \"column_conservation\")\n If using source dataframe, extract column conservation information\n from this column. Conservation values must be between 0 and 1. To\n plot conservation, set show_conservation=True.\n order : str or list, optional (default: AA_LIST_PROPERTY)\n Reorder y-axis (substitutions) according to this parameter. If None,\n substitutions will be inferred from source, and sorted alphabetically\n if source is a DataFrame.\n min_value : float, optional (default: None)\n Threshold colormap at this minimum value. If None, defaults to\n minimum value in matrix; if max_value is also None, defaults to\n -max(abs(matrix))\n max_value : float, optional (default: None)\n Threshold colormap at this maximum value. If None, defaults to\n maximum value in matrix; if min_value is also None, defaults to\n max(abs(matrix))\n min_percentile : int or float, optional (default: None)\n Set min_value to this percentile of the effect distribution. Overrides\n min_value.\n max_percentile : int or float, optional (default: None)\n Set max_value to this percentile of the effect distribution. Overrides\n max_value.\n show_conservation : bool, optional (default: False)\n Plot positional conservation underneath matrix. Only possible for\n engine == \"mpl\".\n secondary_structure : dict or pd.DataFrame\n Secondary structure to plot above matrix.\n Can be a dictionary of position (int) to\n secondary structure character (\"H\", \"E\", \"-\"/\"C\"),\n or a DataFrame with columns \"id\" and \"sec_struct_3state\"\n (as returned by Chain.residues, and DistanceMap.residues_i\n and DistanceMap.residues_j). 
Only supported by engine == \"mpl\".\n engine : {\"mpl\", \"bokeh\"}\n Plot matrix using matplotlib (static, more visualization options)\n or with bokeh (interactive, less visualization options)\n **matrix_style : kwargs\n Will be passed on to matrix_base_mpl or matrix_base_bokeh as kwargs\n\n Returns\n -------\n matplotlib AxesSuplot or bokeh Figure\n Figure/Axes object. Display bokeh figure using show().\n \"\"\"\n def _extract_secstruct(secondary_structure):\n \"\"\"\n Extract secondary structure for plotting functions\n \"\"\"\n # turn into dictionary representation if\n # passed as a DataFrame\n if isinstance(secondary_structure, pd.DataFrame):\n secondary_structure = dict(\n zip(\n secondary_structure.id.astype(int),\n secondary_structure.sec_struct_3state\n )\n )\n\n # make sure we only retain secondary structure\n # inside the range of the mutation matrix\n secondary_structure = {\n i: sstr for (i, sstr) in secondary_structure.items()\n if i in positions\n }\n\n secstruct_str = \"\".join(\n [secondary_structure.get(i, \"-\") for i in positions]\n )\n\n return secstruct_str\n\n conservation = None\n\n # test if we will extract information from CouplingsModel,\n # or from a dataframe with mutations\n if isinstance(source, CouplingsModel):\n matrix = source.smm()\n positions = source.index_list\n substitutions = source.alphabet\n wildtype_sequence = source.seq()\n\n if show_conservation:\n conservation = entropy_vector(source)\n else:\n # extract position, WT and subs for each mutant, and keep singles only\n source = split_mutants(\n source, mutant_column\n ).query(\"num_mutations == 1\")\n\n # turn positions into numbers (may be strings)\n source.loc[:, \"pos\"] = pd.to_numeric(source.loc[:, \"pos\"]).astype(int)\n\n # same for effects, ensure they are numeric\n source.loc[:, effect_column] = pd.to_numeric(\n source.loc[:, effect_column], errors=\"coerce\"\n )\n\n substitutions = sorted(source.subs.unique())\n\n # group dataframe to get positional information\n source_grp = source.groupby(\"pos\").first().reset_index().sort_values(by=\"pos\")\n positions = source_grp.pos.values\n wildtype_sequence = source_grp.wt.values\n\n if show_conservation:\n source_grp.loc[:, conservation_column] = pd.to_numeric(\n source_grp.loc[:, conservation_column], errors=\"coerce\"\n )\n conservation = source_grp.loc[:, conservation_column].values\n\n # create mutation effect matrix\n matrix = np.full((len(positions), len(substitutions)), np.nan)\n\n # mapping from position/substitution into matrix\n pos_to_i = {p: i for i, p in enumerate(positions)}\n subs_to_j = {s: j for j, s in enumerate(substitutions)}\n\n # fill matrix with values\n for idx, r in source.iterrows():\n matrix[pos_to_i[r[\"pos\"]], subs_to_j[r[\"subs\"]]] = r[effect_column]\n\n # reorder substitutions\n if order is not None:\n matrix_final = np.full((len(positions), len(substitutions)), np.nan)\n substitutions_list = list(substitutions)\n\n # go through new order row by row and put in right place\n for i, subs in enumerate(order):\n if subs in substitutions:\n matrix_final[:, i] = matrix[:, substitutions_list.index(subs)]\n\n # set substitutions to new list\n substitutions = list(order)\n else:\n matrix_final = matrix\n\n # determine ranges for matrix colormaps\n # get effects without NaNs\n effects = matrix_final.ravel()\n effects = effects[np.isfinite(effects)]\n\n if min_percentile is not None:\n min_value = np.percentile(effects, min_percentile)\n\n if max_percentile is not None:\n max_value = np.percentile(effects, 
max_percentile)\n\n matrix_style[\"min_value\"] = min_value\n matrix_style[\"max_value\"] = max_value\n\n # extract secondary structure\n if secondary_structure is not None:\n secondary_structure_str = _extract_secstruct(secondary_structure)\n else:\n secondary_structure_str = None\n\n if engine == \"mpl\":\n return matrix_base_mpl(\n matrix_final, positions, substitutions,\n conservation=conservation,\n wildtype_sequence=wildtype_sequence,\n secondary_structure=secondary_structure_str,\n **matrix_style\n )\n elif engine == \"bokeh\":\n # cannot pass conservation for bokeh\n return matrix_base_bokeh(\n matrix_final, positions, substitutions,\n wildtype_sequence=wildtype_sequence,\n **matrix_style\n )\n else:\n raise ValueError(\n \"Invalid plotting engine selected, valid options are: \"\n \"mpl, bokeh\"\n )\n\n\ndef matrix_base_bokeh(matrix, positions, substitutions,\n wildtype_sequence=None, label_size=8,\n min_value=None, max_value=None,\n colormap=plt.cm.RdBu_r, na_color=\"#bbbbbb\",\n title=None):\n \"\"\"\n Bokeh-based interactive mutation matrix plotting. This is the base\n plotting function, see plot_mutation_matrix() for more convenient access.\n\n Parameters\n ----------\n matrix : np.array(float)\n 2D numpy array with values for individual single mutations\n (first axis: position, second axis: substitution)\n positions : list(int) or list(str)\n List of positions along x-axis of matrix\n (length has to agree with first dimension of matrix)\n substitutions : list(str)\n List of substitutions along y-axis of matrix\n (length has to agree with second dimension of matrix)\n wildtype_sequence : str or list(str), optional (default: None)\n Sequence of wild-type symbols. If given, will indicate wild-type\n entries in matrix with a dot.\n label_size : int, optional (default: 8)\n Font size of x/y-axis labels.\n min_value : float, optional (default: None)\n Threshold colormap at this minimum value. If None, defaults to\n minimum value in matrix; if max_value is also None, defaults to\n -max(abs(matrix))\n max_value : float, optional (default: None)\n Threshold colormap at this maximum value. 
If None, defaults to\n maximum value in matrix; if min_value is also None, defaults to\n max(abs(matrix))\n colormap : matplotlib colormap object, optional (default: plt.cm.RdBu_r)\n Maps mutation effects to colors of matrix cells.\n na_color : str, optional (default: \"#bbbbbb\")\n Color for missing values in matrix\n title : str, optional (default: None)\n If given, set title of plot to this value.\n\n Returns\n -------\n bokeh.plotting.figure.Figure\n Bokeh figure (for displaying or saving)\n \"\"\"\n\n # figure out maximum and minimum values for color map\n if max_value is None and min_value is None:\n max_value = np.nanmax(np.abs(matrix))\n min_value = -max_value\n elif min_value is None:\n min_value = np.nanmin(matrix)\n elif max_value is None:\n max_value = np.nanmax(matrix)\n\n # use matplotlib colormaps to create color values,\n # set ranges based on given values\n norm = mpl.colors.Normalize(vmin=min_value, vmax=max_value)\n mapper = plt.cm.ScalarMappable(norm=norm, cmap=colormap)\n\n # build list of values for plotting from source matrix\n pos_list = []\n subs_list = []\n color_list = []\n effect_list = []\n\n # go through values on x-axis (substitutions)\n for i, pos in enumerate(positions):\n if wildtype_sequence is not None:\n wt_symbol = wildtype_sequence[i]\n if type(pos) is tuple:\n # label will be in format segment AA pos, eg B_1 A 151\n pos = \"{} {} {}\".format(pos[0], wt_symbol, pos[1])\n else:\n pos = \"{} {}\".format(wt_symbol, pos)\n else:\n wt_symbol = None\n if type(pos) is tuple:\n pos = \" \".join(map(str, pos))\n else:\n pos = str(pos)\n\n # go through all values on y-axis (substitutions)\n for j, subs in enumerate(substitutions):\n pos_list.append(pos)\n subs_list.append(str(subs))\n\n cur_effect = matrix[i, j]\n if isnan(cur_effect):\n cur_effect_str = \"n/a\"\n color_list.append(na_color)\n else:\n cur_effect_str = \"{:.2f}\".format(cur_effect)\n color_list.append(\n rgb2hex(*mapper.to_rgba(cur_effect))\n )\n\n # attach info if this is WT to WT self substitution\n if subs == wt_symbol:\n cur_effect_str += \" (WT)\"\n\n effect_list.append(cur_effect_str)\n\n source = bp.ColumnDataSource(\n data=dict(\n position=pos_list,\n substitution=subs_list,\n color=color_list,\n effect=effect_list,\n )\n )\n\n TOOLS = \"hover\"\n height_factor = 12\n width_factor = 10\n\n # create lists of values for x- and y-axes, which will be\n # axis labels;\n # keep all of these as strings so we can have WT/substitution\n # symbol in the label\n if wildtype_sequence is None:\n if type(positions[0]) is tuple:\n positions = [\" \".join(list(map(str, p))) for p in positions]\n else:\n positions = list(map(str, positions))\n else:\n if type(positions[0]) is tuple:\n positions = [\n \"{} {} {}\".format(p[0], wildtype_sequence[i], p[1])\n for i, p in enumerate(positions)\n ]\n else:\n positions = [\n \"{} {}\".format(wildtype_sequence[i], p)\n for i, p in enumerate(positions)\n ]\n\n substitutions = list(map(str, substitutions))\n\n p = bp.figure(\n title=title,\n x_range=positions, y_range=substitutions,\n x_axis_location=\"above\", plot_width=width_factor * len(positions),\n plot_height=height_factor * len(substitutions),\n toolbar_location=\"left\", tools=TOOLS\n )\n\n p.rect(\n \"position\", \"substitution\", 1, 1, source=source,\n color=\"color\", line_color=None\n )\n\n # modify plot style\n p.grid.grid_line_color = None\n p.axis.axis_line_color = None\n p.axis.major_tick_line_color = None\n p.axis.major_label_text_font_size = bokeh_value(\"{}pt\".format(label_size))\n 
p.axis.major_label_standoff = 0\n p.xaxis.major_label_orientation = np.pi / 2\n p.toolbar_location = None\n\n p.select_one(HoverTool).tooltips = [\n ('mutant', '@position @substitution'),\n ('effect', '@effect'),\n ]\n\n return p\n\n\ndef matrix_base_mpl(matrix, positions, substitutions, conservation=None,\n secondary_structure=None, wildtype_sequence=None,\n min_value=None, max_value=None,\n ax=None, colormap=plt.cm.RdBu_r,\n colormap_conservation=plt.cm.Oranges, na_color=\"#bbbbbb\",\n title=None, position_label_size=8, substitution_label_size=8,\n show_colorbar=True, colorbar_indicate_bounds=False,\n show_wt_char=True, label_filter=None, secondary_structure_style=None):\n \"\"\"\n Matplotlib-based mutation matrix plotting. This is the base plotting function,\n see plot_mutation_matrix() for more convenient access.\n\n Parameters\n ----------\n matrix : np.array(float)\n 2D numpy array with values for individual single mutations\n (first axis: position, second axis: substitution)\n positions : list(int) or list(str)\n List of positions along x-axis of matrix\n (length has to agree with first dimension of matrix)\n substitutions : list(str)\n List of substitutions along y-axis of matrix\n (length has to agree with second dimension of matrix)\n conservation : list(float) or np.array(float), optional (default: None)\n Positional conservation along sequence. Values must range\n between 0 (not conserved) and 1 (fully conserved). If given,\n will plot conservation along bottom of mutation matrix.\n secondary_structure : str or list(str), optional (default: None)\n Secondary structure for each position along sequence. If given,\n will draw secondary structure cartoon on top of matrix.\n wildtype_sequence : str or list(str), optional (default: None)\n Sequence of wild-type symbols. If given, will indicate wild-type\n entries in matrix with a dot.\n min_value : float, optional (default: None)\n Threshold colormap at this minimum value. If None, defaults to\n minimum value in matrix; if max_value is also None, defaults to\n -max(abs(matrix))\n max_value : float, optional (default: None)\n Threshold colormap at this maximum value. If None, defaults to\n maximum value in matrix; if min_value is also None, defaults to\n max(abs(matrix))\n ax : Matplotlib axes object, optional (default: None)\n Draw mutation matrix on this axis. 
If None, new figure and axis\n will be created.\n colormap : matplotlib colormap object, optional (default: plt.cm.RdBu_r)\n Maps mutation effects to colors of matrix cells.\n colormap_conservation: matplotlib colormap object, optional (default: plt.cm.Oranges)\n Maps sequence conservation to colors of conservation vector plot.\n na_color : str, optional (default: \"#bbbbbb\")\n Color for missing values in matrix\n title : str, optional (default: None)\n If given, set title of plot to this value.\n position_label_size : int, optional (default: 8)\n Font size of x-axis labels.\n substitution_label_size : int, optional (default: 8)\n Font size of y-axis labels.\n show_colorbar : bool, optional (default: True)\n If True, show colorbar next to matrix.\n colorbar_indicate_bounds : bool, optional (default: False)\n If True, add greater-than/less-than signs to limits of colorbar\n to indicate that colors were thresholded at min_value/max_value\n show_wt_char : bool, optional (default: True)\n Display wild-type symbol in axis labels\n label_filter : function, optional (default: None)\n Function with one argument (integer) that determines if a certain position\n label will be printed (if label_filter(pos)==True) or not.\n secondary_structure_style : dict, optional (default: None)\n Pass on as **kwargs to evcouplings.visualize.pairs.secondary_structure_cartoon\n to determine appearance of secondary structure cartoon.\n\n Returns\n -------\n ax : Matplotlib axes object\n Axes on which mutation matrix was drawn\n \"\"\"\n LINEWIDTH = 0.0\n LABEL_X_OFFSET = 0.55\n LABEL_Y_OFFSET = 0.45\n\n def _draw_rect(x_range, y_range, linewidth):\n r = plt.Rectangle(\n (min(x_range), min(y_range)),\n max(x_range) - min(x_range), max(y_range) - min(y_range),\n fc='None', linewidth=linewidth\n )\n ax.add_patch(r)\n\n matrix_width = matrix.shape[0]\n matrix_height = len(substitutions)\n\n # mask NaN entries in mutation matrix\n matrix_masked = np.ma.masked_where(np.isnan(matrix), matrix)\n\n # figure out maximum and minimum values for color map\n if max_value is None and min_value is None:\n max_value = np.abs(matrix_masked).max()\n min_value = -max_value\n elif min_value is None:\n min_value = matrix_masked.min()\n elif max_value is None:\n max_value = matrix_masked.max()\n\n # set NaN color value in colormaps\n colormap = deepcopy(colormap)\n colormap.set_bad(na_color)\n colormap_conservation = deepcopy(colormap_conservation)\n colormap_conservation.set_bad(na_color)\n\n # determine size of plot (depends on how much tracks\n # with information we will add)\n num_rows = (\n len(substitutions) +\n (conservation is not None) +\n (secondary_structure is not None)\n )\n\n ratio = matrix_width / float(num_rows)\n\n # create axis, if not given\n if ax is None:\n fig = plt.figure(figsize=(ratio * 5, 5))\n ax = fig.gca()\n\n # make square-shaped matrix cells\n ax.set_aspect(\"equal\", \"box\")\n\n # define matrix coordinates\n # always add +1 because coordinates are used by\n # pcolor(mesh) as beginning and start of rectangles\n x_range = np.array(range(matrix_width + 1))\n y_range = np.array(range(matrix_height + 1))\n y_range_avg = range(-2, 0)\n x_range_avg = range(matrix_width + 1, matrix_width + 3)\n y_range_cons = np.array(y_range_avg) - 1.5\n\n # coordinates for text labels (fixed axis)\n x_left_subs = min(x_range) - 1\n x_right_subs = max(x_range_avg) + 1\n\n if conservation is None:\n y_bottom_res = min(y_range_avg) - 0.5\n else:\n y_bottom_res = min(y_range_cons) - 0.5\n\n # coordinates for additional annotation\n 
y_ss = max(y_range) + 2\n\n # 1) main mutation matrix\n X, Y = np.meshgrid(x_range, y_range)\n cm = ax.pcolormesh(\n X, Y, matrix_masked.T, cmap=colormap, vmax=max_value, vmin=min_value\n )\n _draw_rect(x_range, y_range, LINEWIDTH)\n\n # 2) mean column effect (bottom \"subplot\")\n mean_pos = np.mean(matrix_masked, axis=1)[:, np.newaxis]\n X_pos, Y_pos = np.meshgrid(x_range, y_range_avg)\n ax.pcolormesh(\n X_pos, Y_pos, mean_pos.T, cmap=colormap, vmax=max_value, vmin=min_value\n )\n _draw_rect(x_range, y_range_avg, LINEWIDTH)\n\n # 3) amino acid average (right \"subplot\")\n mean_aa = np.mean(matrix_masked, axis=0)[:, np.newaxis]\n X_aa, Y_aa = np.meshgrid(x_range_avg, y_range)\n ax.pcolormesh(X_aa, Y_aa, mean_aa, cmap=colormap, vmax=max_value, vmin=min_value)\n _draw_rect(x_range_avg, y_range, LINEWIDTH)\n\n # mark wildtype residues\n if wildtype_sequence is not None:\n subs_list = list(substitutions)\n\n for i, wt in enumerate(wildtype_sequence):\n # skip unspecified entries\n if wt is not None and wt != \"\":\n marker = plt.Circle(\n (x_range[i] + 0.5, y_range[subs_list.index(wt)] + 0.5),\n 0.1, fc='k', axes=ax\n )\n ax.add_patch(marker)\n\n # put labels along both axes of matrix\n\n # x-axis (positions)\n for i, pos in zip(x_range, positions):\n # filter labels, if selected\n if label_filter is not None and not label_filter(pos):\n continue\n\n # determine what position label should be\n if show_wt_char and wildtype_sequence is not None:\n wt_symbol = wildtype_sequence[i]\n if type(pos) is tuple and len(pos) == 2:\n # label will be in format segment AA pos, eg B_1 A 151\n label = \"{} {} {}\".format(pos[0], wt_symbol, pos[1])\n else:\n label = \"{} {}\".format(wt_symbol, pos)\n\n else:\n if type(pos) is tuple:\n label = \" \".join(map(str, pos))\n else:\n label = str(pos)\n\n ax.text(\n i + LABEL_X_OFFSET, y_bottom_res, label,\n size=position_label_size,\n horizontalalignment='center',\n verticalalignment='top',\n rotation=90\n )\n\n # y-axis (substitutions)\n for j, subs in zip(y_range, substitutions):\n # put on lefthand side of matrix...\n ax.text(\n x_left_subs, j + LABEL_Y_OFFSET, subs,\n size=substitution_label_size,\n horizontalalignment='center',\n verticalalignment='center'\n )\n\n # ...and on right-hand side of matrix\n ax.text(\n x_right_subs, j + LABEL_Y_OFFSET, subs,\n size=substitution_label_size,\n horizontalalignment='center', verticalalignment='center'\n )\n\n # draw colorbar\n if show_colorbar:\n cb = plt.colorbar(\n cm, ticks=[min_value, max_value],\n shrink=0.3, pad=0.15 / ratio, aspect=8\n )\n\n if colorbar_indicate_bounds:\n symbol_min, symbol_max = u\"\\u2264\", u\"\\u2265\"\n else:\n symbol_min, symbol_max = \"\", \"\"\n\n cb.ax.set_yticklabels(\n [\n \"{symbol} {value:>+{width}.1f}\".format(\n symbol=s, value=v, width=0\n ) for (v, s) in [(min_value, symbol_min), (max_value, symbol_max)]\n ]\n )\n cb.ax.xaxis.set_ticks_position(\"none\")\n cb.ax.yaxis.set_ticks_position(\"none\")\n cb.outline.set_linewidth(0)\n\n # plot secondary structure cartoon\n if secondary_structure is not None:\n # if no style given for secondary structure, set default\n if secondary_structure_style is None:\n secondary_structure_style = {\n \"width\": 0.8,\n \"line_width\": 2,\n \"strand_width_factor\": 0.5,\n \"helix_turn_length\": 2,\n \"min_sse_length\": 2,\n }\n\n start, end, sse = find_secondary_structure_segments(secondary_structure)\n secondary_structure_cartoon(\n sse, sequence_start=start, sequence_end=end, center=y_ss, ax=ax,\n **secondary_structure_style\n )\n\n # plot 
conservation\n if conservation is not None:\n conservation = np.array(conservation)[:, np.newaxis]\n cons_masked = np.ma.masked_where(np.isnan(conservation), conservation)\n X_cons, Y_cons = np.meshgrid(x_range, y_range_cons)\n ax.pcolormesh(\n X_cons, Y_cons, cons_masked.T, cmap=colormap_conservation, vmax=1, vmin=0\n )\n _draw_rect(x_range, y_range_cons, LINEWIDTH)\n\n # remove chart junk\n for line in ['top', 'bottom', 'right', 'left']:\n ax.spines[line].set_visible(False)\n\n ax.xaxis.set_ticks_position(\"none\")\n ax.yaxis.set_ticks_position(\"none\")\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n if title is not None:\n ax.set_title(title)\n\n return ax\n\n\ndef mutation_pymol_script(mutation_table, output_file,\n effect_column=\"prediction_epistatic\",\n mutant_column=\"mutant\", agg_func=\"mean\",\n cmap=plt.cm.RdBu_r, segment_to_chain_mapping=None):\n \"\"\"\n Create a Pymol .pml script to visualize single mutation\n effects\n\n Parameters\n ----------\n mutation_table : pandas.DataFrame\n Table with mutation effects (will be filtered\n for single mutants)\n output_file : str\n File path where to store pml script\n effect_column : str, optional (default: \"prediction_epistatic\")\n Column in mutation_table that contains mutation effects\n mutant_column : str, optional (default: \"mutant\")\n Column in mutation_table that contains mutations\n (in format \"A123G\")\n agg_func : str, optional (default: \"mean\")\n Function used to aggregate single mutations into one\n aggregated effect per position (any pandas aggregation\n operation, including \"mean\", \"min, \"max\")\n cmap : matplotlib.colors.LinearSegmentedColormap, optional\n (default: plt.cm.RdBu_r)\n Colormap used to map mutation effects to colors\n segment_to_chain_mapping: str or dict(str -> str), optional (default: None)\n PDB chain(s) that should be targeted by line drawing\n\n * If None, residues will be selected\n py position alone, which may cause wrong assignments\n if multiple chains are present in the structure.\n\n * Different chains can be assigned for position\n if a dictionary that maps from segment (str) to PDB chain (str)\n is given.\n\n Raises\n ------\n ValueError\n If no single mutants contained in mutation_table\n ValueError\n If mutation_table contains a segment identifier not\n found in segment_to_chain_mapping\n \"\"\"\n # split mutation strings\n t = split_mutants(mutation_table, mutant_column)\n\n # only pick single mutants\n t = t.query(\"num_mutations == 1\")\n\n if len(t) == 0:\n raise ValueError(\n \"mutation_table does not contain any single \"\n \"amino acid substitutions.\"\n )\n\n # add a segment column if missing\n if \"segment\" not in t.columns:\n t.loc[:, \"segment\"] = None\n\n with open(output_file, \"w\") as f:\n\n #handle each segment independently\n # have to fill NaNs with a string for groupby to work\n t = t.fillna(\"none\")\n for segment_name, _t in t.groupby(\"segment\"):\n\n if segment_to_chain_mapping is None:\n chain = None\n\n elif type(segment_to_chain_mapping) is str:\n chain = segment_to_chain_mapping\n\n elif segment_name not in segment_to_chain_mapping:\n raise ValueError(\n \"Segment name {} has no mapping to PyMOL \"\n \"chain. 
Available mappings are: {}\".format(\n segment_name, segment_to_chain_mapping\n )\n )\n else:\n chain = segment_to_chain_mapping[segment_name]\n\n # aggregate into positional information\n _t = _t.loc[:, [\"pos\", effect_column]].rename(\n columns={\"pos\": \"i\", effect_column: \"effect\"}\n )\n\n t_agg = _t.groupby(\"i\").agg(agg_func).reset_index()\n t_agg.loc[:, \"i\"] = pd.to_numeric(t_agg.i).astype(int)\n\n # map aggregated effects to colors\n max_val = t_agg.effect.abs().max()\n mapper = colormap(-max_val, max_val, cmap)\n t_agg.loc[:, \"color\"] = t_agg.effect.map(mapper)\n t_agg.loc[:, \"show\"] = \"spheres\"\n\n if chain is not None:\n chain_sel = \", chain '{}'\".format(chain)\n else:\n chain_sel = \"\"\n\n f.write(\"as cartoon{}\\n\".format(chain_sel))\n f.write(\"color grey80{}\\n\".format(chain_sel))\n\n pymol_mapping(t_agg, f, chain, atom=\"CA\")\n",
"\"\"\"\nUniprot to PDB structure identification and\nindex mapping using the SIFTS database\n(https://www.ebi.ac.uk/pdbe/docs/sifts/)\n\nThis functionality is centered around the\npdb_chain_uniprot.csv table available from SIFTS.\n(ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/flatfiles/csv/pdb_chain_uniprot.csv.gz)\n\nAuthors:\n Thomas A. Hopf\n Anna G. Green (find_homologs)\n Chan Kang (find_homologs)\n\"\"\"\n\nfrom os import path\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport pandas as pd\nimport requests\n\nfrom evcouplings.align.alignment import (\n Alignment, read_fasta, parse_header\n)\nfrom evcouplings.align.protocol import (\n jackhmmer_search, hmmbuild_and_search\n)\nfrom evcouplings.align.tools import read_hmmer_domtbl\nfrom evcouplings.compare.mapping import map_indices\nfrom evcouplings.utils.system import (\n get_urllib, ResourceError, valid_file, tempdir, temp\n)\nfrom evcouplings.utils.config import (\n parse_config, check_required, InvalidParameterError\n)\nfrom evcouplings.utils.helpers import range_overlap\n\nUNIPROT_MAPPING_URL = \"https://www.uniprot.org/mapping/\"\nSIFTS_URL = \"ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/flatfiles/csv/uniprot_segments_observed.csv.gz\"\nSIFTS_REST_API = \"http://www.ebi.ac.uk/pdbe/api/mappings/uniprot_segments/{}\"\n\n# TODO: make this default parametrization more explicit (e.g. a config file in repository)\n# these parameters are fed as a default into SIFTS.by_alignment so that the method can be\n# easily used without a configuration file/any further setup\nHMMER_CONFIG = \"\"\"\nprefix:\nsequence_id:\nsequence_file:\nregion:\nfirst_index: 1\n\nuse_bitscores: True\ndomain_threshold: 0.5\nsequence_threshold: 0.5\niterations: 1\ndatabase: sequence_database\n\nextract_annotation: False\ncpu: 1\nnobias: False\nreuse_alignment: False\ncheckpoints_hmm: False\ncheckpoints_ali: False\n\n# database\njackhmmer: jackhmmer\nsequence_database:\nsequence_download_url: http://www.uniprot.org/uniprot/{}.fasta\n\"\"\"\n\n\ndef fetch_uniprot_mapping(ids, from_=\"ACC\", to=\"ACC\", format=\"fasta\"):\n \"\"\"\n Fetch data from UniProt ID mapping service\n (e.g. download set of sequences)\n\n Parameters\n ----------\n ids : list(str)\n List of UniProt identifiers for which to\n retrieve mapping\n from_ : str, optional (default: \"ACC\")\n Source identifier (i.e. 
contained in \"ids\" list)\n to : str, optional (default: \"ACC\")\n Target identifier (to which source should be mapped)\n format : str, optional (default: \"fasta\")\n Output format to request from Uniprot server\n\n Returns\n -------\n str:\n Response from UniProt server\n \"\"\"\n params = {\n \"from\": from_,\n \"to\": to,\n \"format\": format,\n \"query\": \" \".join(ids)\n }\n url = UNIPROT_MAPPING_URL\n r = requests.post(url, data=params)\n\n if r.status_code != requests.codes.ok:\n raise ResourceError(\n \"Invalid status code ({}) for URL: {}\".format(\n r.status_code, url\n )\n )\n\n return r.text\n\n\ndef find_homologs(pdb_alignment_method=\"jackhmmer\", **kwargs):\n \"\"\"\n Identify homologs using jackhmmer or hmmbuild/hmmsearch\n\n Parameters\n ----------\n pdb_alignment_method : {\"jackhmmer\", \"hmmsearch\"}, \n optional (default: \"jackhmmer\")\n Sequence alignment method used for searching the PDB\n **kwargs\n Passed into jackhmmer / hmmbuild_and_search protocol\n (see documentation for available options)\n\n Returns\n -------\n ali : evcouplings.align.Alignment\n Alignment of homologs of query sequence\n in sequence database\n hits : pandas.DataFrame\n Tabular representation of hits\n \"\"\"\n\n # load default configuration\n config = parse_config(HMMER_CONFIG)\n\n # update with overrides from kwargs\n config = {\n **config,\n **kwargs,\n }\n\n # create temporary output if no prefix is given\n if config[\"prefix\"] is None:\n config[\"prefix\"] = path.join(tempdir(), \"compare\")\n\n check_required(\n config, [\"prefix\"]\n )\n\n # run hmmsearch (possibly preceded by hmmbuild)\n if pdb_alignment_method == \"hmmsearch\":\n # set up config to run hmmbuild_and_search on the unfiltered alignment file\n updated_config = deepcopy(config)\n updated_config[\"alignment_file\"] = config.get(\"raw_focus_alignment_file\")\n ar = hmmbuild_and_search(**updated_config)\n\n # For hmmbuild and search, we have to read the raw focus alignment file\n # to guarantee that the query sequence is present\n with open(ar[\"raw_focus_alignment_file\"]) as a:\n ali = Alignment.from_file(a, \"fasta\")\n\n # run jackhmmer against sequence database\n # at this point we have already checked to ensure\n # that the input is either jackhmmer or hmmsearch\n elif pdb_alignment_method == \"jackhmmer\":\n ar = jackhmmer_search(**config)\n\n with open(ar[\"raw_alignment_file\"]) as a:\n ali = Alignment.from_file(a, \"stockholm\")\n\n # write alignment as FASTA file for easier checking by hand,\n # if necessary\n with open(config[\"prefix\"] + \"_raw.fasta\", \"w\") as f:\n ali.write(f)\n else:\n raise InvalidParameterError(\n \"Invalid pdb_alignment_method selected. 
Valid options are: \" +\n \", \".join([\"jackhmmer\", \"hmmsearch\"])\n )\n\n # read hmmer hittable and simplify\n hits = read_hmmer_domtbl(ar[\"hittable_file\"])\n\n hits.loc[:, \"uniprot_ac\"] = hits.loc[:, \"target_name\"].map(lambda x: x.split(\"|\")[1])\n hits.loc[:, \"uniprot_id\"] = hits.loc[:, \"target_name\"].map(lambda x: x.split(\"|\")[2])\n\n hits = hits.rename(\n columns={\n \"domain_score\": \"bitscore\",\n \"domain_i_Evalue\": \"e_value\",\n \"ali_from\": \"alignment_start\",\n \"ali_to\": \"alignment_end\",\n \"hmm_from\": \"hmm_start\",\n \"hmm_to\": \"hmm_end\",\n }\n )\n\n hits.loc[:, \"alignment_start\"] = pd.to_numeric(hits.alignment_start).astype(int)\n hits.loc[:, \"alignment_end\"] = pd.to_numeric(hits.alignment_end).astype(int)\n\n hits.loc[:, \"alignment_id\"] = (\n hits.target_name + \"/\" +\n hits.alignment_start.astype(str) + \"-\" +\n hits.alignment_end.astype(str)\n )\n\n hits = hits.loc[\n :, [\"alignment_id\", \"uniprot_ac\", \"uniprot_id\", \"alignment_start\",\n \"alignment_end\", \"bitscore\", \"e_value\"]\n ]\n\n return ali, hits\n\n\nclass SIFTSResult:\n \"\"\"\n Store results of SIFTS structure/mapping identification.\n\n (Full class defined for easify modification of fields)\n \"\"\"\n def __init__(self, hits, mapping):\n \"\"\"\n Create new SIFTS structure / mapping record.\n\n Parameters\n ----------\n hits : pandas.DataFrame\n Table with identified PDB chains\n mapping : dict\n Mapping from seqres to Uniprot numbering\n for each PDB chain\n (index by mapping_index column in hits\n dataframe)\n \"\"\"\n self.hits = hits\n self.mapping = mapping\n\n\nclass SIFTS:\n \"\"\"\n Provide Uniprot to PDB mapping data and functions\n starting from SIFTS mapping table.\n \"\"\"\n def __init__(self, sifts_table_file, sequence_file=None):\n \"\"\"\n Create new SIFTS mapper from mapping table.\n\n Note that creation of the mapping files, if not existing,\n takes a while.\n\n Parameters\n ----------\n sifts_table_file : str\n Path to *corrected* SIFTS pdb_chain_uniprot.csv\n To generate this file, point to an empty file path.\n sequence_file : str, optional (default: None)\n Path to file containing all UniProt sequences\n in SIFTS (used for homology-based identification\n of structures).\n Note: This file can be created using the\n create_sequence_file() method.\n \"\"\"\n # test if table exists, if not, download and modify\n if not valid_file(sifts_table_file):\n self._create_mapping_table(sifts_table_file)\n\n self.table = pd.read_csv(\n sifts_table_file, comment=\"#\"\n )\n\n # final table has still some entries where lengths do not match,\n # remove these\n self.table = self.table.query(\n \"(resseq_end - resseq_start) == (uniprot_end - uniprot_start)\"\n )\n\n self.sequence_file = sequence_file\n\n # if path for sequence file given, but not there, create\n if sequence_file is not None and not valid_file(sequence_file):\n self.create_sequence_file(sequence_file)\n\n # add Uniprot ID column if we have sequence mapping\n # from FASTA file\n if self.sequence_file is not None:\n self._add_uniprot_ids()\n\n def _create_mapping_table(self, sifts_table_file):\n \"\"\"\n Create modified SIFTS mapping table (based on\n file at SIFTS_URL). For some of the entries,\n the Uniprot sequence ranges do not map to a\n SEQRES sequence range of the same length. 
These\n PDB IDs will be entirely replaced by a segment-\n based mapping extracted from the SIFTS REST API.\n\n Parameters\n ----------\n sifts_table_file : str\n Path where computed table will be stored\n \"\"\"\n def extract_rows(M, pdb_id):\n res = []\n\n M = M[pdb_id.lower()][\"UniProt\"]\n\n for uniprot_ac, Ms in M.items():\n for x in Ms[\"mappings\"]:\n res.append({\n \"pdb_id\": pdb_id,\n \"pdb_chain\": x[\"chain_id\"],\n \"uniprot_ac\": uniprot_ac,\n \"resseq_start\": x[\"start\"][\"residue_number\"],\n \"resseq_end\": x[\"end\"][\"residue_number\"],\n \"coord_start\": (\n str(x[\"start\"][\"author_residue_number\"]) +\n x[\"start\"][\"author_insertion_code\"].replace(\" \", \"\")\n ),\n \"coord_end\": (\n str(x[\"end\"][\"author_residue_number\"]) +\n x[\"end\"][\"author_insertion_code\"].replace(\" \", \"\")\n ),\n \"uniprot_start\": x[\"unp_start\"],\n \"uniprot_end\": x[\"unp_end\"],\n })\n\n return res\n\n # download SIFTS table (gzip-compressed csv) to temp file\n temp_download_file = temp()\n get_urllib(SIFTS_URL, temp_download_file)\n\n # load table and rename columns for internal use, if SIFTS\n # ever decided to rename theirs\n table = pd.read_csv(\n temp_download_file, comment=\"#\",\n compression=\"gzip\"\n ).rename(\n columns={\n \"PDB\": \"pdb_id\",\n \"CHAIN\": \"pdb_chain\",\n \"SP_PRIMARY\": \"uniprot_ac\",\n \"RES_BEG\": \"resseq_start\",\n \"RES_END\": \"resseq_end\",\n \"PDB_BEG\": \"coord_start\",\n \"PDB_END\": \"coord_end\",\n \"SP_BEG\": \"uniprot_start\",\n \"SP_END\": \"uniprot_end\",\n }\n )\n\n # TODO: remove the following if new segment-based table proves as robust solution\n \"\"\"\n # this block disabled for now due to use of new table\n # based on observed UniProt segments\n # - can probably be removed eventually\n\n # identify problematic PDB IDs\n problematic_ids = table.query(\n \"(resseq_end - resseq_start) != (uniprot_end - uniprot_start)\"\n ).pdb_id.unique()\n \n # collect new mappings from segment based REST API\n res = []\n for i, pdb_id in enumerate(problematic_ids):\n r = requests.get(\n SIFTS_REST_API.format(pdb_id.lower())\n )\n mapping = json.loads(r.text)\n\n res += extract_rows(mapping, pdb_id)\n\n # remove bad PDB IDs from table and add new mapping\n new_table = table.loc[~table.pdb_id.isin(problematic_ids)]\n\n # also disabled due to use of new table based on observed\n # UniProt segments - can probably be removed eventually \n \n new_table = new_table.append(\n pd.DataFrame(res).loc[:, table.columns]\n )\n \"\"\"\n\n # save for later reuse\n table.to_csv(sifts_table_file, index=False)\n\n def _add_uniprot_ids(self):\n \"\"\"\n Add Uniprot ID column to SIFTS table based on\n AC to ID mapping extracted from sequence database\n \"\"\"\n # iterate through headers in sequence file and store\n # AC to ID mapping\n ac_to_id = {}\n with open(self.sequence_file) as f:\n for seq_id, _ in read_fasta(f):\n _, ac, id_ = seq_id.split(\" \")[0].split(\"|\")\n ac_to_id[ac] = id_\n\n # add column to dataframe\n self.table.loc[:, \"uniprot_id\"] = self.table.loc[:, \"uniprot_ac\"].map(ac_to_id)\n\n def create_sequence_file(self, output_file, chunk_size=1000, max_retries=100):\n \"\"\"\n Create FASTA sequence file containing all UniProt\n sequences of proteins in SIFTS. 
This file is required\n for homology-based structure identification and\n index remapping.\n This function will also automatically associate\n the sequence file with the SIFTS object.\n\n Parameters\n ----------\n output_file : str\n Path at which to store sequence file\n chunk_size : int, optional (default: 1000)\n Retrieve sequences from UniProt in chunks of this size\n (too large chunks cause the mapping service to stall)\n max_retries : int, optional (default: 100)\n Allow this many retries when fetching sequences\n from UniProt ID mapping service, which unfortunately\n often suffers from connection failures.\n \"\"\"\n ids = self.table.uniprot_ac.unique().tolist()\n\n # retrieve sequences in chunks since ID mapping service\n # tends to fail on large requests\n id_chunks = [\n ids[i:i + chunk_size] for i in range(0, len(ids), chunk_size)\n ]\n\n # store individual retrieved chunks as list of strings\n seq_chunks = []\n\n # keep track of how many retries were necessary and\n # abort if number exceeds max_retries\n num_retries = 0\n\n for ch in id_chunks:\n # fetch sequence chunk;\n # if there is a problem retry as long as we stay within\n # maximum number of retries\n while True:\n try:\n seqs = fetch_uniprot_mapping(ch)\n break\n except requests.ConnectionError as e:\n # count as failed try\n num_retries += 1\n\n # if we retried too often, abort\n if num_retries > max_retries:\n raise ResourceError(\n \"Could not fetch sequences for SIFTS mapping tables from UniProt since \"\n \"maximum number of retries after connection errors was exceeded. Retry \"\n \"at a later time, or call SIFTS.create_sequence_file() with a higher value \"\n \"for max_retries.\"\n ) from e\n\n # rename identifiers in sequence file, so\n # we can circumvent Uniprot sequence identifiers\n # being prefixed by hmmer if a hit has exactly the\n # same identifier as the query sequence\n seqs = seqs.replace(\n \">sp|\", \">evsp|\",\n ).replace(\n \">tr|\", \">evtr|\",\n )\n\n assert seqs.endswith(\"\\n\")\n\n # store for writing\n seq_chunks.append(seqs)\n\n # store sequences to FASTA file in one go at the end\n with open(output_file, \"w\") as f:\n f.write(\"\".join(seq_chunks))\n\n self.sequence_file = output_file\n\n # add Uniprot ID column to SIFTS table\n self._add_uniprot_ids()\n\n def _create_sequence_file(self, output_file):\n \"\"\"\n Create FASTA sequence file containing all UniProt\n sequences of proteins in SIFTS. 
This file is required\n for homology-based structure identification and\n index remapping.\n This function will also automatically associate\n the sequence file with the SIFTS object.\n\n Note: this would be the nicer function, but unfortunately\n the UniProt server frequently closes the connection running it\n\n Parameters\n ----------\n output_file : str\n Path at which to store sequence file\n \"\"\"\n # fetch all the sequences\n seqs = fetch_uniprot_mapping(\n self.table.uniprot_ac.unique().tolist()\n )\n\n # then store to FASTA file\n with open(output_file, \"w\") as f:\n f.write(seqs)\n\n self.sequence_file = output_file\n\n def _finalize_hits(self, hit_segments):\n \"\"\"\n Create final hit/mapping record from\n table of segments in PDB chains in\n SIFTS file.\n\n Parameters\n ----------\n hit_segments : pd.DataFrame\n Subset of self.table that will be\n turned into final mapping record\n\n Returns\n -------\n SIFTSResult\n Identified hits plus index mappings\n to Uniprot\n \"\"\"\n # compile final set of hits\n hits = []\n\n # compile mapping from Uniprot to seqres for\n # each final hit\n mappings = {}\n\n # go through all SIFTS segments per PDB chain\n for i, ((pdb_id, pdb_chain), chain_grp) in enumerate(\n hit_segments.groupby([\"pdb_id\", \"pdb_chain\"])\n ):\n # put segments together in one segment-based\n # mapping for chain; this will be used by pdb.Chain.remap()\n mapping = {\n (r[\"resseq_start\"], r[\"resseq_end\"]): (r[\"uniprot_start\"], r[\"uniprot_end\"])\n for j, r in chain_grp.iterrows()\n }\n\n # append current hit and mapping\n hits.append([pdb_id, pdb_chain, i])\n mappings[i] = mapping\n\n # create final hit representation as DataFrame\n hits_df = pd.DataFrame(\n hits, columns=[\"pdb_id\", \"pdb_chain\", \"mapping_index\"]\n )\n\n return SIFTSResult(hits_df, mappings)\n\n def by_pdb_id(self, pdb_id, pdb_chain=None, uniprot_id=None):\n \"\"\"\n Find structures and mapping by PDB id\n and chain name\n\n Parameters\n ----------\n pdb_id : str\n 4-letter PDB identifier\n pdb_chain : str, optional (default: None)\n PDB chain name (if not given, all\n chains for PDB entry will be returned)\n uniprot_id : str, optional (default: None)\n Filter to keep only this Uniprot accession\n number or identifier (necessary for chimeras,\n or multi-chain complexes with different proteins)\n\n Returns\n -------\n SIFTSResult\n Identified hits plus index mappings\n to Uniprot\n\n Raises\n ------\n ValueError\n If selected segments in PDB file do\n not unambigously map to one Uniprot\n entry\n \"\"\"\n pdb_id = pdb_id.lower()\n query = \"pdb_id == @pdb_id\"\n\n # filter by PDB chain if selected\n if pdb_chain is not None:\n query += \" and pdb_chain == @pdb_chain\"\n\n # filter by UniProt AC/ID if selected\n # (to remove chimeras)\n if uniprot_id is not None:\n if \"uniprot_id\" in self.table.columns:\n query += (\" and (uniprot_ac == @uniprot_id or \"\n \"uniprot_id == @uniprot_id)\")\n else:\n query += \" and uniprot_ac == @uniprot_id\"\n\n x = self.table.query(query)\n\n # check we only have one protein (might not\n # be the case with multiple chains, or with\n # chimeras)\n if len(x.uniprot_ac.unique()) > 1:\n id_list = \", \".join(x.uniprot_ac.unique())\n\n if \"uniprot_id\" in self.table.columns:\n id_list += \" or \" + \", \".join(x.uniprot_id.unique())\n\n raise ValueError(\n \"Multiple Uniprot sequences on chains, \"\n \"please disambiguate using uniprot_id \"\n \"parameter: {}\".format(id_list)\n )\n\n # create hit and mapping result\n return self._finalize_hits(x)\n\n def 
by_uniprot_id(self, uniprot_id, reduce_chains=False):\n \"\"\"\n Find structures and mapping by Uniprot\n access number.\n\n Parameters\n ----------\n uniprot_ac : str\n Find PDB structures for this Uniprot accession\n number. If sequence_file was given while creating\n the SIFTS object, Uniprot identifiers can also be\n used.\n reduce_chains : bool, optional (Default: True)\n If true, keep only first chain per PDB ID\n (i.e. remove redundant occurrences of same\n protein in PDB structures). Should be set to\n False to identify homomultimeric contacts.\n\n Returns\n -------\n SIFTSResult\n Record of hits and mappings found for this\n Uniprot protein. See by_pdb_id() for detailed\n explanation of fields.\n \"\"\"\n query = \"uniprot_ac == @uniprot_id\"\n\n if \"uniprot_id\" in self.table.columns:\n query += \" or uniprot_id == @uniprot_id\"\n\n x = self.table.query(query)\n\n hit_table = self._finalize_hits(x)\n\n # only retain one chain if this option is active\n if reduce_chains:\n hit_table.hits = hit_table.hits.groupby(\n \"pdb_id\"\n ).first().reset_index()\n\n return hit_table\n\n def by_alignment(self, min_overlap=20, reduce_chains=False, **kwargs):\n \"\"\"\n Find structures by sequence alignment between\n query sequence and sequences in PDB.\n\n Parameters\n ----------\n min_overlap : int, optional (default: 20)\n Require at least this many aligned positions\n with the target structure\n reduce_chains : bool, optional (Default: True)\n If true, keep only first chain per PDB ID\n (i.e. remove redundant occurrences of same\n protein in PDB structures). Should be set to\n False to identify homomultimeric contacts.\n **kwargs\n Defines the behaviour of find_homologs() function\n used to find homologs by sequence alignment:\n - which alignment method is used \n (pdb_alignment_method: {\"jackhmmer\", \"hmmsearch\"}, \n default: \"jackhmmer\"),\n - parameters passed into the protocol for the selected\n alignment method (evcouplings.align.jackhmmer_search or\n evcouplings.align.hmmbuild_and_search).\n \n Default parameters are set in the HMMER_CONFIG string in this\n module, other parameters will need to be overriden; these\n minimally are:\n - for pdb_alignment_method == \"jackhmmer\":\n - sequence_id : str, identifier of target sequence\n - jackhmmer : str, path to jackhmmer binary if not on path \n - for pdb_alignment_method == \"hmmsearch\":\n - sequence_id : str, identifier of target sequence\n - raw_focus_alignment_file : str, path to input alignment file \n - hmmbuild : str, path to hmmbuild binary if not on path\n - hmmsearch : str, path to search binary if not on path\n - additionally, if \"prefix\" is given,\n individual mappings will be saved to files suffixed\n by the respective key in mapping table.\n\n Returns\n -------\n SIFTSResult\n Record of hits and mappings found for this\n query sequence by alignment. 
See by_pdb_id()\n for detailed explanation of fields.\n \"\"\"\n def _create_mapping(r):\n _, query_start, query_end = parse_header(ali.ids[0])\n\n # create mapping from query into PDB Uniprot sequence\n # A_i will be query sequence indices, A_j Uniprot sequence indices\n m = map_indices(\n ali[0], query_start, query_end,\n ali[r[\"alignment_id\"]], r[\"alignment_start\"], r[\"alignment_end\"]\n )\n\n # create mapping from PDB Uniprot into seqres numbering\n # j will be Uniprot sequence index, k seqres index\n n = pd.DataFrame(\n {\n \"j\": list(range(r[\"uniprot_start\"], r[\"uniprot_end\"] + 1)),\n \"k\": list(range(r[\"resseq_start\"], r[\"resseq_end\"] + 1)),\n }\n )\n\n # need to convert to strings since other mapping has indices as strings\n n.loc[:, \"j\"] = n.j.astype(str)\n n.loc[:, \"k\"] = n.k.astype(str)\n\n # join over Uniprot indices (i.e. j);\n # get rid of any position that is not aligned\n mn = m.merge(n, on=\"j\", how=\"inner\").dropna()\n\n # extract final mapping from seqres (k) to query (i)\n map_ = dict(\n zip(mn.k, mn.i)\n )\n\n return map_, mn\n\n if self.sequence_file is None:\n raise ValueError(\n \"Need to have SIFTS sequence file. \"\n \"Create using create_sequence_file() \"\n \"method or constructor.\"\n )\n\n ali, hits = find_homologs(\n sequence_database=self.sequence_file, \n **kwargs\n )\n\n # merge with internal table to identify overlap of\n # aligned regions and regions with structural coverage\n hits = hits.merge(\n self.table, on=\"uniprot_ac\", suffixes=(\"\", \"_\")\n )\n\n # add 1 to end of range since overlap function treats\n # ends as exclusive, while ends here are inclusive\n hits.loc[:, \"overlap\"] = [\n range_overlap(\n (r[\"uniprot_start\"], r[\"uniprot_end\"] + 1),\n (r[\"alignment_start\"], r[\"alignment_end\"] + 1)\n ) for i, r in hits.iterrows()\n ]\n\n # collect complete index mappings in here...\n mappings = {}\n # ... 
as well as dataframe rows for assignment of hit to mapping\n mapping_rows = []\n\n # complication: if there are multiple segments per hit and chain, we should\n # reduce these into a single mapping (even though split mappings\n # are possible in principle) so we can count unique number of hits etc.\n hit_columns = [\"alignment_id\", \"pdb_id\", \"pdb_chain\"]\n for i, (hit, grp) in enumerate(\n hits.groupby(hit_columns)\n ):\n agg_mapping = {}\n agg_df = pd.DataFrame()\n # go through each segment\n for j, r in grp.iterrows():\n # compute mapping for that particular segment\n map_j, map_j_df = _create_mapping(r)\n\n # add to overall mapping dictionary for this hit\n agg_mapping.update(map_j)\n agg_df = agg_df.append(map_j_df)\n\n # store assignment of group to mapping index\n mapping_rows.append(\n list(hit) + [i, len(grp) > 1]\n )\n\n mappings[i] = agg_mapping\n\n # store index mappings if filename prefix is given\n prefix = kwargs.get(\"prefix\", None)\n if prefix is not None:\n agg_df = agg_df.rename(\n columns={\n \"j\": \"uniprot_of_pdb_index\",\n \"A_j\": \"uniprot_of_pdb_residue\",\n \"k\": \"pdb_seqres_index\",\n }\n )\n\n agg_df.to_csv(\n \"{}_mapping{}.csv\".format(prefix, i), index=False\n )\n\n # create dataframe from mapping rows\n mapping_df = pd.DataFrame(\n mapping_rows, columns=hit_columns + [\n \"mapping_index\", \"grouped_segments\",\n ]\n )\n\n # now group again, to aggregate full hit dataframe\n def _agg_type(x):\n if x == \"overlap\":\n return \"sum\"\n elif x.endswith(\"_start\"):\n return \"min\"\n elif x.endswith(\"end\"):\n return \"max\"\n else:\n return \"first\"\n\n agg_types = OrderedDict(\n [(c, _agg_type(c)) for c in hits.columns\n if c not in hit_columns]\n )\n\n # only aggregate if we have anything to aggregate,\n # otherwise pandas drops the index columns\n # alignment_id, pdb_id, pdb_chain and things go\n # wrong horribly in the following join\n if len(hits) > 0:\n hits_grouped = hits.groupby(\n hit_columns\n ).agg(agg_types).reset_index()\n else:\n hits_grouped = hits\n\n # join with mapping information\n hits_grouped = hits_grouped.merge(\n mapping_df, on=hit_columns\n )\n\n # remove hits with too little residue coverage\n hits_grouped = hits_grouped.query(\"overlap >= @min_overlap\")\n\n hits_grouped.loc[:, \"bitscore\"] = pd.to_numeric(\n hits_grouped.loc[:, \"bitscore\"], errors=\"coerce\"\n )\n hits_grouped = hits_grouped.sort_values(by=\"bitscore\", ascending=False)\n\n # if requested, only keep one chain per PDB;\n # sort by score before this to keep best hit\n if reduce_chains:\n hits_grouped = hits_grouped.groupby(\"pdb_id\").first().reset_index()\n # sort again, just to be sure...\n hits_grouped = hits_grouped.sort_values(by=\"bitscore\", ascending=False)\n\n # remove any zombie mappings we did not keep in table\n mappings = {\n idx: map_ for idx, map_ in mappings.items()\n if idx in hits_grouped.mapping_index.values\n }\n\n return SIFTSResult(hits_grouped, mappings)\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
],
[
"numpy.nanmax",
"numpy.abs",
"numpy.isfinite",
"numpy.isnan",
"numpy.nanmin",
"numpy.percentile",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"numpy.array",
"numpy.meshgrid",
"pandas.to_numeric",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.pyplot.figure"
],
[
"pandas.read_csv",
"pandas.to_numeric",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
srkasuMsft/MLOpsTemplate | [
"0c90ed954c553a3936ecb882cbf35dfd03e14e9d"
] | [
"src/workshop/core/scoring/batch_score.py"
] | [
"\nimport os\nimport tempfile\nimport logging\nfrom azureml.core.model import Model\nimport pickle\nimport pandas as pd\nfrom azureml.core import Run\nimport os\nimport mlflow\n\ndef init():\n global model\n model_dir =os.getenv('AZUREML_MODEL_DIR')\n model_file = os.listdir(model_dir)[0]\n model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_file)\n model = mlflow.sklearn.load_model(model_path)\n\ndef run(mini_batch):\n print(f\"run method start: {__file__}, run({mini_batch})\")\n resultList = []\n\n \n # Set up logging\n\n for batch in mini_batch:\n # prepare each image\n data = pd.read_json(batch)\n predictions = model.predict(data)\n data[\"prediction\"] =predictions\n resultList.append(data)\n result = pd.concat(resultList)\n\n return result\n"
] | [
[
"pandas.concat",
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
bayesxl/PB2 | [
"54b94dc2ebae488ea5e2bf5250a9d10b89011852"
] | [
"run_ppo.py"
] | [
"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport argparse\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime\n\nimport ray\nfrom ray.tune import run, sample_from\nfrom ray.tune.schedulers import PopulationBasedTraining, AsyncHyperBandScheduler\n\nfrom pb2 import PB2\n\n# Postprocess the perturbed config to ensure it's still valid\ndef explore(config):\n # ensure we collect enough timesteps to do sgd\n if config[\"train_batch_size\"] < config[\"sgd_minibatch_size\"] * 2:\n config[\"train_batch_size\"] = config[\"sgd_minibatch_size\"] * 2\n # ensure we run at least one sgd iter\n if config[\"lambda\"] > 1:\n config[\"lambda\"] = 1\n config[\"train_batch_size\"] = int(config[\"train_batch_size\"])\n return config\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--max\", type=int, default=1000000)\n parser.add_argument(\"--algo\", type=str, default='PPO')\n parser.add_argument(\"--num_workers\", type=int, default=4)\n parser.add_argument(\"--num_samples\", type=int, default=4)\n parser.add_argument(\"--freq\", type=int, default=50000)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--horizon\", type=int, default=1600) # make this 1000 for other envs\n parser.add_argument(\"--perturb\", type=float, default=0.25)\n parser.add_argument(\"--env_name\", type=str, default=\"BipedalWalker-v2\")\n parser.add_argument(\"--criteria\", type=str, default=\"timesteps_total\") # \"training_iteration\"\n parser.add_argument(\"--net\", type=str, default=\"32_32\") # didn't play with this, but may be important for bigger tasks\n parser.add_argument(\"--batchsize\", type=str, default=\"1000_60000\")\n parser.add_argument(\"--num_sgd_iter\", type=int, default=10)\n parser.add_argument(\"--sgd_minibatch_size\", type=int, default=128)\n parser.add_argument(\"--use_lstm\", type=int, default=0) # for future, not used\n parser.add_argument(\"--filename\", type=str, default=\"\")\n parser.add_argument(\"--method\", type=str, default=\"pb2\") # ['pbt', 'pb2', 'asha']\n \n args = parser.parse_args()\n ray.init()\n \n args.dir = \"{}_{}_{}_Size{}_{}_{}\".format(args.algo, args.filename, args.method, str(args.num_samples), args.env_name, args.criteria)\n if not(os.path.exists('data/'+args.dir)):\n os.makedirs('data/'+args.dir)\n\n pbt = PopulationBasedTraining(\n time_attr= args.criteria,\n metric=\"episode_reward_mean\",\n mode=\"max\",\n perturbation_interval=args.freq,\n resample_probability=args.perturb,\n quantile_fraction = args.perturb, # copy bottom % with top %\n # Specifies the mutations of these hyperparams\n hyperparam_mutations={\n \"lambda\": lambda: random.uniform(0.9, 1.0),\n \"clip_param\": lambda: random.uniform(0.1, 0.5),\n \"lr\": lambda: random.uniform(1e-3, 1e-5),\n \"train_batch_size\": lambda: random.randint(int(args.batchsize.split(\"_\")[0]), int(args.batchsize.split(\"_\")[1])),\n },\n custom_explore_fn=explore)\n \n pb2 = PB2(\n time_attr= args.criteria,\n metric=\"episode_reward_mean\",\n mode=\"max\",\n perturbation_interval=args.freq,\n resample_probability=0,\n quantile_fraction = args.perturb, # copy bottom % with top %\n # Specifies the mutations of these hyperparams\n hyperparam_mutations={\n \"lambda\": lambda: random.uniform(0.9, 1.0),\n \"clip_param\": lambda: random.uniform(0.1, 0.5),\n \"lr\": lambda: random.uniform(1e-3, 1e-5),\n \"train_batch_size\": lambda: 
random.randint(int(args.batchsize.split(\"_\")[0]), int(args.batchsize.split(\"_\")[1])),\n },\n custom_explore_fn=explore)\n\n asha = AsyncHyperBandScheduler(\n time_attr=args.criteria,\n metric=\"episode_reward_mean\",\n mode=\"max\",\n grace_period=args.freq,\n max_t=args.max)\n \n \n methods = {'pbt': pbt,\n 'pb2': pb2,\n 'asha': asha}\n \n timelog = str(datetime.date(datetime.now())) + '_' + str(datetime.time(datetime.now()))\n \n analysis = run(\n args.algo,\n name=\"{}_{}_{}_seed{}_{}\".format(timelog, args.method, args.env_name, str(args.seed), args.filename),\n scheduler=methods[args.method],\n verbose=1,\n num_samples= args.num_samples,\n stop= {args.criteria: args.max},\n config= {\n \"env\": args.env_name,\n \"log_level\": \"INFO\",\n \"seed\": args.seed,\n \"kl_coeff\": 1.0,\n #\"monitor\": True, uncomment this for videos... it may slow it down a LOT, but hey :)\n \"num_gpus\": 0,\n \"horizon\": args.horizon,\n \"observation_filter\": \"MeanStdFilter\",\n \"model\": {'fcnet_hiddens': [int(args.net.split('_')[0]),int(args.net.split('_')[1])],\n 'free_log_std': True,\n 'use_lstm': args.use_lstm\n },\n \"num_sgd_iter\":args.num_sgd_iter,\n \"sgd_minibatch_size\":args.sgd_minibatch_size,\n \"lambda\": sample_from(\n lambda spec: random.uniform(0.9, 1.0)),\n \"clip_param\": sample_from(\n lambda spec: random.uniform(0.1, 0.5)),\n \"lr\": sample_from(\n lambda spec: random.uniform(1e-3, 1e-5)), \n \"train_batch_size\": sample_from(\n lambda spec: random.choice([1000 * i for i in range(int(int(args.batchsize.split(\"_\")[0])/1000), int(int(args.batchsize.split(\"_\")[1])/1000))]))\n }\n )\n \n all_dfs = analysis.trial_dataframes\n names = list(all_dfs.keys())\n \n results = pd.DataFrame() \n for i in range(args.num_samples):\n df = all_dfs[names[i]]\n df = df[['timesteps_total', 'time_total_s','episodes_total', 'episode_reward_mean', 'info/learner/default_policy/cur_kl_coeff']]\n df['Agent'] = i\n results = pd.concat([results, df]).reset_index(drop=True)\n \n results.to_csv(\"data/{}/seed{}.csv\".format(args.dir, str(args.seed)))\n\n\n"
] | [
[
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
MerkleBros/generate-ascii-art-from-photographs | [
"966f83737d32bd7cd8858e94ac0d2b1aef24e676"
] | [
"services/image-to-ascii-api/generate_ascii_post.py"
] | [
"import base64\nimport io\nimport os\nimport json\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom colour import Color\n\ndef generate_ascii_post(event, context):\n\n try:\n\n print(\"## ENVIRONMENT\")\n print(os.environ)\n print(\"## EVENT\")\n print(event)\n\n response = {\n \"statusCode\": 500,\n \"isBase64Encoded\": True,\n \"headers\": {'Content-Type': 'application/json'},\n \"body\": \"\"\n }\n # body = json.loads(event[\"body\"])\n # input_file = body[\"input_file\"]\n # HORIZONTAL_SAMPLING_RATE = body[\"HORIZONTAL_SAMPLING_RATE\"]\n # GCF = body[\"GCF\"]\n # output_file = body[\"output_file\"]\n # color1 = body[\"color1\"]\n # color2 = body[\"color2\"]\n # bgcolor = body[\"bgcolor\"]\n\n\n # TODO: Remove, hard coded to see if function works\n print(\"## RETRIEVING INPUT FILE\")\n\n input_file = event[\"body\"]\n\n print(input_file)\n\n HORIZONTAL_SAMPLING_RATE = 0.1\n GCF = 1\n color1 = \"black\"\n color2 = \"black\"\n bgcolor = \"white\"\n\n # The array of ascii symbols from white to black\n chars = np.asarray(list(' .,:irs?@9B&#'))\n\n # Load the fonts and then get the the height and width of a typical symbol\n # You can use different fonts here\n font = ImageFont.load_default()\n letter_width = font.getsize(\"x\")[0]\n letter_height = font.getsize(\"x\")[1]\n\n height_width_ratio = letter_height/letter_width\n\n #open the input file\n print(\"## BASE64 DECODING THE INPUT FILE\")\n message = base64.b64decode(input_file)\n print(message)\n print(\"## IMAGE FILE TO BUFFER\")\n buffer = io.BytesIO(message)\n buffer.seek(0)\n print(\"## GET IMAGE FROM BUFFER\")\n img = Image.open(buffer)\n\n #Calculate how many ascii letters are needed on the width and height\n width_by_letter = round(img.size[0]*HORIZONTAL_SAMPLING_RATE*height_width_ratio)\n height_by_letter = round(img.size[1]*HORIZONTAL_SAMPLING_RATE)\n letter_size = (width_by_letter, height_by_letter)\n\n #Resize the image based on the symbol width and height\n print(\"## RESIZING IMAGE\")\n img = img.resize(letter_size)\n\n #Get the RGB color values of each sampled pixel and convert them to graycolor using average.\n #https://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/\n img = np.sum(np.asarray(img), axis=2)\n\n # Normalize the results, enhance and reduce the brightness contrast.\n # Map grayscale values to bins of symbols\n img -= img.min()\n img = (1.0 - img/img.max())**GCF*(chars.size-1)\n\n # Generate the ascii art symbols\n lines = (\"\\n\".join((\"\".join(r) for r in chars[img.astype(int)]))).split(\"\\n\")\n\n # Create gradient color bins\n nbins = len(lines)\n color_range = list(Color(color1).range_to(Color(color2), nbins))\n\n #Create an image object, set its width and height\n new_image_width = letter_width *width_by_letter\n new_image_height = letter_height * height_by_letter\n new_image = Image.new(\"RGBA\", (new_image_width, new_image_height), bgcolor)\n draw = ImageDraw.Draw(new_image)\n\n # Print symbols to image\n left_padding = 0\n y = 0\n line_index = 0\n for line in lines:\n color = color_range[line_index]\n line_index += 1\n\n draw.text((left_padding, y), line, color.hex, font=font)\n y += letter_height\n\n print(\"## FINISHED PRINTING ASCII IMAGE\")\n\n # Save the image file\n print(\"## RETRIEVING IMAGE FROM BUFFER\")\n buffered = io.BytesIO()\n print(\"## SAVING IMAGE as PNG\")\n new_image.save(buffered, format=\"PNG\")\n print(\"## BASE 64 ENCODING IMAGE\")\n image_string = base64.b64encode(buffered.getvalue()).decode('ascii')\n\n print(\"## base64 
image_string:\")\n print(image_string)\n\n response = {\n \"statusCode\": 200,\n \"isBase64Encoded\": True,\n \"headers\": {'Content-Type': 'image/png'},\n \"body\": image_string\n }\n\n print(\"## response:\")\n print(response)\n\n return response\n\n except Exception as err:\n\n print(\"## ERROR\")\n print(\"Error: {}\".format(err))\n\n response[\"body\"] = \"Error {}\".format(err)\n return response\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zjzh/vega | [
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8",
"aa6e7b8c69024262fc483ee06113b4d1bd5156d8"
] | [
"vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py",
"vega/networks/pytorch/customs/modnas/arch_space/torch/resnet.py",
"vega/algorithms/nas/modnas/contrib/callback/metrics_stats.py",
"vega/algorithms/nas/cars/utils.py",
"vega/networks/pytorch/losses/smooth_l1_loss.py",
"vega/trainer/modules/optimizer/optimizer.py",
"vega/algorithms/nas/auto_lane/utils/listdict.py",
"vega/networks/pytorch/blocks/conv_ws.py"
] | [
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Torch constructors.\"\"\"\nimport torch\nfrom modnas.registry.construct import register\nfrom modnas.arch_space.slot import Slot\nfrom modnas.arch_space import ops\nfrom modnas.core.param_space import ParamSpace\nfrom modnas.utils.logging import get_logger\nfrom modnas import backend\n\n\nlogger = get_logger('construct')\n\n\ndef parse_device(device):\n \"\"\"Return device ids from config.\"\"\"\n if isinstance(device, int):\n device = str(device)\n if not isinstance(device, str):\n return []\n device = device.lower()\n if device in ['cpu', 'nil', 'none']:\n return []\n if device == 'all':\n return list(range(torch.cuda.device_count()))\n else:\n return [int(s) for s in device.split(',')]\n\n\ndef configure_ops(new_config):\n \"\"\"Set global operator config.\"\"\"\n config = ops.config\n config.update(new_config)\n if isinstance(config.ops_order, str):\n config.ops_order = config.ops_order.split('_')\n if config.ops_order[-1] == 'bn':\n config.conv.bias = False\n if config.ops_order[0] == 'act':\n config.act.inplace = False\n logger.info('ops config: {}'.format(config.to_dict()))\n\n\n@register\nclass TorchInitConstructor():\n \"\"\"Constructor that initializes the architecture space.\"\"\"\n\n def __init__(self, seed=None, device=None, ops_conf=None):\n self.seed = seed\n self.device = device\n self.ops_conf = ops_conf\n\n def __call__(self, model):\n \"\"\"Run constructor.\"\"\"\n Slot.reset()\n ParamSpace().reset()\n seed = self.seed\n if seed:\n backend.init_device(self.device, seed)\n configure_ops(self.ops_conf or {})\n return model\n\n\n@register\nclass TorchToDevice():\n \"\"\"Constructor that moves model to some device.\"\"\"\n\n def __init__(self, device='all', data_parallel=True):\n device_ids = parse_device(device) or [None]\n self.device_ids = device_ids\n self.data_parallel = data_parallel\n\n def __call__(self, model):\n \"\"\"Run constructor.\"\"\"\n if model is None:\n return\n device_ids = self.device_ids\n backend.set_device(device_ids[0])\n if device_ids[0] is not None:\n torch.cuda.set_device(device_ids[0])\n model.to(device=device_ids[0])\n if self.data_parallel and len(device_ids) > 1:\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n return model\n\n\n@register\nclass TorchCheckpointLoader():\n \"\"\"Constructor that loads model checkpoints.\"\"\"\n\n def __init__(self, path):\n logger.info('Loading torch checkpoint from {}'.format(path))\n self.chkpt = torch.load(path)\n\n def __call__(self, model):\n \"\"\"Run constructor.\"\"\"\n model.load_state_dict(self.chkpt)\n return model\n",
"# -*- coding:utf-8 -*-\n\n# This file is adapted from the torchvision library at\n# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\n# 2020.6.29-Changed for Modular-NAS search space.\n# Huawei Technologies Co., Ltd. <[email protected]>\n# Copyright 2020 Huawei Technologies Co., Ltd.\n\n\"\"\"ResNet architectures.\"\"\"\n\nfrom functools import partial\nimport torch.nn as nn\nfrom modnas.registry.construct import DefaultSlotTraversalConstructor\nfrom modnas.registry.construct import register as register_constructor\nfrom modnas.registry.arch_space import register\nfrom ..ops import Identity\nfrom ..slot import Slot\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1):\n \"\"\"Return 3x3 convolution with padding.\"\"\"\n return Slot(_chn_in=in_planes, _chn_out=out_planes, _stride=stride, groups=groups)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"Return 1x1 convolution.\"\"\"\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_planes))\n\n\nclass BasicBlock(nn.Module):\n \"\"\"Basic Block class.\"\"\"\n\n expansion = 1\n chn_init = 16\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=None, norm_layer=None):\n super(BasicBlock, self).__init__()\n del base_width\n self.conv1 = conv3x3(inplanes, planes, stride, groups)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=False)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n \"\"\"Compute network output.\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n \"\"\"Bottleneck block class.\"\"\"\n\n expansion = 4\n chn_init = 16\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=None, norm_layer=None):\n super(Bottleneck, self).__init__()\n width = int(planes * (1. 
* base_width / self.chn_init)) * groups\n self.conv1 = conv1x1(inplanes, width)\n self.conv2 = conv3x3(width, width, stride, groups)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.relu = nn.ReLU(inplace=False)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n \"\"\"Compute network output.\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"\"\"ResNet architecture class.\"\"\"\n\n def __init__(self,\n chn_in,\n chn,\n block,\n layers,\n n_classes,\n zero_init_residual=False,\n groups=1,\n width_per_group=None,\n use_bn=False,\n expansion=None):\n super(ResNet, self).__init__()\n if use_bn:\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = Identity\n self.use_bn = use_bn\n if expansion is not None:\n block.expansion = expansion\n block.chn_init = chn\n\n self.chn = chn\n self.groups = groups\n self.base_width = chn // groups if width_per_group is None else width_per_group\n self.conv1 = self.get_stem(chn_in, chn, nn.BatchNorm2d)\n\n self.layers = nn.Sequential(*[\n self._make_layer(block, (2**i) * chn, layers[i], stride=(1 if i == 0 else 2), norm_layer=norm_layer)\n for i in range(len(layers))\n ])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(self.chn, n_classes)\n self.zero_init_residual = zero_init_residual\n\n def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):\n downsample = None\n if stride != 1 or self.chn != planes * block.expansion:\n downsample = nn.Sequential(conv1x1(\n self.chn,\n planes * block.expansion,\n stride,\n ), )\n\n layers = []\n layers.append(block(self.chn, planes, stride, downsample, self.groups, self.base_width, norm_layer=norm_layer))\n self.chn = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.chn, planes, 1, None, self.groups, self.base_width, norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"Compute network output.\"\"\"\n x = self.conv1(x)\n\n x = self.layers(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\n@register_constructor\nclass ResNetPredefinedConstructor(DefaultSlotTraversalConstructor):\n \"\"\"ResNet original network constructor.\"\"\"\n\n def __init__(self, use_bn=False):\n super().__init__()\n self.use_bn = use_bn\n\n def convert(self, slot):\n \"\"\"Convert slot to module.\"\"\"\n return nn.Sequential(\n nn.Conv2d(slot.chn_in, slot.chn_out, 3, stride=slot.stride, padding=1, bias=False, **slot.kwargs),\n nn.BatchNorm2d(slot.chn_out) if self.use_bn else Identity(),\n )\n\n\nclass ImageNetResNet(ResNet):\n \"\"\"ResNet for ImageNet dataset.\"\"\"\n\n def get_stem(self, chn_in, chn, norm_layer):\n \"\"\"Return stem layers.\"\"\"\n return nn.Sequential(\n nn.Conv2d(chn_in, chn, kernel_size=7, stride=2, padding=3, bias=False),\n norm_layer(chn),\n nn.ReLU(inplace=False),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n )\n\n\nclass CIFARResNet(ResNet):\n \"\"\"ResNet for CIFAR dataset.\"\"\"\n\n def get_stem(self, chn_in, chn, norm_layer):\n \"\"\"Return stem layers.\"\"\"\n return nn.Sequential(\n nn.Conv2d(chn_in, chn, kernel_size=3, stride=1, padding=1, bias=False),\n norm_layer(chn),\n nn.ReLU(inplace=False),\n )\n\n\ndef resnet10(resnet_cls, 
**kwargs):\n \"\"\"Construct a ResNet-10 model.\"\"\"\n return resnet_cls(block=BasicBlock, layers=[1, 1, 1, 1], **kwargs)\n\n\ndef resnet18(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-18 model.\"\"\"\n return resnet_cls(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)\n\n\ndef resnet32(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-32 model.\"\"\"\n return resnet_cls(block=BasicBlock, layers=[5, 5, 5], **kwargs)\n\n\ndef resnet34(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-34 model.\"\"\"\n return resnet_cls(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)\n\n\ndef resnet50(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-50 model.\"\"\"\n return resnet_cls(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)\n\n\ndef resnet56(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-56 model.\"\"\"\n return resnet_cls(block=BasicBlock, layers=[9, 9, 9], **kwargs)\n\n\ndef resnet101(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-101 model.\"\"\"\n return resnet_cls(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)\n\n\ndef resnet110(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-110 model.\"\"\"\n return resnet_cls(block=BasicBlock, layers=[18, 18, 18], **kwargs)\n\n\ndef resnet152(resnet_cls, **kwargs):\n \"\"\"Construct a ResNet-152 model.\"\"\"\n return resnet_cls(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)\n\n\ndef resnext50_32x4d(resnet_cls, **kwargs):\n \"\"\"Construct a ResNeXt-50 32x4d model.\"\"\"\n return resnet_cls(block=Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4, **kwargs)\n\n\ndef resnext101_32x8d(resnet_cls, **kwargs):\n \"\"\"Construct a ResNeXt-50 32x8d model.\"\"\"\n return resnet_cls(block=Bottleneck, layers=[3, 4, 23, 3], groups=32, width_per_group=8, **kwargs)\n\n\ndef resnet(resnet_cls, bottleneck=False, **kwargs):\n \"\"\"Construct a ResNet model.\"\"\"\n block = Bottleneck if bottleneck else BasicBlock\n return resnet_cls(block=block, **kwargs)\n\n\nfor net_cls in [CIFARResNet, ImageNetResNet]:\n name = 'CIFAR-' if net_cls == CIFARResNet else 'ImageNet-'\n register(partial(resnet10, net_cls), name + 'ResNet-10')\n register(partial(resnet18, net_cls), name + 'ResNet-18')\n register(partial(resnet32, net_cls), name + 'ResNet-32')\n register(partial(resnet34, net_cls), name + 'ResNet-34')\n register(partial(resnet50, net_cls), name + 'ResNet-50')\n register(partial(resnet56, net_cls), name + 'ResNet-56')\n register(partial(resnet101, net_cls), name + 'ResNet-101')\n register(partial(resnet152, net_cls), name + 'ResNet-152')\n register(partial(resnext50_32x4d, net_cls), name + 'ResNeXt-50')\n register(partial(resnext101_32x8d, net_cls), name + 'ResNeXt-101')\n register(partial(resnet, net_cls), name + 'ResNet')\n",
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Metrics statistics reporter.\"\"\"\nimport itertools\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\nfrom typing import Dict, List, Tuple, Optional, Any\nfrom modnas.registry.callback import register\nfrom modnas.callback.base import CallbackBase\nfrom modnas.estim.base import EstimBase\nfrom modnas.optim.base import OptimBase\nfrom vega.common import FileOps\n\nplt.switch_backend('Agg')\n\n\n@register\nclass MetricsStatsReporter(CallbackBase):\n \"\"\"Metrics statistics reporter class.\"\"\"\n\n def __init__(self, axis_list: List[Tuple[int, int]] = None) -> None:\n super().__init__({\n 'after:EstimBase.step_done': self.on_step_done,\n 'after:EstimBase.run': self.save_stats,\n })\n self.results = []\n self.axis_list = axis_list\n\n def on_step_done(\n self, ret: Dict[str, bool], estim: EstimBase, params: Optional[OrderedDict],\n value: Dict[str, float], arch_desc: Optional[Any] = None\n ) -> None:\n \"\"\"Record Estimator evaluation result on each step.\"\"\"\n self.results.append((params, value))\n\n def save_stats(self, ret: Dict[str, Any], estim: EstimBase, optim: OptimBase) -> Dict[str, Any]:\n \"\"\"Save statistics on search end.\"\"\"\n results = self.results\n if not results:\n return\n axis_list = self.axis_list\n if axis_list is None:\n metrics = list(results[0][1].keys())\n axis_list = list(itertools.combinations(metrics, r=2))\n self.logger.info('metrics stats: {} axis: {}'.format(len(results), axis_list))\n for i, axis in enumerate(axis_list):\n plt.figure(i)\n axis_str = '-'.join(axis)\n plt.title('metrics: {}'.format(axis_str))\n values = [[res[1][ax] for res in results] for ax in axis]\n plt.scatter(values[0], values[1])\n plt.xlabel(axis[0])\n plt.ylabel(axis[1])\n plt.savefig(estim.expman.join('plot', 'metrics_{}.png'.format(axis_str)))\n result_path = estim.expman.join('output', 'metrics_results.pkl')\n FileOps.dump_pickle(results, result_path)\n self.logger.info('metrics results saved to {}'.format(result_path))\n self.results = []\n return ret\n",
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Util functions.\"\"\"\nimport numpy as np\nimport vega\n\n\nclass AverageMeter(object):\n \"\"\"This is a meter class to calculate average values.\"\"\"\n\n def __init__(self):\n \"\"\"Construct method.\"\"\"\n self.reset()\n\n def reset(self):\n \"\"\"Reset the meter.\"\"\"\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n \"\"\"Update the meter.\"\"\"\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef eval_model_parameters(model):\n \"\"\"Calculate number of parameters in million (M) for a model.\n\n :param model: A model\n :type model: nn.Module\n :return: The number of parameters\n :rtype: Float\n \"\"\"\n if vega.is_torch_backend():\n return np.sum(v.numel() for name, v in model.named_parameters() if \"auxiliary\" not in name) / 1e6\n elif vega.is_tf_backend():\n import tensorflow as tf\n tf.compat.v1.reset_default_graph()\n dummy_input = tf.compat.v1.placeholder(\n dtype=tf.float32,\n shape=[1, 32, 32, 3] if model.data_format == 'channels_last' else [1, 3, 32, 32])\n model.training = True\n model(dummy_input)\n all_weight = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n weight_op = [t for t in all_weight if \"auxiliary\" not in t.name]\n return np.sum([np.prod(t.get_shape().as_list()) for t in weight_op]) * 1e-6\n elif vega.is_ms_backend():\n return 0\n",
"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Smooth L1 Loss.\"\"\"\nimport torch\nfrom vega.modules.module import Module\nfrom vega.common import ClassType, ClassFactory\nfrom .reduce_loss import weighted_loss\n\n\n@weighted_loss\ndef smooth_l1_loss(pred, target, beta=1.0):\n \"\"\"Smooth l1 loss.\n\n :param pred: predict\n :param target: target\n :param beta: beta\n :return: loss\n \"\"\"\n if beta > 0 and pred.size() == target.size() and target.numel() > 0:\n diff = torch.abs(pred - target)\n loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)\n return loss\n else:\n raise ValueError('Failed to calculate smooth l1 loss.')\n\n\[email protected](ClassType.NETWORK)\nclass SmoothL1Loss(Module):\n \"\"\"Smooth L1 Loss.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init smooth l1 loss.\n\n :param desc: config dict\n \"\"\"\n super(SmoothL1Loss, self).__init__()\n self.beta = desc['beta'] if 'beta' in desc else 1.0\n self.reduction = desc['reduction'] if 'reduction' in desc else 'mean'\n self.loss_weight = desc['loss_weight'] if 'loss_weight' in desc else 1.0\n\n def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):\n \"\"\"Forward compute.\n\n :param pred: predict\n :param target: target\n :param weight: weight\n :param avg_factor: avg factor\n :param reduction_override: reduce override\n :return: loss\n \"\"\"\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if target.numel() > 0:\n loss_bbox = self.loss_weight * smooth_l1_loss(\n pred,\n target,\n weight,\n beta=self.beta,\n reduction=reduction,\n avg_factor=avg_factor,\n **kwargs)\n return loss_bbox\n else:\n return torch.FloatTensor([0.0]).cuda()\n",
"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TF Adam.\"\"\"\n\n\nclass OptimizerStep(object):\n \"\"\"Adam optimizer for tensorflow.\"\"\"\n\n def __init__(self, learning_rate, weight_decay=0.):\n self.weight_decay = weight_decay\n self.base_lr = learning_rate\n\n def set_lr(self, learning_rate):\n \"\"\"Uptate learning rate of optimizer.\"\"\"\n if hasattr(self, '_learning_rate'):\n self._learning_rate = learning_rate\n elif hasattr(self, '_lr'):\n self._lr = learning_rate\n\n def step(self, loss, loss_scale, global_step, var_list=None):\n \"\"\"Compute and update gradients.\"\"\"\n loss = loss + self.regularize_loss(loss)\n if loss_scale != 1:\n scaled_grad_vars = self.compute_gradients(loss * loss_scale, var_list=var_list)\n unscaled_grad_vars = []\n for grad, var in scaled_grad_vars:\n unscaled_grad_vars.append((grad, var) if grad is None else (grad / loss_scale, var))\n minimize_op = self.apply_gradients(unscaled_grad_vars, global_step)\n else:\n grad_vars = self.compute_gradients(loss, var_list=var_list)\n minimize_op = self.apply_gradients(grad_vars, global_step)\n return minimize_op\n\n def regularize_loss(self, loss):\n \"\"\"Compute and return l2 loss.\"\"\"\n import tensorflow as tf\n l2_loss_list = [tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()\n if 'batch_normalization' not in v.name]\n loss = loss + self.weight_decay * tf.add_n(l2_loss_list)\n return loss\n\n\ndef dynamic_optimizer(optimizer_class, **params):\n \"\"\"Dynamically choose optimizer.\"\"\"\n class DynamicOptimizer(optimizer_class, OptimizerStep):\n \"\"\"Dynamic optimizer for tensorflow.\"\"\"\n\n def __init__(self, **kwargs):\n weight_decay = 0.\n learning_rate = 0.\n if 'weight_decay' in kwargs:\n weight_decay = kwargs.pop('weight_decay')\n if 'learning_rate' in kwargs:\n learning_rate = kwargs['learning_rate']\n optimizer_class.__init__(self, **kwargs)\n OptimizerStep.__init__(self, learning_rate=learning_rate, weight_decay=weight_decay)\n return DynamicOptimizer(**params)\n\n\ndef dynamic_distributed_optimizer(optimizer_class, optimizer):\n \"\"\"Dynamically choose distributed optimizer.\"\"\"\n class DynamicDistributedOptimizer(optimizer_class, OptimizerStep):\n \"\"\"Dynamic distributed optimizer for tensorflow.\"\"\"\n\n def __init__(self, optimizer):\n optimizer_class.__init__(self, optimizer)\n OptimizerStep.__init__(self, learning_rate=optimizer.base_lr, weight_decay=optimizer.weight_decay)\n return DynamicDistributedOptimizer(optimizer)\n",
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Class of list dict.\"\"\"\n\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\n\n\nclass ListDict:\n \"\"\"Class of list dict.\n\n :param data: data\n :type data: list\n \"\"\"\n\n def __init__(self, data=None, **kwargs):\n if data is None:\n data = []\n self.data = data\n self.kwargs = kwargs\n\n def __len__(self):\n \"\"\"Get the length of data.\"\"\"\n return len(self.data)\n\n def __getitem__(self, key: (int, slice, str, tuple, list)):\n \"\"\"Get item.\"\"\"\n if isinstance(key, str):\n return [p[key] for p in self.data]\n elif isinstance(key, int):\n return self.data[key]\n elif isinstance(key, slice):\n return self.__class__(data=self.data[key], **self.kwargs)\n elif isinstance(key, (tuple, list)):\n records = []\n for key_ in key:\n records.append(self[key_])\n if isinstance(records[-1], (dict, OrderedDict)):\n return self.__class__(data=records, **self.kwargs)\n else:\n return list(zip(*records))\n else:\n raise TypeError('Key must be str or list')\n\n def __str__(self):\n \"\"\"Str.\"\"\"\n s = []\n for i in self.data:\n s.append(str(i))\n return '\\n'.join(s)\n\n @property\n def header(self):\n \"\"\"Get the header of the data.\"\"\"\n if len(self.data) > 0:\n return list(self.data[0].keys())\n else:\n return None\n\n def get(self, key, default=None):\n \"\"\"Get value for key.\"\"\"\n try:\n return self[key]\n except BaseException:\n return default\n\n def append(self, data):\n \"\"\"Append data.\"\"\"\n if isinstance(data, ListDict):\n if len(data) != 0:\n raise Exception('data len must be 0')\n data = data.data[0]\n if isinstance(data, (dict, OrderedDict)):\n self.data.append(data)\n else:\n raise TypeError(\n 'Method append does support for type {}'.format(\n type(data)))\n\n def extend(self, data):\n \"\"\"Extend data.\"\"\"\n if isinstance(data, ListDict):\n data = data.data\n if isinstance(data, list):\n self.data.extend(data)\n else:\n raise TypeError(\n 'Method extend does support for type {}'.format(\n type(data)))\n\n def insert(self, idx, data):\n \"\"\"Insert an item.\"\"\"\n if isinstance(data, ListDict):\n if len(data) != 0:\n raise Exception('data len must be 0')\n data = data.data[0]\n if isinstance(data, (dict, OrderedDict)):\n self.data.insert(idx, data)\n else:\n raise TypeError(\n 'Method insert does support for type {}'.format(\n type(data)))\n\n def pop(self, idx):\n \"\"\"Pop an item.\"\"\"\n return self.data.pop(idx)\n\n def to_dataframe(self):\n \"\"\"Dump to DataFrame.\"\"\"\n return pd.DataFrame(self.data)\n\n def to_csv(self, path, index=False, **kwargs):\n \"\"\"Dump to csv file.\"\"\"\n df = self.to_dataframe()\n df.to_csv(path, columns=self.header, index=index, **kwargs)\n\n @classmethod\n def load_csv(cls, path, **kwargs):\n \"\"\"Load csv file.\"\"\"\n if not os.path.isfile(path):\n raise FileExistsError('{} does not exist.'.format(path))\n df = pd.read_csv(path)\n data = 
df.to_dict('records')\n return cls(data=data, **kwargs)\n",
"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"conv weight standarlization.\"\"\"\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv_ws_2d(input,\n weight,\n bias=None,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n eps=1e-5):\n \"\"\"Conv2d with weight standarlization.\n\n :param input: input feature map\n :type input: torch.Tensor\n :param weight: weight of conv layer\n :type weight: torch.Tensor\n :param bias: bias\n :type bias: torch.Tensor\n :param stride: conv stride\n :type stride: int\n :param padding: num of padding\n :type padding: int\n :param dilation: num of dilation\n :type dilation: int\n :param groups: num of group\n :type groups: int\n :param eps: weight eps\n :type eps: float\n :return: feature map after weight standarlization\n :rtype: torch.Tensor\n \"\"\"\n c_in = weight.size(0)\n weight_flat = weight.view(c_in, -1)\n mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)\n std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)\n weight = (weight - mean) / (std + eps)\n return F.conv2d(input, weight, bias, stride, padding, dilation, groups)\n\n\nclass ConvWS2d(nn.Conv2d):\n \"\"\"Conv2d with weight standarlization.\"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=True,\n eps=1e-5):\n \"\"\"Init conv2d with weight standarlization.\n\n :param in_channels: input channels\n :param out_channels: output channels\n :param kernel_size: kernel size\n :param stride: stride\n :param padding: num of padding\n :param dilation: num of dilation\n :param groups: num of groups\n :param bias: bias\n :param eps: eps\n \"\"\"\n super(ConvWS2d, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n self.eps = eps\n\n def forward(self, x):\n \"\"\"Forward function of conv2d with weight standarlization.\"\"\"\n return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,\n self.dilation, self.groups, self.eps)\n"
] | [
[
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.cuda.set_device",
"torch.load"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.reset_default_graph"
],
[
"torch.abs",
"torch.FloatTensor",
"torch.where"
],
[
"tensorflow.nn.l2_loss",
"tensorflow.add_n",
"tensorflow.compat.v1.trainable_variables"
],
[
"pandas.read_csv",
"pandas.DataFrame"
],
[
"torch.nn.functional.conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CyberZHG/keras-global-self-attention | [
"f3bf21dbb1f3251b5417a8bb254dd91807b1aec5"
] | [
"keras_self_attention/seq_self_attention.py"
] | [
"from tensorflow import keras\nfrom tensorflow.keras import backend as K\n\n\nclass SeqSelfAttention(keras.layers.Layer):\n\n ATTENTION_TYPE_ADD = 'additive'\n ATTENTION_TYPE_MUL = 'multiplicative'\n\n def __init__(self,\n units=32,\n attention_width=None,\n attention_type=ATTENTION_TYPE_ADD,\n return_attention=False,\n history_only=False,\n kernel_initializer='glorot_normal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_additive_bias=True,\n use_attention_bias=True,\n attention_activation=None,\n attention_regularizer_weight=0.0,\n **kwargs):\n \"\"\"Layer initialization.\n\n For additive attention, see: https://arxiv.org/pdf/1806.01264.pdf\n\n :param units: The dimension of the vectors that used to calculate the attention weights.\n :param attention_width: The width of local attention.\n :param attention_type: 'additive' or 'multiplicative'.\n :param return_attention: Whether to return the attention weights for visualization.\n :param history_only: Only use historical pieces of data.\n :param kernel_initializer: The initializer for weight matrices.\n :param bias_initializer: The initializer for biases.\n :param kernel_regularizer: The regularization for weight matrices.\n :param bias_regularizer: The regularization for biases.\n :param kernel_constraint: The constraint for weight matrices.\n :param bias_constraint: The constraint for biases.\n :param use_additive_bias: Whether to use bias while calculating the relevance of inputs features\n in additive mode.\n :param use_attention_bias: Whether to use bias while calculating the weights of attention.\n :param attention_activation: The activation used for calculating the weights of attention.\n :param attention_regularizer_weight: The weights of attention regularizer.\n :param kwargs: Parameters for parent class.\n \"\"\"\n super(SeqSelfAttention, self).__init__(**kwargs)\n self.supports_masking = True\n self.units = units\n self.attention_width = attention_width\n self.attention_type = attention_type\n self.return_attention = return_attention\n self.history_only = history_only\n if history_only and attention_width is None:\n self.attention_width = int(1e9)\n\n self.use_additive_bias = use_additive_bias\n self.use_attention_bias = use_attention_bias\n self.kernel_initializer = keras.initializers.get(kernel_initializer)\n self.bias_initializer = keras.initializers.get(bias_initializer)\n self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = keras.constraints.get(kernel_constraint)\n self.bias_constraint = keras.constraints.get(bias_constraint)\n self.attention_activation = keras.activations.get(attention_activation)\n self.attention_regularizer_weight = attention_regularizer_weight\n self._backend = keras.backend.backend()\n\n if attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n self.Wx, self.Wt, self.bh = None, None, None\n self.Wa, self.ba = None, None\n elif attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n self.Wa, self.ba = None, None\n else:\n raise NotImplementedError('No implementation for attention type : ' + attention_type)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'attention_width': self.attention_width,\n 'attention_type': self.attention_type,\n 'return_attention': self.return_attention,\n 'history_only': self.history_only,\n 'use_additive_bias': self.use_additive_bias,\n 'use_attention_bias': 
self.use_attention_bias,\n 'kernel_initializer': keras.initializers.serialize(self.kernel_initializer),\n 'bias_initializer': keras.initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': keras.regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': keras.regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': keras.constraints.serialize(self.kernel_constraint),\n 'bias_constraint': keras.constraints.serialize(self.bias_constraint),\n 'attention_activation': keras.activations.serialize(self.attention_activation),\n 'attention_regularizer_weight': self.attention_regularizer_weight,\n }\n base_config = super(SeqSelfAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape):\n if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n self._build_additive_attention(input_shape)\n elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n self._build_multiplicative_attention(input_shape)\n super(SeqSelfAttention, self).build(input_shape)\n\n def _build_additive_attention(self, input_shape):\n feature_dim = int(input_shape[2])\n\n self.Wt = self.add_weight(shape=(feature_dim, self.units),\n name='{}_Add_Wt'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.Wx = self.add_weight(shape=(feature_dim, self.units),\n name='{}_Add_Wx'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_additive_bias:\n self.bh = self.add_weight(shape=(self.units,),\n name='{}_Add_bh'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n self.Wa = self.add_weight(shape=(self.units, 1),\n name='{}_Add_Wa'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_attention_bias:\n self.ba = self.add_weight(shape=(1,),\n name='{}_Add_ba'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n def _build_multiplicative_attention(self, input_shape):\n feature_dim = int(input_shape[2])\n\n self.Wa = self.add_weight(shape=(feature_dim, feature_dim),\n name='{}_Mul_Wa'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_attention_bias:\n self.ba = self.add_weight(shape=(1,),\n name='{}_Mul_ba'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n def call(self, inputs, mask=None, **kwargs):\n input_len = K.shape(inputs)[1]\n\n if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n e = self._call_additive_emission(inputs)\n elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n e = self._call_multiplicative_emission(inputs)\n\n if self.attention_activation is not None:\n e = self.attention_activation(e)\n if self.attention_width is not None:\n if self.history_only:\n lower = K.arange(0, input_len) - (self.attention_width - 1)\n else:\n lower = K.arange(0, input_len) - self.attention_width // 2\n lower = K.expand_dims(lower, axis=-1)\n upper = lower + self.attention_width\n indices = K.expand_dims(K.arange(0, input_len), axis=0)\n e -= 10000.0 * (1.0 - K.cast(lower <= indices, K.floatx()) * 
K.cast(indices < upper, K.floatx()))\n if mask is not None:\n mask = K.expand_dims(K.cast(mask, K.floatx()), axis=-1)\n e -= 10000.0 * ((1.0 - mask) * (1.0 - K.permute_dimensions(mask, (0, 2, 1))))\n\n # a_{t} = \\text{softmax}(e_t)\n e = K.exp(e - K.max(e, axis=-1, keepdims=True))\n a = e / K.sum(e, axis=-1, keepdims=True)\n\n # l_t = \\sum_{t'} a_{t, t'} x_{t'}\n v = K.batch_dot(a, inputs)\n if self.attention_regularizer_weight > 0.0:\n self.add_loss(self._attention_regularizer(a))\n\n if self.return_attention:\n return [v, a]\n return v\n\n def _call_additive_emission(self, inputs):\n input_shape = K.shape(inputs)\n batch_size, input_len = input_shape[0], input_shape[1]\n\n # h_{t, t'} = \\tanh(x_t^T W_t + x_{t'}^T W_x + b_h)\n q = K.expand_dims(K.dot(inputs, self.Wt), 2)\n k = K.expand_dims(K.dot(inputs, self.Wx), 1)\n if self.use_additive_bias:\n h = K.tanh(q + k + self.bh)\n else:\n h = K.tanh(q + k)\n\n # e_{t, t'} = W_a h_{t, t'} + b_a\n if self.use_attention_bias:\n e = K.reshape(K.dot(h, self.Wa) + self.ba, (batch_size, input_len, input_len))\n else:\n e = K.reshape(K.dot(h, self.Wa), (batch_size, input_len, input_len))\n return e\n\n def _call_multiplicative_emission(self, inputs):\n # e_{t, t'} = x_t^T W_a x_{t'} + b_a\n e = K.batch_dot(K.dot(inputs, self.Wa), K.permute_dimensions(inputs, (0, 2, 1)))\n if self.use_attention_bias:\n e += self.ba[0]\n return e\n\n def compute_output_shape(self, input_shape):\n output_shape = input_shape\n if self.return_attention:\n attention_shape = (input_shape[0], output_shape[1], input_shape[1])\n return [output_shape, attention_shape]\n return output_shape\n\n def compute_mask(self, inputs, mask=None):\n if self.return_attention:\n return [mask, None]\n return mask\n\n def _attention_regularizer(self, attention):\n batch_size = K.cast(K.shape(attention)[0], K.floatx())\n input_len = K.shape(attention)[-1]\n indices = K.expand_dims(K.arange(0, input_len), axis=0)\n diagonal = K.expand_dims(K.arange(0, input_len), axis=-1)\n eye = K.cast(K.equal(indices, diagonal), K.floatx())\n return self.attention_regularizer_weight * K.sum(K.square(K.batch_dot(\n attention,\n K.permute_dimensions(attention, (0, 2, 1))) - eye)) / batch_size\n\n @staticmethod\n def get_custom_objects():\n return {'SeqSelfAttention': SeqSelfAttention}\n"
] | [
[
"tensorflow.keras.backend.floatx",
"tensorflow.keras.constraints.serialize",
"tensorflow.keras.backend.tanh",
"tensorflow.keras.regularizers.serialize",
"tensorflow.keras.backend.backend",
"tensorflow.keras.backend.batch_dot",
"tensorflow.keras.backend.permute_dimensions",
"tensorflow.keras.backend.max",
"tensorflow.keras.backend.expand_dims",
"tensorflow.keras.initializers.get",
"tensorflow.keras.backend.dot",
"tensorflow.keras.backend.sum",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.activations.get",
"tensorflow.keras.constraints.get",
"tensorflow.keras.activations.serialize",
"tensorflow.keras.regularizers.get",
"tensorflow.keras.backend.shape",
"tensorflow.keras.backend.arange",
"tensorflow.keras.backend.equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |