repo_name (string, 8-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
---|---|---|---|---|
Manny27nyc/intel-extension-for-pytorch | [
"b40faedf6b00d520f6483d519d2e82bce0a6c0d1"
] | [
"intel_extension_for_pytorch/optim/_functional.py"
] | [
"r\"\"\"Functional interface, port from torch/optim/_function.py\"\"\"\nimport torch\nfrom torch import Tensor\nfrom typing import List, Optional\n\ndef is_master_weight(param, params_attr):\n return (\n param.dtype == torch.float and\n param in params_attr and\n 'bf16_param' in params_attr[param]\n )\n\ndef get_bf16_grad(param, params_attr):\n assert is_master_weight(param, params_attr)\n return params_attr[param]['bf16_param'].grad\n\ndef _make_sparse(grad, grad_indices, values):\n size = grad.size()\n if grad_indices.numel() == 0 or values.numel() == 0:\n return torch.empty_like(grad)\n return torch.sparse_coo_tensor(grad_indices, values, size)\n\ndef _adagrad_impl(\n params: List[Tensor],\n grads: List[Tensor],\n state_sums: List[Tensor],\n state_steps: List[int],\n attr: dict,\n lr: float,\n weight_decay: float,\n lr_decay: float,\n eps: float,\n fused: bool,\n):\n r\"\"\"Functional API that performs Adagrad algorithm computation.\n\n See :class:`~torch.optim.Adagrad` for details.\n \"\"\"\n\n for (param, grad, state_sum, step) in zip(params, grads, state_sums, state_steps):\n param2 = torch.Tensor()\n if param in attr:\n if 'trail' in attr[param]:\n assert param.dtype is torch.bfloat16\n param2 = attr[param]['trail']\n if 'bf16_param' in attr[param]:\n assert param.dtype is torch.float\n param2 = attr[param]['bf16_param']\n if fused and not grad.is_sparse:\n torch.ops.torch_ipex.adagrad_fused_step(\n param,\n grad,\n state_sum,\n param2,\n step,\n lr,\n weight_decay,\n lr_decay,\n eps)\n continue\n\n if weight_decay != 0:\n if grad.is_sparse:\n raise RuntimeError(\"weight_decay option is not compatible with sparse gradients\")\n grad = grad.add(param, alpha=weight_decay)\n\n clr = lr / (1 + (step - 1) * lr_decay)\n\n if grad.is_sparse:\n grad = grad.coalesce() # the update is non-linear so indices must be unique\n grad_indices = grad._indices()\n grad_values = grad._values()\n size = grad.size()\n\n state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))\n std = state_sum.sparse_mask(grad)\n std_values = std._values().sqrt_().add_(eps)\n param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)\n else:\n state_sum.addcmul_(grad, grad, value=1)\n std = state_sum.sqrt().add_(eps)\n param.addcdiv_(grad, std, value=-clr)\n\[email protected]_grad()\ndef adagrad_step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params_with_grad = []\n grads = []\n state_sums = []\n state_steps = []\n\n for p in group['params']:\n grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad\n if grad is not None:\n params_with_grad.append(p)\n grads.append(grad)\n state = self.state[p]\n state_sums.append(state['sum'])\n # update the steps for each param group update\n state['step'] += 1\n # record the step after step update\n state_steps.append(state['step'])\n\n _adagrad_impl(\n params_with_grad,\n grads,\n state_sums,\n state_steps,\n self.params_attr,\n group['lr'],\n group['weight_decay'],\n group['lr_decay'],\n group['eps'],\n self.fused)\n\n return loss\n\ndef _sgd_non_fused_micro_step(\n params: Tensor,\n d_p_list: Tensor,\n momentum_buffer_list: Optional[Tensor],\n weight_decay: float,\n momentum: float,\n lr: float,\n dampening: float,\n nesterov: bool,\n):\n if 
weight_decay != 0:\n d_p = d_p.add(param, alpha=weight_decay)\n\n if momentum != 0:\n buf = momentum_buffer_list[i]\n\n if buf is None:\n buf = torch.clone(d_p).detach()\n momentum_buffer_list[i] = buf\n else:\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n\n if nesterov:\n d_p = d_p.add(buf, alpha=momentum)\n else:\n d_p = buf\n\n param.add_(d_p, alpha=alpha)\n\ndef _sgd_impl(\n params: List[Tensor],\n d_p_list: List[Tensor],\n attr: dict,\n momentum_buffer_list: List[Optional[Tensor]],\n *,\n weight_decay: float,\n momentum: float,\n lr: float,\n dampening: float,\n nesterov: bool,\n fused: bool\n):\n r\"\"\"Functional API that performs SGD algorithm computation.\n\n See :class:`~torch.optim.SGD` for details.\n \"\"\"\n\n for i, param in enumerate(params):\n d_p = d_p_list[i]\n param2 = torch.Tensor()\n if param in attr:\n if 'trail' in attr[param]:\n assert param.dtype is torch.bfloat16\n param2 = attr[param]['trail']\n if 'bf16_param' in attr[param]:\n assert param.dtype is torch.float\n param2 = attr[param]['bf16_param']\n\n if fused and not d_p.is_sparse:\n momentum_buffer_list[i] = torch.ops.torch_ipex.sgd_fused_step(\n param,\n d_p,\n momentum_buffer_list[i],\n param2,\n momentum,\n lr,\n weight_decay,\n dampening,\n nesterov)\n continue\n\n if (\n d_p.is_sparse and\n d_p.dtype == torch.bfloat16 and\n weight_decay == 0 and\n momentum == 0\n ):\n # packed_add can support sparse tensor\n torch.ops.torch_ipex.packed_add(param, param2, d_p, alpha=-lr)\n else:\n # no special optimize for other non fused case, fall back to naive implementation\n d_p = d_p.to(param.dtype)\n _sgd_non_fused_micro_step(\n param,\n d_p,\n momentum_buffer_list[i],\n momentum,\n lr,\n weight_decay,\n dampening,\n nesterov\n )\n\[email protected]_grad()\ndef sgd_step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params_with_grad = []\n d_p_list = []\n momentum_buffer_list = []\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n lr = group['lr']\n\n for p in group['params']:\n grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad\n if grad is not None:\n params_with_grad.append(p)\n d_p_list.append(grad)\n\n state = self.state[p]\n if 'momentum_buffer' not in state:\n momentum_buffer_list.append(None)\n else:\n momentum_buffer_list.append(state['momentum_buffer'])\n\n _sgd_impl(\n params_with_grad,\n d_p_list,\n self.params_attr,\n momentum_buffer_list,\n weight_decay=weight_decay,\n momentum=momentum,\n lr=lr,\n dampening=dampening,\n nesterov=nesterov,\n fused=self.fused)\n\n # update momentum_buffers in state\n for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):\n state = self.state[p]\n state['momentum_buffer'] = momentum_buffer\n\n return loss\n\n\ndef _lamb_fused_impl(\n params: List[Tensor],\n grads: List[Tensor],\n exp_avgs: List[Tensor],\n exp_avg_sqs: List[Tensor],\n attr: dict,\n state_steps: List[int],\n beta1: float,\n beta2: float,\n lr: float,\n weight_decay: float,\n eps: float,\n):\n\n r\"\"\"Functional API that performs Lamb algorithm computation.\n See :class:`~torch.optim.Lamb` for details.\n \"\"\"\n\n for i, param in enumerate(params):\n\n grad = grads[i]\n exp_avg = exp_avgs[i]\n exp_avg_sq 
= exp_avg_sqs[i]\n step = state_steps[i]\n param2 = torch.Tensor()\n if param in attr:\n if 'trail' in attr[param]:\n assert param.dtype is torch.bfloat16\n param2 = attr[param]['trail']\n if 'bf16_param' in attr[param]:\n assert param.dtype is torch.float\n param2 = attr[param]['bf16_param']\n torch.ops.torch_ipex.lamb_fused_step(\n param,\n exp_avg,\n exp_avg_sq,\n grad,\n param2,\n step,\n beta1,\n beta2,\n lr,\n weight_decay,\n eps)\n\ndef _lamb_impl(\n params: List[Tensor],\n grads: List[Tensor],\n exp_avgs: List[Tensor],\n exp_avg_sqs: List[Tensor],\n state_steps: List[int],\n beta1: float,\n beta2: float,\n lr: float,\n weight_decay: float,\n eps: float,\n):\n r\"\"\"Functional API that performs Lamb algorithm computation.\n \"\"\"\n for i, param in enumerate(params):\n grad = grads[i]\n exp_avg = exp_avgs[i]\n exp_avg_sq = exp_avg_sqs[i]\n step = state_steps[i]\n\n bias_correction1 = 1 - beta1 ** step\n bias_correction2 = 1 - beta2 ** step\n grad = grad.to(exp_avg.dtype)\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n adam_step = (exp_avg / bias_correction1) / ((exp_avg_sq / bias_correction2).sqrt() + eps)\n\n if weight_decay != 0:\n adam_step.add_(param, alpha=weight_decay)\n\n weight_norm = param.norm(p=2)\n rtw_norm = adam_step.norm(p=2)\n true_ratio = weight_norm / rtw_norm\n\n param.add_(adam_step, alpha=-lr * true_ratio)\n\[email protected]_grad()\ndef lamb_step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params_with_grad = []\n grads = []\n exp_avgs = []\n exp_avg_sqs = []\n trails = []\n state_steps = []\n\n for p in group['params']:\n grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad\n if grad is not None:\n params_with_grad.append(p)\n if grad.is_sparse:\n raise RuntimeError('Lamb does not support sparse gradients')\n if grad.device != torch.device('cpu'):\n raise RuntimeError('Lamb supports only CPU device')\n grads.append(grad)\n\n state = self.state[p]\n # Lazy state initialization\n if len(state) == 0:\n state['step'] = 0\n buffer_dtype = p.dtype if p.dtype is torch.float64 else torch.float\n state['exp_avg'] = torch.zeros(p.shape, dtype=buffer_dtype)\n state['exp_avg_sq'] = torch.zeros(p.shape, dtype=buffer_dtype)\n\n exp_avgs.append(state['exp_avg'])\n exp_avg_sqs.append(state['exp_avg_sq'])\n\n # update the steps for each param group update\n state['step'] += 1\n # record the step after step update\n state_steps.append(state['step'])\n\n beta1, beta2 = group['betas']\n _lamb_fused_impl(\n params_with_grad,\n grads,\n exp_avgs,\n exp_avg_sqs,\n self.params_attr,\n state_steps,\n beta1,\n beta2,\n group['lr'],\n group['weight_decay'],\n group['eps'])\n return loss"
] | [
[
"torch.ops.torch_ipex.packed_add",
"torch.empty_like",
"torch.sparse_coo_tensor",
"torch.ops.torch_ipex.lamb_fused_step",
"torch.no_grad",
"torch.enable_grad",
"torch.zeros",
"torch.clone",
"torch.ops.torch_ipex.adagrad_fused_step",
"torch.device",
"torch.ops.torch_ipex.sgd_fused_step",
"torch.Tensor"
]
] |
sedurCode/seld-dcase2021 | [
"f8e09dbbbb5ac7d6ae0b82083f1a11b013c5dd51"
] | [
"seld.py"
] | [
"#\n# A wrapper script that trains the SELDnet. The training stops when the early stopping metric - SELD error stops improving.\n#\n\nimport os\nimport sys\nimport numpy as np\nimport cls_feature_class\nimport cls_data_generator\nfrom cls_compute_seld_results import ComputeSELDResults, reshape_3Dto2D\nimport keras_model\nimport parameter\nimport time\n\ndef dump_DCASE2021_results(_data_gen, _feat_cls, _dcase_output_folder, _sed_pred, _doa_pred):\n '''\n Write the filewise results to individual csv files\n '''\n\n # Number of frames for a 60 second audio with 100ms hop length = 600 frames\n max_frames_with_content = _data_gen.get_nb_frames()\n\n # Number of frames in one batch (batch_size* sequence_length) consists of all the 600 frames above with\n # zero padding in the remaining frames\n test_filelist = _data_gen.get_filelist()\n frames_per_file = _data_gen.get_frame_per_file()\n for file_cnt in range(_sed_pred.shape[0] // frames_per_file):\n output_file = os.path.join(_dcase_output_folder, test_filelist[file_cnt].replace('.npy', '.csv'))\n dc = file_cnt * frames_per_file\n output_dict = _feat_cls.regression_label_format_to_output_format(\n _sed_pred[dc:dc + max_frames_with_content, :],\n _doa_pred[dc:dc + max_frames_with_content, :]\n )\n _data_gen.write_output_format_file(output_file, output_dict)\n return\n\n\ndef get_accdoa_labels(accdoa_in, nb_classes):\n x, y, z = accdoa_in[:, :, :nb_classes], accdoa_in[:, :, nb_classes:2*nb_classes], accdoa_in[:, :, 2*nb_classes:]\n sed = np.sqrt(x**2 + y**2 + z**2) > 0.5\n \n return sed, accdoa_in\n\ndef main(argv):\n \"\"\"\n Main wrapper for training sound event localization and detection network.\n \n :param argv: expects two optional inputs. \n first input: task_id - (optional) To chose the system configuration in parameters.py.\n (default) 1 - uses default parameters\n second input: job_id - (optional) all the output files will be uniquely represented with this.\n (default) 1\n\n \"\"\"\n print(argv)\n if len(argv) != 3:\n print('\\n\\n')\n print('-------------------------------------------------------------------------------------------------------')\n print('The code expected two optional inputs')\n print('\\t>> python seld.py <task-id> <job-id>')\n print('\\t\\t<task-id> is used to choose the user-defined parameter set from parameter.py')\n print('Using default inputs for now')\n print('\\t\\t<job-id> is a unique identifier which is used for output filenames (models, training plots). 
'\n 'You can use any number or string for this.')\n print('-------------------------------------------------------------------------------------------------------')\n print('\\n\\n')\n\n # use parameter set defined by user\n task_id = '1' if len(argv) < 2 else argv[1]\n params = parameter.get_params(task_id)\n\n job_id = 1 if len(argv) < 3 else argv[-1]\n\n feat_cls = cls_feature_class.FeatureClass(params)\n train_splits, val_splits, test_splits = None, None, None\n\n if params['mode'] == 'dev':\n test_splits = [6]\n val_splits = [5]\n train_splits = [[1, 2, 3, 4]]\n\n elif params['mode'] == 'eval':\n test_splits = [[7, 8]]\n val_splits = [[6]]\n train_splits = [[1, 2, 3, 4, 5]]\n\n for split_cnt, split in enumerate(test_splits):\n print('\\n\\n---------------------------------------------------------------------------------------------------')\n print('------------------------------------ SPLIT {} -----------------------------------------------'.format(split))\n print('---------------------------------------------------------------------------------------------------')\n\n # Unique name for the run\n cls_feature_class.create_folder(params['model_dir'])\n unique_name = '{}_{}_{}_{}_split{}'.format(\n task_id, job_id, params['dataset'], params['mode'], split\n )\n unique_name = os.path.join(params['model_dir'], unique_name)\n model_name = '{}_model.h5'.format(unique_name)\n print(\"unique_name: {}\\n\".format(unique_name))\n\n # Load train and validation data\n print('Loading training dataset:')\n data_gen_train = cls_data_generator.DataGenerator(\n params=params, split=train_splits[split_cnt]\n )\n\n print('Loading validation dataset:')\n data_gen_val = cls_data_generator.DataGenerator(\n params=params, split=val_splits[split_cnt], shuffle=False, per_file=True, is_eval=False\n )\n\n # Collect the reference labels for validation data\n data_in, data_out = data_gen_train.get_data_sizes()\n print('FEATURES:\\n\\tdata_in: {}\\n\\tdata_out: {}\\n'.format(data_in, data_out))\n\n nb_classes = data_gen_train.get_nb_classes()\n print('MODEL:\\n\\tdropout_rate: {}\\n\\tCNN: nb_cnn_filt: {}, f_pool_size{}, t_pool_size{}\\n\\trnn_size: {}, fnn_size: {}\\n\\tdoa_objective: {}\\n'.format(\n params['dropout_rate'], params['nb_cnn2d_filt'], params['f_pool_size'], params['t_pool_size'], params['rnn_size'],\n params['fnn_size'], params['doa_objective']))\n\n print('Using loss weights : {}'.format(params['loss_weights']))\n model = keras_model.get_model(data_in=data_in, data_out=data_out, dropout_rate=params['dropout_rate'],\n nb_cnn2d_filt=params['nb_cnn2d_filt'], f_pool_size=params['f_pool_size'], t_pool_size=params['t_pool_size'],\n rnn_size=params['rnn_size'], fnn_size=params['fnn_size'],\n weights=params['loss_weights'], doa_objective=params['doa_objective'], is_accdoa=params['is_accdoa'])\n\n # Dump results in DCASE output format for calculating final scores\n dcase_output_val_folder = os.path.join(params['dcase_output_dir'], '{}_{}_{}_val'.format(task_id, params['dataset'], params['mode']))\n cls_feature_class.delete_and_create_folder(dcase_output_val_folder)\n print('Dumping recording-wise val results in: {}'.format(dcase_output_val_folder))\n\n # Initialize evaluation metric class\n score_obj = ComputeSELDResults(params)\n\n best_seld_metric = 99999\n best_epoch = -1\n patience_cnt = 0\n nb_epoch = 2 if params['quick_test'] else params['nb_epochs']\n tr_loss = np.zeros(nb_epoch)\n seld_metric = np.zeros((nb_epoch, 5))\n\n # start training\n for epoch_cnt in range(nb_epoch):\n start = time.time()\n\n 
# train once per epoch\n hist = model.fit_generator(\n generator=data_gen_train.generate(),\n steps_per_epoch=2 if params['quick_test'] else data_gen_train.get_total_batches_in_data(),\n epochs=params['epochs_per_fit'],\n verbose=2,\n )\n tr_loss[epoch_cnt] = hist.history.get('loss')[-1]\n\n # predict once per epoch\n pred = model.predict_generator(\n generator=data_gen_val.generate(),\n steps=2 if params['quick_test'] else data_gen_val.get_total_batches_in_data(),\n verbose=2\n )\n\n if params['is_accdoa']:\n sed_pred, doa_pred = get_accdoa_labels(pred, nb_classes)\n sed_pred = reshape_3Dto2D(sed_pred)\n doa_pred = reshape_3Dto2D(doa_pred)\n else:\n sed_pred = reshape_3Dto2D(pred[0]) > 0.5\n doa_pred = reshape_3Dto2D(pred[1] if params['doa_objective'] is 'mse' else pred[1][:, :, nb_classes:])\n \n # Calculate the DCASE 2021 metrics - Location-aware detection and Class-aware localization scores\n dump_DCASE2021_results(data_gen_val, feat_cls, dcase_output_val_folder, sed_pred, doa_pred)\n seld_metric[epoch_cnt, :] = score_obj.get_SELD_Results(dcase_output_val_folder)\n\n patience_cnt += 1\n if seld_metric[epoch_cnt, -1] < best_seld_metric:\n best_seld_metric = seld_metric[epoch_cnt, -1]\n best_epoch = epoch_cnt\n model.save(model_name)\n patience_cnt = 0\n\n print(\n 'epoch_cnt: {}, time: {:0.2f}s, tr_loss: {:0.2f}, '\n '\\n\\t\\t DCASE2021 SCORES: ER: {:0.2f}, F: {:0.1f}, LE: {:0.1f}, LR:{:0.1f}, seld_score (early stopping score): {:0.2f}, '\n 'best_seld_score: {:0.2f}, best_epoch : {}\\n'.format(\n epoch_cnt, time.time() - start, tr_loss[epoch_cnt],\n seld_metric[epoch_cnt, 0], seld_metric[epoch_cnt, 1]*100,\n seld_metric[epoch_cnt, 2], seld_metric[epoch_cnt, 3]*100,\n seld_metric[epoch_cnt, -1], best_seld_metric, best_epoch\n )\n )\n if patience_cnt > params['patience']:\n break\n\n print('\\nResults on validation split:')\n print('\\tUnique_name: {} '.format(unique_name))\n print('\\tSaved model for the best_epoch: {}'.format(best_epoch))\n print('\\tSELD_score (early stopping score) : {}'.format(best_seld_metric))\n\n print('\\n\\tDCASE2021 scores')\n print('\\tClass-aware localization scores: Localization Error: {:0.1f}, Localization Recall: {:0.1f}'.format(seld_metric[best_epoch, 2], seld_metric[best_epoch, 3]*100))\n print('\\tLocation-aware detection scores: Error rate: {:0.2f}, F-score: {:0.1f}'.format(seld_metric[best_epoch, 0], seld_metric[best_epoch, 1]*100))\n\n # ------------------ Calculate metric scores for unseen test split ---------------------------------\n print('\\nLoading the best model and predicting results on the testing split')\n print('\\tLoading testing dataset:')\n data_gen_test = cls_data_generator.DataGenerator(\n params=params, split=split, shuffle=False, per_file=True, is_eval=True if params['mode'] is 'eval' else False\n )\n\n model = keras_model.load_seld_model('{}_model.h5'.format(unique_name), params['doa_objective'])\n pred_test = model.predict_generator(\n generator=data_gen_test.generate(),\n steps=2 if params['quick_test'] else data_gen_test.get_total_batches_in_data(),\n verbose=2\n )\n if params['is_accdoa']:\n test_sed_pred, test_doa_pred = get_accdoa_labels(pred_test, nb_classes)\n test_sed_pred = reshape_3Dto2D(test_sed_pred)\n test_doa_pred = reshape_3Dto2D(test_doa_pred)\n else:\n test_sed_pred = reshape_3Dto2D(pred_test[0]) > 0.5\n test_doa_pred = reshape_3Dto2D(pred_test[1] if params['doa_objective'] is 'mse' else pred_test[1][:, :, nb_classes:])\n\n\n\n # Dump results in DCASE output format for calculating final scores\n 
dcase_output_test_folder = os.path.join(params['dcase_output_dir'], '{}_{}_{}_test'.format(task_id, params['dataset'], params['mode']))\n cls_feature_class.delete_and_create_folder(dcase_output_test_folder)\n print('Dumping recording-wise test results in: {}'.format(dcase_output_test_folder))\n dump_DCASE2021_results(data_gen_test, feat_cls, dcase_output_test_folder, test_sed_pred, test_doa_pred)\n\n if params['mode'] is 'dev':\n # Calculate DCASE2021 scores\n test_seld_metric = score_obj.get_SELD_Results(dcase_output_test_folder)\n\n print('Results on test split:')\n print('\\tDCASE2021 Scores')\n print('\\tClass-aware localization scores: Localization Error: {:0.1f}, Localization Recall: {:0.1f}'.format(test_seld_metric[2], test_seld_metric[3]*100))\n print('\\tLocation-aware detection scores: Error rate: {:0.2f}, F-score: {:0.1f}'.format(test_seld_metric[0], test_seld_metric[1]*100))\n print('\\tSELD (early stopping metric): {:0.2f}'.format(test_seld_metric[-1]))\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main(sys.argv))\n except (ValueError, IOError) as e:\n sys.exit(e)\n"
] | [
[
"numpy.sqrt",
"numpy.zeros"
]
] |
jongwookyi/Mask_RCNN | [
"9a26fa067a2087dbdf07f21a43dc2aa872ffe059"
] | [
"mrcnn/model.py"
] | [
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport datetime\nimport re\nimport math\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow.keras.backend as K\nimport tensorflow.keras.layers as KL\nimport tensorflow.keras.layers as KE\nimport tensorflow.keras.utils as KU\nimport tensorflow.keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 2.0+\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"2.0\")\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(), array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\", \"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.math.log(x) / tf.math.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random.shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random.shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn=lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn=lambda: tf.cast(tf.constant([]), tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(tf.shape(probs)[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse.to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse.to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n s1 = s[1] if s[1] != None else -1\n mrcnn_bbox = KL.Reshape((s1, num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n 
feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Computer loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. 
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
Mini masks are smaller, typically,\n        224x224 and are generated by extracting the bounding box of the\n        object and resizing it to MINI_MASK_SHAPE.\n\n    Returns:\n    image: [height, width, 3]\n    shape: the original shape of the image before resizing and cropping.\n    class_ids: [instance_count] Integer class IDs\n    bbox: [instance_count, (y1, x1, y2, x2)]\n    mask: [height, width, instance_count]. The height and width are those\n        of the image unless use_mini_mask is True, in which case they are\n        defined in MINI_MASK_SHAPE.\n    \"\"\"\n    # Load image and mask\n    image = dataset.load_image(image_id)\n    mask, class_ids = dataset.load_mask(image_id)\n    original_shape = image.shape\n    image, window, scale, padding, crop = utils.resize_image(\n        image,\n        min_dim=config.IMAGE_MIN_DIM,\n        min_scale=config.IMAGE_MIN_SCALE,\n        max_dim=config.IMAGE_MAX_DIM,\n        mode=config.IMAGE_RESIZE_MODE)\n    mask = utils.resize_mask(mask, scale, padding, crop)\n\n    # Augmentation\n    # This requires the imgaug lib (https://github.com/aleju/imgaug)\n    if augmentation:\n        import imgaug\n\n        # Augmenters that are safe to apply to masks\n        # Some, such as Affine, have settings that make them unsafe, so always\n        # test your augmentation on masks\n        MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n                           \"Fliplr\", \"Flipud\", \"CropAndPad\",\n                           \"Affine\", \"PiecewiseAffine\"]\n\n        def hook(images, augmenter, parents, default):\n            \"\"\"Determines which augmenters to apply to masks.\"\"\"\n            return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n        # Store shapes before augmentation to compare\n        image_shape = image.shape\n        mask_shape = mask.shape\n        # Make augmenters deterministic to apply similarly to images and masks\n        det = augmentation.to_deterministic()\n        image = det.augment_image(image)\n        # Change mask to np.uint8 because imgaug doesn't support bool arrays\n        mask = det.augment_image(mask.astype(np.uint8),\n                                 hooks=imgaug.HooksImages(activator=hook))\n        # Verify that shapes didn't change\n        assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n        assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n        # Change mask back to bool (np.bool is deprecated, so use the builtin)\n        mask = mask.astype(bool)\n\n    # Some instances may end up with empty (all-zero) masks if they were\n    # cropped out during resizing or augmentation. Filter those instances out here.\n    _idx = np.sum(mask, axis=(0, 1)) > 0\n    mask = mask[:, :, _idx]\n    class_ids = class_ids[_idx]\n    # Bounding boxes. Note that some boxes might be all zeros\n    # if the corresponding mask got cropped out.\n    # bbox: [num_instances, (y1, x1, y2, x2)]\n    bbox = utils.extract_bboxes(mask)\n\n    # Active classes\n    # Different datasets have different classes, so track the\n    # classes supported in the dataset of this image.\n    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n    active_class_ids[source_class_ids] = 1\n\n    # Resize masks to smaller size to reduce memory usage\n    if use_mini_mask:\n        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n    # Image meta data\n    image_meta = compose_image_meta(image_id, original_shape, image.shape,\n                                    window, scale, active_class_ids)\n\n    return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n    \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n    This is not used in normal training. 
It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. 
This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. [y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. 
A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]\n rpn_match[gt_iou_argmax] = 1\n # 3. Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal 
network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\nclass DataGenerator(KU.Sequence):\n \"\"\"An iterable that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n It inherits from keras.utils.Sequence to avoid data redundancy when multiprocessing=True.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. 
Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in trainig detection targets are generated by DetectionTargetLayer.\n\n Returns a Python iterable. Upon calling __getitem__() on it, the\n iterable returns two lists, inputs and outputs. The contents\n of the lists differ depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n\n def __init__(self, dataset, config, shuffle=True, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False):\n self.dataset = dataset\n self.config = config\n self.shuffle = shuffle\n self.augmentation = augmentation\n self.random_rois = random_rois\n self.batch_size = batch_size\n self.detection_targets = detection_targets\n self.image_ids = np.copy(dataset.image_ids)\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n self.backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n def __len__(self):\n return int(np.ceil(len(self.image_ids) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n b = 0 # batch item index\n image_index = -1\n while b < self.batch_size:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(self.image_ids)\n if self.shuffle and image_index == 0:\n np.random.shuffle(self.image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = self.image_ids[image_index]\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(self.dataset, self.config, image_id,\n augmentation=self.augmentation,\n use_mini_mask=self.config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,\n gt_class_ids, gt_boxes, self.config)\n\n # Mask R-CNN Targets\n if self.random_rois:\n rpn_rois = generate_random_rois(\n image.shape, self.random_rois, gt_class_ids, gt_boxes)\n if self.detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (self.batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (self.batch_size, gt_masks.shape[0], gt_masks.shape[1],\n self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if self.random_rois:\n batch_rpn_rois = np.zeros(\n (self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if self.detection_targets:\n batch_rois = np.zeros(\n (self.batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), self.config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if self.random_rois:\n batch_rpn_rois[b] = rpn_rois\n if self.detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if self.random_rois:\n inputs.extend([batch_rpn_rois])\n if self.detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n return inputs, outputs\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the 
Mask RCNN model functionality.\n\n    The actual Keras model is in the keras_model property.\n    \"\"\"\n\n    def __init__(self, mode, config, model_dir):\n        \"\"\"\n        mode: Either \"training\" or \"inference\"\n        config: A subclass of the Config class\n        model_dir: Directory to save training logs and trained weights\n        \"\"\"\n        assert mode in ['training', 'inference']\n        self.mode = mode\n        self.config = config\n        self.model_dir = model_dir\n        self.set_log_dir()\n        self.keras_model = self.build(mode=mode, config=config)\n\n    def build(self, mode, config):\n        \"\"\"Build Mask R-CNN architecture.\n            input_shape: The shape of the input image.\n            mode: Either \"training\" or \"inference\". The inputs and\n                outputs of the model differ accordingly.\n        \"\"\"\n        assert mode in ['training', 'inference']\n\n        # Image size must be divisible by 2 multiple times\n        h, w = config.IMAGE_SHAPE[:2]\n        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n            raise Exception(\"Image size must be divisible by 2 at least 6 times \"\n                            \"to avoid fractions when downscaling and upscaling. \"\n                            \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n        # Inputs\n        input_image = KL.Input(\n            shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n                                    name=\"input_image_meta\")\n        if mode == \"training\":\n            # RPN GT\n            input_rpn_match = KL.Input(\n                shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n            input_rpn_bbox = KL.Input(\n                shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n            # Detection GT (class IDs, bounding boxes, and masks)\n            # 1. GT Class IDs (zero padded)\n            input_gt_class_ids = KL.Input(\n                shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n            # 2. GT Boxes in pixels (zero padded)\n            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n            input_gt_boxes = KL.Input(\n                shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n            # Normalize coordinates\n            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n                x, K.shape(input_image)[1:3]))(input_gt_boxes)\n            # 3. 
GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n\n # A hack to get around Keras's bad support for constants\n # This class returns a constant layer\n class ConstLayer(KE.Layer):\n def __init__(self, x, name=None):\n super(ConstLayer, self).__init__(name=name)\n self.x = tf.Variable(x)\n\n def call(self, input):\n return self.x\n\n anchors = ConstLayer(anchors, name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n from tensorflow.python.keras.saving import hdf5_format\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n with h5py.File(filepath, mode='r') as f:\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n hdf5_format.load_weights_from_hdf5_group(f, layers)\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_metric(loss, name, aggregation='mean')\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n        # of the inner model because they have the weights.\n        layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n            else keras_model.layers\n\n        for layer in layers:\n            # Is the layer a model?\n            if layer.__class__.__name__ == 'Model':\n                print(\"In model: \", layer.name)\n                self.set_trainable(\n                    layer_regex, keras_model=layer, indent=indent + 4)\n                continue\n\n            if not layer.weights:\n                continue\n            # Is it trainable?\n            trainable = bool(re.fullmatch(layer_regex, layer.name))\n            # Update layer. If layer is a container, update inner layer.\n            if layer.__class__.__name__ == 'TimeDistributed':\n                layer.layer.trainable = trainable\n            else:\n                layer.trainable = trainable\n            # Print trainable layer names\n            if trainable and verbose > 0:\n                log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n                                          layer.__class__.__name__))\n\n    def set_log_dir(self, model_path=None):\n        \"\"\"Sets the model log directory and epoch counter.\n\n        model_path: If None, or a format different from what this code uses\n            then set a new log directory and start epochs from 0. Otherwise,\n            extract the log directory and the epoch counter from the file\n            name.\n        \"\"\"\n        # Set date and epoch counter as if starting a new model\n        self.epoch = 0\n        now = datetime.datetime.now()\n\n        # If we have a model path with date and epochs use them\n        if model_path:\n            # Continue from where we left off. Get epoch and date from the file name\n            # A sample model path might look like:\n            # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n            regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n            # Use string for regex since we might want to use pathlib.Path as model_path\n            m = re.match(regex, str(model_path))\n            if m:\n                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n                                        int(m.group(4)), int(m.group(5)))\n                # Epoch number in file is 1-based, and in Keras code it's 0-based.\n                # So, adjust for that then increment by one to start from the next epoch\n                self.epoch = int(m.group(6)) - 1 + 1\n                print('Re-starting from epoch %d' % self.epoch)\n\n        # Directory for training logs\n        self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n            self.config.NAME.lower(), now))\n\n        # Path to save after each epoch. Include placeholders that get filled by Keras.\n        self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n            self.config.NAME.lower()))\n        self.checkpoint_path = self.checkpoint_path.replace(\n            \"*epoch*\", \"{epoch:04d}\")\n\n    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n              augmentation=None, custom_callbacks=None):\n        \"\"\"Train the model.\n        train_dataset, val_dataset: Training and validation Dataset objects.\n        learning_rate: The learning rate to train with\n        epochs: Number of training epochs. Note that previous training epochs\n            are considered to be done already, so this actually determines\n            the epochs to train in total rather than in this particular\n            call.\n        layers: Allows selecting which layers to train. It can be:\n            - A regular expression to match layer names to train\n            - One of these predefined values:\n              heads: The RPN, classifier and mask heads of the network\n              all: All the layers\n              3+: Train Resnet stage 3 and up\n              4+: Train Resnet stage 4 and up\n              5+: Train Resnet stage 5 and up\n        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n            augmentation. 
For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n custom_callbacks: Optional. Add custom callbacks to be called\n with the keras fit_generator method. Must be list of type keras.callbacks.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = DataGenerator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE)\n val_generator = DataGenerator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name == 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=(1 < workers),\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n"
] | [
[
"numpy.ones",
"tensorflow.gather_nd",
"numpy.any",
"tensorflow.logical_and",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.abs",
"tensorflow.keras.layers.Conv2D",
"tensorflow.identity",
"numpy.stack",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group",
"tensorflow.math.log",
"tensorflow.random.shuffle",
"numpy.abs",
"tensorflow.keras.backend.not_equal",
"tensorflow.keras.backend.squeeze",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.backend.int_shape",
"tensorflow.cast",
"tensorflow.equal",
"numpy.divide",
"tensorflow.sqrt",
"tensorflow.unique",
"numpy.array",
"tensorflow.keras.layers.Input",
"tensorflow.round",
"tensorflow.squeeze",
"tensorflow.keras.backend.learning_phase",
"tensorflow.Variable",
"tensorflow.keras.backend.sparse_categorical_crossentropy",
"tensorflow.keras.backend.less",
"numpy.reshape",
"tensorflow.keras.layers.Activation",
"numpy.delete",
"tensorflow.keras.layers.Dense",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.argmax",
"numpy.hstack",
"tensorflow.boolean_mask",
"tensorflow.keras.layers.UpSampling2D",
"numpy.random.shuffle",
"tensorflow.stop_gradient",
"tensorflow.exp",
"tensorflow.argmax",
"tensorflow.keras.backend.equal",
"numpy.concatenate",
"tensorflow.gather",
"numpy.random.randint",
"tensorflow.keras.layers.Reshape",
"tensorflow.maximum",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.backend.reshape",
"numpy.sum",
"tensorflow.keras.layers.Concatenate",
"numpy.copy",
"numpy.log",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.amax",
"tensorflow.split",
"tensorflow.minimum",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.add_n",
"numpy.zeros",
"tensorflow.keras.backend.abs",
"tensorflow.expand_dims",
"tensorflow.keras.backend.binary_crossentropy",
"numpy.arange",
"tensorflow.keras.backend.shape",
"numpy.sort",
"tensorflow.control_dependencies",
"tensorflow.size",
"tensorflow.map_fn",
"tensorflow.image.non_max_suppression",
"tensorflow.reduce_max",
"tensorflow.reshape",
"tensorflow.nn.top_k",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group_by_name",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Add",
"tensorflow.keras.optimizers.SGD",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.divide",
"tensorflow.keras.backend.mean",
"numpy.random.choice",
"numpy.expand_dims",
"numpy.where",
"tensorflow.stack",
"tensorflow.sparse.to_dense",
"numpy.max",
"numpy.broadcast_to",
"tensorflow.pad",
"numpy.empty",
"tensorflow.reduce_mean",
"tensorflow.image.crop_and_resize",
"tensorflow.where"
]
] |
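The `unmold_detections` code in the row above hinges on one coordinate transform: boxes predicted in normalized coordinates of the padded, resized image are re-normalized to the image window and then scaled to pixel coordinates of the original image. Below is a minimal, self-contained sketch of that math (simplified: it omits the exclusive-coordinate shift that `utils.denorm_boxes` applies, and the function name and example values are illustrative only, not part of the dataset row):

```python
import numpy as np

def boxes_to_original_image(boxes, window, original_shape):
    """boxes: [N, (y1, x1, y2, x2)] normalized w.r.t. the molded (padded) image.
    window: (y1, x1, y2, x2), also normalized, marking the real image inside the padding.
    original_shape: (H, W) of the original image.
    """
    wy1, wx1, wy2, wx2 = window
    shift = np.array([wy1, wx1, wy1, wx1])
    scale = np.array([wy2 - wy1, wx2 - wx1, wy2 - wy1, wx2 - wx1])
    boxes = (boxes - shift) / scale              # re-normalize to the window
    h, w = original_shape
    return np.around(boxes * np.array([h - 1, w - 1, h - 1, w - 1])).astype(np.int32)

# A box that exactly covers the window maps to the full original image extent.
print(boxes_to_original_image(np.array([[0.1, 0.1, 0.9, 0.9]]),
                              window=(0.1, 0.1, 0.9, 0.9),
                              original_shape=(480, 640)))
# [[  0   0 479 639]]
```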
ictnlp/Dual-Path | [
"8c4577236908797ede1d971c11c2b3ef247e3469"
] | [
"examples/simultaneous_translation/utils/functions.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\n\ndef exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):\n \"\"\"\n Implementing exclusive cumprod.\n There is cumprod in pytorch, however there is no exclusive mode.\n cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i]\n exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]\n \"\"\"\n tensor_size = list(tensor.size())\n tensor_size[dim] = 1\n return_tensor = safe_cumprod(\n torch.cat([torch.ones(tensor_size, device=tensor.device), tensor], dim=dim),\n dim=dim,\n eps=eps,\n )\n\n if dim == 0:\n return return_tensor[:-1]\n elif dim == 1:\n return return_tensor[:, :-1]\n elif dim == 2:\n return return_tensor[:, :, :-1]\n else:\n raise RuntimeError(\"Cumprod on dimension 3 and more is not implemented\")\n\n\ndef safe_cumprod(tensor, dim: int, eps: float = 1e-10):\n \"\"\"\n An implementation of cumprod to prevent precision issue.\n cumprod(x)\n = [x1, x1x2, x1x2x3, ....]\n = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]\n = exp(cumsum(log(x)))\n \"\"\"\n\n if (tensor + eps < 0).any().item():\n raise RuntimeError(\n \"Safe cumprod can only take non-negative tensors as input.\"\n \"Consider use torch.cumprod if you want to calculate negative values.\"\n )\n\n log_tensor = torch.log(tensor + eps)\n cumsum_log_tensor = torch.cumsum(log_tensor, dim)\n exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)\n return exp_cumsum_log_tensor\n\n\ndef lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):\n \"\"\"\n Convert a tensor of lengths to mask\n For example, lengths = [[2, 3, 4]], max_len = 5\n mask =\n [[1, 1, 1],\n [1, 1, 1],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 0]]\n \"\"\"\n assert len(lengths.size()) <= 2\n if len(lengths) == 2:\n if dim == 1:\n lengths = lengths.t()\n lengths = lengths\n else:\n lengths = lengths.unsqueeze(1)\n\n # lengths : batch_size, 1\n lengths = lengths.view(-1, 1)\n\n batch_size = lengths.size(0)\n # batch_size, max_len\n mask = (\n torch.arange(max_len, device=\"cuda\")\n .expand(batch_size, max_len)\n .type_as(lengths)\n < lengths\n )\n\n if negative_mask:\n mask = ~mask\n\n if dim == 0:\n # max_len, batch_size\n mask = mask.t()\n\n return mask\n\n\ndef moving_sum(x, start_idx: int, end_idx: int):\n \"\"\"\n From MONOTONIC CHUNKWISE ATTENTION\n https://arxiv.org/pdf/1712.05382.pdf\n Equation (18)\n\n x = [x_1, x_2, ..., x_N]\n MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m\n for n in {1, 2, 3, ..., N}\n\n x : src_len, batch_size\n start_idx : start idx\n end_idx : end idx\n\n Example\n src_len = 5\n batch_size = 3\n x =\n [[ 0, 5, 10],\n [ 1, 6, 11],\n [ 2, 7, 12],\n [ 3, 8, 13],\n [ 4, 9, 14]]\n\n MovingSum(x, 3, 1) =\n [[ 0, 5, 10],\n [ 1, 11, 21],\n [ 3, 18, 33],\n [ 6, 21, 36],\n [ 9, 24, 39]]\n\n MovingSum(x, 1, 3) =\n [[ 3, 18, 33],\n [ 6, 21, 36],\n [ 9, 24, 39],\n [ 7, 17, 27],\n [ 4, 9, 14]]\n \"\"\"\n assert start_idx > 0 and end_idx > 0\n assert len(x.size()) == 2\n src_len, batch_size = x.size()\n # batch_size, 1, src_len\n x = x.t().unsqueeze(1)\n # batch_size, 1, src_len\n moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])\n\n moving_sum = (\n torch.nn.functional.conv1d(\n x, moving_sum_weight, padding=start_idx + end_idx - 1\n )\n .squeeze(1)\n .t()\n )\n moving_sum = moving_sum[end_idx:-start_idx]\n\n assert src_len == 
moving_sum.size(0)\n assert batch_size == moving_sum.size(1)\n\n return moving_sum\n"
] | [
[
"torch.ones",
"torch.nn.functional.conv1d",
"torch.exp",
"torch.cumsum",
"torch.log",
"torch.arange"
]
] |
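The Dual-Path row above relies on `safe_cumprod`, which computes a cumulative product as `exp(cumsum(log(x)))` to avoid precision issues, and on an exclusive variant that prepends 1 and drops the last element. A short standalone check of both identities (the input values are arbitrary; this snippet is not part of the dataset row):

```python
import torch

x = torch.tensor([0.5, 0.8, 0.9, 0.2])
eps = 1e-10

# safe_cumprod identity: cumprod(x) == exp(cumsum(log(x + eps))) for non-negative x
safe = torch.exp(torch.cumsum(torch.log(x + eps), dim=0))
print(torch.allclose(safe, torch.cumprod(x, dim=0), atol=1e-6))   # True

# exclusive cumprod: shift right and start with 1 -> [1, x1, x1*x2, x1*x2*x3]
exclusive = torch.cat([torch.ones(1), torch.cumprod(x, dim=0)[:-1]])
print(exclusive)   # tensor([1.0000, 0.5000, 0.4000, 0.3600])
```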
LucasAlegre/stable-baselines3 | [
"6b598323ae070bb0a998d25230f6e11eca4cbe61"
] | [
"stable_baselines3/dqn/policies.py"
] | [
"from typing import Any, Dict, List, Optional, Type\n\nimport gym\nimport torch as th\nfrom torch import nn\n\nfrom stable_baselines3.common.policies import BasePolicy, register_policy\nfrom stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp\nfrom stable_baselines3.common.type_aliases import Schedule\n\n\nclass QNetwork(BasePolicy):\n \"\"\"\n Action-Value (Q-Value) network for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n features_extractor: nn.Module,\n features_dim: int,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n ):\n super(QNetwork, self).__init__(\n observation_space,\n action_space,\n features_extractor=features_extractor,\n normalize_images=normalize_images,\n )\n\n if net_arch is None:\n net_arch = [64, 64]\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.features_extractor = features_extractor\n self.features_dim = features_dim\n self.normalize_images = normalize_images\n action_dim = self.action_space.n # number of actions\n q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)\n self.q_net = nn.Sequential(*q_net)\n\n def forward(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Predict the q-values.\n\n :param obs: Observation\n :return: The estimated Q-Value for each action.\n \"\"\"\n return self.q_net(self.extract_features(obs))\n\n def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:\n q_values = self.forward(observation)\n # Greedy action\n action = q_values.argmax(dim=1).reshape(-1)\n return action\n\n def _get_data(self) -> Dict[str, Any]:\n data = super()._get_data()\n\n data.update(\n dict(\n net_arch=self.net_arch,\n features_dim=self.features_dim,\n activation_fn=self.activation_fn,\n features_extractor=self.features_extractor,\n epsilon=self.epsilon,\n )\n )\n return data\n\n\nclass DQNPolicy(BasePolicy):\n \"\"\"\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = 
th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(DQNPolicy, self).__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n )\n\n if net_arch is None:\n if features_extractor_class == FlattenExtractor:\n net_arch = [64, 64]\n else:\n net_arch = []\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.normalize_images = normalize_images\n\n self.net_args = {\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"net_arch\": self.net_arch,\n \"activation_fn\": self.activation_fn,\n \"normalize_images\": normalize_images,\n }\n\n self.q_net, self.q_net_target = None, None\n self._build(lr_schedule)\n\n def _build(self, lr_schedule: Schedule) -> None:\n \"\"\"\n Create the network and the optimizer.\n\n :param lr_schedule: Learning rate schedule\n lr_schedule(1) is the initial learning rate\n \"\"\"\n\n self.q_net = self.make_q_net()\n self.q_net_target = self.make_q_net()\n self.q_net_target.load_state_dict(self.q_net.state_dict())\n\n # Setup optimizer with initial learning rate\n self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n def make_q_net(self) -> QNetwork:\n # Make sure we always have separate networks for features extractors etc\n net_args = self._update_features_extractor(self.net_args, features_extractor=None)\n return QNetwork(**net_args).to(self.device)\n\n def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:\n return self._predict(obs, deterministic=deterministic)\n\n def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:\n return self.q_net._predict(obs, deterministic=deterministic)\n\n def _get_data(self) -> Dict[str, Any]:\n data = super()._get_data()\n\n data.update(\n dict(\n net_arch=self.net_args[\"net_arch\"],\n activation_fn=self.net_args[\"activation_fn\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n optimizer_class=self.optimizer_class,\n optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n\nMlpPolicy = DQNPolicy\n\n\nclass CnnPolicy(DQNPolicy):\n \"\"\"\n Policy class for DQN when using images as input.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, 
Any]] = None,\n ):\n super(CnnPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )\n\n\nregister_policy(\"MlpPolicy\", MlpPolicy)\nregister_policy(\"CnnPolicy\", CnnPolicy)\n"
] | [
[
"torch.nn.Sequential"
]
] |
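The `QNetwork` in the stable-baselines3 row maps extracted features through an MLP to one Q-value per discrete action and takes the argmax as the greedy action. A minimal sketch of that pattern in plain PyTorch (deliberately not using the SB3 API; the observation/action sizes and layer widths are assumed for illustration):

```python
import torch as th
from torch import nn

obs_dim, n_actions = 4, 2                 # assumed sizes, e.g. a CartPole-like task
q_net = nn.Sequential(                    # mirrors the default net_arch = [64, 64]
    nn.Linear(obs_dim, 64), nn.ReLU(),
    nn.Linear(64, 64), nn.ReLU(),
    nn.Linear(64, n_actions),
)

obs = th.randn(8, obs_dim)                # a batch of 8 flattened observations
q_values = q_net(obs)                     # shape (8, n_actions): one Q-value per action
actions = q_values.argmax(dim=1)          # greedy action selection
print(q_values.shape, actions.shape)      # torch.Size([8, 2]) torch.Size([8])
```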
srihari-humbarwadi/adain-tensorflow2.x | [
"c0da16e4d39d5316683ed0988787aedbb1c9768c"
] | [
"adain/learning_rate_schedule.py"
] | [
"import tensorflow as tf\r\n\r\n\r\nclass InverseDecay(tf.optimizers.schedules.LearningRateSchedule):\r\n def __init__(self, initial_learning_rate, decay_rate):\r\n super(InverseDecay, self).__init__()\r\n\r\n self.initial_learning_rate = initial_learning_rate\r\n self.decay_rate = decay_rate\r\n\r\n def __call__(self, step):\r\n learning_rate = tf.math.divide_no_nan(\r\n self.initial_learning_rate,\r\n (1.0 + self.decay_rate * tf.cast(step, dtype=tf.float32)))\r\n return learning_rate\r\n\r\n def get_config(self):\r\n config = {\r\n \"initial_learning_rate\": self.initial_learning_rate,\r\n \"decay_rate\": self.decay_rate,\r\n }\r\n base_config = super(InverseDecay,\r\n self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n"
] | [
[
"tensorflow.cast"
]
] |
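The `InverseDecay` schedule above evaluates `lr(step) = initial_lr / (1 + decay_rate * step)`. A quick numeric check of that formula (the constants here are arbitrary examples, not values taken from the repository):

```python
initial_lr, decay_rate = 1e-4, 5e-5       # arbitrary example values

def inverse_decay(step):
    return initial_lr / (1.0 + decay_rate * step)

for step in (0, 10_000, 100_000):
    print(step, inverse_decay(step))
# 0       1e-04
# 10000   ~6.67e-05
# 100000  ~1.67e-05
```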
andrewgryan/bokeh-playground | [
"aeab70627a5ccd7f210c354098d30bdf92bb553f"
] | [
"grove/main.py"
] | [
"import argparse\nimport bokeh.plotting\nimport bokeh.models\nimport bokeh.palettes\nimport bokeh.colors\nimport cartopy\nimport numpy as np\nimport netCDF4\n\n\nGOOGLE = cartopy.crs.Mercator.GOOGLE\nPLATE_CARREE = cartopy.crs.PlateCarree()\n\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"paths\", nargs=\"+\")\n return parser.parse_args(args=argv)\n\n\ndef main():\n args = parse_args()\n x_range, y_range = google_mercator([-180, 180], [-80, 80])\n figure = bokeh.plotting.figure(\n sizing_mode=\"stretch_both\",\n x_range=x_range,\n y_range=y_range,\n x_axis_type=\"mercator\",\n y_axis_type=\"mercator\",\n active_scroll=\"wheel_zoom\")\n figure.toolbar_location = None\n figure.axis.visible = False\n figure.min_border = 0\n tile = bokeh.models.WMTSTileSource(\n url='http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png',\n attribution=\"Attribution text goes here\"\n )\n figure.add_tile(tile)\n\n box_select_tool = bokeh.models.BoxSelectTool()\n figure.add_tools(box_select_tool)\n figure.toolbar.active_drag = box_select_tool\n\n # Plot Class 4 positions\n source = bokeh.models.ColumnDataSource({\n \"x\": [],\n \"y\": [],\n \"v\": []\n })\n circle_source = bokeh.models.ColumnDataSource({\n \"x\": [],\n \"y\": []})\n\n def callback(attr, old, new):\n v = np.ma.copy(source.data[\"v\"])\n x = np.argsort(np.argsort(v))[new]\n y = v[new]\n circle_source.data = {\n \"x\": x,\n \"y\": y\n }\n source.selected.on_change(\"indices\", callback)\n color_mapper = bokeh.models.LinearColorMapper(\n palette=bokeh.palettes.Plasma[256],\n nan_color=bokeh.colors.RGB(0, 0, 0, a=0),\n )\n view = View(args.paths, source, color_mapper, figure)\n figure.circle(\n x=\"x\",\n y=\"y\",\n source=source,\n color={\"field\": \"v\", \"transform\": color_mapper},\n line_color={\"field\": \"v\", \"transform\": color_mapper})\n color_bar = bokeh.models.ColorBar(\n color_mapper=color_mapper,\n orientation='horizontal',\n background_fill_alpha=0,\n location='bottom_center',\n major_tick_line_color='black',\n bar_line_color='black')\n figure.add_layout(color_bar, 'center')\n\n widgets = []\n check_box = bokeh.models.CheckboxGroup(labels=[\"quality control\"])\n check_box.on_change('active', view.on_change_quality)\n widgets.append(check_box)\n\n radio_group = bokeh.models.RadioGroup(labels=view.parameters)\n radio_group.on_change('active', view.on_change_parameter)\n widgets.append(radio_group)\n\n radio_group = bokeh.models.RadioGroup(labels=view.fields)\n radio_group.on_change('active', view.on_change_field)\n widgets.append(radio_group)\n\n radio_group = bokeh.models.RadioGroup(labels=view.models)\n radio_group.on_change('active', view.on_change_model)\n widgets.append(radio_group)\n\n controls = bokeh.layouts.column(*widgets, name=\"hello\")\n\n second_figure = bokeh.plotting.figure(\n name=\"hello\",\n plot_width=300,\n plot_height=300)\n line_source = bokeh.models.ColumnDataSource({\n \"x\": [],\n \"y\": []})\n second_figure.line(x=\"x\", y=\"y\", source=line_source)\n second_figure.circle(x=\"x\", y=\"y\", source=circle_source)\n second_figure.toolbar.logo = None\n second_figure.toolbar_location = None\n second_figure.min_border_left = 20\n second_figure.min_border_right = 20\n second_figure.border_fill_alpha = 0\n\n def on_change(attr, old, new):\n values = np.ma.copy(source.data[\"v\"]).compressed()\n values.sort()\n line_source.data = {\n \"x\": np.arange(len(values)),\n \"y\": values\n }\n source.on_change(\"data\", on_change)\n\n document = bokeh.plotting.curdoc()\n document.title 
= \"Geo-relational ocean verification exploration tool\"\n document.add_root(figure)\n document.add_root(controls)\n document.add_root(second_figure)\n\n\nclass View(object):\n def __init__(self, paths, source, color_mapper, figure):\n self.parameter = None\n self.model = None\n self.field = None\n self.paths = paths\n self.source = source\n self.color_mapper = color_mapper\n self.figure = figure\n self.store = {}\n models = []\n parameters = []\n for path in self.paths:\n with netCDF4.Dataset(path) as dataset:\n parameter = dataset.obs_type\n model = \" \".join([\n dataset.system,\n dataset.version,\n dataset.configuration])\n self.store[(parameter, model)] = path\n models.append(model)\n parameters.append(parameter)\n self.parameters = list(sorted(set(parameters)))\n self.models = list(sorted(set(models)))\n self.fields = [\n \"observation\",\n \"forecast\",\n \"forecast - observation\",\n \"|forecast - observation|\"]\n self.quality_control = False\n\n def on_change_field(self, attr, old, new):\n self.field = self.fields[new]\n self.render()\n\n def on_change_quality(self, attr, old, new):\n self.quality_control = 0 in new\n self.render()\n\n def on_change_model(self, attr, old, new):\n self.model = self.models[new]\n self.render()\n\n def on_change_parameter(self, attr, old, new):\n self.parameter = self.parameters[new]\n self.render()\n\n def render(self):\n if self.field is None:\n return\n if self.parameter is None:\n return\n if self.model is None:\n return\n path = self.store[(self.parameter, self.model)]\n print(path, self.field)\n with netCDF4.Dataset(path) as dataset:\n lats = dataset.variables[\"latitude\"][:]\n lons = dataset.variables[\"longitude\"][:]\n if self.field == \"forecast - observation\":\n f = dataset.variables[\"forecast\"][:, 0, 0, 0]\n o = dataset.variables[\"observation\"][:, 0, 0]\n v = f - o\n elif self.field == \"|forecast - observation|\":\n f = dataset.variables[\"forecast\"][:, 0, 0, 0]\n o = dataset.variables[\"observation\"][:, 0, 0]\n v = np.ma.abs(f - o)\n elif self.field == \"forecast\":\n v = dataset.variables[\"forecast\"][:, 0, 0, 0]\n elif self.field == \"observation\":\n v = dataset.variables[\"observation\"][:, 0, 0]\n else:\n raise Exception(\"unknown field: {}\".format(self.field))\n if self.quality_control:\n flags = dataset.variables[\"qc\"][:, 0, 0]\n pts = np.ma.where(flags == 0)\n lons = lons[pts]\n lats = lats[pts]\n v = v[pts]\n x, y = google_mercator(lons, lats)\n\n # Geographic filtering\n pts = np.ma.where(\n (x >= self.figure.x_range.start) &\n (x <= self.figure.x_range.end) &\n (y >= self.figure.y_range.start) &\n (y <= self.figure.y_range.end))\n x = x[pts]\n y = y[pts]\n v = v[pts]\n\n self.source.data = {\n \"x\": x,\n \"y\": y,\n \"v\": v\n }\n self.color_mapper.low = v.min()\n self.color_mapper.high = v.max()\n\n\ndef google_mercator(lons, lats):\n return transform(PLATE_CARREE, GOOGLE, lons, lats)\n\n\ndef plate_carree(x, y):\n return transform(GOOGLE, PLATE_CARREE, x, y)\n\n\ndef transform(src_crs, dst_crs, x, y):\n x, y = np.asarray(x), np.asarray(y)\n xt, yt, _ = dst_crs.transform_points(src_crs, x.flatten(), y.flatten()).T\n return xt, yt\n\n\nif __name__.startswith(\"bk\"):\n main()\n"
] | [
[
"numpy.argsort",
"numpy.asarray",
"numpy.ma.copy",
"numpy.ma.abs",
"numpy.ma.where"
]
] |
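The bokeh-playground row converts longitude/latitude to Web Mercator via cartopy's `transform_points` before plotting on the tile map. For reference, here is a sketch of the spherical Web Mercator formula written out with numpy; it is an approximation of what the cartopy call computes (radius 6378137 m, as used by web map tiles), not a drop-in replacement:

```python
import numpy as np

R = 6378137.0  # spherical Earth radius used by web map tiles

def lonlat_to_web_mercator(lons, lats):
    lons, lats = np.asarray(lons, dtype=float), np.asarray(lats, dtype=float)
    x = np.radians(lons) * R
    y = np.log(np.tan(np.pi / 4.0 + np.radians(lats) / 2.0)) * R
    return x, y

x, y = lonlat_to_web_mercator([-180, 0, 180], [-80, 0, 80])
print(np.round(x))   # approx [-20037508.        0.  20037508.]
print(np.round(y))   # approx [-15538711.        0.  15538711.]
```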
PTPeraire/openprotein | [
"3f6ede8c63d18f14e938bd47935001a82c4d6897"
] | [
"experiments/tmhmm3/tm_models.py"
] | [
"\"\"\"\nThis file is part of the OpenProtein project.\n\nFor license information, please see the LICENSE file in the root directory.\n\"\"\"\n\nimport sys\nfrom enum import Enum\nimport glob\nimport pickle\nimport numpy as np\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport openprotein\nfrom experiments.tmhmm3.tm_util import label_list_to_topology\nfrom experiments.tmhmm3.tm_util import get_predicted_type_from_labels\nfrom experiments.tmhmm3.tm_util import remapped_labels_hmm_to_orginal_labels\nfrom experiments.tmhmm3.tm_util import is_topologies_equal\nfrom experiments.tmhmm3.tm_util import original_labels_to_fasta\nfrom pytorchcrf.torchcrf import CRF\nfrom util import write_out, get_experiment_id\n\n# seed random generator for reproducibility\ntorch.manual_seed(1)\n\n\nclass TMHMM3(openprotein.BaseModel):\n def __init__(self,\n embedding,\n hidden_size,\n use_gpu,\n model_mode,\n use_marg_prob,\n type_predictor_model,\n profile_path):\n super(TMHMM3, self).__init__(embedding, use_gpu)\n\n # initialize model variables\n num_tags = 5\n num_labels = 5\n self.max_signal_length = 67\n if model_mode == TMHMM3Mode.LSTM_CRF_HMM:\n num_tags += 2 * 40 + self.max_signal_length\n elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:\n num_tags = num_tags * 4 # 4 different types\n # num_labels = num_tags # 4 different types\n self.hidden_size = hidden_size\n self.use_gpu = use_gpu\n self.use_marg_prob = use_marg_prob\n self.model_mode = model_mode\n self.embedding = embedding\n self.profile_path = profile_path\n self.bi_lstm = nn.LSTM(self.get_embedding_size(),\n self.hidden_size,\n num_layers=1,\n bidirectional=True)\n self.hidden_to_labels = nn.Linear(self.hidden_size * 2, num_labels) # * 2 for bidirectional\n self.hidden_layer = None\n crf_start_mask = torch.ones(num_tags).byte()\n crf_end_mask = torch.ones(num_tags).byte()\n if model_mode == TMHMM3Mode.LSTM_CRF_HMM:\n allowed_transitions = [\n (3, 3), (4, 4),\n (3, 5), (4, 45)]\n for i in range(5, 45 - 1):\n allowed_transitions.append((i, i + 1))\n if 8 < i < 43:\n allowed_transitions.append((8, i))\n allowed_transitions.append((44, 4))\n for i in range(45, 85 - 1):\n allowed_transitions.append((i, i + 1))\n if 48 < i < 83:\n allowed_transitions.append((48, i))\n allowed_transitions.append((84, 3))\n for i in range(85, 151):\n allowed_transitions.append((i, i + 1))\n allowed_transitions.append((2, i))\n allowed_transitions.append((2, 151))\n allowed_transitions.append((2, 4))\n allowed_transitions.append((151, 4))\n\n crf_start_mask[2] = 0\n crf_start_mask[3] = 0\n crf_start_mask[4] = 0\n crf_end_mask[3] = 0\n crf_end_mask[4] = 0\n elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:\n allowed_transitions = [\n (0, 0), (1, 1), (3, 3), (4, 4), (3, 0), (0, 4), (4, 1), (1, 3),\n (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (8, 5), (5, 9), (9, 6), (6, 8), (7, 9),\n (12, 12), (14, 14), (12, 14),\n (18, 18),\n ]\n crf_start_mask[3] = 0\n crf_start_mask[4] = 0\n crf_start_mask[7] = 0\n crf_start_mask[8] = 0\n crf_start_mask[9] = 0\n crf_start_mask[12] = 0\n crf_start_mask[18] = 0\n crf_end_mask[3] = 0\n crf_end_mask[4] = 0\n crf_end_mask[8] = 0\n crf_end_mask[9] = 0\n crf_end_mask[14] = 0\n crf_end_mask[18] = 0\n else:\n allowed_transitions = [\n (0, 0), (1, 1), (2, 2), (3, 3), (4, 4),\n (3, 0), (0, 4), (4, 1), (1, 3), (2, 4)]\n\n crf_start_mask[2] = 0\n crf_start_mask[3] = 0\n crf_start_mask[4] = 0\n crf_end_mask[3] = 0\n crf_end_mask[4] = 0\n self.allowed_transitions = allowed_transitions\n self.crf_model = CRF(num_tags)\n 
self.type_classifier = type_predictor_model\n self.type_tm_classier = None\n self.type_sp_classier = None\n crf_transitions_mask = torch.ones((num_tags, num_tags)).byte()\n\n self.label_01loss_values = []\n self.type_01loss_values = []\n self.topology_01loss_values = []\n\n # if on GPU, move state to GPU memory\n if self.use_gpu:\n self.crf_model = self.crf_model.cuda()\n self.bi_lstm = self.bi_lstm.cuda()\n self.hidden_to_labels = self.hidden_to_labels.cuda()\n crf_transitions_mask = crf_transitions_mask.cuda()\n crf_start_mask = crf_start_mask.cuda()\n crf_end_mask = crf_end_mask.cuda()\n\n # compute mask matrix from allow transitions list\n for i in range(num_tags):\n for k in range(num_tags):\n if (i, k) in self.allowed_transitions:\n crf_transitions_mask[i][k] = 0\n\n # generate masked transition parameters\n crf_start_transitions, crf_end_transitions, crf_transitions = \\\n generate_masked_crf_transitions(\n self.crf_model, (crf_start_mask, crf_transitions_mask, crf_end_mask)\n )\n\n # initialize CRF\n initialize_crf_parameters(self.crf_model,\n start_transitions=crf_start_transitions,\n end_transitions=crf_end_transitions,\n transitions=crf_transitions)\n\n def get_embedding_size(self):\n if self.embedding == \"BLOSUM62\":\n return 24 # bloom matrix has size 24\n elif self.embedding == \"PROFILE\":\n return 51 # protein profiles have size 51\n\n def flatten_parameters(self):\n self.bi_lstm.flatten_parameters()\n\n def encode_amino_acid(self, letter):\n if self.embedding == \"BLOSUM62\":\n # blosum encoding\n if not globals().get('blosum_encoder'):\n blosum = \\\n \"\"\"4,-1,-2,-2,0,-1,-1,0,-2,-1,-1,-1,-1,-2,-1,1,0,-3,-2,0,-2,-1,0,-4\n -1,5,0,-2,-3,1,0,-2,0,-3,-2,2,-1,-3,-2,-1,-1,-3,-2,-3,-1,0,-1,-4\n -2,0,6,1,-3,0,0,0,1,-3,-3,0,-2,-3,-2,1,0,-4,-2,-3,3,0,-1,-4\n -2,-2,1,6,-3,0,2,-1,-1,-3,-4,-1,-3,-3,-1,0,-1,-4,-3,-3,4,1,-1,-4\n 0,-3,-3,-3,9,-3,-4,-3,-3,-1,-1,-3,-1,-2,-3,-1,-1,-2,-2,-1,-3,-3,-2,-4\n -1,1,0,0,-3,5,2,-2,0,-3,-2,1,0,-3,-1,0,-1,-2,-1,-2,0,3,-1,-4\n -1,0,0,2,-4,2,5,-2,0,-3,-3,1,-2,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4\n 0,-2,0,-1,-3,-2,-2,6,-2,-4,-4,-2,-3,-3,-2,0,-2,-2,-3,-3,-1,-2,-1,-4\n -2,0,1,-1,-3,0,0,-2,8,-3,-3,-1,-2,-1,-2,-1,-2,-2,2,-3,0,0,-1,-4\n -1,-3,-3,-3,-1,-3,-3,-4,-3,4,2,-3,1,0,-3,-2,-1,-3,-1,3,-3,-3,-1,-4\n -1,-2,-3,-4,-1,-2,-3,-4,-3,2,4,-2,2,0,-3,-2,-1,-2,-1,1,-4,-3,-1,-4\n -1,2,0,-1,-3,1,1,-2,-1,-3,-2,5,-1,-3,-1,0,-1,-3,-2,-2,0,1,-1,-4\n -1,-1,-2,-3,-1,0,-2,-3,-2,1,2,-1,5,0,-2,-1,-1,-1,-1,1,-3,-1,-1,-4\n -2,-3,-3,-3,-2,-3,-3,-3,-1,0,0,-3,0,6,-4,-2,-2,1,3,-1,-3,-3,-1,-4\n -1,-2,-2,-1,-3,-1,-1,-2,-2,-3,-3,-1,-2,-4,7,-1,-1,-4,-3,-2,-2,-1,-2,-4\n 1,-1,1,0,-1,0,0,0,-1,-2,-2,0,-1,-2,-1,4,1,-3,-2,-2,0,0,0,-4\n 0,-1,0,-1,-1,-1,-1,-2,-2,-1,-1,-1,-1,-2,-1,1,5,-2,-2,0,-1,-1,0,-4\n -3,-3,-4,-4,-2,-2,-3,-2,-2,-3,-2,-3,-1,1,-4,-3,-2,11,2,-3,-4,-3,-2,-4\n -2,-2,-2,-3,-2,-1,-2,-3,2,-1,-1,-2,-1,3,-3,-2,-2,2,7,-1,-3,-2,-1,-4\n 0,-3,-3,-3,-1,-2,-2,-3,-3,3,1,-2,1,-1,-2,-2,0,-3,-1,4,-3,-2,-1,-4\n -2,-1,3,4,-3,0,1,-1,0,-3,-4,0,-3,-3,-2,0,-1,-4,-3,-3,4,1,-1,-4\n -1,0,0,1,-3,3,4,-2,0,-3,-3,1,-1,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4\n 0,-1,-1,-1,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,0,0,-2,-1,-1,-1,-1,-1,-4\n -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,1\"\"\" \\\n .replace('\\n', ',')\n blosum_matrix = np.fromstring(blosum, sep=\",\").reshape(24, 24)\n blosum_key = \"A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U\".split(\",\")\n key_map = {}\n for idx, value in enumerate(blosum_key):\n key_map[value] = list([int(v) for v in blosum_matrix[idx].astype('int')])\n 
globals().__setitem__(\"blosum_encoder\", key_map)\n return globals().get('blosum_encoder')[letter]\n elif self.embedding == \"ONEHOT\":\n # one hot encoding\n one_hot_key = \"A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U\".split(\",\")\n arr = []\n for idx, k in enumerate(one_hot_key):\n if k == letter:\n arr.append(1)\n else:\n arr.append(0)\n return arr\n elif self.embedding == \"PYTORCH\":\n key_id = \"A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U\".split(\",\")\n for idx, k in enumerate(key_id):\n if k == letter:\n return idx\n\n def embed(self, prot_aa_list):\n embed_list = []\n for aa_list in prot_aa_list:\n if self.embedding == \"PYTORCH\":\n tensor = torch.LongTensor(tensor)\n elif self.embedding == \"PROFILE\":\n if not globals().get('profile_encoder'):\n print(\"Load profiles...\")\n files = glob.glob(self.profile_path.strip(\"/\") + \"/*\")\n profile_dict = {}\n for profile_file in files:\n profile = pickle.load(open(profile_file, \"rb\")).popitem()[1]\n profile_dict[profile[\"seq\"]] = torch.from_numpy(profile[\"profile\"]).float()\n globals().__setitem__(\"profile_encoder\", profile_dict)\n print(\"Loaded profiles\")\n tensor = globals().get('profile_encoder')[aa_list]\n else:\n tensor = list([self.encode_amino_acid(aa) for aa in aa_list])\n tensor = torch.FloatTensor(tensor)\n if self.use_gpu:\n tensor = tensor.cuda()\n embed_list.append(tensor)\n return embed_list\n\n def init_hidden(self, minibatch_size):\n # number of layers (* 2 since bidirectional), minibatch_size, hidden size\n initial_hidden_state = torch.zeros(1 * 2, minibatch_size, self.hidden_size)\n initial_cell_state = torch.zeros(1 * 2, minibatch_size, self.hidden_size)\n if self.use_gpu:\n initial_hidden_state = initial_hidden_state.cuda()\n initial_cell_state = initial_cell_state.cuda()\n self.hidden_layer = (autograd.Variable(initial_hidden_state),\n autograd.Variable(initial_cell_state))\n\n def _get_network_emissions(self, input_sequences):\n batch_sizes = torch.LongTensor(list([i.size(0) for i in input_sequences]))\n pad_seq_embed = torch.nn.utils.rnn.pad_sequence(input_sequences)\n minibatch_size = len(input_sequences)\n self.init_hidden(minibatch_size)\n bi_lstm_out, self.hidden_layer = self.bi_lstm(pad_seq_embed, self.hidden_layer)\n emissions = self.hidden_to_labels(bi_lstm_out)\n if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:\n inout_select = torch.LongTensor([0])\n outin_select = torch.LongTensor([1])\n signal_select = torch.LongTensor([2])\n if self.use_gpu:\n inout_select = inout_select.cuda()\n outin_select = outin_select.cuda()\n signal_select = signal_select.cuda()\n inout = torch.index_select(emissions, 2, autograd.Variable(inout_select))\n outin = torch.index_select(emissions, 2, autograd.Variable(outin_select))\n signal = torch.index_select(emissions, 2, autograd.Variable(signal_select))\n emissions = torch.cat((emissions, inout.expand(-1, len(batch_sizes), 40),\n outin.expand(-1, len(batch_sizes), 40),\n signal.expand(-1, len(batch_sizes), self.max_signal_length)), 2)\n elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:\n emissions = emissions.repeat(1, 1, 4)\n return emissions, batch_sizes\n\n def batch_sizes_to_mask(self, batch_sizes):\n mask = torch.autograd.Variable(torch.t(torch.ByteTensor(\n [[1] * int(batch_size) + [0] * (int(batch_sizes[0])\n - int(batch_size)) for batch_size in batch_sizes]\n )))\n if self.use_gpu:\n mask = mask.cuda()\n return mask\n\n def compute_loss(self, training_minibatch):\n _, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, \\\n 
_prot_type_list, _prot_topology_list, _prot_name_list, original_aa_string, \\\n _original_label_string = training_minibatch\n minibatch_size = len(labels_list)\n if self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:\n labels_to_use = remapped_labels_list_crf_marg\n elif self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:\n labels_to_use = remapped_labels_list_crf_hmm\n else:\n labels_to_use = labels_list\n input_sequences = [autograd.Variable(x) for x in self.embed(original_aa_string)]\n\n actual_labels = torch.nn.utils.rnn.pad_sequence([autograd.Variable(l)\n for l in labels_to_use])\n emissions, batch_sizes = self._get_network_emissions(input_sequences)\n if self.model_mode == TMHMM3Mode.LSTM:\n prediction = emissions.transpose(0, 1).contiguous().view(-1, emissions.size(-1))\n target = actual_labels.transpose(0, 1).contiguous().view(-1, 1)\n losses = -torch.gather(nn.functional.log_softmax(prediction),\n dim=1, index=target).view(*actual_labels\n .transpose(0, 1).size())\n mask_expand = torch.range(0, batch_sizes.data.max() - 1).long() \\\n .unsqueeze(0).expand(batch_sizes.size(0), batch_sizes.data.max())\n if self.use_gpu:\n mask_expand = mask_expand.cuda()\n batch_sizes = batch_sizes.cuda()\n mask = mask_expand < batch_sizes.unsqueeze(1).expand_as(mask_expand)\n loss = (losses * mask.float()).sum() / batch_sizes.float().sum()\n else:\n mask = (self.batch_sizes_to_mask(batch_sizes))\n loss = -1 * self.crf_model(emissions, actual_labels, mask=mask) / minibatch_size\n if float(loss) > 100000: # if loss is this large, an invalid tx must have been found\n for idx, batch_size in enumerate(batch_sizes):\n last_label = None\n for i in range(batch_size):\n label = int(actual_labels[i][idx])\n write_out(str(label) + \",\", end='')\n if last_label is not None and (last_label, label) \\\n not in self.allowed_transitions:\n write_out(\"Error: invalid transition found\")\n write_out((last_label, label))\n sys.exit(1)\n last_label = label\n write_out(\" \")\n return loss\n\n def forward(self, input_sequences, forced_types=None):\n emissions, batch_sizes = self._get_network_emissions(input_sequences)\n if self.model_mode == TMHMM3Mode.LSTM:\n output = torch.nn.functional.log_softmax(emissions, dim=2)\n _, predicted_labels = output[:, :, 0:5].max(dim=2)\n predicted_labels = list(\n [list(map(int, x[:batch_sizes[idx]])) for idx, x in enumerate(predicted_labels\n .transpose(0, 1))])\n predicted_labels = list(\n torch.cuda.LongTensor(l) if self.use_gpu else torch.LongTensor(l)\n for l in predicted_labels)\n predicted_topologies = list(map(label_list_to_topology, predicted_labels))\n predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,\n predicted_labels)))\n\n else:\n mask = self.batch_sizes_to_mask(batch_sizes)\n labels_predicted = list(torch.cuda.LongTensor(l) if self.use_gpu\n else torch.LongTensor(l) for l in\n self.crf_model.decode(emissions, mask=mask))\n\n if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:\n predicted_labels = list(map(remapped_labels_hmm_to_orginal_labels,\n labels_predicted))\n predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,\n predicted_labels)))\n elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:\n alpha = self.crf_model._compute_log_alpha(emissions, mask, run_backwards=False)\n z_value = alpha[alpha.size(0) - 1] + self.crf_model.end_transitions\n types = z_value.view((-1, 4, 5))\n types = logsumexp(types, dim=2)\n _, predicted_types = torch.max(types, dim=1)\n predicted_labels = list([l % 5 for l in labels_predicted]) # remap\n else:\n 
predicted_labels = labels_predicted\n predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,\n predicted_labels)))\n\n if self.use_gpu:\n predicted_types = predicted_types.cuda()\n predicted_topologies = list(map(label_list_to_topology, predicted_labels))\n\n # if all O's, change to all I's (by convention)\n for idx, labels in enumerate(predicted_labels):\n if torch.eq(labels, 4).all():\n predicted_labels[idx] = labels - 1\n\n return predicted_labels, predicted_types if forced_types \\\n is None else forced_types, predicted_topologies\n\n def evaluate_model(self, data_loader):\n validation_loss_tracker = []\n validation_type_loss_tracker = []\n validation_topology_loss_tracker = []\n confusion_matrix = np.zeros((5, 5), dtype=np.int64)\n protein_names = []\n protein_aa_strings = []\n protein_label_actual = []\n protein_label_prediction = []\n for _, minibatch in enumerate(data_loader, 0):\n validation_loss_tracker.append(self.compute_loss(minibatch).detach())\n\n _, _, _, _, prot_type_list, prot_topology_list, \\\n prot_name_list, original_aa_string, original_label_string = minibatch\n input_sequences = [x for x in self.embed(original_aa_string)]\n predicted_labels, predicted_types, predicted_topologies = self(input_sequences)\n\n protein_names.extend(prot_name_list)\n protein_aa_strings.extend(original_aa_string)\n protein_label_actual.extend(original_label_string)\n\n # if we're using an external type predictor\n if self.type_classifier is not None:\n predicted_labels_type_classifer, \\\n predicted_types_type_classifier, \\\n predicted_topologies_type_classifier = self.type_classifier(input_sequences)\n\n for idx, actual_type in enumerate(prot_type_list):\n\n predicted_type = predicted_types[idx]\n predicted_topology = predicted_topologies[idx]\n predicted_labels_for_protein = predicted_labels[idx]\n\n if self.type_classifier is not None:\n if predicted_type != predicted_types_type_classifier[idx]:\n # we must always use the type predicted by the type predictor if available\n predicted_type = predicted_types_type_classifier[idx]\n predicted_topology = predicted_topologies_type_classifier[idx]\n predicted_labels_for_protein = predicted_labels_type_classifer[idx]\n\n prediction_topology_match = is_topologies_equal(prot_topology_list[idx],\n predicted_topology, 5)\n\n if actual_type == predicted_type:\n validation_type_loss_tracker.append(0)\n # if we guessed the type right for SP+GLOB or GLOB,\n # count the topology as correct\n if actual_type == 2 or actual_type == 3 or prediction_topology_match:\n validation_topology_loss_tracker.append(0)\n confusion_matrix[actual_type][4] += 1\n else:\n validation_topology_loss_tracker.append(1)\n confusion_matrix[actual_type][predicted_type] += 1\n\n # if the type was correctly guessed to be 2 or 3 by the type classifier,\n # use its topology prediction\n if (actual_type in (2, 3)) and self.type_classifier is not None:\n protein_label_prediction.append(predicted_labels_type_classifer[idx])\n else:\n protein_label_prediction.append(predicted_labels_for_protein)\n else:\n confusion_matrix[actual_type][predicted_type] += 1\n validation_type_loss_tracker.append(1)\n validation_topology_loss_tracker.append(1)\n protein_label_prediction.append(predicted_labels_for_protein)\n\n write_out(confusion_matrix)\n _loss = float(torch.stack(validation_loss_tracker).mean())\n\n type_loss = float(torch.FloatTensor(validation_type_loss_tracker).mean().detach())\n topology_loss = 
float(torch.FloatTensor(validation_topology_loss_tracker).mean().detach())\n\n self.type_01loss_values.append(type_loss)\n self.topology_01loss_values.append(topology_loss)\n\n if get_experiment_id() is not None and \"TYPE\" in get_experiment_id():\n # optimize for type\n validation_loss = type_loss\n else:\n # optimize for topology\n validation_loss = topology_loss\n\n data = {}\n data['type_01loss_values'] = self.type_01loss_values\n data['topology_01loss_values'] = self.topology_01loss_values\n data['confusion_matrix'] = confusion_matrix.tolist()\n\n return validation_loss, data, (\n protein_names, protein_aa_strings, protein_label_actual, protein_label_prediction)\n\n\ndef post_process_prediction_data(prediction_data):\n data = []\n for (name, aa_string, actual, prediction) in zip(*prediction_data):\n data.append(\"\\n\".join([\">\" + name,\n aa_string,\n actual,\n original_labels_to_fasta(prediction)]))\n return \"\\n\".join(data)\n\n\ndef logsumexp(data, dim):\n return data.max(dim)[0] + torch.log(torch.sum(\n torch.exp(data - data.max(dim)[0].unsqueeze(dim)), dim))\n\n\ndef initialize_crf_parameters(crf_model,\n start_transitions=None,\n end_transitions=None,\n transitions=None) -> None:\n \"\"\"Initialize the transition parameters.\n\n The parameters will be initialized randomly from a uniform distribution\n between -0.1 and 0.1, unless given explicitly as an argument.\n \"\"\"\n if start_transitions is None:\n nn.init.uniform(crf_model.start_transitions, -0.1, 0.1)\n else:\n crf_model.start_transitions.data = start_transitions\n if end_transitions is None:\n nn.init.uniform(crf_model.end_transitions, -0.1, 0.1)\n else:\n crf_model.end_transitions.data = end_transitions\n if transitions is None:\n nn.init.uniform(crf_model.transitions, -0.1, 0.1)\n else:\n crf_model.transitions.data = transitions\n\n\ndef generate_masked_crf_transitions(crf_model, transition_mask):\n start_transitions_mask, transitions_mask, end_transition_mask = transition_mask\n start_transitions = crf_model.start_transitions.data.clone()\n end_transitions = crf_model.end_transitions.data.clone()\n transitions = crf_model.transitions.data.clone()\n if start_transitions_mask is not None:\n start_transitions.masked_fill_(start_transitions_mask, -100000000)\n if end_transition_mask is not None:\n end_transitions.masked_fill_(end_transition_mask, -100000000)\n if transitions_mask is not None:\n transitions.masked_fill_(transitions_mask, -100000000)\n return start_transitions, end_transitions, transitions\n\n\nclass TMHMM3Mode(Enum):\n LSTM = 1\n LSTM_CRF = 2\n LSTM_CRF_HMM = 3\n LSTM_CRF_MARG = 4\n"
] | [
[
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.init.uniform",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.ones",
"numpy.zeros",
"torch.FloatTensor",
"torch.stack",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.eq",
"torch.from_numpy",
"torch.max",
"torch.zeros",
"torch.cuda.LongTensor",
"torch.LongTensor",
"numpy.fromstring"
]
] |
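The row above rolls its own numerically stable `logsumexp` for the CRF layer. A minimal standalone sketch of the same max-shift trick, checked against `torch.logsumexp`; the tensor values below are illustrative, not taken from the row:

```python
import torch

def stable_logsumexp(data: torch.Tensor, dim: int) -> torch.Tensor:
    # Shift by the per-slice maximum before exponentiating so exp() cannot
    # overflow, then add the maximum back outside the log.
    max_val = data.max(dim)[0]
    return max_val + torch.log(torch.sum(torch.exp(data - max_val.unsqueeze(dim)), dim))

scores = torch.tensor([[1000.0, 1001.0, 999.0]])   # naive exp() would overflow here
print(stable_logsumexp(scores, dim=1))             # ~1001.41
print(torch.logsumexp(scores, dim=1))              # built-in reference, same value
```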
THUYimingLi/Open-sourced_Dataset_Protection | [
"910962c57e7d132497443b26c8e5da1dcb5ba4eb"
] | [
"GTSRB/train_standard_vgg.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nThis is the implement of standard training on GTSRB dataset.\n\nCopyright (c) Yiming Li, 2020\n'''\n\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport shutil\nimport time\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport gtsrb_dataset as dataset\nfrom model import *\nfrom utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig\n\n\n\nparser = argparse.ArgumentParser(description='PyTorch GTSRB')\n# Datasets\nparser.add_argument('-j', '--workers', default=2, type=int, metavar='N',\n help='number of data loading workers (default: 2)')\n# Optimization options\nparser.add_argument('--epochs', default=30, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--train-batch', default=128, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--test-batch', default=128, type=int, metavar='N',\n help='test batchsize')\nparser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--drop', '--dropout', default=0, type=float,\n metavar='Dropout', help='Dropout ratio')\nparser.add_argument('--schedule', type=int, nargs='+', default=[20],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n# Checkpoints\nparser.add_argument('-c', '--checkpoint', default='checkpoint/benign', type=str, metavar='PATH',\n help='path to save checkpoint (default: checkpoint)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n# Miscs\nparser.add_argument('--manualSeed', type=int, default=1, help='manual seed')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n#Device options\nparser.add_argument('--gpu-id', default='0', type=str,\n help='id(s) for CUDA_VISIBLE_DEVICES')\n\n\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\n# Use CUDA\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\nuse_cuda = torch.cuda.is_available()\n\n\n# Random seed\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\n\nbest_acc = 0 # best test accuracy\n\n\n\n\ndef main():\n global best_acc\n start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch\n\n if not os.path.isdir(args.checkpoint):\n mkdir_p(args.checkpoint)\n\n # Dataset preprocessing\n title = 'GTSRB'\n\n print('==> Preparing GTSRB dataset')\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor()\n ])\n\n # Create Datasets\n trainset = dataset.GTSRB(\n root_dir='./data', train=True, transform=transform)\n testset = dataset.GTSRB(\n root_dir='./data', train=False, transform=transform)\n\n 
# Load Datasets\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)\n\n # Model\n model = vgg19_bn()\n model = torch.nn.DataParallel(model).cuda()\n cudnn.benchmark = True\n print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n # Resume\n if args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'\n args.checkpoint = os.path.dirname(args.resume)\n checkpoint = torch.load(args.resume)\n best_acc = checkpoint['best_acc']\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)\n else:\n logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)\n logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])\n\n if args.evaluate:\n print('\\nEvaluation only')\n test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)\n print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))\n return\n\n # Train and val\n for epoch in range(start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch)\n\n print('\\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))\n\n train_loss, train_acc = train(args, model, trainloader, criterion, optimizer, epoch, use_cuda)\n test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)\n\n # append logger file\n logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])\n\n # save model\n is_best = test_acc > best_acc\n best_acc = max(test_acc, best_acc)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': test_acc,\n 'best_acc': best_acc,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, checkpoint=args.checkpoint)\n\n logger.close()\n logger.plot()\n savefig(os.path.join(args.checkpoint, 'log.eps'))\n\n print('Best acc:')\n print(best_acc)\n\n\n\n\n\ndef train(args, model, trainloader, criterion, optimizer, epoch, use_cuda):\n # switch to train mode\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n end = time.time()\n\n bar = Bar('Processing', max=len(trainloader))\n \n for batch_idx, (image, target) in enumerate(trainloader):\n # measure data loading time\n data_time.update(time.time() - end)\n if use_cuda:\n image, target = image.cuda(), target.cuda()\n \n\n # compute loss and do SGD step\n outputs = model(image)\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure train accuracy and record loss\n prec1, prec5 = accuracy(outputs.data, target.data, topk=(1, 5))\n losses.update(loss.item(), image.size(0))\n top1.update(prec1.item(), image.size(0))\n top5.update(prec5.item(), image.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: 
{top1: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(trainloader),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n top5=top5.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\n\n\n\ndef test(testloader, model, criterion, epoch, use_cuda):\n global best_acc\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n bar = Bar('Processing', max=len(testloader))\n for batch_idx, (inputs, targets) in enumerate(testloader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # compute output\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n # measure accuracy and record standard loss\n prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec1.item(), inputs.size(0))\n top5.update(prec5.item(), inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(testloader),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n top5=top5.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\n\ndef save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))\n\n\n\n\ndef adjust_learning_rate(optimizer, epoch):\n global state\n if epoch in args.schedule:\n state['lr'] *= args.gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] = state['lr']\n\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed_all",
"torch.load",
"torch.manual_seed",
"torch.save",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] |
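The GTSRB training script above decays the learning rate by `gamma` at fixed epochs via `adjust_learning_rate`. A self-contained sketch of that step-schedule pattern, assuming a toy model and the row's default hyperparameters (schedule `[20]`, gamma `0.1`):

```python
import torch

# Toy model/optimizer standing in for the VGG-19 setup in the row.
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01,
                            momentum=0.9, weight_decay=5e-4)

state = {'lr': 0.01}
schedule, gamma = [20], 0.1

def adjust_learning_rate(optimizer, epoch):
    # Multiply the LR by gamma whenever the epoch hits the schedule,
    # then push the new value into every parameter group.
    if epoch in schedule:
        state['lr'] *= gamma
        for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr']

for epoch in range(30):
    adjust_learning_rate(optimizer, epoch)
print(state['lr'])   # 0.001 once epoch 20 has passed
```

`torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20], gamma=0.1)` implements the same policy without the module-level `state` dict.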
iosurodri/Fancy_aggregations | [
"647019452a074767706893ecdd431a3ee503b554"
] | [
"Fancy_aggregations/moderate_deviations.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nFile containing different functions to aggregate data using Moderate Deviations. The expressions have been obtained from the following paper:\n\nA.H. Altalhi, J.I. Forcén, M. Pagola, E. Barrenechea, H. Bustince, Zdenko Takáč,\nModerate deviation and restricted equivalence functions for measuring similarity between data,\nInformation Sciences,\nVolume 501,\n2019,\nPages 19-29,\nISSN 0020-0255,\nhttps://doi.org/10.1016/j.ins.2019.05.078.\n(http://www.sciencedirect.com/science/article/pii/S0020025519305031)\n\nPlease, cite accordingly.\n\n@author: Javier Fumanal Idocin (UPNA).\n\nTo suggest changes or submit new code please use the github page.\n\"\"\"\n\nimport numpy as np\n\n# =============================================================================\n# ~ MODERATE DEVIATIONS\n# =============================================================================\ndef custom_distance(x, y, Mp, Mn, R1, R2):\n '''\n\n :param R1:\n :param R2:\n :return:\n '''\n if x <= y:\n return Mp - Mp*R1(x, y)\n else:\n return Mn*R2(x,y) - Mn\n\ndef custom_distance_morphs(x, y, Mp, Mn, F1, F2, T1, T2):\n '''\n TODO, and will probably stay like that for long.\n :param x:\n :param y:\n :param Mp:\n :param Mn:\n :param F1:\n :param F2:\n :param T1:\n :param T2:\n :return:\n '''\n pass\n\ndef distance_f1(x, y, Mp, Mn):\n '''\n\n :return:\n '''\n if x <= y:\n return Mp*(y - x)*(y - x)\n else:\n return Mn*(y*y - x*x)\n\ndef distance_f2(x, y, Mp, Mn):\n '''\n\n :return:\n '''\n if x <= y:\n return Mp*(y - x)\n else:\n return Mn*(y - x)\n\ndef cut_point(D, x_sigma, Mp, Mn):\n k = -1\n\n for ix, element in enumerate(x_sigma):\n if ix < len(x_sigma) - 1:\n con1 = np.sum([D(x_sigma[i], element, Mp, Mn) for i in range(len(x_sigma))]) <= 0\n cond2 = np.sum([D(x_sigma[i], x_sigma[ix + 1], Mp, Mn) for i in range(len(x_sigma))]) >= 0\n\n if con1 and cond2:\n k = ix\n return k\n\ndef moderate_deviation_f(X, D=distance_f2, Mp=1, Mn=1, axis=0):\n '''\n\n\n '''\n n = len(X)\n x_sigma = np.sort(X, axis=0)\n k = cut_point(D, x_sigma, Mp, Mn)\n\n f = (Mp * np.sum(x_sigma[0:k+1]) + Mn*np.sum(x_sigma[k+1:])) / (k*Mp + (n - k)*Mn)\n\n return f\n\ndef moderate_deviation_eq(X, D=distance_f1, Mp=1, Mn=1):\n '''\n\n '''\n n = len(X)\n x_sigma = np.sort(X)\n k = cut_point(D, x_sigma, Mp ,Mn)\n\n a = (k+1)*Mp + (n - k-1)*Mn\n b = -2*Mp*np.sum(x_sigma[0:k+1])\n x_sigma_squared = np.power(x_sigma, 2)\n c = Mp*np.sum(x_sigma_squared[0:k+1]) - Mn*np.sum(x_sigma_squared[k+1:])\n\n sqr_term = np.sqrt(b*b - 4*a*c)\n y1 = (-b + sqr_term) / (2*a)\n y2 = (-b - sqr_term) / (2*a)\n\n return y1, y2\n\ndef md_aggregation(X, axis=0, keepdims=True, md_function=moderate_deviation_f, Mp=1, Mn=10):\n '''\n Designed to use the md functions using the same interface as the rest of the numpy aggregation functions.\n IT ONLY WORKS IN 3 DIMENSIONAL ARRAY (features, samples, classes)\n :param X:\n :param axis:\n :param keepdims:\n :param md_function:\n :return:\n '''\n if axis != 0:\n X = np.transpose(X, (0, axis))\n\n clasificadores, muestras, clases = X.shape\n if keepdims:\n result = np.zeros([1] +list(X.shape[1:]))\n else:\n result = np.zeros(X.shape[1:])\n\n for m in range(muestras):\n #print(md_function(X[:, m, 0], Mp=1, Mn=10))\n if keepdims:\n for clase in range(clases):\n result[0, m, clase] = md_function(X[:, m, clase], Mp=1, Mn=10)\n else:\n for clase in range(clases):\n result[m, clase] = md_function(X[:, m, clase], Mp=1, Mn=10)\n\n if axis != 0:\n X = np.transpose(X, (0, axis))\n\n\n return result\n\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.transpose",
"numpy.zeros",
"numpy.power",
"numpy.sort"
]
] |
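The moderate-deviation aggregation above combines `distance_f2`, `cut_point`, and `moderate_deviation_f`. A compact, self-contained sketch of what that pipeline computes for a 1-D vector; the toy vector and names are illustrative, and with `Mp == Mn` the result reduces to the arithmetic mean:

```python
import numpy as np

def d2(x, y, Mp, Mn):
    # distance_f2 from the row: linear penalty, weighted Mp below / Mn above.
    return Mp * (y - x) if x <= y else Mn * (y - x)

def md_mean(X, Mp=1.0, Mn=1.0):
    xs = np.sort(X)
    n = len(xs)
    k = -1
    for ix in range(n - 1):
        below = sum(d2(xi, xs[ix], Mp, Mn) for xi in xs) <= 0
        above = sum(d2(xi, xs[ix + 1], Mp, Mn) for xi in xs) >= 0
        if below and above:
            k = ix
            break
    # Weighted mean: Mp for values up to the cut point, Mn above it.
    return (Mp * xs[:k + 1].sum() + Mn * xs[k + 1:].sum()) / (k * Mp + (n - k) * Mn)

print(md_mean(np.array([0.1, 0.4, 0.9])))            # 0.4666..., the plain mean (Mp == Mn)
print(md_mean(np.array([0.1, 0.4, 0.9]), Mn=10.0))   # ~0.452 with the asymmetric weights
```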
jprzywoski/faster-python | [
"44252bf0a746dd862d752efbe2012a8a404ec7bf"
] | [
"ceuclid.py"
] | [
"import ctypes\nfrom numpy.ctypeslib import ndpointer\n\nlib = ctypes.cdll.LoadLibrary('./libdist.so')\nfn = lib.dist\nfn.restype = ctypes.c_double\nfn.argtypes = [\n ndpointer(ctypes.c_double),\n ndpointer(ctypes.c_double),\n ctypes.c_size_t\n]\n\n\ndef dist(x, y):\n return fn(x, y, len(x))\n"
] | [
[
"numpy.ctypeslib.ndpointer"
]
] |
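The `ceuclid.py` row wraps a C Euclidean-distance routine through ctypes. A sketch of how such a wrapper is typically exercised and cross-checked, assuming a shared library `./libdist.so` that exports `double dist(const double *x, const double *y, size_t n)`; the library itself is not part of the row:

```python
import ctypes
import numpy as np
from numpy.ctypeslib import ndpointer

# Assumed shared library; requiring C-contiguous float64 arrays makes the
# ndpointer check explicit instead of relying on the caller.
lib = ctypes.cdll.LoadLibrary('./libdist.so')
fn = lib.dist
fn.restype = ctypes.c_double
fn.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),
               ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),
               ctypes.c_size_t]

x = np.random.rand(10_000)
y = np.random.rand(10_000)
print(fn(x, y, len(x)))               # value from the C routine
print(np.sqrt(np.sum((x - y) ** 2)))  # NumPy reference it should match
```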
jieyanzhu/codes-effective-computation-in-physics | [
"0c99f2da9d462229e6b174a010d7c7b08af4482b"
] | [
"chap_7/decay_plot.py"
] | [
"import numpy as np\n\n# as in the previous example, load decays.csv into a NumPy array\ndecaydata = np.loadtxt('decays.csv', delimiter=',', skiprows=1)\n\n# provide handles for the x and y columns\ntime = decaydata[:,0]\ndecays = decaydata[:,1]\n\n# import the matplotlib plotting functionality\nimport pylab as plt\n\nplt.plot(time, decays)\n\nplt.xlabel('Time (s)')\nplt.ylabel('Decays') \nplt.title('Decays')\nplt.grid(True)\nplt.savefig(\"decays_matplotlib.png\")\n"
] | [
[
"numpy.loadtxt"
]
] |
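The decay-plot script above uses the legacy pylab interface. An equivalent sketch with the object-oriented matplotlib API, assuming the same `decays.csv` layout (one header line, comma-separated time and decay columns):

```python
import numpy as np
import matplotlib.pyplot as plt

# Load the two-column CSV and split it into handles for plotting.
decaydata = np.loadtxt('decays.csv', delimiter=',', skiprows=1)
time, decays = decaydata[:, 0], decaydata[:, 1]

fig, ax = plt.subplots()
ax.plot(time, decays)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Decays')
ax.set_title('Decays')
ax.grid(True)
fig.savefig('decays_matplotlib.png')
```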
vuk119/RL | [
"2f5309bfff719b2965060492a19d008ed8382856"
] | [
"Easy21/plot_cuts.py"
] | [
"\"\"\"\n\nSome useful plot functions\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef matrix_surf(m, xlimits=None, ylimits=None, **kwargs):\n if xlimits is None:\n xlimits = [0, m.shape[0]]\n if ylimits is None:\n ylimits = [0, m.shape[1]]\n\n Y, X = np.meshgrid(np.arange(ylimits[0], ylimits[1]), np.arange(xlimits[0], xlimits[1]))\n\n fig = plt.figure()\n ax = fig.add_subplot(111,projection='3d',**kwargs)\n ax.plot_surface(X,Y,m)\n plt.show()\n\ndef matrix_scatter(m):\n X=[]\n Y=[]\n Z=[]\n for i in range(m.shape[0]):\n for j in range(m.shape[1]):\n X.append(i)\n Y.append(j)\n Z.append(m[i,j])\n\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X, Y, Z)\n plt.show()\n\n\n\n# mat = np.zeros((6,5))\n# mat[0,0] = 5\n# mat[0,1] = 4\n# mat[1,0] = 4\n# mat[1,1] = 3\n# mat[1,2] = 3\n# mat[2,1] = 3\n# mat[0,2] = 3\n# mat[2,0] = 3\n# mat[0,3] = 3\n# mat[3,0] = 3\n# matrix_surf(mat, xlabel = 'X AXIS', ylabel = 'Y AXIS', zlabel='Z', xticks =range(10))\n#\n#\n#\n# Y, X = np.meshgrid(np.arange(mat.shape[1]), np.arange(mat.shape[0]))\n#\n# print(X)\n# print(Y)\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
]
] |
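`matrix_surf` in the row depends on the `'3d'` projection being registered. A minimal standalone version of the same surface-over-index-grid idea, with the explicit `Axes3D` import that older matplotlib releases need; the random matrix is illustrative:

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers '3d' on old versions)

m = np.random.rand(6, 5)
# Index grid matching the matrix shape: X indexes rows, Y indexes columns.
Y, X = np.meshgrid(np.arange(m.shape[1]), np.arange(m.shape[0]))

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, m)
plt.show()
```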
zmlabe/StratoVari | [
"c5549f54482a2b05e89bded3e3b0b3c9faa686f3"
] | [
"Scripts/plot_ProfileVar_Monthly_FDR.py"
] | [
"\"\"\"\nPlot vertical plots of PAMIP data for each month from November to April using\nthe ensemble mean (300)\n\nNotes\n-----\n Author : Zachary Labe\n Date : 26 June 2019\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport read_MonthlyData as MO\nimport statsmodels.stats.multitest as fdr\nimport cmocean\nimport itertools\n\n### Define directories\ndirectorydata = '/seley/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/STRATOVARI/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Monthly Vertical Profiles- %s----' % titletime)\n\n### Alott time series (300 ensemble members)\nyear1 = 1701\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Call arguments\nvarnames = ['U','GEOP','TEMP','V','EGR']\n\ndef calc_indttestfdr(varx,vary):\n \"\"\"\n Function calculates statistical difference for 2 independent\n sample t-test\n\n Parameters\n ----------\n varx : 3d array\n vary : 3d array\n \n Returns\n -------\n stat = calculated t-statistic\n pvalue = two-tailed p-value\n\n Usage\n -----\n stat,pvalue = calc_ttest(varx,vary)\n \"\"\"\n print('\\n>>> Using calc_ttest function!')\n \n ### Import modules\n import scipy.stats as sts\n \n ### 2-independent sample t-test\n stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')\n \n print('*Completed: Finished calc_ttest function!')\n return stat,pvalue\n\n######################\ndef readDataPeriods(varnames,sliceq):\n ### Call function for 4d variable data\n lat,lon,lev,varfuture = MO.readExperiAll(varnames,'Future','profile')\n lat,lon,lev,varpast = MO.readExperiAll(varnames,'Past','profile')\n \n ### Select ensemble mean period\n if sliceq == 'Mean':\n varfuture = varfuture[:,:,:,:,:]\n varpast = varpast[:,:,:,:,:]\n elif sliceq == 'A':\n varfuture = varfuture[:100,:,:,:,:]\n varpast = varpast[:100,:,:,:,:]\n elif sliceq == 'B':\n varfuture = varfuture[100:200,:,:,:,:]\n varpast = varpast[100:200,:,:,:,:]\n elif sliceq == 'C':\n varfuture = varfuture[200:,:,:,:,:]\n varpast = varpast[200:,:,:,:,:]\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon,lat)\n \n ### Remove missing data\n varfuture[np.where(varfuture <= -1e10)] = np.nan\n varpast[np.where(varpast <= -1e10)] = np.nan\n \n ### Rearrange months (N,D,J,F,M,A)\n varfuturem = np.append(varfuture[:,-2:,:,:,:],varfuture[:,:4,:,:,:],\n axis=1)\n varpastm = np.append(varpast[:,-2:,:,:,:],varpast[:,:4,:,:,:],axis=1)\n \n ### Calculate zonal means\n varfuturemz = np.nanmean(varfuturem,axis=4)\n varpastmz = np.nanmean(varpastm,axis=4)\n \n ### Calculate anomalies\n anompi = varfuturemz - varpastmz\n \n ### Calculate ensemble mean\n anompim = np.nanmean(anompi,axis=0)\n zdiffruns = anompim\n \n ### Calculate climatologies\n zclimo = np.nanmean(varpastmz,axis=0)\n \n ### Calculate significance for each month\n stat_past = np.empty((varpastm.shape[1],len(lev),len(lat)))\n pvalue_past = np.empty((varpastm.shape[1],len(lev),len(lat)))\n for i in range(varpastm.shape[1]):\n 
stat_past[i],pvalue_past[i] = calc_indttestfdr(varfuturemz[:,i,:,:],\n varpastmz[:,i,:,:])\n\n ### Ravel into month x all p values\n prunsr = np.reshape(pvalue_past,\n (pvalue_past.shape[0],pvalue_past.shape[1] \\\n * pvalue_past.shape[2]))\n \n ### Calculate false discovery rate\n prunsq = np.empty((prunsr.shape))\n prunsq.fill(np.nan)\n prunsqq = np.empty((prunsr.shape[1]))\n prunsqq.fill(np.nan)\n for i in range(prunsr.shape[0]):\n ### Check for nans before correction!!\n mask = np.isfinite(prunsr[i,:])\n prunsrr = prunsr[i,:]\n score,prunsqq[mask] = fdr.fdrcorrection(prunsrr[mask],alpha=0.05,\n method='indep')\n prunsq[i,:] = prunsqq\n \n ### Reshape into month x lat x lon\n pruns = np.reshape(prunsq,(pvalue_past.shape))\n \n ### Mask variables by their adjusted p-values\n pruns[np.where(pruns >= 0.05)] = np.nan\n pruns[np.where(pruns < 0.05)] = 1.\n pruns[np.where(np.isnan(pruns))] = 0.\n \n return zdiffruns,zclimo,pruns,lat,lon,lev\n \n###########################################################################\n###########################################################################\n###########################################################################\n### Read in data\nfor v in range(len(varnames)):\n diffm,climom,pvalm,lat,lon,lev = readDataPeriods(varnames[v],'Mean')\n diffa,climoa,pvala,lat,lon,lev = readDataPeriods(varnames[v],'A')\n diffb,climob,pvalb,lat,lon,lev = readDataPeriods(varnames[v],'B')\n diffc,climoc,pvalc,lat,lon,lev = readDataPeriods(varnames[v],'C')\n\n varn = list(itertools.chain(*[diffm,diffa,diffb,diffc]))\n zclimo = list(itertools.chain(*[climom,climoa,climob,climoc])) \n pvarn = list(itertools.chain(*[pvalm,pvala,pvalb,pvalc]))\n \n ### Plot Variables\n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n \n ### Set limits for contours and colorbars\n if varnames[v] == 'U':\n limit = np.arange(-2,2.1,0.1)\n barlim = np.arange(-2,3,1)\n elif varnames[v] == 'TEMP':\n limit = np.arange(-4,4.1,0.2)\n barlim = np.arange(-4,5,1)\n elif varnames[v] == 'GEOP':\n limit = np.arange(-60,61,2)\n barlim = np.arange(-60,61,30)\n elif varnames[v] == 'V':\n limit = np.arange(-0.2,0.21,0.02)\n barlim = np.arange(-0.2,0.3,0.1)\n elif varnames[v] == 'EGR':\n limit = np.arange(-0.08,0.081,0.005)\n barlim = np.arange(-0.08,0.09,0.04)\n \n zscale = np.array([1000,700,500,300,200,\n 100,50,30,10])\n latq,levq = np.meshgrid(lat,lev)\n \n fig = plt.figure()\n for i in range(len(varn)):\n ax1 = plt.subplot(4,6,i+1)\n \n ax1.spines['top'].set_color('dimgrey')\n ax1.spines['right'].set_color('dimgrey')\n ax1.spines['bottom'].set_color('dimgrey')\n ax1.spines['left'].set_color('dimgrey')\n ax1.spines['left'].set_linewidth(2)\n ax1.spines['bottom'].set_linewidth(2)\n ax1.spines['right'].set_linewidth(2)\n ax1.spines['top'].set_linewidth(2)\n ax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\n ax1.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \n \n cs = plt.contourf(lat,lev,varn[i]*pvarn[i],limit,extend='both')\n \n if varnames[v] == 'U': \n cs2 = plt.contour(lat,lev,zclimo[i],np.arange(-20,101,5),\n linewidths=0.5,colors='dimgrey')\n \n plt.gca().invert_yaxis()\n plt.yscale('log',nonposy='clip')\n \n plt.xticks(np.arange(0,96,30),map(str,np.arange(0,91,30)),fontsize=5)\n plt.yticks(zscale,map(str,zscale),ha='right',fontsize=5)\n plt.minorticks_off()\n \n plt.xlim([0,90])\n plt.ylim([1000,10])\n \n if any([i==0,i==6,i==12,i==18]):\n 
ax1.tick_params(labelleft='on') \n else:\n ax1.tick_params(labelleft='off') \n if i < 18:\n ax1.tick_params(labelbottom='off') \n if any([i==0,i==6,i==12]):\n ax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\n ax1.tick_params(axis='x',direction='out',which='major',pad=3,\n width=0,color='dimgrey') \n else:\n if i < 24 and i != 18:\n ax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=0,color='dimgrey')\n if i < 18:\n ax1.tick_params(axis='y',direction='out',which='major',\n pad=3,width=0,color='dimgrey')\n ax1.tick_params(axis='x',direction='out',which='major',\n pad=3,width=0,color='dimgrey') \n \n if varnames[v] == 'U':\n cmap = cmocean.cm.balance \n cs.set_cmap(cmap) \n elif varnames[v] == 'TEMP':\n cmap = cmocean.cm.balance \n cs.set_cmap(cmap) \n elif varnames[v] == 'GEOP':\n cmap = cmocean.cm.balance \n cs.set_cmap(cmap) \n elif varnames[v] == 'V':\n cmap = cmocean.cm.balance \n cs.set_cmap(cmap) \n elif varnames[v] == 'EGR':\n cmap = cmocean.cm.diff \n cs.set_cmap(cmap) \n \n labelmonths = [r'NOV',r'DEC',r'JAN',r'FEB',r'MAR',r'APR']\n if i < 6:\n ax1.annotate(r'\\textbf{%s}' % labelmonths[i],\n xy=(0, 0),xytext=(0.5,1.13),xycoords='axes fraction',\n fontsize=13,color='dimgrey',rotation=0,\n ha='center',va='center')\n if i==0: \n plt.annotate(r'\\textbf{Mean}',\n xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction',\n fontsize=15,color='k',rotation=90,\n ha='center',va='center') \n elif i==6: \n plt.annotate(r'\\textbf{A}',\n xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction',\n fontsize=15,color='k',rotation=90,\n ha='center',va='center') \n elif i==12: \n plt.annotate(r'\\textbf{B}',\n xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction',\n fontsize=15,color='k',rotation=90,\n ha='center',va='center') \n elif i==18: \n plt.annotate(r'\\textbf{C}',\n xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction',\n fontsize=15,color='k',rotation=90,\n ha='center',va='center') \n \n cbar_ax = fig.add_axes([0.312,0.07,0.4,0.02]) \n cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='both',extendfrac=0.07,drawedges=False)\n \n if varnames[v] == 'U':\n cbar.set_label(r'\\textbf{m/s}',fontsize=9,color='dimgray',\n labelpad=0)\n elif varnames[v] == 'TEMP':\n cbar.set_label(r'\\textbf{$^\\circ$C}',fontsize=9,color='dimgray',\n labelpad=0)\n elif varnames[v] == 'GEOP':\n cbar.set_label(r'\\textbf{m}',fontsize=9,color='dimgray',\n labelpad=0)\n elif varnames[v] == 'V':\n cbar.set_label(r'\\textbf{m/s}',fontsize=9,color='dimgray',\n labelpad=0)\n elif varnames[v] == 'EGR':\n cbar.set_label(r'\\textbf{1/day}',fontsize=9,color='dimgray',\n labelpad=0)\n \n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim))) \n cbar.ax.tick_params(axis='x', size=.01)\n cbar.outline.set_edgecolor('dimgrey')\n cbar.outline.set_linewidth(0.5)\n cbar.ax.tick_params(labelsize=6)\n \n plt.annotate(r'\\textbf{Latitude ($^{\\circ}$N)',\n xy=(0, 0),xytext=(0.515,0.12),xycoords='figure fraction',\n fontsize=6,color='k',rotation=0,\n ha='center',va='center') \n\n plt.subplots_adjust(hspace=0.1,bottom=0.17,top=0.93,wspace=0.1)\n \n plt.savefig(directoryfigure + '%s_MonthlyProfiles_100yr_FDR.png' % varnames[v],\n dpi=300)\n print('Completed: Script done!')\n\n\n "
] | [
[
"scipy.stats.ttest_ind",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.subplots_adjust",
"numpy.meshgrid",
"numpy.isfinite",
"numpy.append",
"numpy.nanmean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"numpy.isnan",
"numpy.where",
"matplotlib.pyplot.minorticks_off",
"matplotlib.pyplot.rc",
"numpy.arange",
"matplotlib.pyplot.ylim",
"numpy.empty",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.contourf"
]
] |
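The evaluation script above applies a Benjamini-Hochberg false-discovery-rate correction per month while skipping NaN p-values. A standalone sketch of that masking pattern with statsmodels; the p-values here are made up:

```python
import numpy as np
import statsmodels.stats.multitest as fdr

pvals = np.array([0.001, 0.04, np.nan, 0.20, 0.03])
adjusted = np.full_like(pvals, np.nan)

# Correct only the finite entries; NaNs stay NaN, exactly as in the row.
mask = np.isfinite(pvals)
reject, adjusted[mask] = fdr.fdrcorrection(pvals[mask], alpha=0.05, method='indep')

print(adjusted)         # BH-adjusted p-values, NaN preserved
print(adjusted < 0.05)  # significance after FDR control (NaN compares False)
```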
DEVESHTARASIA/tensorflow | [
"d3edb8c60ed4fd831d62833ed22f5c23486c561c",
"d3edb8c60ed4fd831d62833ed22f5c23486c561c",
"d3edb8c60ed4fd831d62833ed22f5c23486c561c"
] | [
"tensorflow/contrib/keras/python/keras/models_test.py",
"tensorflow/contrib/keras/python/keras/applications/inception_v3.py",
"tensorflow/python/ops/rnn_cell_impl.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for training routines.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.contrib.keras.python import keras\nfrom tensorflow.python.platform import test\n\ntry:\n import h5py # pylint:disable=g-import-not-at-top\nexcept ImportError:\n h5py = None\n\n\nclass TestModelSaving(test.TestCase):\n\n def test_sequential_model_saving(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.RepeatVector(3))\n model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))\n model.compile(loss=keras.losses.MSE,\n optimizer=keras.optimizers.RMSprop(lr=0.0001),\n metrics=[keras.metrics.categorical_accuracy],\n sample_weight_mode='temporal')\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n keras.models.save_model(model, fname)\n\n new_model = keras.models.load_model(fname)\n os.remove(fname)\n\n out2 = new_model.predict(x)\n self.assertAllClose(out, out2, atol=1e-05)\n\n # test that new updates are the same with both models\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n new_model.train_on_batch(x, y)\n out = model.predict(x)\n out2 = new_model.predict(x)\n self.assertAllClose(out, out2, atol=1e-05)\n\n def test_sequential_model_saving_2(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n with self.test_session():\n # test with custom optimizer, loss\n\n class CustomOp(keras.optimizers.RMSprop):\n pass\n\n def custom_loss(y_true, y_pred):\n return keras.losses.mse(y_true, y_pred)\n\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.Dense(3))\n model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])\n\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n keras.models.save_model(model, fname)\n\n model = keras.models.load_model(\n fname,\n custom_objects={'CustomOp': CustomOp,\n 'custom_loss': custom_loss})\n os.remove(fname)\n\n out2 = model.predict(x)\n self.assertAllClose(out, out2, atol=1e-05)\n\n def test_functional_model_saving(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n with self.test_session():\n inputs = keras.layers.Input(shape=(3,))\n x = keras.layers.Dense(2)(inputs)\n output = keras.layers.Dense(3)(x)\n\n model = keras.models.Model(inputs, output)\n model.compile(loss=keras.losses.MSE,\n 
optimizer=keras.optimizers.RMSprop(lr=0.0001),\n metrics=[keras.metrics.categorical_accuracy])\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n keras.models.save_model(model, fname)\n\n model = keras.models.load_model(fname)\n os.remove(fname)\n\n out2 = model.predict(x)\n self.assertAllClose(out, out2, atol=1e-05)\n\n def test_saving_without_compilation(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.Dense(3))\n model.compile(loss='mse', optimizer='sgd', metrics=['acc'])\n\n _, fname = tempfile.mkstemp('.h5')\n keras.models.save_model(model, fname)\n model = keras.models.load_model(fname)\n os.remove(fname)\n\n def test_saving_right_after_compilation(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.Dense(3))\n model.compile(loss='mse', optimizer='sgd', metrics=['acc'])\n model.model._make_train_function()\n\n _, fname = tempfile.mkstemp('.h5')\n keras.models.save_model(model, fname)\n model = keras.models.load_model(fname)\n os.remove(fname)\n\n def test_saving_lambda_numpy_array_arguments(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n mean = np.random.random((4, 2, 3))\n std = np.abs(np.random.random((4, 2, 3))) + 1e-5\n inputs = keras.layers.Input(shape=(4, 2, 3))\n output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,\n arguments={'mu': mean, 'std': std})(inputs)\n model = keras.models.Model(inputs, output)\n model.compile(loss='mse', optimizer='sgd', metrics=['acc'])\n\n _, fname = tempfile.mkstemp('.h5')\n keras.models.save_model(model, fname)\n\n model = keras.models.load_model(fname)\n os.remove(fname)\n\n self.assertAllClose(mean, model.layers[1].arguments['mu'])\n self.assertAllClose(std, model.layers[1].arguments['std'])\n\n\nclass TestSequential(test.TestCase):\n \"\"\"Most Sequential model API tests are covered in `training_test.py`.\n \"\"\"\n\n def test_sequential_pop(self):\n num_hidden = 5\n input_dim = 3\n batch_size = 5\n num_classes = 2\n with self.test_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))\n model.add(keras.layers.Dense(num_classes))\n model.compile(loss='mse', optimizer='sgd')\n x = np.random.random((batch_size, input_dim))\n y = np.random.random((batch_size, num_classes))\n model.fit(x, y, epochs=1)\n model.pop()\n self.assertEqual(len(model.layers), 1)\n self.assertEqual(model.output_shape, (None, num_hidden))\n model.compile(loss='mse', optimizer='sgd')\n y = np.random.random((batch_size, num_hidden))\n model.fit(x, y, epochs=1)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n\"\"\"Inception V3 model for Keras.\n\nNote that the input image format for this model is different than for\nthe VGG16 and ResNet models (299x299 instead of 224x224),\nand that the input preprocessing function is also different (same as Xception).\n\n# Reference\n\n- [Rethinking the Inception Architecture for Computer\nVision](http://arxiv.org/abs/1512.00567)\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.keras.python.keras import backend as K\nfrom tensorflow.contrib.keras.python.keras import layers\nfrom tensorflow.contrib.keras.python.keras.applications.imagenet_utils import _obtain_input_shape\nfrom tensorflow.contrib.keras.python.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import\nfrom tensorflow.contrib.keras.python.keras.engine.topology import get_source_inputs\nfrom tensorflow.contrib.keras.python.keras.layers import Activation\nfrom tensorflow.contrib.keras.python.keras.layers import AveragePooling2D\nfrom tensorflow.contrib.keras.python.keras.layers import BatchNormalization\nfrom tensorflow.contrib.keras.python.keras.layers import Conv2D\nfrom tensorflow.contrib.keras.python.keras.layers import Dense\nfrom tensorflow.contrib.keras.python.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.contrib.keras.python.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.contrib.keras.python.keras.layers import Input\nfrom tensorflow.contrib.keras.python.keras.layers import MaxPooling2D\nfrom tensorflow.contrib.keras.python.keras.models import Model\nfrom tensorflow.contrib.keras.python.keras.utils.data_utils import get_file\n\n\nWEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'\nWEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n\ndef conv2d_bn(x,\n filters,\n num_row,\n num_col,\n padding='same',\n strides=(1, 1),\n name=None):\n \"\"\"Utility function to apply conv + BN.\n\n Arguments:\n x: input tensor.\n filters: filters in `Conv2D`.\n num_row: height of the convolution kernel.\n num_col: width of the convolution kernel.\n padding: padding mode in `Conv2D`.\n strides: strides in `Conv2D`.\n name: name of the ops; will become `name + '_conv'`\n for the convolution and `name + '_bn'` for the\n batch norm layer.\n\n Returns:\n Output tensor after applying `Conv2D` and `BatchNormalization`.\n \"\"\"\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name + '_conv'\n else:\n bn_name = None\n conv_name = None\n if K.image_data_format() == 'channels_first':\n bn_axis = 1\n else:\n bn_axis = 3\n x = Conv2D(\n 
filters, (num_row, num_col),\n strides=strides,\n padding=padding,\n use_bias=False,\n name=conv_name)(x)\n x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n x = Activation('relu', name=name)(x)\n return x\n\n\ndef InceptionV3(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates the Inception v3 architecture.\n\n Optionally loads weights pre-trained\n on ImageNet. Note that when using TensorFlow,\n for best performance you should set\n `image_data_format=\"channels_last\"` in your Keras config\n at ~/.keras/keras.json.\n The model and the weights are compatible with both\n TensorFlow and Theano. The data format\n convention used by the model is the one\n specified in your Keras config file.\n Note that the default input image size for this model is 299x299.\n\n Arguments:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization)\n or \"imagenet\" (pre-training on ImageNet).\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(299, 299, 3)` (with `channels_last` data format)\n or `(3, 299, 299)` (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 139.\n E.g. `(150, 150, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n \"\"\"\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n input_shape = _obtain_input_shape(\n input_shape,\n default_size=299,\n min_size=139,\n data_format=K.image_data_format(),\n include_top=include_top)\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n img_input = Input(tensor=input_tensor, shape=input_shape)\n\n if K.image_data_format() == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = 3\n\n x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')\n x = conv2d_bn(x, 32, 3, 3, padding='valid')\n x = conv2d_bn(x, 64, 3, 3)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = conv2d_bn(x, 80, 1, 1, padding='valid')\n x = conv2d_bn(x, 192, 3, 3, padding='valid')\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n # mixed 0, 1, 2: 35 x 35 x 256\n branch1x1 = conv2d_bn(x, 64, 1, 1)\n\n branch5x5 = conv2d_bn(x, 48, 1, 1)\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 
5)\n\n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 32, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch5x5, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed0')\n\n # mixed 1: 35 x 35 x 256\n branch1x1 = conv2d_bn(x, 64, 1, 1)\n\n branch5x5 = conv2d_bn(x, 48, 1, 1)\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)\n\n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 64, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch5x5, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed1')\n\n # mixed 2: 35 x 35 x 256\n branch1x1 = conv2d_bn(x, 64, 1, 1)\n\n branch5x5 = conv2d_bn(x, 48, 1, 1)\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)\n\n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 64, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch5x5, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed2')\n\n # mixed 3: 17 x 17 x 768\n branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')\n\n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(\n branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')\n\n branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)\n x = layers.concatenate(\n [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')\n\n # mixed 4: 17 x 17 x 768\n branch1x1 = conv2d_bn(x, 192, 1, 1)\n\n branch7x7 = conv2d_bn(x, 128, 1, 1)\n branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\n\n branch7x7dbl = conv2d_bn(x, 128, 1, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch7x7, branch7x7dbl, branch_pool],\n axis=channel_axis,\n name='mixed4')\n\n # mixed 5, 6: 17 x 17 x 768\n for i in range(2):\n branch1x1 = conv2d_bn(x, 192, 1, 1)\n\n branch7x7 = conv2d_bn(x, 160, 1, 1)\n branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\n\n branch7x7dbl = conv2d_bn(x, 160, 1, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch7x7, branch7x7dbl, branch_pool],\n axis=channel_axis,\n name='mixed' + str(5 + i))\n\n # mixed 7: 17 x 17 x 768\n branch1x1 = conv2d_bn(x, 192, 1, 1)\n\n branch7x7 = conv2d_bn(x, 192, 1, 1)\n branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\n\n branch7x7dbl = 
conv2d_bn(x, 192, 1, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch7x7, branch7x7dbl, branch_pool],\n axis=channel_axis,\n name='mixed7')\n\n # mixed 8: 8 x 8 x 1280\n branch3x3 = conv2d_bn(x, 192, 1, 1)\n branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')\n\n branch7x7x3 = conv2d_bn(x, 192, 1, 1)\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)\n branch7x7x3 = conv2d_bn(\n branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')\n\n branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)\n x = layers.concatenate(\n [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')\n\n # mixed 9: 8 x 8 x 2048\n for i in range(2):\n branch1x1 = conv2d_bn(x, 320, 1, 1)\n\n branch3x3 = conv2d_bn(x, 384, 1, 1)\n branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)\n branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)\n branch3x3 = layers.concatenate(\n [branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))\n\n branch3x3dbl = conv2d_bn(x, 448, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)\n branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)\n branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)\n branch3x3dbl = layers.concatenate(\n [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)\n\n branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch3x3, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed' + str(9 + i))\n if include_top:\n # Classification block\n x = GlobalAveragePooling2D(name='avg_pool')(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='inception_v3')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file(\n 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')\n else:\n weights_path = get_file(\n 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='bcbd6486424b2319ff4ef7d526e38f63')\n model.load_weights(weights_path)\n return model\n\n\ndef preprocess_input(x):\n x /= 255.\n x -= 0.5\n x *= 2.\n return x\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module implementing RNN Cells.\n\nThis module provides a number of basic commonly used RNN cells, such as LSTM\n(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of\noperators that allow adding dropouts, projections, or embeddings for inputs.\nConstructing multi-layer cells is supported by the class `MultiRNNCell`, or by\ncalling the `rnn` ops several times.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport hashlib\nimport numbers\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\n\n\n_BIAS_VARIABLE_NAME = \"bias\"\n_WEIGHTS_VARIABLE_NAME = \"kernel\"\n\n\ndef _like_rnncell(cell):\n \"\"\"Checks that a given object is an RNNCell by using duck typing.\"\"\"\n conditions = [hasattr(cell, \"output_size\"), hasattr(cell, \"state_size\"),\n hasattr(cell, \"zero_state\"), callable(cell)]\n return all(conditions)\n\n\ndef _concat(prefix, suffix, static=False):\n \"\"\"Concat that enables int, Tensor, or TensorShape values.\n\n This function takes a size specification, which can be an integer, a\n TensorShape, or a Tensor, and converts it into a concatenated Tensor\n (if static = False) or a list of integers (if static = True).\n\n Args:\n prefix: The prefix; usually the batch size (and/or time step size).\n (TensorShape, int, or Tensor.)\n suffix: TensorShape, int, or Tensor.\n static: If `True`, return a python list with possibly unknown dimensions.\n Otherwise return a `Tensor`.\n\n Returns:\n shape: the concatenation of prefix and suffix.\n\n Raises:\n ValueError: if `suffix` is not a scalar or vector (or TensorShape).\n ValueError: if prefix or suffix was `None` and asked for dynamic\n Tensors out.\n \"\"\"\n if isinstance(prefix, ops.Tensor):\n p = prefix\n p_static = tensor_util.constant_value(prefix)\n if p.shape.ndims == 0:\n p = array_ops.expand_dims(p, 0)\n elif p.shape.ndims != 1:\n raise ValueError(\"prefix tensor must be either a scalar or vector, \"\n \"but saw tensor: %s\" % p)\n 
else:\n p = tensor_shape.as_shape(prefix)\n p_static = p.as_list() if p.ndims is not None else None\n p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)\n if p.is_fully_defined() else None)\n if isinstance(suffix, ops.Tensor):\n s = suffix\n s_static = tensor_util.constant_value(suffix)\n if s.shape.ndims == 0:\n s = array_ops.expand_dims(s, 0)\n elif s.shape.ndims != 1:\n raise ValueError(\"suffix tensor must be either a scalar or vector, \"\n \"but saw tensor: %s\" % s)\n else:\n s = tensor_shape.as_shape(suffix)\n s_static = s.as_list() if s.ndims is not None else None\n s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)\n if s.is_fully_defined() else None)\n\n if static:\n shape = tensor_shape.as_shape(p_static).concatenate(s_static)\n shape = shape.as_list() if shape.ndims is not None else None\n else:\n if p is None or s is None:\n raise ValueError(\"Provided a prefix or suffix of None: %s and %s\"\n % (prefix, suffix))\n shape = array_ops.concat((p, s), 0)\n return shape\n\n\ndef _zero_state_tensors(state_size, batch_size, dtype):\n \"\"\"Create tensors of zeros based on state_size, batch_size, and dtype.\"\"\"\n def get_state_shape(s):\n \"\"\"Combine s with batch_size to get a proper tensor shape.\"\"\"\n c = _concat(batch_size, s)\n c_static = _concat(batch_size, s, static=True)\n size = array_ops.zeros(c, dtype=dtype)\n size.set_shape(c_static)\n return size\n return nest.map_structure(get_state_shape, state_size)\n\n\nclass RNNCell(base_layer.Layer):\n \"\"\"Abstract object representing an RNN cell.\n\n Every `RNNCell` must have the properties below and implement `call` with\n the signature `(output, next_state) = call(input, state)`. The optional\n third input argument, `scope`, is allowed for backwards compatibility\n purposes; but should be left off for new subclasses.\n\n This definition of cell differs from the definition used in the literature.\n In the literature, 'cell' refers to an object with a single scalar output.\n This definition refers to a horizontal array of such units.\n\n An RNN cell, in the most abstract setting, is anything that has\n a state and performs some operation that takes a matrix of inputs.\n This operation results in an output matrix with `self.output_size` columns.\n If `self.state_size` is an integer, this operation also results in a new\n state matrix with `self.state_size` columns. If `self.state_size` is a\n (possibly nested tuple of) TensorShape object(s), then it should return a\n matching structure of Tensors having shape `[batch_size].concatenate(s)`\n for each `s` in `self.batch_size`.\n \"\"\"\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run this RNN cell on inputs, starting from the given state.\n\n Args:\n inputs: `2-D` tensor with shape `[batch_size x input_size]`.\n state: if `self.state_size` is an integer, this should be a `2-D Tensor`\n with shape `[batch_size x self.state_size]`. 
Otherwise, if\n `self.state_size` is a tuple of integers, this should be a tuple\n with shapes `[batch_size x s] for s in self.state_size`.\n scope: VariableScope for the created subgraph; defaults to class name.\n\n Returns:\n A pair containing:\n\n - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.\n - New state: Either a single `2-D` tensor, or a tuple of tensors matching\n the arity and shapes of `state`.\n \"\"\"\n if scope is not None:\n with vs.variable_scope(scope,\n custom_getter=self._rnn_get_variable) as scope:\n return super(RNNCell, self).__call__(inputs, state, scope=scope)\n else:\n with vs.variable_scope(vs.get_variable_scope(),\n custom_getter=self._rnn_get_variable):\n return super(RNNCell, self).__call__(inputs, state)\n\n def _rnn_get_variable(self, getter, *args, **kwargs):\n variable = getter(*args, **kwargs)\n trainable = (variable in tf_variables.trainable_variables() or\n (isinstance(variable, tf_variables.PartitionedVariable) and\n list(variable)[0] in tf_variables.trainable_variables()))\n if trainable and variable not in self._trainable_weights:\n self._trainable_weights.append(variable)\n elif not trainable and variable not in self._non_trainable_weights:\n self._non_trainable_weights.append(variable)\n return variable\n\n @property\n def state_size(self):\n \"\"\"size(s) of state(s) used by this cell.\n\n It can be represented by an Integer, a TensorShape or a tuple of Integers\n or TensorShapes.\n \"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n @property\n def output_size(self):\n \"\"\"Integer or TensorShape: size of outputs produced by this cell.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def build(self, _):\n # This tells the parent Layer object that it's OK to call\n # self.add_variable() inside the call() method.\n pass\n\n def zero_state(self, batch_size, dtype):\n \"\"\"Return zero-filled state tensor(s).\n\n Args:\n batch_size: int, float, or unit Tensor representing the batch size.\n dtype: the data type to use for the state.\n\n Returns:\n If `state_size` is an int or TensorShape, then the return value is a\n `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.\n\n If `state_size` is a nested list or tuple, then the return value is\n a nested list or tuple (of the same structure) of `2-D` tensors with\n the shapes `[batch_size x s]` for each s in `state_size`.\n \"\"\"\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n state_size = self.state_size\n return _zero_state_tensors(state_size, batch_size, dtype)\n\n\nclass BasicRNNCell(RNNCell):\n \"\"\"The most basic RNN cell.\n\n Args:\n num_units: int, The number of units in the RNN cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n\n def __init__(self, num_units, activation=None, reuse=None):\n super(BasicRNNCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def call(self, inputs, state):\n \"\"\"Most basic RNN: output = new_state = act(W * input + U * state + B).\"\"\"\n output = self._activation(_linear([inputs, state], self._num_units, True))\n return output, output\n\n\nclass GRUCell(RNNCell):\n \"\"\"Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).\"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None):\n super(GRUCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def call(self, inputs, state):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n with vs.variable_scope(\"gates\"): # Reset gate and update gate.\n # We start with bias of 1.0 to not reset and not update.\n bias_ones = self._bias_initializer\n if self._bias_initializer is None:\n dtype = [a.dtype for a in [inputs, state]][0]\n bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)\n value = math_ops.sigmoid(\n _linear([inputs, state], 2 * self._num_units, True, bias_ones,\n self._kernel_initializer))\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n with vs.variable_scope(\"candidate\"):\n c = self._activation(\n _linear([inputs, r * state], self._num_units, True,\n self._bias_initializer, self._kernel_initializer))\n new_h = u * state + (1 - u) * c\n return new_h, new_h\n\n\n_LSTMStateTuple = collections.namedtuple(\"LSTMStateTuple\", (\"c\", \"h\"))\n\n\nclass LSTMStateTuple(_LSTMStateTuple):\n \"\"\"Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.\n\n Stores two elements: `(c, h)`, in that order.\n\n Only used when `state_is_tuple=True`.\n \"\"\"\n __slots__ = ()\n\n @property\n def dtype(self):\n (c, h) = self\n if c.dtype != h.dtype:\n raise TypeError(\"Inconsistent internal state: %s vs %s\" %\n (str(c.dtype), str(h.dtype)))\n return c.dtype\n\n\nclass BasicLSTMCell(RNNCell):\n \"\"\"Basic LSTM recurrent network cell.\n\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n\n We add forget_bias (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n It does not allow cell clipping, a projection layer, and does not\n use peep-hole connections: it is the basic baseline.\n\n For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}\n that follows.\n \"\"\"\n\n def __init__(self, num_units, forget_bias=1.0,\n state_is_tuple=True, activation=None, reuse=None):\n \"\"\"Initialize the basic LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n Must set to `0.0` manually when restoring from CudnnLSTM-trained\n checkpoints.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. 
If False, they are concatenated\n along the column axis. The latter behavior will soon be deprecated.\n activation: Activation function of the inner states. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n\n When restoring from CudnnLSTM-trained checkpoints, must use\n CudnnCompatibleLSTMCell instead.\n \"\"\"\n super(BasicLSTMCell, self).__init__(_reuse=reuse)\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. Use state_is_tuple=True.\", self)\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n self._activation = activation or math_ops.tanh\n\n @property\n def state_size(self):\n return (LSTMStateTuple(self._num_units, self._num_units)\n if self._state_is_tuple else 2 * self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def call(self, inputs, state):\n \"\"\"Long short-term memory cell (LSTM).\n\n Args:\n inputs: `2-D` tensor with shape `[batch_size x input_size]`.\n state: An `LSTMStateTuple` of state tensors, each shaped\n `[batch_size x self.state_size]`, if `state_is_tuple` has been set to\n `True`. Otherwise, a `Tensor` shaped\n `[batch_size x 2 * self.state_size]`.\n\n Returns:\n A pair containing the new hidden state, and the new state (either a\n `LSTMStateTuple` or a concatenated state, depending on\n `state_is_tuple`).\n \"\"\"\n sigmoid = math_ops.sigmoid\n # Parameters of gates are concatenated into one multiply for efficiency.\n if self._state_is_tuple:\n c, h = state\n else:\n c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)\n\n concat = _linear([inputs, h], 4 * self._num_units, True)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)\n\n new_c = (\n c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))\n new_h = self._activation(new_c) * sigmoid(o)\n\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return new_h, new_state\n\n\nclass LSTMCell(RNNCell):\n \"\"\"Long short-term memory unit (LSTM) recurrent network cell.\n\n The default non-peephole implementation is based on:\n\n http://www.bioinf.jku.at/publications/older/2604.pdf\n\n S. Hochreiter and J. Schmidhuber.\n \"Long Short-Term Memory\". 
Neural Computation, 9(8):1735-1780, 1997.\n\n The peephole implementation is based on:\n\n https://research.google.com/pubs/archive/43905.pdf\n\n Hasim Sak, Andrew Senior, and Francoise Beaufays.\n \"Long short-term memory recurrent neural network architectures for\n large scale acoustic modeling.\" INTERSPEECH, 2014.\n\n The class uses optional peep-hole connections, optional cell clipping, and\n an optional projection layer.\n \"\"\"\n\n def __init__(self, num_units,\n use_peepholes=False, cell_clip=None,\n initializer=None, num_proj=None, proj_clip=None,\n num_unit_shards=None, num_proj_shards=None,\n forget_bias=1.0, state_is_tuple=True,\n activation=None, reuse=None):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell\n use_peepholes: bool, set True to enable diagonal/peephole connections.\n cell_clip: (optional) A float value, if provided the cell state is clipped\n by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is\n provided, then the projected values are clipped elementwise to within\n `[-proj_clip, proj_clip]`.\n num_unit_shards: Deprecated, will be removed by Jan. 2017.\n Use a variable_scope partitioner instead.\n num_proj_shards: Deprecated, will be removed by Jan. 2017.\n Use a variable_scope partitioner instead.\n forget_bias: Biases of the forget gate are initialized by default to 1\n in order to reduce the scale of forgetting at the beginning of\n the training. Must set it manually to `0.0` when restoring from\n CudnnLSTM trained checkpoints.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. If False, they are concatenated\n along the column axis. This latter behavior will soon be deprecated.\n activation: Activation function of the inner states. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n\n When restoring from CudnnLSTM-trained checkpoints, must use\n CudnnCompatibleLSTMCell instead.\n \"\"\"\n super(LSTMCell, self).__init__(_reuse=reuse)\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. Use state_is_tuple=True.\", self)\n if num_unit_shards is not None or num_proj_shards is not None:\n logging.warn(\n \"%s: The num_unit_shards and proj_unit_shards parameters are \"\n \"deprecated and will be removed in Jan 2017. 
\"\n \"Use a variable scope with a partitioner instead.\", self)\n\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._cell_clip = cell_clip\n self._initializer = initializer\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._num_unit_shards = num_unit_shards\n self._num_proj_shards = num_proj_shards\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n self._activation = activation or math_ops.tanh\n\n if num_proj:\n self._state_size = (\n LSTMStateTuple(num_units, num_proj)\n if state_is_tuple else num_units + num_proj)\n self._output_size = num_proj\n else:\n self._state_size = (\n LSTMStateTuple(num_units, num_units)\n if state_is_tuple else 2 * num_units)\n self._output_size = num_units\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: if `state_is_tuple` is False, this must be a state Tensor,\n `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a\n tuple of state Tensors, both `2-D`, with column sizes `c_state` and\n `m_state`.\n\n Returns:\n A tuple containing:\n\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n LSTM after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of LSTM after reading `inputs` when\n the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n sigmoid = math_ops.sigmoid\n\n if self._state_is_tuple:\n (c_prev, m_prev) = state\n else:\n c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n scope = vs.get_variable_scope()\n with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:\n if self._num_unit_shards is not None:\n unit_scope.set_partitioner(\n partitioned_variables.fixed_size_partitioner(\n self._num_unit_shards))\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)\n i, j, f, o = array_ops.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n # Diagonal connections\n if self._use_peepholes:\n with vs.variable_scope(unit_scope) as projection_scope:\n if self._num_unit_shards is not None:\n projection_scope.set_partitioner(None)\n w_f_diag = vs.get_variable(\n \"w_f_diag\", shape=[self._num_units], dtype=dtype)\n w_i_diag = vs.get_variable(\n \"w_i_diag\", shape=[self._num_units], dtype=dtype)\n w_o_diag = vs.get_variable(\n \"w_o_diag\", shape=[self._num_units], dtype=dtype)\n\n if self._use_peepholes:\n c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +\n sigmoid(i + w_i_diag * c_prev) * self._activation(j))\n else:\n c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *\n self._activation(j))\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n 
# pylint: enable=invalid-unary-operand-type\n if self._use_peepholes:\n m = sigmoid(o + w_o_diag * c) * self._activation(c)\n else:\n m = sigmoid(o) * self._activation(c)\n\n if self._num_proj is not None:\n with vs.variable_scope(\"projection\") as proj_scope:\n if self._num_proj_shards is not None:\n proj_scope.set_partitioner(\n partitioned_variables.fixed_size_partitioner(\n self._num_proj_shards))\n m = _linear(m, self._num_proj, bias=False)\n\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else\n array_ops.concat([c, m], 1))\n return m, new_state\n\n\ndef _enumerated_map_structure(map_fn, *args, **kwargs):\n ix = [0]\n def enumerated_fn(*inner_args, **inner_kwargs):\n r = map_fn(ix[0], *inner_args, **inner_kwargs)\n ix[0] += 1\n return r\n return nest.map_structure(enumerated_fn, *args, **kwargs)\n\n\nclass DropoutWrapper(RNNCell):\n \"\"\"Operator adding dropout to inputs and outputs of the given cell.\"\"\"\n\n def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,\n state_keep_prob=1.0, variational_recurrent=False,\n input_size=None, dtype=None, seed=None):\n \"\"\"Create a cell with added input, state, and/or output dropout.\n\n If `variational_recurrent` is set to `True` (**NOT** the default behavior),\n then the same dropout mask is applied at every step, as described in:\n\n Y. Gal, Z Ghahramani. \"A Theoretically Grounded Application of Dropout in\n Recurrent Neural Networks\". https://arxiv.org/abs/1512.05287\n\n Otherwise a different dropout mask is applied at every time step.\n\n Args:\n cell: an RNNCell, a projection to output_size is added to it.\n input_keep_prob: unit Tensor or float between 0 and 1, input keep\n probability; if it is constant and 1, no input dropout will be added.\n output_keep_prob: unit Tensor or float between 0 and 1, output keep\n probability; if it is constant and 1, no output dropout will be added.\n state_keep_prob: unit Tensor or float between 0 and 1, output keep\n probability; if it is constant and 1, no output dropout will be added.\n State dropout is performed on the *output* states of the cell.\n variational_recurrent: Python bool. If `True`, then the same\n dropout pattern is applied across all time steps per run call.\n If this parameter is set, `input_size` **must** be provided.\n input_size: (optional) (possibly nested tuple of) `TensorShape` objects\n containing the depth(s) of the input tensors expected to be passed in to\n the `DropoutWrapper`. 
Required and used **iff**\n `variational_recurrent = True` and `input_keep_prob < 1`.\n dtype: (optional) The `dtype` of the input, state, and output tensors.\n Required and used **iff** `variational_recurrent = True`.\n seed: (optional) integer, the randomness seed.\n\n Raises:\n TypeError: if cell is not an RNNCell.\n ValueError: if any of the keep_probs are not between 0 and 1.\n \"\"\"\n if not _like_rnncell(cell):\n raise TypeError(\"The parameter cell is not a RNNCell.\")\n with ops.name_scope(\"DropoutWrapperInit\"):\n def tensor_and_const_value(v):\n tensor_value = ops.convert_to_tensor(v)\n const_value = tensor_util.constant_value(tensor_value)\n return (tensor_value, const_value)\n for prob, attr in [(input_keep_prob, \"input_keep_prob\"),\n (state_keep_prob, \"state_keep_prob\"),\n (output_keep_prob, \"output_keep_prob\")]:\n tensor_prob, const_prob = tensor_and_const_value(prob)\n if const_prob is not None:\n if const_prob < 0 or const_prob > 1:\n raise ValueError(\"Parameter %s must be between 0 and 1: %d\"\n % (attr, const_prob))\n setattr(self, \"_%s\" % attr, float(const_prob))\n else:\n setattr(self, \"_%s\" % attr, tensor_prob)\n\n # Set cell, variational_recurrent, seed before running the code below\n self._cell = cell\n self._variational_recurrent = variational_recurrent\n self._seed = seed\n\n self._recurrent_input_noise = None\n self._recurrent_state_noise = None\n self._recurrent_output_noise = None\n\n if variational_recurrent:\n if dtype is None:\n raise ValueError(\n \"When variational_recurrent=True, dtype must be provided\")\n\n def convert_to_batch_shape(s):\n # Prepend a 1 for the batch dimension; for recurrent\n # variational dropout we use the same dropout mask for all\n # batch elements.\n return array_ops.concat(\n ([1], tensor_shape.TensorShape(s).as_list()), 0)\n\n def batch_noise(s, inner_seed):\n shape = convert_to_batch_shape(s)\n return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)\n\n if (not isinstance(self._input_keep_prob, numbers.Real) or\n self._input_keep_prob < 1.0):\n if input_size is None:\n raise ValueError(\n \"When variational_recurrent=True and input_keep_prob < 1.0 or \"\n \"is unknown, input_size must be provided\")\n self._recurrent_input_noise = _enumerated_map_structure(\n lambda i, s: batch_noise(s, inner_seed=self._gen_seed(\"input\", i)),\n input_size)\n self._recurrent_state_noise = _enumerated_map_structure(\n lambda i, s: batch_noise(s, inner_seed=self._gen_seed(\"state\", i)),\n cell.state_size)\n self._recurrent_output_noise = _enumerated_map_structure(\n lambda i, s: batch_noise(s, inner_seed=self._gen_seed(\"output\", i)),\n cell.output_size)\n\n def _gen_seed(self, salt_prefix, index):\n if self._seed is None:\n return None\n salt = \"%s_%d\" % (salt_prefix, index)\n string = (str(self._seed) + salt).encode(\"utf-8\")\n return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n return self._cell.zero_state(batch_size, dtype)\n\n def _variational_recurrent_dropout_value(\n self, index, value, noise, keep_prob):\n \"\"\"Performs dropout given the pre-calculated noise tensor.\"\"\"\n # uniform [keep_prob, 1.0 + keep_prob)\n random_tensor = keep_prob + noise\n\n # 0. if [keep_prob, 1.0) and 1. 
if [1.0, 1.0 + keep_prob)\n binary_tensor = math_ops.floor(random_tensor)\n ret = math_ops.div(value, keep_prob) * binary_tensor\n ret.set_shape(value.get_shape())\n return ret\n\n def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob):\n \"\"\"Decides whether to perform standard dropout or recurrent dropout.\"\"\"\n if not self._variational_recurrent:\n def dropout(i, v):\n return nn_ops.dropout(\n v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))\n return _enumerated_map_structure(dropout, values)\n else:\n def dropout(i, v, n):\n return self._variational_recurrent_dropout_value(i, v, n, keep_prob)\n return _enumerated_map_structure(dropout, values, recurrent_noise)\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell with the declared dropouts.\"\"\"\n def _should_dropout(p):\n return (not isinstance(p, float)) or p < 1\n\n if _should_dropout(self._input_keep_prob):\n inputs = self._dropout(inputs, \"input\",\n self._recurrent_input_noise,\n self._input_keep_prob)\n output, new_state = self._cell(inputs, state, scope)\n if _should_dropout(self._state_keep_prob):\n new_state = self._dropout(new_state, \"state\",\n self._recurrent_state_noise,\n self._state_keep_prob)\n if _should_dropout(self._output_keep_prob):\n output = self._dropout(output, \"output\",\n self._recurrent_output_noise,\n self._output_keep_prob)\n return output, new_state\n\n\nclass ResidualWrapper(RNNCell):\n \"\"\"RNNCell wrapper that ensures cell inputs are added to the outputs.\"\"\"\n\n def __init__(self, cell):\n \"\"\"Constructs a `ResidualWrapper` for `cell`.\n\n Args:\n cell: An instance of `RNNCell`.\n \"\"\"\n self._cell = cell\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n return self._cell.zero_state(batch_size, dtype)\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell and add its inputs to its outputs.\n\n Args:\n inputs: cell inputs.\n state: cell state.\n scope: optional cell scope.\n\n Returns:\n Tuple of cell outputs and new state.\n\n Raises:\n TypeError: If cell inputs and outputs have different structure (type).\n ValueError: If cell inputs and outputs have different structure (value).\n \"\"\"\n outputs, new_state = self._cell(inputs, state, scope=scope)\n nest.assert_same_structure(inputs, outputs)\n # Ensure shapes match\n def assert_shape_match(inp, out):\n inp.get_shape().assert_is_compatible_with(out.get_shape())\n nest.map_structure(assert_shape_match, inputs, outputs)\n res_outputs = nest.map_structure(\n lambda inp, out: inp + out, inputs, outputs)\n return (res_outputs, new_state)\n\n\nclass DeviceWrapper(RNNCell):\n \"\"\"Operator that ensures an RNNCell runs on a particular device.\"\"\"\n\n def __init__(self, cell, device):\n \"\"\"Construct a `DeviceWrapper` for `cell` with device `device`.\n\n Ensures the wrapped `cell` is called with `tf.device(device)`.\n\n Args:\n cell: An instance of `RNNCell`.\n device: A device string or function, for passing to `tf.device`.\n \"\"\"\n self._cell = cell\n self._device = device\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n with 
ops.device(self._device):\n return self._cell.zero_state(batch_size, dtype)\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell on specified device.\"\"\"\n with ops.device(self._device):\n return self._cell(inputs, state, scope=scope)\n\n\nclass MultiRNNCell(RNNCell):\n \"\"\"RNN cell composed sequentially of multiple simple cells.\"\"\"\n\n def __init__(self, cells, state_is_tuple=True):\n \"\"\"Create a RNN cell composed sequentially of a number of RNNCells.\n\n Args:\n cells: list of RNNCells that will be composed in this order.\n state_is_tuple: If True, accepted and returned states are n-tuples, where\n `n = len(cells)`. If False, the states are all\n concatenated along the column axis. This latter behavior will soon be\n deprecated.\n\n Raises:\n ValueError: if cells is empty (not allowed), or at least one of the cells\n returns a state tuple but the flag `state_is_tuple` is `False`.\n \"\"\"\n super(MultiRNNCell, self).__init__()\n if not cells:\n raise ValueError(\"Must specify at least one cell for MultiRNNCell.\")\n if not nest.is_sequence(cells):\n raise TypeError(\n \"cells must be a list or tuple, but saw: %s.\" % cells)\n\n self._cells = cells\n self._state_is_tuple = state_is_tuple\n if not state_is_tuple:\n if any(nest.is_sequence(c.state_size) for c in self._cells):\n raise ValueError(\"Some cells return tuples of states, but the flag \"\n \"state_is_tuple is not set. State sizes are: %s\"\n % str([c.state_size for c in self._cells]))\n\n @property\n def state_size(self):\n if self._state_is_tuple:\n return tuple(cell.state_size for cell in self._cells)\n else:\n return sum([cell.state_size for cell in self._cells])\n\n @property\n def output_size(self):\n return self._cells[-1].output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n if self._state_is_tuple:\n return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)\n else:\n # We know here that state_size of each cell is not a tuple and\n # presumably does not contain TensorArrays or anything else fancy\n return super(MultiRNNCell, self).zero_state(batch_size, dtype)\n\n def call(self, inputs, state):\n \"\"\"Run this multi-layer cell on inputs, starting from state.\"\"\"\n cur_state_pos = 0\n cur_inp = inputs\n new_states = []\n for i, cell in enumerate(self._cells):\n with vs.variable_scope(\"cell_%d\" % i):\n if self._state_is_tuple:\n if not nest.is_sequence(state):\n raise ValueError(\n \"Expected state to be a tuple of length %d, but received: %s\" %\n (len(self.state_size), state))\n cur_state = state[i]\n else:\n cur_state = array_ops.slice(state, [0, cur_state_pos],\n [-1, cell.state_size])\n cur_state_pos += cell.state_size\n cur_inp, new_state = cell(cur_inp, cur_state)\n new_states.append(new_state)\n\n new_states = (tuple(new_states) if self._state_is_tuple else\n array_ops.concat(new_states, 1))\n\n return cur_inp, new_states\n\n\nclass _SlimRNNCell(RNNCell):\n \"\"\"A simple wrapper for slim.rnn_cells.\"\"\"\n\n def __init__(self, cell_fn):\n \"\"\"Create a SlimRNNCell from a cell_fn.\n\n Args:\n cell_fn: a function which takes (inputs, state, scope) and produces the\n outputs and the new_state. 
Additionally when called with inputs=None and\n state=None it should return (initial_outputs, initial_state).\n\n Raises:\n TypeError: if cell_fn is not callable\n ValueError: if cell_fn cannot produce a valid initial state.\n \"\"\"\n if not callable(cell_fn):\n raise TypeError(\"cell_fn %s needs to be callable\", cell_fn)\n self._cell_fn = cell_fn\n self._cell_name = cell_fn.func.__name__\n init_output, init_state = self._cell_fn(None, None)\n output_shape = init_output.get_shape()\n state_shape = init_state.get_shape()\n self._output_size = output_shape.with_rank(2)[1].value\n self._state_size = state_shape.with_rank(2)[1].value\n if self._output_size is None:\n raise ValueError(\"Initial output created by %s has invalid shape %s\" %\n (self._cell_name, output_shape))\n if self._state_size is None:\n raise ValueError(\"Initial state created by %s has invalid shape %s\" %\n (self._cell_name, state_shape))\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def __call__(self, inputs, state, scope=None):\n scope = scope or self._cell_name\n output, state = self._cell_fn(inputs, state, scope=scope)\n return output, state\n\n\ndef _linear(args,\n output_size,\n bias,\n bias_initializer=None,\n kernel_initializer=None):\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\n\n Args:\n args: a 2D Tensor or a list of 2D, batch x n, Tensors.\n output_size: int, second dimension of W[i].\n bias: boolean, whether to add a bias term or not.\n bias_initializer: starting value to initialize the bias\n (default is all zeros).\n kernel_initializer: starting value to initialize the weight.\n\n Returns:\n A 2D Tensor with shape [batch x output_size] equal to\n sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\n\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n if args is None or (nest.is_sequence(args) and not args):\n raise ValueError(\"`args` must be specified\")\n if not nest.is_sequence(args):\n args = [args]\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size = 0\n shapes = [a.get_shape() for a in args]\n for shape in shapes:\n if shape.ndims != 2:\n raise ValueError(\"linear is expecting 2D arguments: %s\" % shapes)\n if shape[1].value is None:\n raise ValueError(\"linear expects shape[1] to be provided for shape %s, \"\n \"but saw %s\" % (shape, shape[1]))\n else:\n total_arg_size += shape[1].value\n\n dtype = [a.dtype for a in args][0]\n\n # Now the computation.\n scope = vs.get_variable_scope()\n with vs.variable_scope(scope) as outer_scope:\n weights = vs.get_variable(\n _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],\n dtype=dtype,\n initializer=kernel_initializer)\n if len(args) == 1:\n res = math_ops.matmul(args[0], weights)\n else:\n res = math_ops.matmul(array_ops.concat(args, 1), weights)\n if not bias:\n return res\n with vs.variable_scope(outer_scope) as inner_scope:\n inner_scope.set_partitioner(None)\n if bias_initializer is None:\n bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)\n biases = vs.get_variable(\n _BIAS_VARIABLE_NAME, [output_size],\n dtype=dtype,\n initializer=bias_initializer)\n return nn_ops.bias_add(res, biases)\n"
] | [
[
"tensorflow.contrib.keras.python.keras.models.save_model",
"tensorflow.contrib.keras.python.keras.models.Model",
"tensorflow.contrib.keras.python.keras.models.load_model",
"numpy.random.random",
"tensorflow.python.platform.test.main",
"tensorflow.contrib.keras.python.keras.layers.Input",
"tensorflow.contrib.keras.python.keras.optimizers.RMSprop",
"tensorflow.contrib.keras.python.keras.models.Sequential",
"tensorflow.contrib.keras.python.keras.layers.Lambda",
"tensorflow.contrib.keras.python.keras.losses.mse",
"tensorflow.contrib.keras.python.keras.layers.RepeatVector",
"tensorflow.contrib.keras.python.keras.layers.Dense"
],
[
"tensorflow.contrib.keras.python.keras.layers.GlobalAveragePooling2D",
"tensorflow.contrib.keras.python.keras.layers.concatenate",
"tensorflow.contrib.keras.python.keras.layers.Dense",
"tensorflow.contrib.keras.python.keras.backend.image_data_format",
"tensorflow.contrib.keras.python.keras.layers.AveragePooling2D",
"tensorflow.contrib.keras.python.keras.models.Model",
"tensorflow.contrib.keras.python.keras.layers.Conv2D",
"tensorflow.contrib.keras.python.keras.layers.BatchNormalization",
"tensorflow.contrib.keras.python.keras.layers.Input",
"tensorflow.contrib.keras.python.keras.engine.topology.get_source_inputs",
"tensorflow.contrib.keras.python.keras.layers.MaxPooling2D",
"tensorflow.contrib.keras.python.keras.layers.GlobalMaxPooling2D",
"tensorflow.contrib.keras.python.keras.utils.data_utils.get_file",
"tensorflow.contrib.keras.python.keras.layers.Activation"
],
[
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.nn_ops.bias_add",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.partitioned_variables.fixed_size_partitioner",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.ops.math_ops.floor",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.math_ops.div"
]
] |
sparks-baird/modnet | [
"2b4a88aa8a3323756b6daee52450569cddd0068b"
] | [
"modnet/matbench/benchmark.py"
] | [
"import os\nfrom collections import defaultdict\nfrom traceback import print_exc\nfrom typing import List, Dict, Any, Optional, Tuple, Type\n\nimport numpy as np\n\nfrom modnet.preprocessing import MODData\nfrom modnet.models import MODNetModel\nfrom modnet.utils import LOG\nfrom modnet.hyper_opt import FitGenetic\n\nMATBENCH_SEED = 18012019\n\n\ndef matbench_kfold_splits(data: MODData, n_splits=5, classification=False):\n \"\"\"Return the pre-defined k-fold splits to use when reporting matbench results.\n\n Arguments:\n data: The featurized MODData.\n \"\"\"\n\n if classification:\n from sklearn.model_selection import StratifiedKFold as KFold\n else:\n from sklearn.model_selection import KFold\n\n kf = KFold(n_splits=n_splits, shuffle=True, random_state=MATBENCH_SEED)\n kf_splits = kf.split(data.df_featurized, y=data.df_targets)\n return kf_splits\n\n\ndef matbench_benchmark(\n data: MODData,\n target: List[str],\n target_weights: Dict[str, float],\n fit_settings: Optional[Dict[str, Any]] = None,\n ga_settings: Optional[Dict[str, float]] = None,\n classification: bool = False,\n model_type: Type[MODNetModel] = MODNetModel,\n save_folds: bool = False,\n save_models: bool = False,\n hp_optimization: bool = True,\n hp_strategy: str = \"fit_preset\",\n inner_feat_selection: bool = True,\n use_precomputed_cross_nmi: bool = True,\n presets: Optional[List[dict]] = None,\n fast: bool = False,\n n_jobs: Optional[int] = None,\n nested: bool = False,\n **model_init_kwargs,\n) -> dict:\n \"\"\"Train and cross-validate a model against Matbench data splits, optionally\n performing hyperparameter optimisation.\n\n Arguments:\n data: The entire dataset as a `MODData`.\n target: The list of target names to train on.\n target_weights: The target weights to use for the `MODNetModel`.\n fit_settings: Any settings to pass to `model.fit(...)` directly\n (typically when not performing hyperparameter optimisation).\n classification: Whether all tasks are classification rather than regression.\n model_type: The type of the model to create and benchmark.\n save_folds: Whether to save dataframes with pre-processed fold\n data (e.g. feature selection).\n save_models: Whether to pickle all trained models according to\n their fold index and performance.\n hp_optimization: Whether to perform hyperparameter optimisation.\n hp_strategy: Which optimization strategy to choose. Use either \\\"fit_preset\\\" or \\\"ga\\\".\n inner_feat_selection: Whether to perform split-level feature\n selection or try to use pre-computed values.\n use_precomputed_cross_nmi: Whether to use the precmputed cross NMI\n from the Materials Project dataset, or recompute per fold.\n presets: Override the built-in hyperparameter grid with these presets.\n fast: Whether to perform debug training, i.e. reduced presets and epochs, for the fit_preset strategy.\n n_jobs: Try to parallelize the inner fit_preset over this number of\n processes. Maxes out at number_of_presets*nested_folds\n nested: Whether to perform nested CV for hyperparameter optimisation.\n **model_init_kwargs: Additional arguments to pass to the model on creation.\n\n Returns:\n A dictionary containing all the results from the training, broken\n down by model and by fold.\n\n \"\"\"\n\n if hp_optimization:\n if hp_strategy not in [\"fit_preset\", \"ga\"]:\n raise RuntimeError(\n f'{hp_strategy} not supported. 
Choose from \"fit_genetic\" or \"ga\".'\n )\n\n if fit_settings is None:\n fit_settings = {}\n\n if not fit_settings.get(\"n_feat\"):\n nf = len(data.df_featurized.columns)\n fit_settings[\"n_feat\"] = nf\n if not fit_settings.get(\"num_neurons\"):\n # Pass dummy network\n fit_settings[\"num_neurons\"] = [[4], [4], [4], [4]]\n\n if ga_settings is None:\n ga_settings = {\n \"size_pop\": 20,\n \"num_generations\": 10,\n \"early_stopping\": 4,\n \"refit\": False,\n }\n\n fold_data = []\n results = defaultdict(list)\n\n for ind, (train, test) in enumerate(\n matbench_kfold_splits(data, classification=classification)\n ):\n train_data, test_data = data.split((train, test))\n if inner_feat_selection:\n path = \"folds/train_moddata_f{}\".format(ind + 1)\n if os.path.isfile(path):\n train_data = MODData.load(path)\n else:\n train_data.feature_selection(\n n=-1,\n use_precomputed_cross_nmi=use_precomputed_cross_nmi,\n n_jobs=n_jobs,\n )\n os.makedirs(\"folds\", exist_ok=True)\n train_data.save(path)\n\n fold_data.append((train_data, test_data))\n\n args = (target, target_weights, fit_settings, ga_settings)\n\n model_kwargs = {\n \"model_type\": model_type,\n \"hp_optimization\": hp_optimization,\n \"fast\": fast,\n \"classification\": classification,\n \"save_folds\": save_folds,\n \"presets\": presets,\n \"hp_strategy\": hp_strategy,\n \"save_models\": save_models,\n \"nested\": nested,\n \"n_jobs\": n_jobs,\n }\n\n model_kwargs.update(model_init_kwargs)\n\n fold_results = []\n for fold in enumerate(fold_data):\n fold_results.append(train_fold(fold, *args, **model_kwargs))\n\n for fold in fold_results:\n for key in fold:\n results[key].append(fold[key])\n\n return results\n\n\ndef train_fold(\n fold: Tuple[int, Tuple[MODData, MODData]],\n target: List[str],\n target_weights: Dict[str, float],\n fit_settings: Dict[str, Any],\n ga_settings: Dict[str, float],\n model_type: Type[MODNetModel] = MODNetModel,\n presets=None,\n hp_optimization=True,\n hp_strategy=\"fit_preset\",\n classification=False,\n save_folds=False,\n fast=False,\n save_models=False,\n nested=False,\n n_jobs=None,\n **model_kwargs,\n) -> dict:\n \"\"\"Train one fold of a CV.\n Unless stated, all arguments have the same meaning as in `matbench_benchmark(...)`.\n\n Arguments:\n fold: A tuple containing the fold index, and another tuple of the\n training MODData and test MODData.\n\n Returns:\n A dictionary summarising the fold results.\n\n \"\"\"\n\n fold_ind, (train_data, test_data) = fold\n\n results = {}\n multi_target = bool(len(target) - 1)\n\n # If not performing hp_optimization, load model init settings from fit_settings\n model_settings = {}\n if not hp_optimization:\n model_settings = {\n \"num_neurons\": fit_settings[\"num_neurons\"],\n \"num_classes\": fit_settings.get(\"num_classes\"),\n \"act\": fit_settings.get(\"act\"),\n \"out_act\": fit_settings.get(\"out_act\", \"linear\"),\n \"n_feat\": fit_settings[\"n_feat\"],\n }\n\n model_settings.update(model_kwargs)\n\n if classification:\n model_settings[\"num_classes\"] = {t: 2 for t in target_weights}\n\n model = model_type(target, target_weights, **model_settings)\n\n if hp_optimization:\n if hp_strategy == \"fit_preset\":\n (\n models,\n val_losses,\n best_learning_curve,\n learning_curves,\n best_presets,\n ) = model.fit_preset(\n train_data,\n presets=presets,\n fast=fast,\n classification=classification,\n nested=nested,\n n_jobs=n_jobs,\n )\n results[\"nested_losses\"] = val_losses\n results[\"nested_learning_curves\"] = learning_curves\n 
results[\"best_learning_curves\"] = best_learning_curve\n results[\"best_presets\"] = best_presets\n\n elif hp_strategy == \"ga\":\n ga = FitGenetic(train_data)\n model = ga.run(\n size_pop=ga_settings[\"size_pop\"],\n num_generations=ga_settings[\"num_generations\"],\n nested=nested,\n n_jobs=n_jobs,\n early_stopping=ga_settings[\"early_stopping\"],\n refit=ga_settings[\"refit\"],\n fast=fast,\n )\n\n if save_models:\n for ind, nested_model in enumerate(models):\n score = val_losses[ind]\n nested_model.save(f\"results/nested_model_{fold_ind}_{ind}_{score:3.3f}\")\n\n model.save(f\"results/best_model_{fold_ind}_{score:3.3f}\")\n\n else:\n if fit_settings[\"increase_bs\"]:\n model.fit(\n train_data,\n lr=fit_settings[\"lr\"],\n epochs=fit_settings[\"epochs\"],\n batch_size=fit_settings[\"batch_size\"],\n loss=fit_settings[\"loss\"],\n )\n model.fit(\n train_data,\n lr=fit_settings[\"lr\"] / 7,\n epochs=fit_settings[\"epochs\"] // 2,\n batch_size=fit_settings[\"batch_size\"] * 2,\n loss=fit_settings[\"loss\"],\n )\n else:\n model.fit(train_data, **fit_settings)\n\n try:\n predict_kwargs = {}\n if classification:\n predict_kwargs[\"return_prob\"] = True\n if model.can_return_uncertainty:\n predict_kwargs[\"return_unc\"] = True\n\n pred_results = model.predict(test_data, **predict_kwargs)\n if isinstance(pred_results, tuple):\n predictions, stds = pred_results\n else:\n predictions = pred_results\n stds = None\n\n targets = test_data.df_targets\n\n if classification:\n from sklearn.metrics import roc_auc_score\n from sklearn.preprocessing import OneHotEncoder\n\n y_true = OneHotEncoder().fit_transform(targets.values).toarray()\n score = roc_auc_score(y_true, predictions.values)\n pred_bool = model.predict(test_data, return_prob=False)\n LOG.info(f\"ROC-AUC: {score}\")\n errors = targets - pred_bool\n elif multi_target:\n errors = targets - predictions\n score = np.mean(np.abs(errors.values), axis=0)\n else:\n errors = targets - predictions\n score = np.mean(np.abs(errors.values))\n except Exception:\n print_exc()\n print(\"Something went wrong benchmarking this model.\")\n predictions = None\n errors = None\n score = None\n\n if save_folds:\n opt_feat = train_data.optimal_features[: fit_settings[\"n_feat\"]]\n df_train = train_data.df_featurized\n df_train = df_train[opt_feat]\n df_train.to_csv(\"folds/train_f{}.csv\".format(ind + 1))\n df_test = test_data.df_featurized\n df_test = df_test[opt_feat]\n errors.columns = [x + \"_error\" for x in errors.columns]\n df_test = df_test.join(errors)\n df_test.to_csv(\"folds/test_f{}.csv\".format(ind + 1))\n\n results[\"predictions\"] = predictions\n if stds is not None:\n results[\"stds\"] = stds\n results[\"targets\"] = targets\n results[\"errors\"] = errors\n results[\"scores\"] = score\n results[\"model\"] = model\n\n return results\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.abs",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.KFold"
]
] |
lippman1125/pytorch_FAN | [
"ffc9c968478d55cb0c75c062bb8774923f961110"
] | [
"utils/evaluation.py"
] | [
"from __future__ import absolute_import, print_function\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import randint\n\nfrom .misc import *\nfrom .transforms import transform, transform_preds\n\n__all__ = ['accuracy', 'AverageMeter']\n\n\ndef get_preds(scores):\n ''' get predictions from score maps in torch Tensor\n return type: torch.LongTensor\n '''\n assert scores.dim() == 4, 'Score maps should be 4-dim'\n # batch, chn, height, width ===> batch, chn, height*width\n # chn = 68\n # height*width = score_map\n maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)\n\n maxval = maxval.view(scores.size(0), scores.size(1), 1)\n idx = idx.view(scores.size(0), scores.size(1), 1) + 1\n\n preds = idx.repeat(1, 1, 2).float()\n\n # batchsize * numPoints * 2\n # 0 is x coord\n # 1 is y coord\n # shape = batchsize, numPoints, 2\n preds[:, :, 0] = (preds[:, :, 0] - 1) % scores.size(3) + 1\n preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / scores.size(2)) + 1\n\n pred_mask = maxval.gt(0).repeat(1, 1, 2).float()\n preds *= pred_mask\n return preds\n\n\ndef calc_dists(preds, target, normalize):\n preds = preds.float()\n target = target.float()\n # dists = 68 x batch\n dists = torch.zeros(preds.size(1), preds.size(0))\n for n in range(preds.size(0)):\n for c in range(preds.size(1)):\n if target[n, c, 0] > 1 and target[n, c, 1] > 1:\n dists[c, n] = torch.dist(preds[n, c, :], target[n, c, :]) / normalize[n]\n else:\n dists[c, n] = -1\n return dists\n\n\ndef dist_acc(dists, thr=0.5):\n ''' Return percentage below threshold while ignoring values with a -1 '''\n if dists.ne(-1).sum() > 0:\n return dists.le(thr).eq(dists.ne(-1)).sum() * 1.0 / dists.ne(-1).sum()\n else:\n return -1\n\n\ndef calc_metrics(dists, path='', category=''):\n errors = torch.mean(dists, 0).view(dists.size(1))\n axes1 = np.linspace(0, 1, 1000)\n axes2 = np.zeros(1000)\n for i in range(1000):\n axes2[i] = float((errors < axes1[i]).sum()) / float(errors.size(0))\n\n auc = round(np.sum(axes2[:70]) / .7, 2)\n\n if path:\n label = '{}({}) : {}'.format(path.split('/')[2], category, str(auc))\n plt.xlim(0, 7)\n plt.ylim(0, 100)\n plt.yticks(np.arange(0, 110, 10))\n plt.xticks(np.arange(0, 8, 1))\n\n plt.grid()\n plt.title('NME (%)', fontsize=20)\n plt.xlabel('NME (%)', fontsize=16)\n plt.ylabel('Test images (%)', fontsize=16)\n if category:\n if category in ['Easy', 'Category A']:\n plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3)\n if category in ['Media', 'Category B']:\n plt.plot(axes1 * 100, axes2 * 100, 'r-', label=label, lw=3)\n if category in ['Hard', 'Category C']:\n plt.plot(axes1 * 100, axes2 * 100, 'g-', label=label, lw=3)\n else:\n plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3)\n plt.legend(loc=4, fontsize=12)\n\n plt.savefig(os.path.join(path + '/CED.eps'))\n return auc\n\n\ndef _get_bboxsize(iterable):\n # iterable = 68 x 2\n # torch.min return values, idxs\n mins = torch.min(iterable, 0)[0].view(2)\n maxs = torch.max(iterable, 0)[0].view(2)\n\n center = torch.FloatTensor((maxs[0] - (maxs[0] - mins[0]) / 2,\n maxs[1] - (maxs[1] - mins[1]) / 2))\n # center[1] = center[1] - ((maxs[1] - mins[1]) * 0.12)\n\n return np.sqrt(abs(maxs[0] - mins[0]) * abs(maxs[1] - mins[1]))\n\n\ndef accuracy(output, target, idxs, thr=0.08):\n ''' Calculate accuracy according to NME, but uses ground truth heatmap rather than x,y locations\n First value to be returned is accuracy calculated based on overall 'idxs'\n followed by individual accuracies\n '''\n # preds = batch, 
68, 64, 64\n preds = get_preds(output)\n gts = get_preds(target)\n # B * 2\n norm = torch.ones(preds.size(0))\n # use face bbox to normalize\n for i, gt in enumerate(gts):\n norm[i] = _get_bboxsize(gt)\n\n dists = calc_dists(preds, gts, norm)\n\n acc = torch.zeros(len(idxs) + 1)\n avg_acc = 0\n cnt = 0\n\n mean_dists = torch.mean(dists, 0)\n acc[0] = mean_dists.le(thr).sum() * 1.0 / preds.size(0)\n # for i in range(len(idxs)):\n # acc[i+1] = dist_acc(dists[idxs[i]-1], thr=thr)\n # if acc[i+1] >= 0:\n # avg_acc = avg_acc + acc[i+1]\n # cnt += 1\n\n # if cnt != 0:\n # acc[0] = avg_acc / cnt\n return acc, dists\n\n\ndef final_preds(output, center, scale, res):\n if output.size(1) == 136:\n coords = output.view((output.szie(0), 68, 2))\n else:\n coords = get_preds(output) # float type\n\n # output shape is batch, 68, 64, 64\n # coords shape is batch, 68, 2\n # pose-processing\n for n in range(coords.size(0)):\n for p in range(coords.size(1)):\n hm = output[n][p]\n px = int(math.floor(coords[n][p][0]))\n py = int(math.floor(coords[n][p][1]))\n if px > 1 and px < res[0] and py > 1 and py < res[1]:\n diff = torch.Tensor(\n [hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1] - hm[py - 2][px - 1]])\n coords[n][p] += diff.sign() * .25\n coords += 0.5\n preds = coords.clone()\n\n # Transform back\n for i in range(coords.size(0)):\n preds[i] = transform_preds(coords[i], center[i], scale[i], res)\n\n if preds.dim() < 3:\n preds = preds.view(1, preds.size())\n return preds\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
StanfordASL/MATS | [
"b31a86eb56728fc6025c71c7202ab425b078e3e5"
] | [
"mats/model/components/gmm2d.py"
] | [
"import torch\nimport torch.distributions as td\n\n\nclass GMM2D(td.MixtureSameFamily):\n def __init__(self, mixture_distribution, component_distribution):\n super(GMM2D, self).__init__(mixture_distribution, component_distribution)\n\n def mode_mode(self):\n mode_k = torch.argmax(self.mixture_distribution.probs[0, 0]).item()\n mode_gaussian = self.component_distribution.mean[:, 0, mode_k, :2]\n return mode_gaussian\n\n def position_log_prob(self, x):\n # Computing the log probability over only the positions.\n component_dist = td.MultivariateNormal(loc=self.component_distribution.mean[..., :2],\n scale_tril=self.component_distribution.scale_tril[..., :2, :2])\n position_dist = td.MixtureSameFamily(self.mixture_distribution, component_dist)\n return position_dist.log_prob(x)\n\n @property\n def pis(self):\n return self.mixture_distribution.probs[0, 0]\n"
] | [
[
"torch.distributions.MixtureSameFamily",
"torch.distributions.MultivariateNormal",
"torch.argmax"
]
] |
emacip/incubator-superset | [
"83ee9178328c5193808fe356ceb3090a299477f6"
] | [
"superset/db_engine_specs.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=C,R,W\n\"\"\"Compatibility layer for different database engines\n\nThis modules stores logic specific to different database engines. Things\nlike time-related functions that are similar but not identical, or\ninformation as to expose certain features or not and how to expose them.\n\nFor instance, Hive/Presto supports partitions and have a specific API to\nlist partitions. Other databases like Vertica also support partitions but\nhave different API to get to them. Other databases don't support partitions\nat all. The classes here will use a common interface to specify all this.\n\nThe general idea is to use static classes and an inheritance scheme.\n\"\"\"\nfrom collections import namedtuple\nimport hashlib\nimport inspect\nimport logging\nimport os\nimport re\nimport textwrap\nimport time\n\nfrom flask import g\nfrom flask_babel import lazy_gettext as _\nimport pandas\nimport sqlalchemy as sqla\nfrom sqlalchemy import Column, select\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy.engine.url import make_url\nfrom sqlalchemy.sql import quoted_name, text\nfrom sqlalchemy.sql.expression import TextAsFrom\nimport sqlparse\nfrom werkzeug.utils import secure_filename\n\nfrom superset import app, conf, db, sql_parse\nfrom superset.exceptions import SupersetTemplateException\nfrom superset.utils import core as utils\n\nQueryStatus = utils.QueryStatus\nconfig = app.config\n\ntracking_url_trans = conf.get('TRACKING_URL_TRANSFORMER')\nhive_poll_interval = conf.get('HIVE_POLL_INTERVAL')\n\nGrain = namedtuple('Grain', 'name label function duration')\n\nbuiltin_time_grains = {\n None: 'Time Column',\n 'PT1S': 'second',\n 'PT1M': 'minute',\n 'PT5M': '5 minute',\n 'PT10M': '10 minute',\n 'PT15M': '15 minute',\n 'PT0.5H': 'half hour',\n 'PT1H': 'hour',\n 'P1D': 'day',\n 'P1W': 'week',\n 'P1M': 'month',\n 'P0.25Y': 'quarter',\n 'P1Y': 'year',\n '1969-12-28T00:00:00Z/P1W': 'week_start_sunday',\n '1969-12-29T00:00:00Z/P1W': 'week_start_monday',\n 'P1W/1970-01-03T00:00:00Z': 'week_ending_saturday',\n 'P1W/1970-01-04T00:00:00Z': 'week_ending_sunday',\n}\n\n\ndef _create_time_grains_tuple(time_grains, time_grain_functions, blacklist):\n ret_list = []\n blacklist = blacklist if blacklist else []\n for duration, func in time_grain_functions.items():\n if duration not in blacklist:\n name = time_grains.get(duration)\n ret_list.append(Grain(name, _(name), func, duration))\n return tuple(ret_list)\n\n\nclass LimitMethod(object):\n \"\"\"Enum the ways that limits can be applied\"\"\"\n FETCH_MANY = 'fetch_many'\n WRAP_SQL = 'wrap_sql'\n FORCE_LIMIT = 'force_limit'\n\n\nclass BaseEngineSpec(object):\n\n \"\"\"Abstract class for database engine specific configurations\"\"\"\n\n engine = 'base' # str as 
defined in sqlalchemy.engine.engine\n time_grain_functions = {}\n time_groupby_inline = False\n limit_method = LimitMethod.FORCE_LIMIT\n time_secondary_columns = False\n inner_joins = True\n allows_subquery = True\n force_column_alias_quotes = False\n arraysize = None\n\n @classmethod\n def get_time_grains(cls):\n blacklist = config.get('TIME_GRAIN_BLACKLIST', [])\n grains = builtin_time_grains.copy()\n grains.update(config.get('TIME_GRAIN_ADDONS', {}))\n grain_functions = cls.time_grain_functions.copy()\n grain_addon_functions = config.get('TIME_GRAIN_ADDON_FUNCTIONS', {})\n grain_functions.update(grain_addon_functions.get(cls.engine, {}))\n return _create_time_grains_tuple(grains, grain_functions, blacklist)\n\n @classmethod\n def fetch_data(cls, cursor, limit):\n if cls.arraysize:\n cursor.arraysize = cls.arraysize\n if cls.limit_method == LimitMethod.FETCH_MANY:\n return cursor.fetchmany(limit)\n return cursor.fetchall()\n\n @classmethod\n def epoch_to_dttm(cls):\n raise NotImplementedError()\n\n @classmethod\n def epoch_ms_to_dttm(cls):\n return cls.epoch_to_dttm().replace('{col}', '({col}/1000.000)')\n\n @classmethod\n def get_datatype(cls, type_code):\n if isinstance(type_code, str) and len(type_code):\n return type_code.upper()\n\n @classmethod\n def extra_table_metadata(cls, database, table_name, schema_name):\n \"\"\"Returns engine-specific table metadata\"\"\"\n return {}\n\n @classmethod\n def apply_limit_to_sql(cls, sql, limit, database):\n \"\"\"Alters the SQL statement to apply a LIMIT clause\"\"\"\n if cls.limit_method == LimitMethod.WRAP_SQL:\n sql = sql.strip('\\t\\n ;')\n qry = (\n select('*')\n .select_from(\n TextAsFrom(text(sql), ['*']).alias('inner_qry'),\n )\n .limit(limit)\n )\n return database.compile_sqla_query(qry)\n elif LimitMethod.FORCE_LIMIT:\n parsed_query = sql_parse.ParsedQuery(sql)\n sql = parsed_query.get_query_with_new_limit(limit)\n return sql\n\n @classmethod\n def get_limit_from_sql(cls, sql):\n parsed_query = sql_parse.ParsedQuery(sql)\n return parsed_query.limit\n\n @classmethod\n def get_query_with_new_limit(cls, sql, limit):\n parsed_query = sql_parse.ParsedQuery(sql)\n return parsed_query.get_query_with_new_limit(limit)\n\n @staticmethod\n def csv_to_df(**kwargs):\n kwargs['filepath_or_buffer'] = \\\n config['UPLOAD_FOLDER'] + kwargs['filepath_or_buffer']\n kwargs['encoding'] = 'utf-8'\n kwargs['iterator'] = True\n chunks = pandas.read_csv(**kwargs)\n df = pandas.DataFrame()\n df = pandas.concat(chunk for chunk in chunks)\n return df\n\n @staticmethod\n def df_to_db(df, table, **kwargs):\n df.to_sql(**kwargs)\n table.user_id = g.user.id\n table.schema = kwargs['schema']\n table.fetch_metadata()\n db.session.add(table)\n db.session.commit()\n\n @staticmethod\n def create_table_from_csv(form, table):\n def _allowed_file(filename):\n # Only allow specific file extensions as specified in the config\n extension = os.path.splitext(filename)[1]\n return extension and extension[1:] in config['ALLOWED_EXTENSIONS']\n\n filename = secure_filename(form.csv_file.data.filename)\n if not _allowed_file(filename):\n raise Exception('Invalid file type selected')\n kwargs = {\n 'filepath_or_buffer': filename,\n 'sep': form.sep.data,\n 'header': form.header.data if form.header.data else 0,\n 'index_col': form.index_col.data,\n 'mangle_dupe_cols': form.mangle_dupe_cols.data,\n 'skipinitialspace': form.skipinitialspace.data,\n 'skiprows': form.skiprows.data,\n 'nrows': form.nrows.data,\n 'skip_blank_lines': form.skip_blank_lines.data,\n 'parse_dates': 
form.parse_dates.data,\n 'infer_datetime_format': form.infer_datetime_format.data,\n 'chunksize': 10000,\n }\n df = BaseEngineSpec.csv_to_df(**kwargs)\n\n df_to_db_kwargs = {\n 'table': table,\n 'df': df,\n 'name': form.name.data,\n 'con': create_engine(form.con.data.sqlalchemy_uri_decrypted, echo=False),\n 'schema': form.schema.data,\n 'if_exists': form.if_exists.data,\n 'index': form.index.data,\n 'index_label': form.index_label.data,\n 'chunksize': 10000,\n }\n\n BaseEngineSpec.df_to_db(**df_to_db_kwargs)\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n @classmethod\n def fetch_result_sets(cls, db, datasource_type):\n \"\"\"Returns a list of tables [schema1.table1, schema2.table2, ...]\n\n Datasource_type can be 'table' or 'view'.\n Empty schema corresponds to the list of full names of the all\n tables or views: <schema>.<result_set_name>.\n \"\"\"\n schemas = db.all_schema_names(cache=db.schema_cache_enabled,\n cache_timeout=db.schema_cache_timeout,\n force=True)\n all_result_sets = []\n for schema in schemas:\n if datasource_type == 'table':\n all_datasource_names = db.all_table_names_in_schema(\n schema=schema, force=True,\n cache=db.table_cache_enabled,\n cache_timeout=db.table_cache_timeout)\n elif datasource_type == 'view':\n all_datasource_names = db.all_view_names_in_schema(\n schema=schema, force=True,\n cache=db.table_cache_enabled,\n cache_timeout=db.table_cache_timeout)\n all_result_sets += [\n '{}.{}'.format(schema, t) for t in all_datasource_names]\n return all_result_sets\n\n @classmethod\n def handle_cursor(cls, cursor, query, session):\n \"\"\"Handle a live cursor between the execute and fetchall calls\n\n The flow works without this method doing anything, but it allows\n for handling the cursor and updating progress information in the\n query object\"\"\"\n pass\n\n @classmethod\n def extract_error_message(cls, e):\n \"\"\"Extract error message for queries\"\"\"\n return utils.error_msg_from_exception(e)\n\n @classmethod\n def adjust_database_uri(cls, uri, selected_schema):\n \"\"\"Based on a URI and selected schema, return a new URI\n\n The URI here represents the URI as entered when saving the database,\n ``selected_schema`` is the schema currently active presumably in\n the SQL Lab dropdown. 
Based on that, for some database engine,\n we can return a new altered URI that connects straight to the\n active schema, meaning the users won't have to prefix the object\n names by the schema name.\n\n Some databases engines have 2 level of namespacing: database and\n schema (postgres, oracle, mssql, ...)\n For those it's probably better to not alter the database\n component of the URI with the schema name, it won't work.\n\n Some database drivers like presto accept '{catalog}/{schema}' in\n the database component of the URL, that can be handled here.\n \"\"\"\n return uri\n\n @classmethod\n def patch(cls):\n pass\n\n @classmethod\n def get_schema_names(cls, inspector):\n return sorted(inspector.get_schema_names())\n\n @classmethod\n def get_table_names(cls, inspector, schema):\n return sorted(inspector.get_table_names(schema))\n\n @classmethod\n def get_view_names(cls, inspector, schema):\n return sorted(inspector.get_view_names(schema))\n\n @classmethod\n def where_latest_partition(\n cls, table_name, schema, database, qry, columns=None):\n return False\n\n @classmethod\n def _get_fields(cls, cols):\n return [sqla.column(c.get('name')) for c in cols]\n\n @classmethod\n def select_star(cls, my_db, table_name, engine, schema=None, limit=100,\n show_cols=False, indent=True, latest_partition=True,\n cols=None):\n fields = '*'\n cols = cols or []\n if (show_cols or latest_partition) and not cols:\n cols = my_db.get_columns(table_name, schema)\n\n if show_cols:\n fields = cls._get_fields(cols)\n quote = engine.dialect.identifier_preparer.quote\n if schema:\n full_table_name = quote(schema) + '.' + quote(table_name)\n else:\n full_table_name = quote(table_name)\n\n qry = select(fields).select_from(text(full_table_name))\n\n if limit:\n qry = qry.limit(limit)\n if latest_partition:\n partition_query = cls.where_latest_partition(\n table_name, schema, my_db, qry, columns=cols)\n if partition_query != False: # noqa\n qry = partition_query\n sql = my_db.compile_sqla_query(qry)\n if indent:\n sql = sqlparse.format(sql, reindent=True)\n return sql\n\n @classmethod\n def modify_url_for_impersonation(cls, url, impersonate_user, username):\n \"\"\"\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n \"\"\"\n if impersonate_user is not None and username is not None:\n url.username = username\n\n @classmethod\n def get_configuration_for_impersonation(cls, uri, impersonate_user, username):\n \"\"\"\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n \"\"\"\n return {}\n\n @classmethod\n def execute(cls, cursor, query, **kwargs):\n if cls.arraysize:\n cursor.arraysize = cls.arraysize\n cursor.execute(query)\n\n @classmethod\n def make_label_compatible(cls, label):\n \"\"\"\n Conditionally mutate and/or quote a sql column/expression label. If\n force_column_alias_quotes is set to True, return the label as a\n sqlalchemy.sql.elements.quoted_name object to ensure that the select query\n and query results have same case. 
Otherwise return the mutated label as a\n regular string.\n \"\"\"\n label = cls.mutate_label(label)\n return quoted_name(label, True) if cls.force_column_alias_quotes else label\n\n @staticmethod\n def mutate_label(label):\n \"\"\"\n Most engines support mixed case aliases that can include numbers\n and special characters, like commas, parentheses etc. For engines that\n have restrictions on what types of aliases are supported, this method\n can be overridden to ensure that labels conform to the engine's\n limitations. Mutated labels should be deterministic (input label A always\n yields output label X) and unique (input labels A and B don't yield the same\n output label X).\n \"\"\"\n return label\n\n\nclass PostgresBaseEngineSpec(BaseEngineSpec):\n \"\"\" Abstract class for Postgres 'like' databases \"\"\"\n\n engine = ''\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': \"DATE_TRUNC('second', {col}) AT TIME ZONE 'UTC'\",\n 'PT1M': \"DATE_TRUNC('minute', {col}) AT TIME ZONE 'UTC'\",\n 'PT1H': \"DATE_TRUNC('hour', {col}) AT TIME ZONE 'UTC'\",\n 'P1D': \"DATE_TRUNC('day', {col}) AT TIME ZONE 'UTC'\",\n 'P1W': \"DATE_TRUNC('week', {col}) AT TIME ZONE 'UTC'\",\n 'P1M': \"DATE_TRUNC('month', {col}) AT TIME ZONE 'UTC'\",\n 'P0.25Y': \"DATE_TRUNC('quarter', {col}) AT TIME ZONE 'UTC'\",\n 'P1Y': \"DATE_TRUNC('year', {col}) AT TIME ZONE 'UTC'\",\n }\n\n @classmethod\n def fetch_data(cls, cursor, limit):\n if not cursor.description:\n return []\n if cls.limit_method == LimitMethod.FETCH_MANY:\n return cursor.fetchmany(limit)\n return cursor.fetchall()\n\n @classmethod\n def epoch_to_dttm(cls):\n return \"(timestamp 'epoch' + {col} * interval '1 second')\"\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n\nclass PostgresEngineSpec(PostgresBaseEngineSpec):\n engine = 'postgresql'\n\n @classmethod\n def get_table_names(cls, inspector, schema):\n \"\"\"Need to consider foreign tables for PostgreSQL\"\"\"\n tables = inspector.get_table_names(schema)\n tables.extend(inspector.get_foreign_table_names(schema))\n return sorted(tables)\n\n\nclass SnowflakeEngineSpec(PostgresBaseEngineSpec):\n engine = 'snowflake'\n force_column_alias_quotes = True\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': \"DATE_TRUNC('SECOND', {col})\",\n 'PT1M': \"DATE_TRUNC('MINUTE', {col})\",\n 'PT5M': \"DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 5) * 5, \\\n DATE_TRUNC('HOUR', {col}))\",\n 'PT10M': \"DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 10) * 10, \\\n DATE_TRUNC('HOUR', {col}))\",\n 'PT15M': \"DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 15) * 15, \\\n DATE_TRUNC('HOUR', {col}))\",\n 'PT0.5H': \"DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 30) * 30, \\\n DATE_TRUNC('HOUR', {col}))\",\n 'PT1H': \"DATE_TRUNC('HOUR', {col})\",\n 'P1D': \"DATE_TRUNC('DAY', {col})\",\n 'P1W': \"DATE_TRUNC('WEEK', {col})\",\n 'P1M': \"DATE_TRUNC('MONTH', {col})\",\n 'P0.25Y': \"DATE_TRUNC('QUARTER', {col})\",\n 'P1Y': \"DATE_TRUNC('YEAR', {col})\",\n }\n\n @classmethod\n def adjust_database_uri(cls, uri, selected_schema=None):\n database = uri.database\n if '/' in uri.database:\n database = uri.database.split('/')[0]\n if selected_schema:\n uri.database = database + '/' + selected_schema\n return uri\n\n\nclass VerticaEngineSpec(PostgresBaseEngineSpec):\n engine = 'vertica'\n\n\nclass RedshiftEngineSpec(PostgresBaseEngineSpec):\n engine = 'redshift'\n\n @staticmethod\n def mutate_label(label):\n \"\"\"\n Redshift only supports lowercase 
column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n \"\"\"\n return label.lower()\n\n\nclass OracleEngineSpec(PostgresBaseEngineSpec):\n engine = 'oracle'\n limit_method = LimitMethod.WRAP_SQL\n force_column_alias_quotes = True\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': 'CAST({col} as DATE)',\n 'PT1M': \"TRUNC(CAST({col} as DATE), 'MI')\",\n 'PT1H': \"TRUNC(CAST({col} as DATE), 'HH')\",\n 'P1D': \"TRUNC(CAST({col} as DATE), 'DDD')\",\n 'P1W': \"TRUNC(CAST({col} as DATE), 'WW')\",\n 'P1M': \"TRUNC(CAST({col} as DATE), 'MONTH')\",\n 'P0.25Y': \"TRUNC(CAST({col} as DATE), 'Q')\",\n 'P1Y': \"TRUNC(CAST({col} as DATE), 'YEAR')\",\n }\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n return (\n \"\"\"TO_TIMESTAMP('{}', 'YYYY-MM-DD\"T\"HH24:MI:SS.ff6')\"\"\"\n ).format(dttm.isoformat())\n\n @staticmethod\n def mutate_label(label):\n \"\"\"\n Oracle 12.1 and earlier support a maximum of 30 byte length object names, which\n usually means 30 characters.\n :param str label: Original label which might include unsupported characters\n :return: String that is supported by the database\n \"\"\"\n if len(label) > 30:\n hashed_label = hashlib.md5(label.encode('utf-8')).hexdigest()\n # truncate the hash to first 30 characters\n return hashed_label[:30]\n return label\n\n\nclass Db2EngineSpec(BaseEngineSpec):\n engine = 'ibm_db_sa'\n limit_method = LimitMethod.WRAP_SQL\n force_column_alias_quotes = True\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': 'CAST({col} as TIMESTAMP)'\n ' - MICROSECOND({col}) MICROSECONDS',\n 'PT1M': 'CAST({col} as TIMESTAMP)'\n ' - SECOND({col}) SECONDS'\n ' - MICROSECOND({col}) MICROSECONDS',\n 'PT1H': 'CAST({col} as TIMESTAMP)'\n ' - MINUTE({col}) MINUTES'\n ' - SECOND({col}) SECONDS'\n ' - MICROSECOND({col}) MICROSECONDS ',\n 'P1D': 'CAST({col} as TIMESTAMP)'\n ' - HOUR({col}) HOURS'\n ' - MINUTE({col}) MINUTES'\n ' - SECOND({col}) SECONDS'\n ' - MICROSECOND({col}) MICROSECONDS',\n 'P1W': '{col} - (DAYOFWEEK({col})) DAYS',\n 'P1M': '{col} - (DAY({col})-1) DAYS',\n 'P0.25Y': '{col} - (DAY({col})-1) DAYS'\n ' - (MONTH({col})-1) MONTHS'\n ' + ((QUARTER({col})-1) * 3) MONTHS',\n 'P1Y': '{col} - (DAY({col})-1) DAYS'\n ' - (MONTH({col})-1) MONTHS',\n }\n\n @classmethod\n def epoch_to_dttm(cls):\n return \"(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)\"\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d-%H.%M.%S'))\n\n @staticmethod\n def mutate_label(label):\n \"\"\"\n Db2 for z/OS supports a maximum of 30 byte length object names, which usually\n means 30 characters.\n :param str label: Original label which might include unsupported characters\n :return: String that is supported by the database\n \"\"\"\n if len(label) > 30:\n hashed_label = hashlib.md5(label.encode('utf-8')).hexdigest()\n # truncate the hash to first 30 characters\n return hashed_label[:30]\n return label\n\n\nclass SqliteEngineSpec(BaseEngineSpec):\n engine = 'sqlite'\n\n time_grain_functions = {\n None: '{col}',\n 'PT1H': \"DATETIME(STRFTIME('%Y-%m-%dT%H:00:00', {col}))\",\n 'P1D': 'DATE({col})',\n 'P1W': \"DATE({col}, -strftime('%W', {col}) || ' days')\",\n 'P1M': \"DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')\",\n 'P1Y': \"DATETIME(STRFTIME('%Y-01-01T00:00:00', {col}))\",\n 'P1W/1970-01-03T00:00:00Z': \"DATE({col}, 'weekday 6')\",\n '1969-12-28T00:00:00Z/P1W': \"DATE({col}, 'weekday 0', '-7 days')\",\n 
}\n\n @classmethod\n def epoch_to_dttm(cls):\n return \"datetime({col}, 'unixepoch')\"\n\n @classmethod\n def fetch_result_sets(cls, db, datasource_type):\n schemas = db.all_schema_names(cache=db.schema_cache_enabled,\n cache_timeout=db.schema_cache_timeout,\n force=True)\n all_result_sets = []\n schema = schemas[0]\n if datasource_type == 'table':\n all_datasource_names = db.all_table_names_in_schema(\n schema=schema, force=True,\n cache=db.table_cache_enabled,\n cache_timeout=db.table_cache_timeout)\n elif datasource_type == 'view':\n all_datasource_names = db.all_view_names_in_schema(\n schema=schema, force=True,\n cache=db.table_cache_enabled,\n cache_timeout=db.table_cache_timeout)\n all_result_sets += [\n '{}.{}'.format(schema, t) for t in all_datasource_names]\n return all_result_sets\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n iso = dttm.isoformat().replace('T', ' ')\n if '.' not in iso:\n iso += '.000000'\n return \"'{}'\".format(iso)\n\n @classmethod\n def get_table_names(cls, inspector, schema):\n \"\"\"Need to disregard the schema for Sqlite\"\"\"\n return sorted(inspector.get_table_names())\n\n\nclass MySQLEngineSpec(BaseEngineSpec):\n engine = 'mysql'\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': 'DATE_ADD(DATE({col}), '\n 'INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60'\n ' + SECOND({col})) SECOND)',\n 'PT1M': 'DATE_ADD(DATE({col}), '\n 'INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)',\n 'PT1H': 'DATE_ADD(DATE({col}), '\n 'INTERVAL HOUR({col}) HOUR)',\n 'P1D': 'DATE({col})',\n 'P1W': 'DATE(DATE_SUB({col}, '\n 'INTERVAL DAYOFWEEK({col}) - 1 DAY))',\n 'P1M': 'DATE(DATE_SUB({col}, '\n 'INTERVAL DAYOFMONTH({col}) - 1 DAY))',\n 'P0.25Y': 'MAKEDATE(YEAR({col}), 1) '\n '+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER',\n 'P1Y': 'DATE(DATE_SUB({col}, '\n 'INTERVAL DAYOFYEAR({col}) - 1 DAY))',\n '1969-12-29T00:00:00Z/P1W': 'DATE(DATE_SUB({col}, '\n 'INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))',\n }\n\n type_code_map = {} # loaded from get_datatype only if needed\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n if target_type.upper() in ('DATETIME', 'DATE'):\n return \"STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')\".format(\n dttm.strftime('%Y-%m-%d %H:%M:%S'))\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n @classmethod\n def adjust_database_uri(cls, uri, selected_schema=None):\n if selected_schema:\n uri.database = selected_schema\n return uri\n\n @classmethod\n def get_datatype(cls, type_code):\n if not cls.type_code_map:\n # only import and store if needed at least once\n import MySQLdb\n ft = MySQLdb.constants.FIELD_TYPE\n cls.type_code_map = {\n getattr(ft, k): k\n for k in dir(ft)\n if not k.startswith('_')\n }\n datatype = type_code\n if isinstance(type_code, int):\n datatype = cls.type_code_map.get(type_code)\n if datatype and isinstance(datatype, str) and len(datatype):\n return datatype\n\n @classmethod\n def epoch_to_dttm(cls):\n return 'from_unixtime({col})'\n\n @classmethod\n def extract_error_message(cls, e):\n \"\"\"Extract error message for queries\"\"\"\n message = str(e)\n try:\n if isinstance(e.args, tuple) and len(e.args) > 1:\n message = e.args[1]\n except Exception:\n pass\n return message\n\n\nclass PrestoEngineSpec(BaseEngineSpec):\n engine = 'presto'\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': \"date_trunc('second', CAST({col} AS TIMESTAMP))\",\n 'PT1M': \"date_trunc('minute', CAST({col} AS TIMESTAMP))\",\n 'PT1H': \"date_trunc('hour', CAST({col} AS TIMESTAMP))\",\n 
'P1D': \"date_trunc('day', CAST({col} AS TIMESTAMP))\",\n 'P1W': \"date_trunc('week', CAST({col} AS TIMESTAMP))\",\n 'P1M': \"date_trunc('month', CAST({col} AS TIMESTAMP))\",\n 'P0.25Y': \"date_trunc('quarter', CAST({col} AS TIMESTAMP))\",\n 'P1Y': \"date_trunc('year', CAST({col} AS TIMESTAMP))\",\n 'P1W/1970-01-03T00:00:00Z':\n \"date_add('day', 5, date_trunc('week', date_add('day', 1, \\\n CAST({col} AS TIMESTAMP))))\",\n '1969-12-28T00:00:00Z/P1W':\n \"date_add('day', -1, date_trunc('week', \\\n date_add('day', 1, CAST({col} AS TIMESTAMP))))\",\n }\n\n @classmethod\n def get_view_names(cls, inspector, schema):\n \"\"\"Returns an empty list\n\n get_table_names() function returns all table names and view names,\n and get_view_names() is not implemented in sqlalchemy_presto.py\n https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py\n \"\"\"\n return []\n\n @classmethod\n def adjust_database_uri(cls, uri, selected_schema=None):\n database = uri.database\n if selected_schema and database:\n if '/' in database:\n database = database.split('/')[0] + '/' + selected_schema\n else:\n database += '/' + selected_schema\n uri.database = database\n return uri\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"from_iso8601_date('{}')\".format(dttm.isoformat()[:10])\n if tt == 'TIMESTAMP':\n return \"from_iso8601_timestamp('{}')\".format(dttm.isoformat())\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n @classmethod\n def epoch_to_dttm(cls):\n return 'from_unixtime({col})'\n\n @classmethod\n def fetch_result_sets(cls, db, datasource_type):\n \"\"\"Returns a list of tables [schema1.table1, schema2.table2, ...]\n\n Datasource_type can be 'table' or 'view'.\n Empty schema corresponds to the list of full names of the all\n tables or views: <schema>.<result_set_name>.\n \"\"\"\n result_set_df = db.get_df(\n \"\"\"SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S\n ORDER BY concat(table_schema, '.', table_name)\"\"\".format(\n datasource_type.upper(),\n ),\n None)\n result_sets = []\n for unused, row in result_set_df.iterrows():\n result_sets.append('{}.{}'.format(\n row['table_schema'], row['table_name']))\n return result_sets\n\n @classmethod\n def extra_table_metadata(cls, database, table_name, schema_name):\n indexes = database.get_indexes(table_name, schema_name)\n if not indexes:\n return {}\n cols = indexes[0].get('column_names', [])\n full_table_name = table_name\n if schema_name and '.' 
not in table_name:\n full_table_name = '{}.{}'.format(schema_name, table_name)\n pql = cls._partition_query(full_table_name)\n col_name, latest_part = cls.latest_partition(\n table_name, schema_name, database, show_first=True)\n return {\n 'partitions': {\n 'cols': cols,\n 'latest': {col_name: latest_part},\n 'partitionQuery': pql,\n },\n }\n\n @classmethod\n def handle_cursor(cls, cursor, query, session):\n \"\"\"Updates progress information\"\"\"\n logging.info('Polling the cursor for progress')\n polled = cursor.poll()\n # poll returns dict -- JSON status information or ``None``\n # if the query is done\n # https://github.com/dropbox/PyHive/blob/\n # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178\n while polled:\n # Update the object and wait for the kill signal.\n stats = polled.get('stats', {})\n\n query = session.query(type(query)).filter_by(id=query.id).one()\n if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:\n cursor.cancel()\n break\n\n if stats:\n state = stats.get('state')\n\n # if already finished, then stop polling\n if state == 'FINISHED':\n break\n\n completed_splits = float(stats.get('completedSplits'))\n total_splits = float(stats.get('totalSplits'))\n if total_splits and completed_splits:\n progress = 100 * (completed_splits / total_splits)\n logging.info(\n 'Query progress: {} / {} '\n 'splits'.format(completed_splits, total_splits))\n if progress > query.progress:\n query.progress = progress\n session.commit()\n time.sleep(1)\n logging.info('Polling the cursor for progress')\n polled = cursor.poll()\n\n @classmethod\n def extract_error_message(cls, e):\n if (\n hasattr(e, 'orig') and\n type(e.orig).__name__ == 'DatabaseError' and\n isinstance(e.orig[0], dict)):\n error_dict = e.orig[0]\n return '{} at {}: {}'.format(\n error_dict.get('errorName'),\n error_dict.get('errorLocation'),\n error_dict.get('message'),\n )\n if (\n type(e).__name__ == 'DatabaseError' and\n hasattr(e, 'args') and\n len(e.args) > 0\n ):\n error_dict = e.args[0]\n return error_dict.get('message')\n return utils.error_msg_from_exception(e)\n\n @classmethod\n def _partition_query(\n cls, table_name, limit=0, order_by=None, filters=None):\n \"\"\"Returns a partition query\n\n :param table_name: the name of the table to get partitions from\n :type table_name: str\n :param limit: the number of partitions to be returned\n :type limit: int\n :param order_by: a list of tuples of field name and a boolean\n that determines if that field should be sorted in descending\n order\n :type order_by: list of (str, bool) tuples\n :param filters: a list of filters to apply\n :param filters: dict of field name and filter value combinations\n \"\"\"\n limit_clause = 'LIMIT {}'.format(limit) if limit else ''\n order_by_clause = ''\n if order_by:\n l = [] # noqa: E741\n for field, desc in order_by:\n l.append(field + ' DESC' if desc else '')\n order_by_clause = 'ORDER BY ' + ', '.join(l)\n\n where_clause = ''\n if filters:\n l = [] # noqa: E741\n for field, value in filters.items():\n l.append(f\"{field} = '{value}'\")\n where_clause = 'WHERE ' + ' AND '.join(l)\n\n sql = textwrap.dedent(f\"\"\"\\\n SHOW PARTITIONS FROM {table_name}\n {where_clause}\n {order_by_clause}\n {limit_clause}\n \"\"\")\n return sql\n\n @classmethod\n def where_latest_partition(\n cls, table_name, schema, database, qry, columns=None):\n try:\n col_name, value = cls.latest_partition(\n table_name, schema, database, show_first=True)\n except Exception:\n # table is not partitioned\n return False\n for c in columns:\n 
if c.get('name') == col_name:\n return qry.where(Column(col_name) == value)\n return False\n\n @classmethod\n def _latest_partition_from_df(cls, df):\n recs = df.to_records(index=False)\n if recs:\n return recs[0][0]\n\n @classmethod\n def latest_partition(cls, table_name, schema, database, show_first=False):\n \"\"\"Returns col name and the latest (max) partition value for a table\n\n :param table_name: the name of the table\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n :param show_first: displays the value for the first partitioning key\n if there are many partitioning keys\n :type show_first: bool\n\n >>> latest_partition('foo_table')\n '2018-01-01'\n \"\"\"\n indexes = database.get_indexes(table_name, schema)\n if len(indexes[0]['column_names']) < 1:\n raise SupersetTemplateException(\n 'The table should have one partitioned field')\n elif not show_first and len(indexes[0]['column_names']) > 1:\n raise SupersetTemplateException(\n 'The table should have a single partitioned field '\n 'to use this function. You may want to use '\n '`presto.latest_sub_partition`')\n part_field = indexes[0]['column_names'][0]\n sql = cls._partition_query(table_name, 1, [(part_field, True)])\n df = database.get_df(sql, schema)\n return part_field, cls._latest_partition_from_df(df)\n\n @classmethod\n def latest_sub_partition(cls, table_name, schema, database, **kwargs):\n \"\"\"Returns the latest (max) partition value for a table\n\n A filtering criteria should be passed for all fields that are\n partitioned except for the field to be returned. For example,\n if a table is partitioned by (``ds``, ``event_type`` and\n ``event_category``) and you want the latest ``ds``, you'll want\n to provide a filter as keyword arguments for both\n ``event_type`` and ``event_category`` as in\n ``latest_sub_partition('my_table',\n event_category='page', event_type='click')``\n\n :param table_name: the name of the table, can be just the table\n name or a fully qualified table name as ``schema_name.table_name``\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n\n :param kwargs: keyword arguments define the filtering criteria\n on the partition list. 
There can be many of these.\n :type kwargs: str\n >>> latest_sub_partition('sub_partition_table', event_type='click')\n '2018-01-01'\n \"\"\"\n indexes = database.get_indexes(table_name, schema)\n part_fields = indexes[0]['column_names']\n for k in kwargs.keys():\n if k not in k in part_fields:\n msg = 'Field [{k}] is not part of the portioning key'\n raise SupersetTemplateException(msg)\n if len(kwargs.keys()) != len(part_fields) - 1:\n msg = (\n 'A filter needs to be specified for {} out of the '\n '{} fields.'\n ).format(len(part_fields) - 1, len(part_fields))\n raise SupersetTemplateException(msg)\n\n for field in part_fields:\n if field not in kwargs.keys():\n field_to_return = field\n\n sql = cls._partition_query(\n table_name, 1, [(field_to_return, True)], kwargs)\n df = database.get_df(sql, schema)\n if df.empty:\n return ''\n return df.to_dict()[field_to_return][0]\n\n\nclass HiveEngineSpec(PrestoEngineSpec):\n\n \"\"\"Reuses PrestoEngineSpec functionality.\"\"\"\n\n engine = 'hive'\n\n # Scoping regex at class level to avoid recompiling\n # 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5\n jobs_stats_r = re.compile(\n r'.*INFO.*Total jobs = (?P<max_jobs>[0-9]+)')\n # 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5\n launching_job_r = re.compile(\n '.*INFO.*Launching Job (?P<job_number>[0-9]+) out of '\n '(?P<max_jobs>[0-9]+)')\n # 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18\n # map = 0%, reduce = 0%\n stage_progress_r = re.compile(\n r'.*INFO.*Stage-(?P<stage_number>[0-9]+).*'\n r'map = (?P<map_progress>[0-9]+)%.*'\n r'reduce = (?P<reduce_progress>[0-9]+)%.*')\n\n @classmethod\n def patch(cls):\n from pyhive import hive # pylint: disable=no-name-in-module\n from superset.db_engines import hive as patched_hive\n from TCLIService import (\n constants as patched_constants,\n ttypes as patched_ttypes,\n TCLIService as patched_TCLIService)\n\n hive.TCLIService = patched_TCLIService\n hive.constants = patched_constants\n hive.ttypes = patched_ttypes\n hive.Cursor.fetch_logs = patched_hive.fetch_logs\n\n @classmethod\n def fetch_result_sets(cls, db, datasource_type):\n return BaseEngineSpec.fetch_result_sets(\n db, datasource_type)\n\n @classmethod\n def fetch_data(cls, cursor, limit):\n import pyhive\n from TCLIService import ttypes\n state = cursor.poll()\n if state.operationState == ttypes.TOperationState.ERROR_STATE:\n raise Exception('Query error', state.errorMessage)\n try:\n return super(HiveEngineSpec, cls).fetch_data(cursor, limit)\n except pyhive.exc.ProgrammingError:\n return []\n\n @staticmethod\n def create_table_from_csv(form, table):\n \"\"\"Uploads a csv file and creates a superset datasource in Hive.\"\"\"\n def convert_to_hive_type(col_type):\n \"\"\"maps tableschema's types to hive types\"\"\"\n tableschema_to_hive_types = {\n 'boolean': 'BOOLEAN',\n 'integer': 'INT',\n 'number': 'DOUBLE',\n 'string': 'STRING',\n }\n return tableschema_to_hive_types.get(col_type, 'STRING')\n\n bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']\n\n if not bucket_path:\n logging.info('No upload bucket specified')\n raise Exception(\n 'No upload bucket specified. You can specify one in the config file.')\n\n table_name = form.name.data\n schema_name = form.schema.data\n\n if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):\n if '.' in table_name or schema_name:\n raise Exception(\n \"You can't specify a namespace. 
\"\n 'All tables will be uploaded to the `{}` namespace'.format(\n config.get('HIVE_NAMESPACE')))\n full_table_name = '{}.{}'.format(\n config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)\n else:\n if '.' in table_name and schema_name:\n raise Exception(\n \"You can't specify a namespace both in the name of the table \"\n 'and in the schema field. Please remove one')\n\n full_table_name = '{}.{}'.format(\n schema_name, table_name) if schema_name else table_name\n\n filename = form.csv_file.data.filename\n\n upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']\n upload_path = config['UPLOAD_FOLDER'] + \\\n secure_filename(filename)\n\n # Optional dependency\n from tableschema import Table # pylint: disable=import-error\n hive_table_schema = Table(upload_path).infer()\n column_name_and_type = []\n for column_info in hive_table_schema['fields']:\n column_name_and_type.append(\n '`{}` {}'.format(\n column_info['name'],\n convert_to_hive_type(column_info['type'])))\n schema_definition = ', '.join(column_name_and_type)\n\n # Optional dependency\n import boto3 # pylint: disable=import-error\n\n s3 = boto3.client('s3')\n location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)\n s3.upload_file(\n upload_path, bucket_path,\n os.path.join(upload_prefix, table_name, filename))\n sql = f\"\"\"CREATE TABLE {full_table_name} ( {schema_definition} )\n ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS\n TEXTFILE LOCATION '{location}'\n tblproperties ('skip.header.line.count'='1')\"\"\"\n logging.info(form.con.data)\n engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)\n engine.execute(sql)\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"CAST('{}' AS DATE)\".format(dttm.isoformat()[:10])\n elif tt == 'TIMESTAMP':\n return \"CAST('{}' AS TIMESTAMP)\".format(\n dttm.strftime('%Y-%m-%d %H:%M:%S'))\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n @classmethod\n def adjust_database_uri(cls, uri, selected_schema=None):\n if selected_schema:\n uri.database = selected_schema\n return uri\n\n @classmethod\n def extract_error_message(cls, e):\n msg = str(e)\n match = re.search(r'errorMessage=\"(.*?)(?<!\\\\)\"', msg)\n if match:\n msg = match.group(1)\n return msg\n\n @classmethod\n def progress(cls, log_lines):\n total_jobs = 1 # assuming there's at least 1 job\n current_job = 1\n stages = {}\n for line in log_lines:\n match = cls.jobs_stats_r.match(line)\n if match:\n total_jobs = int(match.groupdict()['max_jobs']) or 1\n match = cls.launching_job_r.match(line)\n if match:\n current_job = int(match.groupdict()['job_number'])\n total_jobs = int(match.groupdict()['max_jobs']) or 1\n stages = {}\n match = cls.stage_progress_r.match(line)\n if match:\n stage_number = int(match.groupdict()['stage_number'])\n map_progress = int(match.groupdict()['map_progress'])\n reduce_progress = int(match.groupdict()['reduce_progress'])\n stages[stage_number] = (map_progress + reduce_progress) / 2\n logging.info(\n 'Progress detail: {}, '\n 'current job {}, '\n 'total jobs: {}'.format(stages, current_job, total_jobs))\n\n stage_progress = sum(\n stages.values()) / len(stages.values()) if stages else 0\n\n progress = (\n 100 * (current_job - 1) / total_jobs + stage_progress / total_jobs\n )\n return int(progress)\n\n @classmethod\n def get_tracking_url(cls, log_lines):\n lkp = 'Tracking URL = '\n for line in log_lines:\n if lkp in line:\n return line.split(lkp)[1]\n\n @classmethod\n def handle_cursor(cls, 
cursor, query, session):\n \"\"\"Updates progress information\"\"\"\n from pyhive import hive # pylint: disable=no-name-in-module\n unfinished_states = (\n hive.ttypes.TOperationState.INITIALIZED_STATE,\n hive.ttypes.TOperationState.RUNNING_STATE,\n )\n polled = cursor.poll()\n last_log_line = 0\n tracking_url = None\n job_id = None\n while polled.operationState in unfinished_states:\n query = session.query(type(query)).filter_by(id=query.id).one()\n if query.status == QueryStatus.STOPPED:\n cursor.cancel()\n break\n\n log = cursor.fetch_logs() or ''\n if log:\n log_lines = log.splitlines()\n progress = cls.progress(log_lines)\n logging.info('Progress total: {}'.format(progress))\n needs_commit = False\n if progress > query.progress:\n query.progress = progress\n needs_commit = True\n if not tracking_url:\n tracking_url = cls.get_tracking_url(log_lines)\n if tracking_url:\n job_id = tracking_url.split('/')[-2]\n logging.info(\n 'Found the tracking url: {}'.format(tracking_url))\n tracking_url = tracking_url_trans(tracking_url)\n logging.info(\n 'Transformation applied: {}'.format(tracking_url))\n query.tracking_url = tracking_url\n logging.info('Job id: {}'.format(job_id))\n needs_commit = True\n if job_id and len(log_lines) > last_log_line:\n # Wait for job id before logging things out\n # this allows for prefixing all log lines and becoming\n # searchable in something like Kibana\n for l in log_lines[last_log_line:]:\n logging.info('[{}] {}'.format(job_id, l))\n last_log_line = len(log_lines)\n if needs_commit:\n session.commit()\n time.sleep(hive_poll_interval)\n polled = cursor.poll()\n\n @classmethod\n def where_latest_partition(\n cls, table_name, schema, database, qry, columns=None):\n try:\n col_name, value = cls.latest_partition(\n table_name, schema, database, show_first=True)\n except Exception:\n # table is not partitioned\n return False\n for c in columns:\n if c.get('name') == col_name:\n return qry.where(Column(col_name) == value)\n return False\n\n @classmethod\n def latest_sub_partition(cls, table_name, schema, database, **kwargs):\n # TODO(bogdan): implement`\n pass\n\n @classmethod\n def _latest_partition_from_df(cls, df):\n \"\"\"Hive partitions look like ds={partition name}\"\"\"\n return df.ix[:, 0].max().split('=')[1]\n\n @classmethod\n def _partition_query(\n cls, table_name, limit=0, order_by=None, filters=None):\n return f'SHOW PARTITIONS {table_name}'\n\n @classmethod\n def modify_url_for_impersonation(cls, url, impersonate_user, username):\n \"\"\"\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n \"\"\"\n # Do nothing in the URL object since instead this should modify\n # the configuraiton dictionary. 
See get_configuration_for_impersonation\n pass\n\n @classmethod\n def get_configuration_for_impersonation(cls, uri, impersonate_user, username):\n \"\"\"\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n \"\"\"\n configuration = {}\n url = make_url(uri)\n backend_name = url.get_backend_name()\n\n # Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS\n if (backend_name == 'hive' and 'auth' in url.query.keys() and\n impersonate_user is True and username is not None):\n configuration['hive.server2.proxy.user'] = username\n return configuration\n\n @staticmethod\n def execute(cursor, query, async_=False):\n kwargs = {'async': async_}\n cursor.execute(query, **kwargs)\n\n\nclass MssqlEngineSpec(BaseEngineSpec):\n engine = 'mssql'\n epoch_to_dttm = \"dateadd(S, {col}, '1970-01-01')\"\n limit_method = LimitMethod.WRAP_SQL\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': \"DATEADD(second, DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')\",\n 'PT1M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}), 0)',\n 'PT5M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 5 * 5, 0)',\n 'PT10M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 10 * 10, 0)',\n 'PT15M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 15 * 15, 0)',\n 'PT0.5H': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 30 * 30, 0)',\n 'PT1H': 'DATEADD(hour, DATEDIFF(hour, 0, {col}), 0)',\n 'P1D': 'DATEADD(day, DATEDIFF(day, 0, {col}), 0)',\n 'P1W': 'DATEADD(week, DATEDIFF(week, 0, {col}), 0)',\n 'P1M': 'DATEADD(month, DATEDIFF(month, 0, {col}), 0)',\n 'P0.25Y': 'DATEADD(quarter, DATEDIFF(quarter, 0, {col}), 0)',\n 'P1Y': 'DATEADD(year, DATEDIFF(year, 0, {col}), 0)',\n }\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n return \"CONVERT(DATETIME, '{}', 126)\".format(dttm.isoformat())\n\n @classmethod\n def fetch_data(cls, cursor, limit):\n data = super(MssqlEngineSpec, cls).fetch_data(cursor, limit)\n if len(data) != 0 and type(data[0]).__name__ == 'Row':\n data = [[elem for elem in r] for r in data]\n return data\n\n\nclass AthenaEngineSpec(BaseEngineSpec):\n engine = 'awsathena'\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': \"date_trunc('second', CAST({col} AS TIMESTAMP))\",\n 'PT1M': \"date_trunc('minute', CAST({col} AS TIMESTAMP))\",\n 'PT1H': \"date_trunc('hour', CAST({col} AS TIMESTAMP))\",\n 'P1D': \"date_trunc('day', CAST({col} AS TIMESTAMP))\",\n 'P1W': \"date_trunc('week', CAST({col} AS TIMESTAMP))\",\n 'P1M': \"date_trunc('month', CAST({col} AS TIMESTAMP))\",\n 'P0.25Y': \"date_trunc('quarter', CAST({col} AS TIMESTAMP))\",\n 'P1Y': \"date_trunc('year', CAST({col} AS TIMESTAMP))\",\n 'P1W/1970-01-03T00:00:00Z': \"date_add('day', 5, date_trunc('week', \\\n date_add('day', 1, CAST({col} AS TIMESTAMP))))\",\n '1969-12-28T00:00:00Z/P1W': \"date_add('day', -1, date_trunc('week', \\\n date_add('day', 1, CAST({col} AS TIMESTAMP))))\",\n }\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"from_iso8601_date('{}')\".format(dttm.isoformat()[:10])\n if tt == 'TIMESTAMP':\n return \"from_iso8601_timestamp('{}')\".format(dttm.isoformat())\n return (\"CAST ('{}' AS TIMESTAMP)\"\n .format(dttm.strftime('%Y-%m-%d %H:%M:%S')))\n\n @classmethod\n def 
epoch_to_dttm(cls):\n return 'from_unixtime({col})'\n\n\nclass ClickHouseEngineSpec(BaseEngineSpec):\n \"\"\"Dialect for ClickHouse analytical DB.\"\"\"\n\n engine = 'clickhouse'\n\n time_secondary_columns = True\n time_groupby_inline = True\n\n time_grain_functions = {\n None: '{col}',\n 'PT1M': 'toStartOfMinute(toDateTime({col}))',\n 'PT5M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)',\n 'PT10M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)',\n 'PT15M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 900)*900)',\n 'PT0.5H': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 1800)*1800)',\n 'PT1H': 'toStartOfHour(toDateTime({col}))',\n 'P1D': 'toStartOfDay(toDateTime({col}))',\n 'P1W': 'toMonday(toDateTime({col}))',\n 'P1M': 'toStartOfMonth(toDateTime({col}))',\n 'P0.25Y': 'toStartOfQuarter(toDateTime({col}))',\n 'P1Y': 'toStartOfYear(toDateTime({col}))',\n }\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"toDate('{}')\".format(dttm.strftime('%Y-%m-%d'))\n if tt == 'DATETIME':\n return \"toDateTime('{}')\".format(\n dttm.strftime('%Y-%m-%d %H:%M:%S'))\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n\nclass BQEngineSpec(BaseEngineSpec):\n \"\"\"Engine spec for Google's BigQuery\n\n As contributed by @mxmzdlv on issue #945\"\"\"\n engine = 'bigquery'\n\n \"\"\"\n https://www.python.org/dev/peps/pep-0249/#arraysize\n raw_connections bypass the pybigquery query execution context and deal with\n raw dbapi connection directly.\n If this value is not set, the default value is set to 1, as described here,\n https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/dbapi/cursor.html#Cursor\n\n The default value of 5000 is derived from the pybigquery.\n https://github.com/mxmzdlv/pybigquery/blob/d214bb089ca0807ca9aaa6ce4d5a01172d40264e/pybigquery/sqlalchemy_bigquery.py#L102\n \"\"\"\n arraysize = 5000\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': 'TIMESTAMP_TRUNC({col}, SECOND)',\n 'PT1M': 'TIMESTAMP_TRUNC({col}, MINUTE)',\n 'PT1H': 'TIMESTAMP_TRUNC({col}, HOUR)',\n 'P1D': 'TIMESTAMP_TRUNC({col}, DAY)',\n 'P1W': 'TIMESTAMP_TRUNC({col}, WEEK)',\n 'P1M': 'TIMESTAMP_TRUNC({col}, MONTH)',\n 'P0.25Y': 'TIMESTAMP_TRUNC({col}, QUARTER)',\n 'P1Y': 'TIMESTAMP_TRUNC({col}, YEAR)',\n }\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d'))\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n @classmethod\n def fetch_data(cls, cursor, limit):\n data = super(BQEngineSpec, cls).fetch_data(cursor, limit)\n if len(data) != 0 and type(data[0]).__name__ == 'Row':\n data = [r.values() for r in data]\n return data\n\n @staticmethod\n def mutate_label(label):\n \"\"\"\n BigQuery field_name should start with a letter or underscore, contain only\n alphanumeric characters and be at most 128 characters long. Labels that start\n with a number are prefixed with an underscore. Any unsupported characters are\n replaced with underscores and an md5 hash is added to the end of the label to\n avoid possible collisions. 
If the resulting label exceeds 128 characters, only\n the md5 sum is returned.\n :param str label: the original label which might include unsupported characters\n :return: String that is supported by the database\n \"\"\"\n hashed_label = '_' + hashlib.md5(label.encode('utf-8')).hexdigest()\n\n # if label starts with number, add underscore as first character\n mutated_label = '_' + label if re.match(r'^\\d', label) else label\n\n # replace non-alphanumeric characters with underscores\n mutated_label = re.sub(r'[^\\w]+', '_', mutated_label)\n if mutated_label != label:\n # add md5 hash to label to avoid possible collisions\n mutated_label += hashed_label\n\n # return only hash if length of final label exceeds 128 chars\n return mutated_label if len(mutated_label) <= 128 else hashed_label\n\n @classmethod\n def extra_table_metadata(cls, database, table_name, schema_name):\n indexes = database.get_indexes(table_name, schema_name)\n if not indexes:\n return {}\n partitions_columns = [\n index.get('column_names', []) for index in indexes\n if index.get('name') == 'partition'\n ]\n cluster_columns = [\n index.get('column_names', []) for index in indexes\n if index.get('name') == 'clustering'\n ]\n return {\n 'partitions': {\n 'cols': partitions_columns,\n },\n 'clustering': {\n 'cols': cluster_columns,\n },\n }\n\n @classmethod\n def _get_fields(cls, cols):\n \"\"\"\n BigQuery dialect requires us to not use backtick in the fieldname which are\n nested.\n Using literal_column handles that issue.\n http://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column\n Also explicility specifying column names so we don't encounter duplicate\n column names in the result.\n \"\"\"\n return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__'))\n for c in cols]\n\n\nclass ImpalaEngineSpec(BaseEngineSpec):\n \"\"\"Engine spec for Cloudera's Impala\"\"\"\n\n engine = 'impala'\n\n time_grain_functions = {\n None: '{col}',\n 'PT1M': \"TRUNC({col}, 'MI')\",\n 'PT1H': \"TRUNC({col}, 'HH')\",\n 'P1D': \"TRUNC({col}, 'DD')\",\n 'P1W': \"TRUNC({col}, 'WW')\",\n 'P1M': \"TRUNC({col}, 'MONTH')\",\n 'P0.25Y': \"TRUNC({col}, 'Q')\",\n 'P1Y': \"TRUNC({col}, 'YYYY')\",\n }\n\n @classmethod\n def epoch_to_dttm(cls):\n return 'from_unixtime({col})'\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d'))\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n @classmethod\n def get_schema_names(cls, inspector):\n schemas = [row[0] for row in inspector.engine.execute('SHOW SCHEMAS')\n if not row[0].startswith('_')]\n return schemas\n\n\nclass DruidEngineSpec(BaseEngineSpec):\n \"\"\"Engine spec for Druid.io\"\"\"\n engine = 'druid'\n inner_joins = False\n allows_subquery = False\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': 'FLOOR({col} TO SECOND)',\n 'PT1M': 'FLOOR({col} TO MINUTE)',\n 'PT1H': 'FLOOR({col} TO HOUR)',\n 'P1D': 'FLOOR({col} TO DAY)',\n 'P1W': 'FLOOR({col} TO WEEK)',\n 'P1M': 'FLOOR({col} TO MONTH)',\n 'P0.25Y': 'FLOOR({col} TO QUARTER)',\n 'P1Y': 'FLOOR({col} TO YEAR)',\n }\n\n\nclass GSheetsEngineSpec(SqliteEngineSpec):\n \"\"\"Engine for Google spreadsheets\"\"\"\n engine = 'gsheets'\n inner_joins = False\n allows_subquery = False\n\n\nclass KylinEngineSpec(BaseEngineSpec):\n \"\"\"Dialect for Apache Kylin\"\"\"\n\n engine = 'kylin'\n\n time_grain_functions = {\n None: '{col}',\n 'PT1S': 'CAST(FLOOR(CAST({col} AS 
TIMESTAMP) TO SECOND) AS TIMESTAMP)',\n 'PT1M': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)',\n 'PT1H': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)',\n 'P1D': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)',\n 'P1W': 'CAST(TIMESTAMPADD(WEEK, WEEK(CAST({col} AS DATE)) - 1, \\\n FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)',\n 'P1M': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MONTH) AS DATE)',\n 'P0.25Y': 'CAST(TIMESTAMPADD(QUARTER, QUARTER(CAST({col} AS DATE)) - 1, \\\n FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)',\n 'P1Y': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO YEAR) AS DATE)',\n }\n\n @classmethod\n def convert_dttm(cls, target_type, dttm):\n tt = target_type.upper()\n if tt == 'DATE':\n return \"CAST('{}' AS DATE)\".format(dttm.isoformat()[:10])\n if tt == 'TIMESTAMP':\n return \"CAST('{}' AS TIMESTAMP)\".format(\n dttm.strftime('%Y-%m-%d %H:%M:%S'))\n return \"'{}'\".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n\n\nclass TeradataEngineSpec(BaseEngineSpec):\n \"\"\"Dialect for Teradata DB.\"\"\"\n engine = 'teradata'\n limit_method = LimitMethod.WRAP_SQL\n\n time_grain_functions = {\n None: '{col}',\n 'PT1M': \"TRUNC(CAST({col} as DATE), 'MI')\",\n 'PT1H': \"TRUNC(CAST({col} as DATE), 'HH')\",\n 'P1D': \"TRUNC(CAST({col} as DATE), 'DDD')\",\n 'P1W': \"TRUNC(CAST({col} as DATE), 'WW')\",\n 'P1M': \"TRUNC(CAST({col} as DATE), 'MONTH')\",\n 'P0.25Y': \"TRUNC(CAST({col} as DATE), 'Q')\",\n 'P1Y': \"TRUNC(CAST({col} as DATE), 'YEAR')\",\n }\n\n\nengines = {\n o.engine: o for o in globals().values()\n if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.concat"
]
] |
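The Superset engine-spec file embedded in the row above drives time-grain SQL generation through `time_grain_functions`: a dict mapping ISO-8601 duration keys to SQL templates containing a `{col}` placeholder, merged at runtime with `TIME_GRAIN_ADDON_FUNCTIONS` from the config by `get_time_grains`. The following is a minimal standalone sketch, not Superset code itself; the helper name `apply_time_grain` is invented for illustration, while the two template strings are copied from the `PostgresBaseEngineSpec` values in the record.

    # Minimal sketch: apply a time-grain template to a column expression.
    # Templates are strings with a single {col} placeholder, as in the
    # time_grain_functions dicts of the record above.
    TIME_GRAIN_FUNCTIONS = {
        None: '{col}',
        'P1D': "DATE_TRUNC('day', {col}) AT TIME ZONE 'UTC'",    # from PostgresBaseEngineSpec
        'P1M': "DATE_TRUNC('month', {col}) AT TIME ZONE 'UTC'",
    }

    def apply_time_grain(column_expression, grain=None):
        """Substitute the column expression into the grain's SQL template."""
        template = TIME_GRAIN_FUNCTIONS.get(grain, '{col}')
        return template.replace('{col}', column_expression)

    print(apply_time_grain('order_ts', 'P1D'))
    # DATE_TRUNC('day', order_ts) AT TIME ZONE 'UTC'

Each dialect class in the record (Snowflake, Oracle, Hive, BigQuery, ...) only overrides this dict plus a handful of hooks such as `convert_dttm` and `mutate_label`, which is why the base class can stay engine-agnostic.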
ppmdatix/rtdl | [
"a01ecd9ae6b673f4e82e51f804ffd7031c7350a0"
] | [
"lib/metrics.py"
] | [
"import typing as ty\n\nimport numpy as np\nimport scipy.special\nimport sklearn.metrics as skm\n\nfrom . import util\n\n\ndef calculate_metrics(\n task_type: str,\n y: np.ndarray,\n prediction: np.ndarray,\n classification_mode: str,\n y_info: ty.Optional[ty.Dict[str, ty.Any]],\n) -> ty.Dict[str, float]:\n if task_type == util.REGRESSION:\n del classification_mode\n rmse = skm.mean_squared_error(y, prediction) ** 0.5 # type: ignore[code]\n if y_info:\n if y_info['policy'] == 'mean_std':\n rmse *= y_info['std']\n else:\n assert False\n return {'rmse': rmse, 'score': -rmse}\n else:\n assert task_type in (util.BINCLASS, util.MULTICLASS)\n labels = None\n if classification_mode == 'probs':\n probs = prediction\n elif classification_mode == 'logits':\n probs = (\n scipy.special.expit(prediction)\n if task_type == util.BINCLASS\n else scipy.special.softmax(prediction, axis=1)\n )\n else:\n assert classification_mode == 'labels'\n probs = None\n labels = prediction\n if labels is None:\n labels = (\n np.round(probs).astype('int64')\n if task_type == util.BINCLASS\n else probs.argmax(axis=1) # type: ignore[code]\n )\n\n result = skm.classification_report(y, labels, output_dict=True) # type: ignore[code]\n if task_type == util.BINCLASS:\n result['roc_auc'] = skm.roc_auc_score(y, probs) # type: ignore[code]\n result['score'] = result['accuracy'] # type: ignore[code]\n return result # type: ignore[code]\n\n\ndef make_summary(metrics: ty.Dict[str, ty.Any]) -> str:\n precision = 3\n summary = {}\n for k, v in metrics.items():\n if k.isdigit():\n continue\n k = {\n 'score': 'SCORE',\n 'accuracy': 'acc',\n 'roc_auc': 'roc_auc',\n 'macro avg': 'm',\n 'weighted avg': 'w',\n }.get(k, k)\n if isinstance(v, float):\n v = round(v, precision)\n summary[k] = v\n else:\n v = {\n {'precision': 'p', 'recall': 'r', 'f1-score': 'f1', 'support': 's'}.get(\n x, x\n ): round(v[x], precision)\n for x in v\n }\n for item in v.items():\n summary[k + item[0]] = item[1]\n\n s = [f'score = {summary.pop(\"SCORE\"):.3f}']\n for k, v in summary.items():\n if k not in ['mp', 'mr', 'wp', 'wr']: # just to save screen space\n s.append(f'{k} = {v}')\n return ' | '.join(s)\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.round",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.classification_report"
]
] |
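In the `calculate_metrics` function of the `lib/metrics.py` record above, the regression RMSE is computed on standardized targets and then rescaled by `y_info['std']` under the `mean_std` policy. The standalone check below uses made-up numbers (none of it is part of the repo) to verify the identity that motivates this step: RMSE is scale-equivariant, so multiplying the standardized-space RMSE by the training std recovers the RMSE in original units.

    # Standalone check of the 'mean_std' rescaling used in calculate_metrics above.
    import numpy as np
    import sklearn.metrics as skm

    y_raw = np.array([10.0, 12.0, 9.0, 14.0])      # targets in original units (made up)
    pred_raw = np.array([11.0, 11.5, 8.0, 13.0])   # predictions in original units (made up)

    mean, std = y_raw.mean(), y_raw.std()
    y_std = (y_raw - mean) / std                   # standardized targets
    pred_std = (pred_raw - mean) / std             # predictions in the same standardized space

    rmse_std = skm.mean_squared_error(y_std, pred_std) ** 0.5
    rmse_raw = skm.mean_squared_error(y_raw, pred_raw) ** 0.5
    assert np.isclose(rmse_std * std, rmse_raw)    # rescaling recovers original-unit RMSE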
gonsp/LotterySampling | [
"92ff14f602c05d747708b522cf05b9f9066c43e0"
] | [
"test/streams.py"
] | [
"import itertools\nimport math\nimport numpy as np\nfrom abc import abstractmethod\nfrom io import TextIOWrapper\nfrom sorted_list import SortedList\n\n\nclass Stream():\n\n def __init__(self, length, save=True):\n self.length = length\n self.N = 0\n self.n = 0\n self.save = save\n self.elements = SortedList()\n\n\n def __iter__(self):\n return self\n\n\n def __next__(self):\n self.N += 1\n if self.N > self.length:\n raise StopIteration\n element = self.next_element()\n if self.save: # To speed-up tests in which it is not necessary to check accuracy\n self.elements.process_element(element)\n self.n = self.elements.size()\n return element\n\n\n @abstractmethod\n def next_element(self):\n pass\n\n\n def top_k_query(self, k):\n return [(str(id), count/self.N) for id, count in itertools.islice(iter(self.elements), k)]\n\n\n def frequent_query(self, freq):\n return [(str(id), count/self.N) for id, count in itertools.takewhile(lambda element: element[1] >= math.ceil(freq * self.N), iter(self.elements))]\n\n\ndef chunk_stream(stream, chunk_size):\n it = iter(stream)\n while True:\n chunk = list(itertools.islice(it, chunk_size))\n if len(chunk) > 0:\n yield chunk\n else:\n return None\n\n\nclass MultiZipf(Stream):\n\n def __init__(self, length, alpha=1.5, segments=2, offset=10000, seed=None, save=True):\n super().__init__(length, save)\n self.alpha = alpha\n self.segments = segments\n self.offset = offset\n np.random.seed(seed)\n\n\n def next_element(self):\n element = np.random.zipf(self.alpha)\n element += self.offset * (self.N // (self.length / self.segments))\n return int(element)\n\n\nclass Zipf(MultiZipf):\n\n def __init__(self, length, alpha=1.5, seed=None, save=True):\n super().__init__(length, alpha=alpha, segments=1, seed=seed, save=save)\n\n\nclass Uniform(Stream):\n\n def __init__(self, length, n_max, seed=None, save=True):\n super().__init__(length, save)\n self.n_max = n_max\n np.random.seed(seed)\n\n\n def next_element(self):\n return np.random.randint(0, self.n_max)\n\n\nclass Unequal(Stream):\n\n def __init__(self, length, alpha, beta, seed=None, save=True):\n super().__init__(length, save)\n data = np.zeros(length, dtype=int)\n for i in range(alpha):\n for j in range(beta):\n data[i*beta + j] = i\n for i in range(alpha * beta, length):\n data[i] = i - alpha * (beta - 1)\n np.random.seed(seed)\n self.data = iter(np.random.permutation(data))\n\n\n def next_element(self):\n return next(self.data)\n\n\nclass File(Stream):\n\n def __init__(self, file_path, length=math.inf, shuffle=False, repetitions=1, seed=None, save=True):\n if shuffle or repetitions > 1:\n self.data = []\n with open(file_path, 'r') as file:\n for line in file:\n element = line[:-1]\n self.data.append(element)\n self.data *= repetitions\n length = min(len(self.data), length)\n if shuffle:\n np.random.seed(seed)\n self.data = np.random.permutation(self.data)\n else:\n with open(file_path, 'r') as file:\n length = min(sum(1 for _ in file), length)\n self.data = open(file_path, 'r')\n super().__init__(length, save)\n\n\n def next_element(self):\n if isinstance(self.data, TextIOWrapper):\n element = self.data.readline()[:-1]\n if element == '':\n raise StopIteration\n return element\n else:\n if self.N == len(self.data):\n raise StopIteration\n return self.data[self.N]\n\n\nclass ZipfNoiseZipf(Stream):\n\n def __init__(self, length, alpha=1.5, noise=0.3, offset=10000, seed=None, save=True):\n super().__init__(length, save)\n self.alpha = alpha\n self.noise = noise\n self.offset = offset\n np.random.seed(seed)\n\n\n def 
next_element(self):\n if self.N < self.length * (1 - self.noise) // 2:\n return int(np.random.zipf(self.alpha))\n elif self.N < self.length - self.length * (1 - self.noise) // 2:\n return self.N\n else:\n return int(np.random.zipf(self.alpha) + self.offset)\n\n\n\nclass ESA(Stream):\n\n def __init__(self, length, seed=None, save=True):\n super().__init__(length, save)\n np.random.seed(seed)\n\n\n def next_element(self):\n if self.N < self.length // 2:\n return self.N // 2\n else:\n return self.length"
] | [
[
"numpy.zeros",
"numpy.random.permutation",
"numpy.random.seed",
"numpy.random.zipf",
"numpy.random.randint"
]
] |
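The stream classes in the `test/streams.py` record above keep exact per-element counts (via the repo's `SortedList`) so that `top_k_query` and `frequent_query` return ground-truth frequencies for accuracy testing. As a rough stand-in that does not use the repo's `SortedList` (it substitutes `collections.Counter`, so this only approximates the repo's API, not its implementation), an exact top-k baseline over a Zipf stream looks like this:

    # Hedged sketch: exact top-k frequencies over a Zipf stream,
    # mirroring what Stream.top_k_query computes in the record above.
    import numpy as np
    from collections import Counter

    np.random.seed(0)
    length, alpha, k = 100_000, 1.5, 5

    counts = Counter(int(np.random.zipf(alpha)) for _ in range(length))
    top_k = [(str(element), count / length) for element, count in counts.most_common(k)]
    print(top_k)   # [('1', ...), ('2', ...), ...] -- element 1 dominates for alpha = 1.5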
CosmosHua/Deformable-ConvNets | [
"6aeda878a95bcb55eadffbe125804e730574de8d"
] | [
"fpn/operator_py/fpn_roi_pooling.py"
] | [
"# --------------------------------------------------------\n# Deformable Convolutional Networks\n# Copyright (c) 2017 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Modified by Haozhi Qi, Yuwen Xiong\n# --------------------------------------------------------\n\nimport mxnet as mx\nimport numpy as np\nfrom mxnet.contrib import autograd\nimport gc\n\n\nclass FPNROIPoolingOperator(mx.operator.CustomOp):\n def __init__(self, feat_strides, pooled_height, pooled_width, output_dim, with_deformable):\n self.pooled_height = pooled_height\n self.pooled_width = pooled_width\n self.feat_strides = feat_strides\n self.with_deformable = with_deformable\n self.output_dim = output_dim\n self.in_grad_hist_list = []\n self.num_strides = len(self.feat_strides)\n self.roi_pool = [None for _ in range(self.num_strides)]\n self.feat_idx = [None for _ in range(self.num_strides)]\n\n def forward(self, is_train, req, in_data, out_data, aux):\n rois = in_data[-1].asnumpy()\n w = rois[:, 3] - rois[:, 1] + 1\n h = rois[:, 4] - rois[:, 2] + 1\n feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, len(self.feat_strides) - 1)\n pyramid_idx = []\n\n rois_p = [None for _ in range(self.num_strides)]\n for i in range(self.num_strides):\n self.feat_idx[i] = np.where(feat_id == i)[0]\n if len(self.feat_idx[i]) == 0:\n # padding dummy roi\n rois_p[i] = np.zeros((1, 5))\n pyramid_idx.append(-1)\n else:\n rois_p[i] = rois[self.feat_idx[i]]\n pyramid_idx.append(self.feat_idx[i])\n rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:]\n\n if is_train:\n for i in range(self.num_strides):\n self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))\n\n if self.with_deformable:\n for i in range(self.num_strides, self.num_strides * 3):\n self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))\n autograd.mark_variables([in_data[i] for i in range(self.num_strides * 3)], self.in_grad_hist_list)\n\n with autograd.train_section():\n for i in range(self.num_strides):\n roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7,\n sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i])\n roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])\n roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))\n self.roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,\n group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,\n output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)\n else:\n autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list)\n with autograd.train_section():\n for i in range(self.num_strides):\n self.roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i])\n roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)\n else:\n # during testing, there is no need to record variable, thus saving memory\n roi_pool = [None for _ in range(self.num_strides)]\n if self.with_deformable:\n for i in range(self.num_strides):\n roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7,\n sample_per_part=4, 
no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i])\n roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])\n roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))\n roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,\n group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,\n output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)\n else:\n for i in range(self.num_strides):\n roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i])\n\n roi_pool = mx.nd.concatenate(roi_pool, axis=0)\n\n roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx, roi_pool.context))\n self.assign(out_data[0], req[0], roi_pool)\n\n def backward(self, req, out_grad, in_data, out_data, in_grad, aux):\n for i in range(len(in_grad)):\n self.assign(in_grad[i], req[i], 0)\n\n with autograd.train_section():\n for i in range(self.num_strides):\n if len(self.feat_idx[i] > 0):\n autograd.compute_gradient([mx.nd.take(out_grad[0], mx.nd.array(self.feat_idx[i], out_grad[0].context)) * self.roi_pool[i]])\n\n if self.with_deformable:\n for i in range(0, self.num_strides * 3):\n self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])\n else:\n for i in range(0, self.num_strides):\n self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])\n\n gc.collect()\n\n\[email protected]('fpn_roi_pooling')\nclass FPNROIPoolingProp(mx.operator.CustomOpProp):\n def __init__(self, feat_strides='(4,8,16,32)', pooled_height='7', pooled_width='7', with_deformable='False', output_dim='256'):\n super(FPNROIPoolingProp, self).__init__(need_top_grad=True)\n self.pooled_height = int(pooled_height)\n self.pooled_width = int(pooled_width)\n self.feat_strides = np.fromstring(feat_strides[1:-1], dtype=int, sep=',')\n self.with_deformable = with_deformable == 'True'\n self.output_dim = int(output_dim)\n\n self.num_strides = len(self.feat_strides)\n\n def list_arguments(self):\n args_list = []\n for i in range(self.num_strides):\n args_list.append('data_p{}'.format(2 + i))\n if self.with_deformable:\n for i in range(self.num_strides):\n args_list.extend(['offset_weight_p{}'.format(2 + i), 'offset_bias_p{}'.format(2 + i)])\n args_list.append('rois')\n return args_list\n\n def list_outputs(self):\n return ['output']\n\n def infer_shape(self, in_shape):\n output_feat_shape = [in_shape[-1][0], in_shape[0][1], self.pooled_height, self.pooled_width]\n if self.with_deformable:\n offset_dim = self.pooled_height * self.pooled_width * 2\n input_dim = self.pooled_height * self.pooled_width * self.output_dim\n for i in range(self.num_strides):\n in_shape[i * 2 + self.num_strides], in_shape[i * 2 + 1 + self.num_strides] = [offset_dim, input_dim], [offset_dim, ]\n return in_shape, [output_feat_shape]\n\n def create_operator(self, ctx, shapes, dtypes):\n return FPNROIPoolingOperator(self.feat_strides, self.pooled_height, self.pooled_width, self.output_dim, self.with_deformable)\n\n def declare_backward_dependency(self, out_grad, in_data, out_data):\n return [out_grad[0]]\n"
] | [
[
"numpy.zeros",
"numpy.hstack",
"numpy.sqrt",
"numpy.where",
"numpy.fromstring"
]
] |
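A minimal NumPy sketch of the pyramid-level assignment step from FPNROIPoolingOperator.forward above: each ROI is mapped to level floor(2 + log2(sqrt(w*h)/224)), clipped to the range of available strides, so larger boxes are pooled from coarser feature maps. The ROI boxes below are invented for illustration; the strides match the operator's default '(4,8,16,32)'.

import numpy as np

# ROIs in (batch_idx, x1, y1, x2, y2) form; these boxes are made up for illustration.
rois = np.array([
    [0.,  10.,  10.,  80.,  60.],   # small box
    [0.,  50.,  40., 400., 300.],   # medium box
    [0.,   0.,   0., 900., 700.],   # large box
])
feat_strides = (4, 8, 16, 32)

w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
# Same rule as in forward(): bigger ROIs go to coarser pyramid levels.
feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)),
                  0, len(feat_strides) - 1).astype(int)
print(feat_id)  # -> [0 2 3]; ROI i is pooled from the level with stride feat_strides[feat_id[i]]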
shaheen19/FAIR | [
"345c23b3d35918729e7aa49ecb39047494c48a6e",
"345c23b3d35918729e7aa49ecb39047494c48a6e"
] | [
"fair/forward.py",
"tests/reproduction/reproduction_test.py"
] | [
"from __future__ import division\n\nimport inspect\nimport numpy as np\nimport warnings\nfrom scipy.optimize import root\nfrom .ancil import natural, cmip6_volcanic, cmip6_solar, historical_scaling\nfrom .constants import molwt, lifetime, radeff\nfrom .constants.general import M_ATMOS, ppm_gtc\nfrom .defaults import carbon, thermal\nfrom .forcing import ozone_tr, ozone_st, h2o_st, contrails, aerosols, bc_snow,\\\n landuse\nfrom .forcing.ghg import co2_log\n\n\ndef iirf_interp(alp_b,a,tau,iirf_h,targ_iirf):\n \"\"\"Interpolation function for finding alpha, the CO2 decay time constant\n scaling factor, in iirf_h equation. See Eq. (7) of Millar et al ACP (2017).\n\n Inputs:\n alp_b : Guess for alpha, the scale factor, for tau \n a : partition fractions for CO2 boxes\n tau : time constants for CO2 boxes\n iirf_h : time horizon for time-integrated airborne fraction\n targ_iirf: iirf_h calculated using simple parameterisation (Eq. (8),\n Millar et al (2017)).\n \"\"\"\n\n iirf_arr = alp_b*(np.sum(a*tau*(1.0 - np.exp(-iirf_h/(tau*alp_b)))))\n return iirf_arr - targ_iirf\n\n \ndef iirf_simple(c_acc, temp, r0, rc, rt, iirf_max):\n \"\"\"Simple linear iIRF relationship. Eq. (8) of Millar et al ACP (2017).\n \n Inputs:\n c_acc : cumulative airborne carbon anomaly (GtC) since\n pre-industrial\n temp : temperature anomaly since pre-industrial\n r0 : pre-industrial time-integrated airborne fraction (yr)\n rc : sensitivity of time-integrated airborne fraction to airborne\n carbon (yr/GtC)\n rt : sensitivity of time-integrated airborne fraction to\n temperature (yr/K)\n iirf_max : maximum value of time-integrated airborne fraction (yr)\n \n Outputs:\n iirf : time-integrated airborne fraction of carbon (yr)\n \"\"\"\n \n return np.min([r0 + rc * c_acc + rt * temp, iirf_max])\n \n \ndef calculate_q(tcrecs, d, f2x, tcr_dbl, nt):\n \"\"\"If TCR and ECS are supplied, calculate the q model coefficients.\n See Eqs. 
(4) and (5) of Millar et al ACP (2017).\n \n Inputs:\n tcrecs : 2-element array of transient climate response (TCR) and\n equilibrium climate sensitivity (ECS).\n d : The slow and fast thermal response time constants\n f2x : Effective radiative forcing from a doubling of CO2\n tcr_dbl : time to a doubling of CO2 under 1% per year CO2 increase, yr\n nt : number of timesteps\n \n Outputs:\n q : coefficients of slow and fast temperature change in each\n timestep ((nt, 2) array).\n \"\"\"\n \n # TODO:\n # error checking before call\n # benchmark one call per timestep and if not slower do not convert to 2D\n # - will make code cleaner\n \n k = 1.0 - (d/tcr_dbl)*(1.0 - np.exp(-tcr_dbl/d))\n # if ECS and TCR are not time-varying, expand them to 2D array anyway\n if tcrecs.ndim==1:\n if len(tcrecs)!=2:\n raise ValueError(\n \"Constant TCR and ECS should be a 2-element array\")\n tcrecs = np.ones((nt, 2)) * tcrecs\n elif tcrecs.ndim==2:\n if tcrecs.shape!=(nt, 2):\n raise ValueError(\n \"Transient TCR and ECS should be a nt x 2 array\")\n q = (1.0 / f2x) * (1.0/(k[0]-k[1])) * np.array([\n tcrecs[:,0]-tcrecs[:,1]*k[1],tcrecs[:,1]*k[0]-tcrecs[:,0]]).T\n return q\n \n\ndef carbon_cycle(e0, c_acc0, temp, r0, rc, rt, iirf_max, time_scale_sf0, a, tau,\n iirf_h, carbon_boxes0, c_pi, c0, e1):\n \"\"\"Calculates CO2 concentrations from emissions.\n \n Inputs:\n e0 : emissions of CO2 (GtC) in timestep t-1\n c_acc0 : cumulative airborne carbon anomaly (GtC) since\n pre-industrial, timestep t-1\n temp : temperature anomaly above pre-industrial (K)\n r0 : pre-industrial time-integrated airborne fraction (yr)\n rc : sensitivity of time-integrated airborne fraction to \n airborne carbon (yr/GtC)\n rt : sensitivity of time-integrated airborne fraction to\n temperature (yr/K)\n iirf_max : maximum value of time-integrated airborne fraction (yr)\n time_scale_sf0: initial guess of alpha scaling factor\n a : partition coefficient of carbon boxes\n tau : present-day decay time constants of CO2 (yr)\n iirf_h : time horizon for time-integrated airborne fraction (yr)\n carbon_boxes0 : carbon stored in each atmospheric reservoir at timestep\n t-1 (GtC)\n c_pi : pre-industrial concentration of CO2, ppmv\n c0 : concentration of CO2 in timestep t-1, ppmv\n e1 : emissions of CO2 in timestep t, GtC\n\n Outputs:\n c1 : concentrations of CO2 in timestep t, ppmv\n c_acc1 : cumulative airborne carbon anomaly (GtC) since\n pre-industrial, timestep t\n carbon_boxes1 : carbon stored in each atmospheric reservoir at timestep\n t (GtC)\n time_scale_sf : scale factor for CO2 decay constants\n \"\"\"\n iirf = iirf_simple(c_acc0, temp, r0, rc, rt, iirf_max)\n time_scale_sf = root(iirf_interp, time_scale_sf0,\n args=(a, tau, iirf_h, iirf))['x']\n tau_new = tau * time_scale_sf\n carbon_boxes1 = carbon_boxes0*np.exp(-1.0/tau_new) + a*e1 / ppm_gtc\n c1 = np.sum(carbon_boxes1) + c_pi\n c_acc1 = c_acc0 + 0.5*(e1 + e0) - (c1 - c0)*ppm_gtc\n return c1, c_acc1, carbon_boxes1, time_scale_sf\n \n \ndef emis_to_conc(c0, e0, e1, ts, lt, vm):\n \"\"\"Calculate concentrations of well mixed GHGs from emissions for simple\n one-box model.\n \n Inputs (all can be scalar or 1D arrays for multiple species):\n c0: concentrations in timestep t-1\n e0: emissions in timestep t-1\n e1: emissions in timestep t\n ts: length of timestep. Use 1 for sensible results in FaIR 1.3.\n lt: atmospheric (e-folding) lifetime of GHG\n vm: conversion from emissions units (e.g. Mt) to concentrations units\n (e.g. 
ppb)\n \n Outputs:\n c1: concentrations in timestep t\n \"\"\"\n c1 = c0 - c0 * (1.0 - np.exp(-ts/lt)) + 0.5 * ts * (e1 + e0) * vm\n return c1\n\n\ndef forc_to_temp(t0, q, d, f, e=1.0):\n \"\"\"Calculate temperature from a given radiative forcing.\n\n Inputs:\n t0: Temperature in timestep t-1\n q: The matrix contributions to slow and fast temperature change\n calculated from ECS and TCR (2 element array)\n d: The slow and fast thermal response time constants (2 element array)\n f: radiative forcing (can be scalar or 1D array representing multiple\n species)\n\n Keywords:\n e: efficacy factor (default 1); if f is an array, e should be an array\n of the same length.\n\n Outputs:\n t1: slow and fast contributions to total temperature (2 element array)\n in timestep t\n \"\"\"\n t1 = t0*np.exp(-1.0/d) + q*(1.0-np.exp((-1.0)/d))*np.sum(f*e)\n return t1\n\n\ndef fair_scm(\n emissions=False,\n emissions_driven=True,\n C=None,\n other_rf=0.0,\n q = thermal.q,\n tcrecs = thermal.tcrecs,\n d = thermal.d,\n F2x = thermal.f2x,\n tcr_dbl = thermal.tcr_dbl,\n a = carbon.a,\n tau = carbon.tau,\n r0 = carbon.r0,\n rc = carbon.rc,\n rt = carbon.rt,\n iirf_max = carbon.iirf_max,\n iirf_h = carbon.iirf_h,\n C_pi=np.array([278., 722., 273., 34.497] + [0.]*25 + [13.0975, 547.996]),\n restart_in=False,\n restart_out=False,\n F_tropO3 = 0.,\n F_aerosol = 0.,\n F_volcanic=cmip6_volcanic.Forcing.volcanic,\n F_solar=cmip6_solar.Forcing.solar,\n F_contrails=0.,\n F_bcsnow=0.,\n F_landuse=0.,\n aviNOx_frac=0.,\n fossilCH4_frac=0.,\n natural=natural.Emissions.emissions,\n efficacy=np.array([1.]*9 + [3.] + [1.]*3),\n scale=None,\n oxCH4_frac=0.61,\n ghg_forcing=\"Etminan\",\n stwv_from_ch4=None,\n b_aero = np.array([-6.2227e-3, 0.0, -3.8392e-4, -1.16551e-3, 1.601537e-2,\n -1.45339e-3, -1.55605e-3]),\n b_tro3 = np.array([2.8249e-4, 1.0695e-4, -9.3604e-4, 99.7831e-4]),\n ghan_params = np.array([-1.95011431, 0.01107147, 0.01387492]),\n stevens_params = np.array([0.001875, 0.634, 60.]),\n useMultigas=True,\n useStevenson=True,\n lifetimes=False,\n aerosol_forcing=\"aerocom+ghan\",\n scaleAerosolAR5=True,\n fixPre1850RCP=True,\n useTropO3TFeedback=True,\n scaleHistoricalAR5=False,\n contrail_forcing='NOx',\n kerosene_supply=0.,\n landuse_forcing='co2',\n ):\n\n # is iirf_h < iirf_max? 
Don't stop the code, but warn user\n if iirf_h < iirf_max:\n warnings.warn('iirf_h=%f, which is less than iirf_max (%f)'\n % (iirf_h, iirf_max), RuntimeWarning)\n\n # Conversion between ppb/ppt concentrations and Mt/kt emissions\n # in the RCP databases ppb = Mt and ppt = kt so factor always 1e18\n emis2conc = M_ATMOS/1e18*np.asarray(molwt.aslist)/molwt.AIR\n\n # Funny units for nitrogen emissions - N2O is expressed in N2 equivalent\n n2o_sf = molwt.N2O/molwt.N2\n emis2conc[2] = emis2conc[2] / n2o_sf\n\n # Convert any list to a numpy array for (a) speed and (b) consistency.\n # Goes through all variables in scope and converts them.\n frame = inspect.currentframe()\n args, _, _, values = inspect.getargvalues(frame)\n for arg_to_check in args:\n if type(values[arg_to_check]) is list:\n exec(arg_to_check + '= np.array(' + arg_to_check + ')')\n\n # Set up the output timeseries variables depending on options and perform\n # basic sense checks\n if useMultigas:\n ngas = 31\n nF = 13\n if emissions_driven:\n if type(emissions) is not np.ndarray or emissions.shape[1] != 40:\n raise ValueError(\n \"emissions timeseries should be a nt x 40 numpy array\")\n carbon_boxes_shape = (emissions.shape[0], a.shape[0])\n thermal_boxes_shape = (emissions.shape[0], d.shape[0])\n nt = emissions.shape[0]\n else:\n if type(C) is not np.ndarray or C.shape[1] != ngas:\n raise ValueError(\n \"C timeseries should be a nt x %d numpy array\" % ngas)\n thermal_boxes_shape = (C.shape[0], d.shape[0])\n nt = C.shape[0]\n if np.isscalar(fossilCH4_frac):\n fossilCH4_frac = np.ones(nt) * fossilCH4_frac\n # If custom gas lifetimes are supplied, use them, else import defaults\n if type(lifetimes) is np.ndarray:\n if len(lifetimes)!=ngas:\n raise ValueError(\n \"custom GHG lifetime array must have \" + str(ngas) + \n \" elements\")\n else:\n lifetimes = lifetime.aslist\n # Select the desired GHG forcing relationship and populate \n # stratospheric water vapour from methane scale factor if not specified\n # by user\n if ghg_forcing.lower()==\"etminan\":\n from .forcing.ghg import etminan as ghg\n if stwv_from_ch4==None: stwv_from_ch4=0.12\n elif ghg_forcing.lower()==\"myhre\":\n from .forcing.ghg import myhre as ghg\n if stwv_from_ch4==None: stwv_from_ch4=0.15\n else:\n raise ValueError(\n \"ghg_forcing should be 'etminan' (default) or 'myhre'\")\n \n # Check natural emissions and convert to 2D array if necessary\n if type(natural) in [float,int]:\n natural = natural * np.ones((nt,2))\n elif type(natural) is np.ndarray:\n if natural.ndim==1:\n if natural.shape[0]!=2:\n raise ValueError(\n \"natural emissions should be a 2-element or nt x 2 \" +\n \"array\")\n natural = np.tile(natural, nt).reshape((nt,2))\n elif natural.ndim==2:\n if natural.shape[1]!=2 or natural.shape[0]!=nt:\n raise ValueError(\n \"natural emissions should be a 2-element or nt x 2 \" +\n \"array\")\n else:\n raise ValueError(\n \"natural emissions should be a scalar, 2-element, or nt x 2 \" +\n \"array\")\n\n # check scale factor is correct shape. 
If 1D inflate to 2D\n if scale is None:\n scale = np.ones((nt,nF))\n elif scale.shape[-1]==nF:\n if scale.ndim==2 and scale.shape[0]==nt:\n pass\n elif scale.ndim==1:\n scale = np.tile(scale, nt).reshape((nt,nF))\n else:\n raise ValueError(\"in multi-gas mode, scale should be None, or a \"+\n \"(13,) or (nt, 13) array\")\n\n # if scaling the historical time series to match AR5, apply these\n # factors to whatever the user specifies\n if scaleHistoricalAR5:\n scale=scale*historical_scaling.all[:nt,:]\n\n else:\n ngas = 1\n nF = 1\n\n if emissions_driven:\n if type(emissions) is np.ndarray:\n if emissions.ndim != 1:\n raise ValueError(\n \"In CO2-only mode, emissions should be a 1D array\")\n nt = emissions.shape[0]\n carbon_boxes_shape = (nt, a.shape[0])\n thermal_boxes_shape = (nt, d.shape[0])\n elif type(other_rf) is np.ndarray:\n if other_rf.ndim != 1:\n raise ValueError(\n \"In CO2-only mode, other_rf should be a 1D array\")\n nt = other_rf.shape[0]\n carbon_boxes_shape = (nt, a.shape[0])\n thermal_boxes_shape = (nt, d.shape[0])\n emissions = np.zeros(nt)\n else:\n raise ValueError(\n \"Neither emissions or other_rf is defined as a timeseries\")\n\n else:\n if type(C) is not np.ndarray or C.ndim != 1:\n raise ValueError(\n \"In CO2-only mode, concentrations should be a 1D array\")\n nt = C.shape[0]\n thermal_boxes_shape = (nt, d.shape[0])\n # expand C to 2D array for consistency with other calcs\n C = C.reshape((nt, 1))\n\n # check scale factor is correct shape - either scalar or 1D\n # needs try/except really\n if scale is None:\n scale = np.ones(nt)\n elif np.isscalar(scale):\n scale = np.ones(nt) * scale\n elif scale.ndim==1 and scale.shape[0]==nt:\n pass\n else:\n raise ValueError(\"in CO2-only mode, scale should be None, a \"+\n \"scalar or a (nt,) array\")\n\n # if scaling the historical time series to match AR5, apply these\n # factors to whatever the user specifies\n if scaleHistoricalAR5:\n scale=scale*historical_scaling.co2[:nt]\n\n # If TCR and ECS are supplied, calculate q coefficients\n if type(tcrecs) is np.ndarray:\n q = calculate_q(tcrecs, d, F2x, tcr_dbl, nt)\n\n # Check a and tau are same size\n if a.ndim != 1:\n raise ValueError(\"a should be a 1D array\")\n if tau.ndim != 1:\n raise ValueError(\"tau should be a 1D array\")\n if len(a) != len(tau):\n raise ValueError(\"a and tau should be the same size\")\n if not np.isclose(np.sum(a), 1.0):\n raise ValueError(\"a should sum to one\")\n\n # Allocate intermediate and output arrays\n F = np.zeros((nt, nF))\n C_acc = np.zeros(nt)\n T_j = np.zeros(thermal_boxes_shape)\n T = np.zeros(nt)\n C_0 = np.copy(C_pi)\n if emissions_driven:\n C = np.zeros((nt, ngas))\n R_i = np.zeros(carbon_boxes_shape)\n\n if restart_in:\n R_minus1 = restart_in[0]\n T_j_minus1 = restart_in[1]\n C_acc_minus1 = restart_in[2]\n E_minus1 = restart_in[3]\n C_minus1 = np.sum(R_minus1,axis=-1) + C_0[0]\n\n C[0,0], C_acc[0], R_i[0,:], time_scale_sf = carbon_cycle(\n E_minus1,\n C_acc_minus1,\n np.sum(T_j_minus1),\n r0,\n rc,\n rt,\n iirf_max,\n 0.16,\n a,\n tau,\n iirf_h,\n R_minus1,\n C_pi[0],\n C_minus1,\n emissions[0]\n )\n\n if np.isscalar(other_rf):\n F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf\n else:\n F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0]\n\n F[0,0] = F[0,0] * scale[0]\n\n T_j[0,:] = forc_to_temp(T_j_minus1, q[0,:], d, F[0,:])\n T[0]=np.sum(T_j[0,:],axis=-1)\n\n else:\n # Initialise the carbon pools to be correct for first timestep in\n # numerical method\n if emissions_driven:\n if useMultigas:\n R_i[0,:] = a * 
(np.sum(emissions[0,1:3])) / ppm_gtc\n C[0,1:] = C_0[1:]\n else:\n R_i[0,:] = a * emissions[0,np.newaxis] / ppm_gtc\n C[0,0] = np.sum(R_i[0,:],axis=-1) + C_0[0]\n\n if useMultigas:\n # CO2, CH4 and N2O are co-dependent\n F[0,0:3] = ghg(C[0,0:3], C_pi[0:3], F2x=F2x)\n # Minor (F- and H-gases) are linear in concentration\n # the factor of 0.001 here is because radiative efficiencies are given\n # in W/m2/ppb and concentrations of minor gases are in ppt.\n F[0,3] = np.sum((C[0,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001)\n\n # Tropospheric ozone:\n if emissions_driven:\n if useStevenson:\n F[0,4] = ozone_tr.stevenson(emissions[0,:], C[0,1],\n T=np.sum(T_j[0,:]), \n feedback=useTropO3TFeedback,\n fix_pre1850_RCP=fixPre1850RCP)\n else:\n F[0,4] = ozone_tr.regress(emissions[0,:], beta=b_tro3)\n else:\n F[:,4] = F_tropO3\n\n # Stratospheric ozone depends on concentrations of ODSs (index 15-30)\n F[0,5] = ozone_st.magicc(C[0,15:], C_pi[15:])\n\n # Stratospheric water vapour is a function of the methane ERF\n F[0,6] = h2o_st.linear(F[0,1], ratio=stwv_from_ch4)\n\n # Forcing from contrails. No climate feedback so can live outside\n # of forward model in this version\n if emissions_driven:\n if contrail_forcing.lower()[0]=='n': # from NOx emissions\n F[:,7] = contrails.from_aviNOx(emissions, aviNOx_frac)\n elif contrail_forcing.lower()[0]=='f': # from kerosene production\n F[:,7] = contrails.from_fuel(kerosene_supply)\n elif contrail_forcing.lower()[0]=='e': # external forcing timeseries\n F[:,7] = F_contrails\n else:\n raise ValueError(\"contrails must be one of 'NOx' (estimated \"+\n \"from NOx emissions), 'fuel' (estimated from annual jet fuel \"+\n \"supplied) or 'external' (an external forcing time series).\")\n else:\n F[:,7] = F_contrails\n\n # Forcing from aerosols - again no feedback dependence\n if emissions_driven:\n if aerosol_forcing.lower()=='stevens':\n F[:,8] = aerosols.Stevens(emissions, stevens_params=stevens_params)\n elif 'aerocom' in aerosol_forcing.lower():\n F[:,8] = aerosols.aerocom_direct(emissions, beta=b_aero)\n if 'ghan' in aerosol_forcing.lower():\n F[:,8] = F[:,8] + aerosols.ghan_indirect(emissions,\n scale_AR5=scaleAerosolAR5,\n fix_pre1850_RCP=fixPre1850RCP,\n ghan_params=ghan_params)\n elif aerosol_forcing.lower()[0] == 'e':\n F[:,8] = F_aerosol\n else:\n raise ValueError(\"aerosol_forcing should be one of 'stevens', \" +\n \"aerocom, aerocom+ghan or external\")\n else:\n F[:,8] = F_aerosol\n\n # Black carbon on snow - no feedback dependence\n if emissions_driven:\n F[:,9] = bc_snow.linear(emissions)\n else:\n F[:,9] = F_bcsnow\n\n # Land use change - either use a scaling with cumulative CO2 emissions\n # or an external time series\n if emissions_driven:\n if landuse_forcing.lower()[0]=='c':\n F[:,10] = landuse.cumulative(emissions)\n elif landuse_forcing.lower()[0]=='e':\n F[:,10] = F_landuse\n else:\n raise ValueError(\n \"landuse_forcing should be one of 'co2' or 'external'\")\n else:\n F[:,10] = F_landuse\n \n # Volcanic and solar copied straight to the output arrays\n F[:,11] = F_volcanic\n F[:,12] = F_solar\n\n # multiply by scale factors\n F[0,:] = F[0,:] * scale[0,:]\n\n else:\n if np.isscalar(other_rf):\n F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf\n else:\n F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0]\n F[0,0] = F[0,0] * scale[0]\n\n if restart_in == False:\n # Update the thermal response boxes\n T_j[0,:] = (q[0,:]/d)*(np.sum(F[0,:]))\n\n # Sum the thermal response boxes to get the total temperature anomaly\n T[0]=np.sum(T_j[0,:],axis=-1)\n\n 
for t in range(1,nt):\n\n if emissions_driven:\n if useMultigas:\n if t == 1:\n time_scale_sf = 0.16\n # Calculate concentrations\n # a. CARBON DIOXIDE\n # Firstly add any oxidised methane from last year to the CO2\n # pool\n oxidised_CH4 = ((C[t-1,1]-C_pi[1]) *\n (1.0 - np.exp(-1.0/lifetimes[1])) * \n (molwt.C/molwt.CH4 * 0.001 * oxCH4_frac * fossilCH4_frac[t]))\n oxidised_CH4 = np.max((oxidised_CH4, 0))\n\n C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle(\n np.sum(emissions[t-1,1:3]),\n C_acc[t-1],\n T[t-1],\n r0,\n rc,\n rt,\n iirf_max,\n time_scale_sf,\n a,\n tau,\n iirf_h,\n R_i[t-1,:] + oxidised_CH4,\n C_pi[0],\n C[t-1,0],\n np.sum(emissions[t,1:3])\n )\n\n # b. METHANE\n C[t,1] = emis_to_conc(\n C[t-1,1],\n emissions[t-1,3]+natural[t,0], \n emissions[t,3]+natural[t,0],\n 1.0,\n lifetimes[1],\n 1.0/emis2conc[1]\n )\n\n # c. NITROUS OXIDE\n C[t,2] = emis_to_conc(\n C[t-1,2],\n emissions[t-1,4]+natural[t,1], \n emissions[t,4]+natural[t,1],\n 1.0,\n lifetimes[2],\n 1.0/emis2conc[2]\n )\n\n # d. OTHER WMGHGs\n C[t,3:] = emis_to_conc(\n C[t-1,3:],\n emissions[t-1,12:], \n emissions[t,12:],\n 1.0,\n np.array(lifetimes[3:]),\n 1.0/emis2conc[3:]\n )\n\n # 2. Radiative forcing\n F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x)\n F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:]\n * 0.001)\n if useStevenson:\n F[t,4] = ozone_tr.stevenson(emissions[t,:],\n C[t,1],\n T=T[t-1], \n feedback=useTropO3TFeedback,\n fix_pre1850_RCP=fixPre1850RCP)\n else:\n F[t,4] = ozone_tr.regress(emissions[t,:], beta=b_tro3)\n F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:])\n F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4)\n\n # multiply by scale factors\n F[t,:] = F[t,:] * scale[t,:]\n\n # 3. Temperature\n # Update the thermal response boxes\n T_j[t,:] = forc_to_temp(\n T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy)\n # Sum the thermal response boxes to get the total temperature\n T[t]=np.sum(T_j[t,:],axis=-1)\n\n else:\n if t == 1:\n time_scale_sf = 0.16\n C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle(\n emissions[t-1],\n C_acc[t-1],\n T[t-1],\n r0,\n rc,\n rt,\n iirf_max,\n time_scale_sf,\n a,\n tau,\n iirf_h,\n R_i[t-1,:],\n C_pi[0],\n C[t-1,0],\n emissions[t]\n )\n if np.isscalar(other_rf):\n F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf\n else:\n F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t]\n\n F[t,0] = F[t,0] * scale[t]\n\n T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:])\n T[t]=np.sum(T_j[t,:],axis=-1)\n\n else:\n\n if useMultigas:\n F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x)\n F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:]\n * 0.001)\n F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:])\n F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4)\n\n # multiply by scale factors\n F[t,:] = F[t,:] * scale[t,:]\n\n # 3. 
Temperature\n # Update the thermal response boxes\n T_j[t,:] = T_j[t,:] = forc_to_temp(\n T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy)\n # Sum the thermal response boxes to get the total temperature\n T[t]=np.sum(T_j[t,:],axis=-1)\n\n else:\n if np.isscalar(other_rf):\n F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf\n else:\n F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t]\n\n F[t,0] = F[t,0] * scale[t]\n\n T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:])\n T[t]=np.sum(T_j[t,:],axis=-1)\n\n if not useMultigas:\n C = np.squeeze(C)\n F = np.squeeze(F)\n\n if restart_out:\n if useMultigas:\n E_minus1 = np.sum(emissions[-1,1:3])\n else:\n E_minus1 = emissions[-1]\n restart_out_val=(R_i[-1],T_j[-1],C_acc[-1],E_minus1)\n return C, F, T, restart_out_val\n else:\n return C, F, T\n",
"import pytest\n\nimport fair\nfrom fair.RCPs import rcp3pd, rcp45, rcp6, rcp85, rcp26, rcp60\nimport numpy as np\nimport os\nfrom fair.constants import molwt, radeff, lifetime\nfrom fair.tools.constrain import hist_temp\nfrom fair.tools.gwp import gwp\n\ndef test_ten_GtC_pulse():\n emissions = np.zeros(250)\n emissions[125:] = 10.0\n other_rf = np.zeros(emissions.size)\n for x in range(0,emissions.size):\n other_rf[x] = 0.5*np.sin(2*np.pi*(x)/14.0)\n\n C,F,T = fair.forward.fair_scm(\n emissions=emissions, other_rf=other_rf, useMultigas=False,\n r0=32.4, tcr_dbl=70)\n\n datadir = os.path.join(os.path.dirname(__file__), 'ten_GtC_pulse/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\ndef test_multigas_fullemissions_error():\n with pytest.raises(ValueError):\n fair.forward.fair_scm(emissions=rcp3pd.Emissions.emissions,\n useMultigas=False)\n\n\n# There must be a good way to avoid duplication here\ndef test_rcp3pd():\n C,F,T = fair.forward.fair_scm(\n emissions=rcp3pd.Emissions.emissions,\n b_aero = np.array([-35.29e-4*1.3741*molwt.SO2/molwt.S, 0.0, -5.034e-4*1.3741, -5.763e-4*1.3741*molwt.NO/molwt.N, 453e-4*1.3741,-37.83e-4*1.3741, -10.35e-4*1.3741]),\n efficacy=np.ones(13)\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp3pd/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\ndef test_rcp45():\n C,F,T = fair.forward.fair_scm(\n emissions=rcp45.Emissions.emissions,\n b_aero = np.array([-35.29e-4*1.3741*molwt.SO2/molwt.S, 0.0, -5.034e-4*1.3741, -5.763e-4*1.3741*molwt.NO/molwt.N, 453e-4*1.3741,-37.83e-4*1.3741, -10.35e-4*1.3741]),\n efficacy=np.ones(13)\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp45/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\ndef test_rcp6():\n C,F,T = fair.forward.fair_scm(\n emissions=rcp6.Emissions.emissions,\n b_aero = np.array([-35.29e-4*1.3741*molwt.SO2/molwt.S, 0.0, -5.034e-4*1.3741, -5.763e-4*1.3741*molwt.NO/molwt.N, 453e-4*1.3741,-37.83e-4*1.3741, -10.35e-4*1.3741]),\n efficacy=np.ones(13)\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp6/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\ndef test_rcp85():\n C,F,T = fair.forward.fair_scm(\n emissions=rcp85.Emissions.emissions,\n b_aero = np.array([-35.29e-4*1.3741*molwt.SO2/molwt.S, 0.0, -5.034e-4*1.3741, -5.763e-4*1.3741*molwt.NO/molwt.N, 453e-4*1.3741,-37.83e-4*1.3741, -10.35e-4*1.3741]),\n efficacy=np.ones(13)\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp85/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\n# rcp3pd and rcp6 have been renamed. 
The modules should still work otherwise\n# the tests would not have got to this point. But we import directly here to\n# ensure compatibility.\ndef test_rcp_aliases():\n\n # 1. rcp26\n C,F,T = fair.forward.fair_scm(\n emissions=rcp26.Emissions.emissions,\n b_aero = np.array([-35.29e-4*1.3741*molwt.SO2/molwt.S, 0.0, -5.034e-4*1.3741, -5.763e-4*1.3741*molwt.NO/molwt.N, 453e-4*1.3741,-37.83e-4*1.3741, -10.35e-4*1.3741]),\n efficacy=np.ones(13)\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp3pd/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected) \n\n # 2. rcp60\n C,F,T = fair.forward.fair_scm(\n emissions=rcp60.Emissions.emissions,\n b_aero = np.array([-35.29e-4*1.3741*molwt.SO2/molwt.S, 0.0, -5.034e-4*1.3741, -5.763e-4*1.3741*molwt.NO/molwt.N, 453e-4*1.3741,-37.83e-4*1.3741, -10.35e-4*1.3741]),\n efficacy=np.ones(13)\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp6/')\n C_expected = np.load(datadir + 'C.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(C, C_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\ndef test_co2_concentration_driven():\n C, F, T = fair.forward.fair_scm(\n emissions_driven=False,\n C=rcp45.Concentrations.co2,\n useMultigas=False\n )\n assert (C==rcp45.Concentrations.co2).all()\n datadir = os.path.join(os.path.dirname(__file__), 'rcp45/')\n T_expected = np.load(datadir + 'T_concdriven.npy')\n assert np.allclose(T, T_expected)\n\n\ndef test_multigas_concentration_driven():\n C, F, T = fair.forward.fair_scm(\n emissions_driven=False,\n C=rcp45.Concentrations.gases,\n F_tropO3 = rcp45.Forcing.tropo3,\n F_aerosol = rcp45.Forcing.aero+rcp45.Forcing.cloud,\n F_bcsnow = rcp45.Forcing.bcsnow,\n useMultigas=True\n )\n datadir = os.path.join(os.path.dirname(__file__), 'rcp45/')\n T_expected = np.load(datadir + 'T_concdriven_multi.npy')\n assert np.allclose(T, T_expected)\n\n\ndef test_inverse_fair():\n \"\"\"Tests reproducibility of concentrations-to-emissions FaIR.\"\"\"\n\n # initialise a 1% run\n nt = 140\n C = 1.01**np.arange(nt)*278.\n\n E,F,T = fair.inverse.inverse_fair_scm(C=C, tcrecs=np.array([1.7, 3.0]))\n\n datadir = os.path.join(os.path.dirname(__file__), '1pctCO2/')\n E_expected = np.load(datadir + 'E.npy')\n F_expected = np.load(datadir + 'F.npy')\n T_expected = np.load(datadir + 'T.npy')\n\n assert np.allclose(E, E_expected)\n assert np.allclose(F, F_expected)\n assert np.allclose(T, T_expected)\n\n\ndef test_forward_versus_reverse():\n \"\"\"Does inverse FaIR recover the same emissions as forward FaIR?\n\n Both methods require numerical root finding methods so exact correspondence\n is quite unlikely, so accept a small tolerance\"\"\"\n\n E_forward = rcp85.Emissions.co2\n other_rf = np.sin(np.arange(736)) * 0.2\n C_forward, F_forward, T_forward = fair.forward.fair_scm(emissions=E_forward, other_rf=other_rf, useMultigas=False)\n E_inverse, F_inverse, T_inverse = fair.inverse.inverse_fair_scm(C=C_forward, other_rf=other_rf)\n\n assert np.allclose(E_forward, E_inverse, atol=0.01, rtol=0.01)\n assert np.allclose(F_forward, F_inverse, atol=0.01, rtol=0.01)\n assert np.allclose(T_forward, T_inverse, atol=0.01, rtol=0.01)\n\n\ndef test_restart_co2_continuous():\n \"\"\"Tests to check that a CO2-only run with a restart produces the same\n results as a CO2-only run 
without a restart.\"\"\"\n\n C, F, T = fair.forward.fair_scm(\n emissions = rcp45.Emissions.co2[:20],\n useMultigas = False\n )\n\n C1, F1, T1, restart = fair.forward.fair_scm(\n emissions = rcp45.Emissions.co2[:10],\n useMultigas = False,\n restart_out = True\n )\n\n C2, F2, T2 = fair.forward.fair_scm(\n emissions = rcp45.Emissions.co2[10:20],\n useMultigas = False,\n restart_in = restart\n )\n\n assert np.all(C == np.concatenate((C1, C2)))\n assert np.all(F == np.concatenate((F1, F2)))\n assert np.all(T == np.concatenate((T1, T2)))\n\n\ndef test_inverse_restart():\n \"\"\"Tests restarts for inverse FaIR.\"\"\"\n\n E, F, T = fair.inverse.inverse_fair_scm(\n C = rcp85.Concentrations.co2[:20])\n\n E1, F1, T1, restart = fair.inverse.inverse_fair_scm(\n C = rcp85.Concentrations.co2[:10], restart_out=True)\n\n E2, F2, T2 = fair.inverse.inverse_fair_scm(\n C = rcp85.Concentrations.co2[10:20], restart_in=restart)\n\n assert np.all(E == np.concatenate((E1, E2)))\n assert np.all(F == np.concatenate((F1, F2)))\n assert np.all(T == np.concatenate((T1, T2)))\n\n\ndef test_constrain():\n \"\"\"Checks that the historical temperature constraining function works\"\"\"\n\n datadir = os.path.join(os.path.dirname(__file__),\n '../../fair/tools/tempobs/')\n tempobsdata = np.loadtxt(datadir+'had4_krig_annual_v2_0_0.csv')\n years = tempobsdata[:,0]\n tempobs = tempobsdata[:,1]\n\n C,F,T = fair.forward.fair_scm(emissions=rcp45.Emissions.emissions)\n accept1,sm1,im1,so1,io1 = hist_temp(tempobs, T[85:252], years)\n assert accept1==True\n\n accept2,sm2,im2,so2,io2 = hist_temp(tempobs, T[85:252], years,\n inflate=False)\n assert sm1==sm2\n assert so1==so2\n assert accept2==True\n\n accept3,_,_,_,_ = hist_temp(tempobs, np.zeros(167), years)\n assert accept3==False\n\n\ndef test_gwp():\n \"\"\"Checks that GWP calculator produces correct GWPs.\"\"\"\n\n # methane uses \"perturbation lifetime\" for GWP calculations and feedback\n # factor\n assert np.round(gwp(100, 12.4, radeff.CH4, molwt.CH4, f=0.65))==28\n\n # for N2O, I think the IPCC AR5 value is out by one year. Most likely\n # explanation is that they have rounded off the feedback somewhere.\n # This is calculated as 1-(1-0.36*(1.65)*radeff.CH4/radeff.N2O). See\n # eq. 8.SM.20 in the supplement to Chapter 8, AR5\n assert np.round(\n gwp(20, lifetime.N2O, radeff.N2O, molwt.N2O, f=-0.071874))==263\n assert np.round(\n gwp(100, lifetime.N2O, radeff.N2O, molwt.N2O, f=-0.071874))==264\n\n # Now check a nice straightforward example\n assert np.round(\n gwp(100, lifetime.CFC11, radeff.CFC11, molwt.CFC11), decimals=-1)==4660\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.tile",
"numpy.zeros",
"numpy.squeeze",
"scipy.optimize.root",
"numpy.asarray",
"numpy.copy",
"numpy.exp",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.isscalar"
],
[
"numpy.load",
"numpy.allclose",
"numpy.ones",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.sin",
"numpy.concatenate",
"numpy.loadtxt"
]
] |
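The reproduction tests above call fair_scm directly, so a minimal CO2-only, emissions-driven run follows the same pattern as test_ten_GtC_pulse; the step-change emissions profile here is an invented illustration rather than an RCP scenario.

import numpy as np
import fair

emissions = np.zeros(250)      # annual CO2 emissions in GtC
emissions[125:] = 10.0         # arbitrary constant 10 GtC/yr source switched on half-way through

# CO2-only mode returns concentrations (ppmv), total forcing and temperature anomaly,
# one value per timestep.
C, F, T = fair.forward.fair_scm(emissions=emissions, useMultigas=False)
print(C[-1], F[-1], T[-1])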
douglasdaly/spot-robot | [
"7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2"
] | [
"src/squad/graphs.py"
] | [
"from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload\n\nimport numpy as np\n\nfrom squad.exceptions import (\n EdgeAlreadyExists,\n EdgeNotFound,\n NodeAlreadyExists,\n NodeNotFound,\n)\n\n\nclass Node:\n \"\"\"\n Single node in a graph.\n \"\"\"\n\n def __init__(self, name: str, **data: Any) -> None:\n self._name = name\n self._data = data\n\n @property\n def name(self) -> str:\n \"\"\"str: The name of this node.\"\"\"\n return self._name\n\n @property\n def data(self) -> Dict[str, Any]:\n \"\"\"Dict[str, Any]: The data stored in this node (if any).\"\"\"\n return self._data.copy()\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({self._name})\"\n\n def __hash__(self) -> int:\n return hash((self.__class__.__name__, self._name))\n\n def __eq__(self, __o: object) -> bool:\n if isinstance(__o, Node):\n return self._name == __o._name\n elif isinstance(__o, str):\n return self._name == __o\n raise ValueError(\n f\"Cannot compare {self.__class__.__name__} with\"\n f\" {type(__o).__name__}\"\n )\n\n def __getitem__(self, key: str) -> Any:\n return self._data[key]\n\n def update(self, **data: Any) -> None:\n \"\"\"Updates the data stored on this node.\n\n Parameters\n ----------\n **data : Any, optional\n The data parameters to update on this node.\n\n \"\"\"\n self._data.update(data)\n\n\nclass Edge:\n \"\"\"\n Single edge in a graph.\n \"\"\"\n\n def __init__(\n self,\n u: Node,\n v: Node,\n weight: float = 1.0,\n **data: Any,\n ) -> None:\n self._u = u\n self._v = v\n self._wgt = weight\n self._value: Optional[float] = None\n self._data = data\n\n @property\n def u(self) -> Node:\n \"\"\"Node: The first node in this edge.\"\"\"\n return self._u\n\n @property\n def v(self) -> Node:\n \"\"\"Node: The second node in this edge.\"\"\"\n return self._v\n\n @property\n def weight(self) -> float:\n \"\"\"float: The weight of this edge.\"\"\"\n return self._wgt\n\n @weight.setter\n def weight(self, value: float) -> None:\n self._wgt = value\n\n @property\n def value(self) -> float:\n \"\"\"float: The value of this edge.\"\"\"\n if self._value is None:\n self._value = self.get_value()\n return self._value\n\n @value.setter\n def value(self, value: float) -> None:\n self._value = value\n\n @property\n def weighted_value(self) -> float:\n \"\"\"float: The weighted-value of this edge.\"\"\"\n return self._wgt * self.value\n\n @property\n def data(self) -> Dict[str, Any]:\n \"\"\"Dict[str, Any]: The data associated with this edge (if any).\"\"\"\n return self._data.copy()\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({self._u.name}, {self._v.name})\"\n\n def __hash__(self) -> int:\n return hash((self.__class__.__name__, self._u, self._v))\n\n def __eq__(self, __o: object) -> bool:\n if isinstance(__o, Edge):\n return self._u == __o._u and self._v == __o._v\n elif isinstance(__o, tuple):\n return self._u._name == __o[0] and self._v._name == __o[1]\n raise ValueError(\n f\"Cannot compare {self.__class__.__name__} with\"\n f\" {type(__o).__name__}\"\n )\n\n def __getitem__(self, key: str) -> Any:\n return self._data[key]\n\n def __call__(self, **kwargs: Any) -> float:\n self.update(**kwargs)\n return self._wgt * self.value\n\n def update(self, **data: Any) -> None:\n \"\"\"Updates this edge's state.\n\n Parameters\n ----------\n **data : Any\n Any named-parameters to update the edge's data with.\n\n \"\"\"\n if data:\n self._data.update(data)\n self._value = self.get_value()\n\n def get_value(self) -> float:\n \"\"\"Gets the value associated with 
this edge.\n\n Returns\n -------\n float\n The computed value for this edge.\n\n \"\"\"\n return 1.0\n\n\ndef remove_square_matrix_index(matrix: np.ndarray, index: int) -> np.ndarray:\n \"\"\"Removes the row & column of the specified index from the given\n square matrix.\n\n Parameters\n ----------\n matrix : np.ndarray\n The square matrix to remove the specified `index` row and column\n from.\n index : int\n The index of the row & column to remove from the given `matrix`.\n\n Returns\n -------\n np.ndarray\n The new matrix, from the original `matrix` given, with the\n desired row & column `index` removed.\n\n Raises\n ------\n ValueError\n If the given `matrix` is not a square matrix.\n IndexError\n If the given `index` is invalid for the bounds of the given\n `matrix`.\n\n \"\"\"\n if matrix.ndim < 2 or matrix.shape[0] != matrix.shape[1]:\n raise ValueError(f\"Invalid matrix given, shape: {matrix.shape}\")\n elif abs(index) > (matrix.shape[0] - 1):\n raise IndexError(index)\n return np.delete(np.delete(matrix, index, axis=0), index, axis=1)\n\n\nclass Graph:\n \"\"\"\n Directed graph.\n \"\"\"\n\n def __init__(\n self,\n node_cls: Optional[Type[Node]] = None,\n edge_cls: Optional[Type[Edge]] = None,\n ) -> None:\n self._node_cls = node_cls or Node\n self._nodes: List[Node] = []\n self._node_lookup: Dict[str, int] = {}\n\n self._edge_cls = edge_cls or Edge\n self._edges: List[Edge] = []\n self._edge_lookup: Dict[Tuple[str, str], int] = {}\n\n self._adj_mat = np.array([], dtype=float)\n self._con_mat = self._adj_mat.copy()\n\n @property\n def nodes(self) -> Dict[str, Node]:\n \"\"\"Dict[str, Node]: The nodes contained in this graph.\"\"\"\n return {x.name: x for x in self._nodes}\n\n @property\n def edges(self) -> Dict[str, Dict[str, Edge]]:\n \"\"\"Dict[str, Dict[str, Edge]]: The edges in this graph.\"\"\"\n ret = {x.name: {} for x in self._nodes}\n for x in self._edges:\n ret[x.u.name][x.v.name] = x\n return ret\n\n def __getitem__(\n self,\n key: Union[str, Tuple[str, str]],\n ) -> Union[Edge, Node]:\n if isinstance(key, str):\n if key not in self._node_lookup:\n raise NodeNotFound(key)\n return self._nodes[self._node_lookup[key]]\n else:\n if key not in self._edge_lookup:\n raise EdgeNotFound(*key)\n return self._edges[self._edge_lookup[key]]\n\n def add(self, obj: Union[Edge, Node]) -> None:\n \"\"\"Adds an edge or node to this graph.\n\n Parameters\n ----------\n obj : Union[Edge, Node]\n The node or edge object to add to this graph.\n\n Raises\n ------\n EdgeAlreadyExists\n If the given edge `obj` is already in this graph.\n NodeAlreadyExists\n If the given node `obj` is already in this graph.\n NodeNotFound\n If one or both of the nodes in the given edge `obj` is not\n in this graph.\n\n \"\"\"\n if isinstance(obj, Edge):\n if obj in self._edges:\n raise EdgeAlreadyExists(obj.u.name, obj.v.name)\n elif obj.u.name not in self._node_lookup:\n raise NodeNotFound(obj.u.name)\n elif obj.v.name not in self._node_lookup:\n raise NodeNotFound(obj.v.name)\n self._add_edge_obj(obj)\n else:\n if obj in self._nodes:\n raise NodeAlreadyExists(obj.name)\n self._add_node_obj(obj)\n return\n\n def remove(self, obj: Union[Edge, Node]) -> None:\n \"\"\"Removes the given edge or node from this graph.\n\n Parameters\n ----------\n obj : Union[Edge, Node]\n The edge or node object to remove from this graph.\n\n Raises\n ------\n EdgeNotFound\n If the given edge `obj` could not be found.\n NodeNotFound\n If the given node `obj` could not be found.\n\n \"\"\"\n if isinstance(obj, Edge):\n if obj not in 
self._edges:\n raise EdgeNotFound(obj.u.name, obj.v.name)\n self._remove_edge_obj(obj.u.name, obj.v.name)\n else:\n if obj not in self._nodes:\n raise NodeNotFound(obj.name)\n self._remove_node_obj(obj.name)\n return\n\n def clear(self) -> None:\n \"\"\"Clears all nodes and edges from this graph.\"\"\"\n self._node_lookup.clear()\n self._nodes.clear()\n self._edge_lookup.clear()\n self._edges.clear()\n self._adj_mat = np.array([], dtype=self._adj_mat.dtype)\n self._con_mat = self._adj_mat.copy()\n\n def _add_edge_obj(self, edge: Edge) -> None:\n \"\"\"Adds a new edge object to this graph.\"\"\"\n self._edges.append(edge)\n new_n_edges = len(self._edges)\n self._edge_lookup[(edge.u.name, edge.v.name)] = new_n_edges - 1\n\n idx_u = self._nodes.index(edge.u)\n idx_v = self._nodes.index(edge.v)\n self._adj_mat[idx_u, idx_v] = 1.0\n self._con_mat[idx_u, idx_v] = 1.0\n if idx_u != idx_v:\n self._con_mat[idx_v, idx_u] = 1.0\n return\n\n def _remove_edge_obj(self, u_name: str, v_name: str) -> None:\n \"\"\"Removes the specified edge from this graph.\"\"\"\n # - Update adjacency/connection matrices\n u_idx = self._node_lookup[u_name]\n v_idx = self._node_lookup[v_name]\n\n self._adj_mat[u_idx, v_idx] = 0.0\n if u_idx == v_idx:\n self._con_mat[u_idx, v_idx] = 0.0\n elif (v_name, u_name) not in self._edge_lookup:\n self._con_mat[u_idx, v_idx] = 0.0\n self._con_mat[v_idx, u_idx] = 0.0\n\n # - Remove edge\n edge_idx = self._edge_lookup.pop((u_name, v_name))\n self._edges.pop(edge_idx)\n\n # - Update lookup table for relevant edges\n edge_names_to_update = [\n (x.u.name, x.v.name) for x in self._edges[edge_idx:]\n ]\n for edge_name in edge_names_to_update:\n self._edge_lookup[edge_name] -= 1\n return\n\n def _add_node_obj(self, node: Node) -> None:\n \"\"\"Adds a new node object to this graph.\"\"\"\n orig_n_nodes = len(self._nodes)\n self._nodes.append(node)\n self._node_lookup[node.name] = orig_n_nodes\n new_n_nodes = orig_n_nodes + 1\n\n upd_adj_mat = np.zeros(\n (new_n_nodes, new_n_nodes),\n dtype=self._adj_mat.dtype,\n )\n upd_con_mat = upd_adj_mat.copy()\n\n if orig_n_nodes:\n upd_adj_mat[:orig_n_nodes, :orig_n_nodes] = self._adj_mat\n upd_con_mat[:orig_n_nodes, :orig_n_nodes] = self._con_mat\n\n self._adj_mat = upd_adj_mat\n self._con_mat = upd_con_mat\n\n def _remove_node_obj(self, node_name: str) -> None:\n \"\"\"Removes an existing node object from this graph.\"\"\"\n node_idx = self._node_lookup[node_name]\n\n # Update the adjacency/connection matrices\n self._adj_mat = remove_square_matrix_index(self._adj_mat, node_idx)\n self._con_mat = remove_square_matrix_index(self._con_mat, node_idx)\n\n # - Remove any edge objects connected to the node\n def _edge_filter(x: Tuple[str, str]) -> bool:\n return node_name in x\n\n edge_idxs_to_remove = sorted(\n (\n self._edge_lookup[k]\n for k in filter(_edge_filter, self._edge_lookup.keys())\n ),\n reverse=True,\n )\n edge_names_to_remove = [\n (x.u.name, x.v.name)\n for x in (self._edges[i] for i in edge_idxs_to_remove)\n ]\n\n for i, n in zip(edge_idxs_to_remove, edge_names_to_remove):\n del self._edge_lookup[n]\n self._edges.pop(i)\n\n # - Remove the node object\n self._nodes.pop(node_idx)\n\n # - Update the lookup tables\n for node in self._nodes[node_idx:]:\n self._node_lookup[node.name] -= 1\n\n for i, edge in enumerate(self._edges):\n self._edge_lookup[(edge.u.name, edge.v.name)] = i\n return\n\n def add_node(self, name: str, **data: Any) -> None:\n \"\"\"Creates and adds a new node to this graph.\n\n Parameters\n ----------\n name : str\n The 
name of the node to add to this graph.\n **data : Any\n The data of the node to add to this graph (if any).\n\n Raises\n ------\n NodeAlreadyExists\n If a node with the same `name` given already exists in this\n graph.\n\n \"\"\"\n if name in (x.name for x in self._nodes):\n raise NodeAlreadyExists(name)\n new_node = self._node_cls(name, **data)\n self._add_node_obj(new_node)\n\n def add_nodes(self, *names: str, **data: Any) -> None:\n \"\"\"Creates and adds new node(s) to this graph.\n\n Parameters\n ----------\n *names : str\n The name(s) of the new nodes to create and add.\n **data : Any, optional\n The data (if any) to associate with each of the new nodes.\n\n Raises\n ------\n NodeAlreadyExists\n If any of the nodes from the given `names` already exist in\n this graph.\n ValueError\n If no `names` are provided.\n\n \"\"\"\n for name in names:\n if name in self._node_lookup:\n raise NodeAlreadyExists(name)\n\n for name in names:\n new_node = self._node_cls(name, **data)\n self._add_node_obj(new_node)\n return\n\n def remove_node(self, name: str) -> None:\n \"\"\"Removes the specified node from this graph.\n\n Parameters\n ----------\n name : str\n The name of the node to remove.\n\n Raises\n ------\n NodeNotFound\n If the node with the given `name` could not be found.\n\n \"\"\"\n if name not in self._node_lookup:\n raise NodeNotFound(name)\n self._remove_node_obj(name)\n\n def add_edge(\n self,\n u_name: str,\n v_name: str,\n weight: float = 1.0,\n **data: Any,\n ) -> None:\n \"\"\"Creates and adds a new edge to this graph.\n\n Parameters\n ----------\n u_name : str\n The name of the (existing) node to set as the first node for\n the new edge to add.\n v_name : str\n The name of the (existing) node to set as the second node\n for the new edge to add.\n weight : float, default=1.0\n The weight to use for the new edge to add.\n **data : Any, optional\n The data (if any) to store on the new edge.\n\n Raises\n ------\n EdgeAlreadyExists\n If an edge for the given nodes specified already exists in\n this graph.\n NodeNotFound\n If either of the given nodes specified could not be found.\n\n \"\"\"\n if (u_name, v_name) in ((x.u.name, x.v.name) for x in self._edges):\n raise EdgeAlreadyExists(u_name, v_name)\n\n u = None\n v = None\n for node in self._nodes:\n if node.name == u_name:\n u = node\n if node.name == v_name:\n v = node\n if u is not None and v is not None:\n break\n\n if u is None:\n raise NodeNotFound(u_name)\n if v is None:\n raise NodeNotFound(v_name)\n\n new_edge = self._edge_cls(u, v, weight=weight, **data)\n self._add_edge_obj(new_edge)\n\n def add_edges(\n self,\n u_name: str,\n *v_names: str,\n weight: float = 1.0,\n **data: Any,\n ) -> None:\n \"\"\"Adds multiple edges from `u_name` to this graph.\n\n Parameters\n ----------\n u_name : str\n The name of the (existing) node to set as the first node for\n the new edges to add.\n *v_names : str\n The names of the (existing) nodes to set as the second node\n for the new edge to add.\n weight : float, default=1.0\n The weight to use for each new edge to add.\n **data : Any, optional\n The data (if any) to store on each new edge.\n\n Raises\n ------\n EdgeAlreadyExists\n If any edge for the given nodes specified already exists in\n this graph.\n NodeNotFound\n If any of the given nodes specified could not be found.\n ValueError\n If no `v_names` are provided.\n\n \"\"\"\n if not v_names:\n raise ValueError(\"You must provide at least one v node name\")\n\n if u_name not in self._node_lookup:\n raise NodeNotFound(u_name)\n 
else:\n for v in v_names:\n if v not in self._node_lookup:\n raise NodeNotFound(v)\n\n for e in ((u_name, v) for v in v_names):\n if e in self._edge_lookup:\n raise EdgeAlreadyExists(e[0], e[1])\n\n u_node = self._nodes[self._node_lookup[u_name]]\n for v_name in v_names:\n v_node = self._nodes[self._node_lookup[v_name]]\n new_edge = self._edge_cls(u_node, v_node, weight=weight, **data)\n self._add_edge_obj(new_edge)\n return\n\n def remove_edge(self, u_name: str, v_name: str) -> None:\n \"\"\"Removes the edge specified from this graph.\n\n Parameters\n ----------\n u_name : str\n The name of the first node in the edge to remove.\n v_name : str\n The name of the second node in the edge to remove.\n\n Raises\n ------\n EdgeNotFound\n If the specified edge could not be found.\n NodeNotFound\n If either node specified by the given `u_name` and `v_name`\n could not be found.\n\n \"\"\"\n if u_name not in self._node_lookup:\n raise NodeNotFound(u_name)\n elif v_name not in self._node_lookup:\n raise NodeNotFound(v_name)\n elif (u_name, v_name) not in self._edge_lookup:\n raise EdgeNotFound(u_name, v_name)\n self._remove_edge_obj(u_name, v_name)\n\n def update_nodes(self, *names: str, **data: Any) -> None:\n \"\"\"Updates the node(s) in this graph.\n\n Parameters\n ----------\n *names : str, optional\n The specific node(s) to update (if not given then all nodes\n will be updated).\n **data : Any, optional\n The data updates to push to all nodes in the graph for the\n update calls.\n\n \"\"\"\n if names:\n nodes = (x for x in self._nodes if x.name in names)\n else:\n nodes = self._nodes\n\n for node in nodes:\n node.update(**data)\n return\n\n def update_edges(self, *names: str, **data: Any) -> None:\n \"\"\"Updates all the edges in this graph.\n\n Parameters\n ----------\n *names : str, optional\n The u-node (first node) names of the relevant edges to\n update (if not provided then all edges are updated).\n **data : Any, optional\n Any data updates to push to all edges in the graph for the\n update calls.\n\n \"\"\"\n if names:\n edges = (x for x in self._edges if x.u.name in names)\n else:\n edges = self._edges\n\n for edge in edges:\n edge.update(**data)\n return\n\n @overload\n def adj_edges(self, u_name: str) -> List[Edge]:\n ...\n\n @overload\n def adj_edges(self, u_name: str, v_name: str) -> Edge:\n ...\n\n def adj_edges(\n self,\n u_name: str,\n v_name: Optional[str] = None,\n ) -> Union[Edge, List[Edge]]:\n \"\"\"Gets the adjacenct edge(s) specified.\n\n Parameters\n ----------\n u_name : str\n The name of the node to get the adjacent edge(s) *from*.\n v_name : str, optional\n The name of the node to get the adjacent edge(s) *to* (if\n any). If not specified (default) it will return all\n possible adjacent edges.\n\n Returns\n -------\n Edge or List[Edge]\n The adjacent edge(s) from the specified `u_name` (if\n `v_name` was not specified). 
If `v_name` was given then\n it just returns the adjacent edge from the specified\n `u_name` node to the specified `v_name` node.\n\n Raises\n ------\n NodeNotFound\n If the specified `u_name` node (or `v_name` node, if given)\n could not be found.\n EdgeNotFound\n If the specified `u_name` to `v_name` (if given) edge could\n not be found.\n\n See Also\n --------\n adj, adj_values, adj_weights\n\n \"\"\"\n u_idx = None\n v_idx = None\n for i, node in enumerate(self._nodes):\n if node.name == u_name:\n u_idx = i\n if v_name is not None:\n if node.name == v_name:\n v_idx = i\n if u_idx is not None and v_idx is not None:\n break\n elif u_idx is not None:\n break\n\n if u_idx is None:\n raise NodeNotFound(u_name)\n if v_name is not None and v_idx is None:\n raise NodeNotFound(v_name)\n\n if v_name is None:\n # - All adjacent edges\n adj_edges: List[Edge] = []\n for i, v in enumerate(self._adj_mat[u_idx]):\n if v == 0.0:\n continue\n v_node = self._nodes[i]\n t_edge = self._edges[self._edge_lookup[(u_name, v_node.name)]]\n adj_edges.append(t_edge)\n\n return adj_edges\n else:\n # - Single edge\n try:\n adj_edge = self._edges[self._edge_lookup[(u_name, v_name)]]\n except KeyError:\n raise EdgeNotFound(u_name, v_name)\n return adj_edge\n\n @overload\n def adj_values(\n self,\n u_name: str,\n ) -> Dict[str, float]:\n ...\n\n @overload\n def adj_values(\n self,\n u_name: str,\n v_name: str,\n ) -> float:\n ...\n\n def adj_values(\n self,\n u_name: str,\n v_name: Optional[str] = None,\n ) -> Union[float, Dict[str, float]]:\n \"\"\"Gets the adjacency edge value(s) for the specified node/edge.\n\n Parameters\n ----------\n u_name : str\n The name of the node to get the adjacency data *from*.\n v_name : str, optional\n The name of the node to get the adjacency data *to* (if\n any). If not specified (default) it will return all\n possible adjacent nodes and values.\n\n Returns\n -------\n float or Dict[str, float]\n The adjacent edges and values from the specified `u_name`\n (if `v_name` was not specified). If `v_name` was given then\n it just returns the value of the adjacency edge from the\n specified `u_name` node to the specified `v_name` node.\n\n See Also\n --------\n adj, adj_edges, adj_weights\n\n \"\"\"\n # - Single edge value\n if v_name is not None:\n edge = self.adj_edges(u_name, v_name)\n return edge.value\n\n # - All adjacent edge values\n edges = self.adj_edges(u_name)\n ret = {x.v.name: x.value for x in edges}\n return ret\n\n @overload\n def adj(\n self,\n u_name: str,\n ) -> Dict[str, float]:\n ...\n\n @overload\n def adj(\n self,\n u_name: str,\n v_name: str,\n ) -> float:\n ...\n\n def adj(\n self,\n u_name: str,\n v_name: Optional[str] = None,\n ) -> Union[float, Dict[str, float]]:\n \"\"\"Gets the adjacency edge weighted-value(s) for the specified\n node/edge.\n\n Parameters\n ----------\n u_name : str\n The name of the node to get the adjacency data *from*.\n v_name : str, optional\n The name of the node to get the adjacency data *to* (if\n any). If not specified (default) it will return all\n possible adjacent nodes and values.\n\n Returns\n -------\n float or Dict[str, float]\n The adjacent edges and weighted-values from the specified\n `u_name` (if `v_name` was not specified). 
If `v_name` was\n given then it just returns the weighted-value of the\n adjacent edge from the specified `u_name` node to the\n specified `v_name` node.\n\n See Also\n --------\n adj_edges, adj_values, adj_weights\n\n \"\"\"\n # - Single edge value\n if v_name is not None:\n edge = self.adj_edges(u_name, v_name)\n return edge.weighted_value\n\n # - All adjacent edge values\n edges = self.adj_edges(u_name)\n ret = {x.v.name: x.weighted_value for x in edges}\n return ret\n\n @overload\n def adj_weights(\n self,\n u_name: str,\n ) -> Dict[str, float]:\n ...\n\n @overload\n def adj_weights(\n self,\n u_name: str,\n v_name: str,\n ) -> float:\n ...\n\n def adj_weights(\n self,\n u_name: str,\n v_name: Optional[str] = None,\n ) -> Union[float, Dict[str, float]]:\n \"\"\"Gets the adjacency edge weight(s) of the specified node/edge.\n\n Parameters\n ----------\n u_name : str\n The name of the node to get the adjacency data *from*.\n v_name : str, optional\n The name of the node to get the adjacency data *to* (if\n any). If not specified (default) it will return all\n possible adjacent nodes and values.\n\n Returns\n -------\n float or Dict[str, float]\n The adjacent edges and weight(s) from the specified `u_name`\n node (if `v_name` was not specified). If `v_name` was given\n then it just returns the raw value of the adjacent edge from\n the specified `u_name` node to the specified `v_name` node.\n\n See Also\n --------\n adj, adj_edges, adj_values\n\n \"\"\"\n # - Single edge value\n if v_name is not None:\n edge = self.adj_edges(u_name, v_name)\n return edge.weight\n\n # - All adjacent edge values\n edges = self.adj_edges(u_name)\n ret = {x.v.name: x.weight for x in edges}\n return ret\n"
] | [
[
"numpy.array",
"numpy.delete",
"numpy.zeros"
]
] |
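A short, hypothetical usage sketch for the Graph/Node/Edge API in src/squad/graphs.py above; the node names, weights and edge data are invented, and the import assumes the package is importable as squad.

from squad.graphs import Graph

g = Graph()
g.add_nodes("hip", "knee", "foot")                    # create three nodes in one call
g.add_edges("hip", "knee", "foot", weight=0.5)        # directed edges hip->knee and hip->foot
g.add_edge("knee", "foot", weight=2.0, length=0.12)   # extra kwargs are stored as edge data

print(g.adj("hip"))                   # {'knee': 0.5, 'foot': 0.5}: weight * value, value defaults to 1.0
print(g.adj_weights("knee", "foot"))  # 2.0
g.remove_node("foot")                 # also removes every edge touching "foot"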
xdjiangkai/ColossalAI | [
"4a3d3446b04065fa1c89b78cba673e96115c6325"
] | [
"colossalai/nn/optimizer/lamb.py"
] | [
"\"\"\"\nAdapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb\n\"\"\"\n\nimport torch\nfrom torch.optim import Optimizer\n\nfrom colossalai.registry import OPTIMIZERS\n\n\[email protected]_module\nclass Lamb(Optimizer):\n r\"\"\"Implements Lamb algorithm.\n It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n adam (bool, optional): always use trust ratio = 1, which turns this into\n Adam. Useful for comparison purposes.\n\n .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes:\n https://arxiv.org/abs/1904.00962\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,\n weight_decay=0, adam=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\n \"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\n \"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay)\n self.adam = adam\n super(Lamb, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Lamb does not support sparse gradients, consider SparseAdam instad.')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # m_t\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n # v_t\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # Paper v3 does not use debiasing.\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n # Apply bias to lr to avoid broadcast.\n # * math.sqrt(bias_correction2) / bias_correction1\n step_size = group['lr']\n\n weight_norm = p.data.pow(2).sum().sqrt()\n\n adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])\n if group['weight_decay'] != 0:\n adam_step.add_(p.data, alpha=group['weight_decay'])\n\n adam_norm = adam_step.pow(2).sum().sqrt()\n if weight_norm == 0 or adam_norm == 0:\n trust_ratio = 1\n else:\n trust_ratio = weight_norm / adam_norm\n state['weight_norm'] = weight_norm\n state['adam_norm'] = adam_norm\n state['trust_ratio'] = 
trust_ratio\n if self.adam:\n trust_ratio = 1\n\n p.data.add_(adam_step, alpha=-step_size * trust_ratio)\n\n return loss\n"
] | [
[
"torch.zeros_like"
]
] |
JunCEEE/hummingbird | [
"0b1bdf5023b92090f31d9bc857e0854a805cf2cd"
] | [
"scripts/flash/plot_hitscores.py"
] | [
"#!/usr/bin/env python\nimport h5py\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\n\nrunnr = int(sys.argv[1])\nfilename = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' %runnr\nwith h5py.File(filename, 'r') as f:\n hitscore = f['entry_1/result_1/hitscore_litpixel'][:]\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(hitscore, 'k.')\n#ax.axhline(int(sys.argv[2]))\nfig.savefig('../plots/r%04d_hitscore.png' %runnr, dpi=100, bbox_inches='tight')\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.figure"
]
] |
TheoPantaz/Control-of-robotic-vehicle-via-brain-activity | [
"4cae5a69503659581f510c748f59f045d1f2b145"
] | [
"real_time.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 17 18:06:40 2020\n\n@author: Kokkinos\n\nlines for telnet communication: 31,32,136,139,149,152,201,204,212,215,296\n\"\"\"\nfrom threading import Thread\n\nimport numpy as np\nimport scipy.io as sio\nfrom pylsl import StreamInlet, resolve_stream\nfrom tkinter import *\nimport telnetlib\nimport pickle\nimport threading\n\nfrom graphics import Graphics\n\nclass rt(Graphics):\n \n def __init__(self, mode = 'IMvsall', tim_window = 4, vote_window = 4, overlap = 0, \n IM_window = 2, HOST = \"192.168.4.1\"):\n \n if mode == 'IMvsall' or 'Rvsall' or 'IMvsRest' or 'CSP_OVR' or'sync':\n self.mode = mode\n else:\n raise ValueError('Inappropriate mode value')\n \n# self.HOST = HOST\n# self.tn = telnetlib.Telnet(self.HOST)\n \n with open(\"visual_cues.txt\") as f:\n content = f.readlines()\n content = [line.rstrip('\\n') for line in content]\n \n self.Fs = int(content[0]) # Sampling Frequency\n self.Rdur = int(content[1]) # Rest visual cue duration\n self.Prdur = int(content[3])\n\n content = np.array(content)\n self.vcN = len(content) # number of visual cues\n idxs = np.where(content == 'REST')\n self.RN = len(idxs[0]) # number of REST visual cues\n idxs = np.where(content == 'PREPARE')\n self.PRN = len(idxs[0])\n self.IMN = len(content) - self.RN - self.PRN - 4 # number of Imaginary Movements visual cues\n\n try:\n self.IMdur = int(content[2])\n self.recdur = self.RN * self.Rdur * self.Fs + self.IMN * self.IMdur * self.Fs + self.PRN * self.Prdur * self.Fs # duration of the recording\n except:\n IMdur = list(content[2].split(','))\n self.IMdur = [int(i) for i in IMdur]\n self.IMdur = [np.random.randint(IMdur[0],IMdur[1]) for i in range(self.IMN)]\n self.recdur = self.RN * self.Rdur * self.Fs + sum(self.IMdur) * self.Fs + self.PRN * self.Prdur * self.Fs # duration of the recording\n\n self.content = np.delete(content,np.s_[:4])\n \n \n\n if self.mode == 'sync':\n \n self.tim_window = self.IMdur * self.Fs \n self.vote_window = self.IMdur * self.Fs\n self.step = (self.IMdur + self.Prdur + self.Rdur) * self.Fs\n self.IM_window = 1\n self.IMdur = [self.IMdur] * self.IMN\n \n else:\n \n self.tim_window = tim_window * self.Fs\n self.vote_window = vote_window * self.Fs\n self.overlap = overlap\n self.step = int(self.tim_window * (1 - self.overlap))\n self.IM_window = IM_window\n \n Graphics.__init__(self)\n \n def load_bcis(self, filename):\n with open(filename, 'rb') as train:\n self.bcis = pickle.load(train)\n return self\n \n def begin_stream(self):\n \n print(\"looking for an EEG stream...\")\n self.streams = resolve_stream('type', 'EEG')\n \n # create a new inlet to read from the stream\n self.inlet = StreamInlet(self.streams[0])\n \n def pred_im(self, chunk, cSTR):\n \n self.pred = []\n \n chunk = (np.array(chunk).T)/1000000000\n chunk = chunk.reshape((1,chunk.shape[0],chunk.shape[1]))\n \n for i, bci in enumerate(self.bcis[:-1]):\n \n self.pred.append(bci.predict(chunk))\n \n if self.pred[i] != 0:\n self.vote[i] += 1\n else:\n self.vote[i] -= 1\n \n self.pred.append(self.bcis[-1].predict(chunk))\n\n if self.pred[-1] == 1:\n self.vote[-1] += 1\n else:\n self.vote[-1] -= 1\n \n if cSTR % self.vote_window == 0:\n self.pred_decision()\n \n \n \n def pred_decision(self):\n \n if self.mode == 'IMvsall' or self.mode == 'IMvsRest': \n \n if self.vote[0] <= 0 and self.vote[1] <= 0:\n \n self.prediction.extend([0])\n print(\"pred:rest\")\n \n elif self.vote[0] > 0:\n \n self.prediction.extend([1])\n \n if self.begin:\n# self.tn.write(('1').encode('ascii'))\n 
self.begin = False\n else:\n# self.tn.write(('4').encode('ascii'))\n self.cIM += 1\n \n print(\"pred:left\")\n \n else:\n \n self.prediction.extend([2])\n \n if self.begin:\n# self.tn.write(('1').encode('ascii'))\n self.begin = False\n else:\n# self.tn.write(('3').encode('ascii'))\n self.cIM += 1\n \n print(\"pred:right\")\n \n elif self.mode == 'Rvsall':\n \n if self.vote[0] <= 0:\n \n self.prediction.extend([0])\n print(\"pred:rest\")\n \n elif self.vote[1] > 0:\n \n self.prediction.extend([1])\n \n if self.begin:\n self.tn.write(('1').encode('ascii'))\n self.begin = False\n else:\n self.tn.write(('4').encode('ascii'))\n self.cIM += 1\n \n print(\"pred:left\")\n \n else:\n \n self.prediction.extend([2])\n \n if self.begin:\n self.tn.write(('1').encode('ascii'))\n self.begin = False\n else:\n self.tn.write(('3').encode('ascii'))\n self.cIM += 1\n \n print(\"pred:right\")\n \n else:\n \n self.prediction.extend([self.pred[-1]])\n \n if self.pred[-1] == 0:\n \n print(\"pred:rest\")\n \n elif self.pred[-1] == 1:\n \n if self.begin:\n# self.tn.write(('1').encode('ascii'))\n self.begin = False\n else:\n# self.tn.write(('4').encode('ascii'))\n self.cIM += 1\n \n print(\"pred:left\")\n \n else:\n \n if self.begin:\n# self.tn.write(('1').encode('ascii'))\n self.begin = False\n else:\n# self.tn.write(('3').encode('ascii'))\n self.cIM += 1\n \n print(\"pred:right\")\n \n self.vote = [0] * len(self.vote)\n \n def main_loop(self):\n \n self.load_bcis('train')\n \n self.vote = [0,0,0]\n self.pred = []\n self.prediction = []\n self.begin = True\n self.cIM = 0\n \n cSTR = 0\n cVC = 0\n cdur = 0\n cIMdur = 0\n dur = self.Prdur\n buffer = []\n \n while cSTR < self.recdur:\n \n sample, timestamp = self.inlet.pull_sample()\n buffer += [sample,]\n \n if cdur % (dur * self.Fs) == 0:\n \n if self.content[cVC] == 'REST':\n print(\"REST\")\n self.delete_all()\n cdur = 0\n dur = self.Rdur\n cVC = cVC+1\n \n elif self.content[cVC] == 'LEFT':\n print(\"LEFT\")\n self.left_arrow()\n cdur = 0\n try:\n dur = self.IMdur[cIMdur]\n cIMdur += 1\n except:\n dur = self.IMdur\n cVC = cVC+1\n \n elif self.content[cVC] == 'RIGHT':\n print(\"RIGHT\")\n self.right_arrow()\n cdur = 0\n try:\n dur = self.IMdur[cIMdur]\n cIMdur += 1\n except:\n dur = self.IMdur\n cVC = cVC+1\n \n elif self.content[cVC]=='PREPARE':\n self.Concentration_Cross()\n cdur = 0\n dur = self.Prdur\n cVC = cVC+1\n \n if cSTR > 0 and cSTR % self.step == 0: #and self.cIM == 0:\n \n t1 = threading.Thread(target = self.pred_im, args=(buffer[-self.tim_window:],cSTR,))\n t1.start() \n \n# elif cSTR > 0 and cSTR % self.step == 0:\n# \n# if self.cIM == self.IM_window:\n# self.cIM = 0\n# else:\n# self.cIM += 1\n \n cSTR = cSTR + 1\n cdur = cdur + 1\n \n# self.tn.write(('0').encode('ascii'))\n \n return buffer\n \n def save_recording(self, buffer):\n \n LABELS = []\n trig = []\n offset = (self.Rdur + self.Prdur) * self.Fs\n trig += [offset,]\n \n idxs=np.where(self.content=='REST')\n self.content = np.delete(self.content,idxs)\n idxs=np.where(self.content=='PREPARE')\n self.content = np.delete(self.content,idxs)\n\n\n try:\n for i, IMdur in enumerate(self.IMdur):\n\n trig += [IMdur * self.Fs + offset + trig[-1],]\n LABELS += [0] * offset\n if self.content[i] == 'LEFT':\n LABELS += [1] * IMdur * self.Fs\n else:\n LABELS += [2] * IMdur * self.Fs\n except:\n for i, visual_cue in enumerate(self.content):\n\n trig += [self.IMdur * self.Fs + offset + trig[-1],]\n LABELS += [0] * offset\n if visual_cue == 'LEFT':\n LABELS += [1] * self.IMdur * self.Fs\n else:\n LABELS += [2] 
* self.IMdur * self.Fs\n \n LABELS += [0] * self.Rdur * self. Fs\n pred = [[pr] * self.vote_window for pr in self.prediction]\n \n \n trig = np.array(trig)\n trig = np.delete(trig,-1)\n LABELS = np.array(LABELS)\n buffer = np.array(buffer)\n pred = np.array(pred).flatten()\n \n # create matlab files\n sio.savemat('trig.mat', {'trig':trig})\n sio.savemat('rec.mat', {'rec':buffer})\n sio.savemat('LABELS.mat', {'LABELS':LABELS})\n sio.savemat('pred.mat', {'pred':pred})\n\n\nif __name__ == '__main__':\n b_c_i = rt(mode = 'CSP_OVR', tim_window = 4, vote_window = 8, overlap = 0.5,IM_window = 0)\n b_c_i.load_bcis('train')\n b_c_i.begin_stream()\n buffer = b_c_i.main_loop()\n b_c_i.save_recording(buffer)\n\n \n \n \n"
] | [
[
"scipy.io.savemat",
"numpy.delete",
"numpy.array",
"numpy.where",
"numpy.random.randint"
]
] |
Katarina11/PreSumm | [
"616e72f038d512e9e9112af375d66a0b2e3db6cd"
] | [
"src/models/trainer_ext.py"
] | [
"import os\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\n\nimport distributed\nfrom models.reporter_ext import ReportMgr, Statistics\nfrom others.logging import logger\nfrom others.utils import test_rouge, rouge_results_to_str\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n return n_params\n\n\ndef build_trainer(args, device_id, model, optim):\n \"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. \"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n\n grad_accum_count = args.accum_count\n n_gpu = args.world_size\n\n if device_id >= 0:\n gpu_rank = int(args.gpu_ranks[device_id])\n else:\n gpu_rank = 0\n n_gpu = 0\n\n print('gpu_rank %d' % gpu_rank)\n\n tensorboard_log_dir = args.model_path\n\n writer = SummaryWriter(tensorboard_log_dir, comment=\"Unmt\")\n\n report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)\n\n trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)\n\n # print(tr)\n if (model):\n n_params = _tally_parameters(model)\n logger.info('* number of parameters: %d' % n_params)\n\n return trainer\n\n\nclass Trainer(object):\n \"\"\"\n Class that controls the training process.\n\n Args:\n model(:py:class:`onmt.models.model.NMTModel`): translation model\n to train\n train_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n optim(:obj:`onmt.utils.optimizers.Optimizer`):\n the optimizer responsible for update\n trunc_size(int): length of truncated back propagation through time\n shard_size(int): compute loss in shards of this size for efficiency\n data_type(string): type of the source input: [text|img|audio]\n norm_method(string): normalization methods: [sents|tokens]\n grad_accum_count(int): accumulate gradients this many times.\n report_manager(:obj:`onmt.utils.ReportMgrBase`):\n the object that creates reports, or None\n model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is\n used to save a checkpoint.\n Thus nothing will be saved if this parameter is None\n \"\"\"\n\n def __init__(self, args, model, optim,\n grad_accum_count=1, n_gpu=1, gpu_rank=1,\n report_manager=None):\n # Basic attributes.\n self.args = args\n self.save_checkpoint_steps = args.save_checkpoint_steps\n self.model = model\n self.optim = optim\n self.grad_accum_count = grad_accum_count\n self.n_gpu = n_gpu\n self.gpu_rank = gpu_rank\n self.report_manager = report_manager\n\n self.loss = torch.nn.BCELoss(reduction='none')\n assert grad_accum_count > 0\n # Set model in training mode.\n if (model):\n self.model.train()\n\n def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):\n \"\"\"\n The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. 
something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n\n Return:\n None\n \"\"\"\n logger.info('Start training...')\n\n # step = self.optim._step + 1\n step = self.optim._step + 1\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = Statistics()\n report_stats = Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n while step <= train_steps:\n\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n\n true_batchs.append(batch)\n normalization += batch.batch_size\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.n_gpu > 1:\n normalization = sum(distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):\n self._save(step)\n\n step += 1\n if step > train_steps:\n break\n train_iter = train_iter_fct()\n\n return total_stats\n\n def validate(self, valid_iter, step=0):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n # Set model in validating mode.\n self.model.eval()\n stats = Statistics()\n\n with torch.no_grad():\n for batch in valid_iter:\n src = batch.src\n labels = batch.src_sent_labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask_src\n mask_cls = batch.mask_cls\n\n sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)\n\n loss = self.loss(sent_scores, labels.float())\n loss = (loss * mask.float()).sum()\n batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))\n stats.update(batch_stats)\n self._report_step(0, step, valid_stats=stats)\n return stats\n\n def test(self, test_iter, step, cal_lead=False, cal_oracle=False):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n\n # Set model in validating mode.\n def _get_ngrams(n, text):\n ngram_set = set()\n text_length = len(text)\n max_index_ngram_start = text_length - n\n for i in range(max_index_ngram_start + 1):\n ngram_set.add(tuple(text[i:i + n]))\n return ngram_set\n\n def _block_tri(c, p):\n tri_c = _get_ngrams(3, c.split())\n for s in p:\n tri_s = _get_ngrams(3, s.split())\n if len(tri_c.intersection(tri_s)) > 0:\n return True\n return False\n\n if (not cal_lead and not cal_oracle):\n self.model.eval()\n stats = Statistics()\n\n can_path = '%s_step%d.candidate' % (self.args.result_path, step)\n gold_path = '%s_step%d.gold' % (self.args.result_path, step)\n ##\n src_path = '%s_step%d.src' % (self.args.result_path, step)\n f = open(src_path, 'w')\n ##\n sent_no = 0\n with open(can_path, 'w') as save_pred:\n with open(gold_path, 'w') as save_gold:\n with torch.no_grad():\n for batch in test_iter:\n src = batch.src\n labels = batch.src_sent_labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask_src\n mask_cls = batch.mask_cls\n\n gold = []\n pred = []\n src_fix = []\n\n if (cal_lead):\n selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size\n elif (cal_oracle):\n selected_ids 
= [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in\n range(batch.batch_size)]\n else:\n sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)\n if labels.float().size()[1] != 0:\n loss = self.loss(sent_scores, labels.float())\n else:\n continue\n loss = (loss * mask.float()).sum()\n batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))\n stats.update(batch_stats)\n\n sent_scores = sent_scores + mask.float()\n sent_scores = sent_scores.cpu().data.numpy()\n selected_ids = np.argsort(-sent_scores, 1)\n\n if len(selected_ids[0]) < 7:\n continue\n # selected_ids = np.sort(selected_ids,1)\n for i, idx in enumerate(selected_ids):\n _pred = []\n if (len(batch.src_str[i]) == 0):\n continue\n for j in selected_ids[i][:len(batch.src_str[i])]:\n if (j >= len(batch.src_str[i])):\n continue\n candidate = batch.src_str[i][j].strip()\n if (self.args.block_trigram):\n if (not _block_tri(candidate, _pred)):\n _pred.append(candidate)\n else:\n _pred.append(candidate)\n\n if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):\n break\n\n _pred = '<q>'.join(_pred)\n if (self.args.recall_eval):\n _pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])\n\n pred.append(_pred)\n gold.append(batch.tgt_str[i])\n src_fix.append(batch.src_str[i])\n sent_no += 1\n # print(sent_no)\n\n # print('gold', gold)\n # print(gold_path)\n\n for i in range(len(gold)):\n save_gold.write(str(sent_no) + \"_\" + str(i) + ': ' + gold[i].strip() + '\\n')\n for i in range(len(pred)):\n save_pred.write(str(sent_no) + \"_\" + str(i) + ': ' + pred[i].strip() + '\\n')\n for i in range(len(pred)):\n f.write(str(sent_no) + \"_\" + str(i) + ': ' + '###'.join(src_fix[i]).strip()+'\\n')\n f.close()\n if (step != -1 and self.args.report_rouge):\n rouges = test_rouge(self.args.temp_dir, can_path, gold_path)\n logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n self._report_step(0, step, valid_stats=stats)\n\n return stats\n\n def _gradient_accumulation(self, true_batchs, normalization, total_stats,\n report_stats):\n if self.grad_accum_count > 1:\n self.model.zero_grad()\n\n for batch in true_batchs:\n if self.grad_accum_count == 1:\n self.model.zero_grad()\n\n src = batch.src\n labels = batch.src_sent_labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask_src\n mask_cls = batch.mask_cls\n\n sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)\n\n loss = self.loss(sent_scores, labels.float())\n loss = (loss * mask.float()).sum()\n (loss / loss.numel()).backward()\n # loss.div(float(normalization)).backward()\n\n batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)\n\n total_stats.update(batch_stats)\n report_stats.update(batch_stats)\n\n # 4. 
Update the parameters and statistics.\n if self.grad_accum_count == 1:\n # Multi GPU gradient gather\n if self.n_gpu > 1:\n grads = [p.grad.data for p in self.model.parameters()\n if p.requires_grad\n and p.grad is not None]\n distributed.all_reduce_and_rescale_tensors(\n grads, float(1))\n self.optim.step()\n\n # in case of multi step gradient accumulation,\n # update only after accum batches\n if self.grad_accum_count > 1:\n if self.n_gpu > 1:\n grads = [p.grad.data for p in self.model.parameters()\n if p.requires_grad\n and p.grad is not None]\n distributed.all_reduce_and_rescale_tensors(\n grads, float(1))\n self.optim.step()\n\n def _save(self, step):\n real_model = self.model\n # real_generator = (self.generator.module\n # if isinstance(self.generator, torch.nn.DataParallel)\n # else self.generator)\n\n model_state_dict = real_model.state_dict()\n # generator_state_dict = real_generator.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n # 'generator': generator_state_dict,\n 'opt': self.args,\n 'optims': self.optim,\n }\n checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)\n logger.info(\"Saving checkpoint %s\" % checkpoint_path)\n # checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)\n if (not os.path.exists(checkpoint_path)):\n torch.save(checkpoint, checkpoint_path)\n return checkpoint, checkpoint_path\n\n def _start_report_manager(self, start_time=None):\n \"\"\"\n Simple function to start report manager (if any)\n \"\"\"\n if self.report_manager is not None:\n if start_time is None:\n self.report_manager.start()\n else:\n self.report_manager.start_time = start_time\n\n def _maybe_gather_stats(self, stat):\n \"\"\"\n Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n if stat is not None and self.n_gpu > 1:\n return Statistics.all_gather_stats(stat)\n return stat\n\n def _maybe_report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, learning_rate, report_stats,\n multigpu=self.n_gpu > 1)\n\n def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n \"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)\n\n def _maybe_save(self, step):\n \"\"\"\n Save the model if a model saver is set\n \"\"\"\n if self.model_saver is not None:\n self.model_saver.maybe_save(step)\n"
] | [
[
"torch.nn.BCELoss",
"torch.save",
"torch.no_grad",
"numpy.argsort"
]
] |
knowledgevis/large_image | [
"ab5c213d3a68de8a2144707fc0dc1115d1e4664f"
] | [
"test/test_source_gdal.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport glob\nimport json\nimport numpy\nimport os\nimport PIL.Image\nimport PIL.ImageChops\nimport pytest\nimport six\n\nfrom large_image import constants\nfrom large_image.exceptions import TileSourceException\n\nimport large_image_source_gdal\n\nfrom . import utilities\n\n\ndef _assertImageMatches(image, testRootName, saveTestImageFailurePath='/tmp'):\n \"\"\"\n Check if an image matches any of a set of images.\n\n Adapted from:\n https://stackoverflow.com/questions/35176639/compare-images-python-pil\n\n :param image: PIL image to compare or a binary string of the image.\n :param testRootName: base name of the images to test. These images are\n globbed in test_files/<testRootName>*.png.\n :param saveTestImageFailurePath: if the image doesn't match any of the\n test images, if this value is set, save the image to make it easier\n to determine why it failed.\n \"\"\"\n if isinstance(image, six.binary_type):\n image = PIL.Image.open(six.BytesIO(image))\n image = image.convert('RGBA')\n testDir = os.path.dirname(os.path.realpath(__file__))\n testImagePaths = glob.glob(os.path.join(\n testDir, 'test_files', testRootName + '*.png'))\n testImages = [PIL.Image.open(testImagePath).convert('RGBA')\n for testImagePath in testImagePaths]\n diffs = [PIL.ImageChops.difference(image, testImage).getbbox()\n for testImage in testImages]\n if None not in diffs and saveTestImageFailurePath:\n image.save(os.path.join(saveTestImageFailurePath, testRootName + '_test.png'))\n assert None in diffs\n\n\ndef testTileFromGeotiffs():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n tileMetadata = source.getMetadata()\n\n assert tileMetadata['tileWidth'] == 256\n assert tileMetadata['tileHeight'] == 256\n assert tileMetadata['sizeX'] == 256\n assert tileMetadata['sizeY'] == 256\n assert tileMetadata['levels'] == 1\n assert tileMetadata['bounds']['xmax'] == 597915.0\n assert tileMetadata['bounds']['xmin'] == 367185.0\n assert tileMetadata['bounds']['ymax'] == 3788115.0\n assert tileMetadata['bounds']['ymin'] == 3552885.0\n assert (tileMetadata['bounds']['srs'].strip() ==\n '+proj=utm +zone=11 +datum=WGS84 +units=m +no_defs')\n assert tileMetadata['geospatial']\n # Check that we read some band data, too\n assert len(tileMetadata['bands']) == 3\n assert tileMetadata['bands'][2]['interpretation'] == 'green'\n assert tileMetadata['bands'][2]['max'] == 212.0\n assert tileMetadata['bands'][2]['min'] == 0.0\n\n # Getting the metadata with a specified projection will be different\n source = large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857')\n tileMetadata = source.getMetadata()\n\n assert tileMetadata['tileWidth'] == 256\n assert tileMetadata['tileHeight'] == 256\n assert tileMetadata['sizeX'] == 65536\n assert tileMetadata['sizeY'] == 65536\n assert tileMetadata['levels'] == 9\n assert tileMetadata['bounds']['xmax'] == pytest.approx(-12906033, 1)\n assert tileMetadata['bounds']['xmin'] == pytest.approx(-13184900, 1)\n assert tileMetadata['bounds']['ymax'] == pytest.approx(4059661, 1)\n assert tileMetadata['bounds']['ymin'] == pytest.approx(3777034, 1)\n assert tileMetadata['bounds']['srs'] == '+init=epsg:3857'\n assert tileMetadata['geospatial']\n\n source = large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857', style=json.dumps({'band': -1}), encoding='PNG')\n image = source.getTile(89, 207, 9)\n 
_assertImageMatches(image, 'geotiff_9_89_207')\n\n\ndef testTileLinearStyleFromGeotiffs():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n style = json.dumps({'band': 1, 'min': 0, 'max': 100,\n 'palette': 'matplotlib.Plasma_6',\n 'scheme': 'linear'})\n source = large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857', style=style, encoding='PNG')\n image = source.getTile(22, 51, 7)\n _assertImageMatches(image, 'geotiff_style_linear_7_22_51')\n\n\ndef testTileStyleBadInput():\n def _assertStyleResponse(imagePath, style, message):\n with pytest.raises(TileSourceException, match=message):\n source = large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857', style=json.dumps(style), encoding='PNG')\n source.getTile(22, 51, 7)\n\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n\n _assertStyleResponse(imagePath, {\n 'band': 1.1,\n }, 'Band has to be a positive integer, -1, or a band interpretation found in the source.')\n\n _assertStyleResponse(imagePath, {\n 'band': 500,\n }, 'Band has to be a positive integer, -1, or a band interpretation found in the source.')\n\n _assertStyleResponse(imagePath, {\n 'band': 1,\n 'palette': 'nonexistent.palette'\n }, 'Palette is not a valid palettable path.')\n\n _assertStyleResponse(imagePath, ['style'],\n 'Style is not a valid json object.')\n\n\ndef testThumbnailFromGeotiffs():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n # We get a thumbnail without a projection\n image, mimeType = source.getThumbnail(encoding='PNG')\n assert image[:len(utilities.PNGHeader)] == utilities.PNGHeader\n # We get a different thumbnail with a projection\n source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')\n image2, mimeType = source.getThumbnail(encoding='PNG')\n assert image2[:len(utilities.PNGHeader)] == utilities.PNGHeader\n assert image != image2\n\n\ndef testPixel():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n\n # Test in pixel coordinates\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n pixel = source.getPixel(region={'left': 212, 'top': 198})\n assert pixel == {\n 'r': 76, 'g': 78, 'b': 77, 'a': 255, 'bands': {1: 62.0, 2: 65.0, 3: 66.0}}\n pixel = source.getPixel(region={'left': 2120, 'top': 198})\n assert pixel == {}\n\n # Test with a projection\n source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')\n pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})\n assert pixel == {\n 'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}\n\n # Test with styles\n style = json.dumps({'band': 1, 'min': 0, 'max': 100,\n 'palette': 'matplotlib.Plasma_6'})\n source = large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857', style=style)\n pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})\n assert pixel == {\n 'r': 247, 'g': 156, 'b': 60, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}\n\n # Test with palette as an array of colors\n style = json.dumps({'band': 1, 'min': 0, 'max': 100,\n 'palette': ['#0000ff', '#00ff00', '#ff0000']})\n source = 
large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857', style=style)\n pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})\n assert pixel == {\n 'r': 137, 'g': 117, 'b': 0, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}\n\n # Test with projection units\n source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')\n pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'EPSG:3857'})\n assert pixel == {\n 'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}\n pixel = source.getPixel(region={'left': -117.975, 'top': 33.865, 'units': 'WGS84'})\n assert pixel == {\n 'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}\n # When the tile has a different projection, the pixel is the same as\n # the band values.\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'EPSG:3857'})\n assert pixel == {\n 'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}\n\n\ndef testSourceErrors():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n with pytest.raises(TileSourceException, match='must not be geographic'):\n large_image_source_gdal.GDALFileTileSource(imagePath, 'EPSG:4326')\n imagePath = os.path.join(testDir, 'test_files', 'zero_gi.tif')\n with pytest.raises(TileSourceException, match='cannot be opened via'):\n large_image_source_gdal.GDALFileTileSource(imagePath)\n imagePath = os.path.join(testDir, 'test_files', 'yb10kx5k.png')\n with pytest.raises(TileSourceException, match='does not have a projected scale'):\n large_image_source_gdal.GDALFileTileSource(imagePath)\n\n\ndef testStereographicProjection():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n # We will fail if we ask for a stereographic projection and don't\n # specify unitsPerPixel\n with pytest.raises(TileSourceException, match='unitsPerPixel must be specified'):\n large_image_source_gdal.GDALFileTileSource(imagePath, 'EPSG:3411')\n # But will pass if unitsPerPixel is specified\n large_image_source_gdal.GDALFileTileSource(imagePath, 'EPSG:3411', unitsPerPixel=150000)\n\n\ndef testProj4Proj():\n # Test obtaining pyproj.Proj projection values\n proj4Proj = large_image_source_gdal.GDALFileTileSource._proj4Proj\n\n proj = proj4Proj(b'epsg:4326')\n assert proj4Proj(u'epsg:4326').srs == proj.srs\n assert proj4Proj('proj4:EPSG:4326').srs == proj.srs\n assert proj4Proj(4326) is None\n\n\ndef testConvertProjectionUnits():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n tsNoProj = large_image_source_gdal.GDALFileTileSource(imagePath)\n\n result = tsNoProj._convertProjectionUnits(\n -13024380, 3895303, None, None, None, None, 'EPSG:3857')\n assert result[0] == pytest.approx(147, 1)\n assert result[1] == pytest.approx(149, 1)\n assert result[2:] == (None, None, 'base_pixels')\n\n result = tsNoProj._convertProjectionUnits(\n None, None, -13080040, 3961860, None, None, 'EPSG:3857')\n assert result[2] == pytest.approx(96, 1)\n assert result[3] == pytest.approx(88, 1)\n assert result[:2] == (None, None)\n result = tsNoProj._convertProjectionUnits(\n -117.5, 33, None, None, 0.5, 0.5, 'EPSG:4326')\n assert result[0] == pytest.approx(96, 1)\n assert result[1] == 
pytest.approx(149, 1)\n assert result[2] == pytest.approx(147, 1)\n assert result[3] == pytest.approx(89, 1)\n result = tsNoProj._convertProjectionUnits(\n None, None, -117, 33.5, 0.5, 0.5, 'EPSG:4326')\n assert result[0] == pytest.approx(96, 1)\n assert result[1] == pytest.approx(149, 1)\n assert result[2] == pytest.approx(147, 1)\n assert result[3] == pytest.approx(89, 1)\n result = tsNoProj._convertProjectionUnits(\n -117.5, 33, None, None, 0.5, 0.5, 'EPSG:4326', unitsWH='base_pixels')\n assert result[0] == pytest.approx(96, 1)\n assert result[1] == pytest.approx(149, 1)\n assert result[2:] == (None, None, 'base_pixels')\n\n with pytest.raises(TileSourceException, match='Cannot convert'):\n tsNoProj._convertProjectionUnits(\n -117.5, None, -117, None, None, None, 'EPSG:4326')\n\n tsProj = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')\n result = tsProj._convertProjectionUnits(\n -13024380, 3895303, None, None, None, None, 'EPSG:3857')\n assert result[0] == pytest.approx(-13024380, 1)\n assert result[1] == pytest.approx(3895303, 1)\n assert result[2:] == (None, None, 'projection')\n\n\ndef testGuardAgainstBadLatLong():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'global_dem.tif')\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n bounds = source.getBounds(srs='EPSG:4326')\n\n assert bounds['xmin'] == -180.00416667\n assert bounds['xmax'] == 179.99583333\n assert bounds['ymin'] == -89.99583333\n assert bounds['ymax'] == 90\n\n\ndef testPalettizedGeotiff():\n imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n tileMetadata = source.getMetadata()\n assert tileMetadata['tileWidth'] == 256\n assert tileMetadata['tileHeight'] == 256\n assert tileMetadata['sizeX'] == 687\n assert tileMetadata['sizeY'] == 509\n assert tileMetadata['levels'] == 3\n assert tileMetadata['bounds']['srs'].strip().startswith(\n '+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0')\n assert tileMetadata['geospatial']\n assert len(tileMetadata['bands']) == 1\n assert tileMetadata['bands'][1]['interpretation'] == 'palette'\n # Getting the metadata with a specified projection will be different\n source = large_image_source_gdal.GDALFileTileSource(\n imagePath, projection='EPSG:3857', encoding='PNG')\n tileMetadata = source.getMetadata()\n assert tileMetadata['tileWidth'] == 256\n assert tileMetadata['tileHeight'] == 256\n assert tileMetadata['sizeX'] == 65536\n assert tileMetadata['sizeY'] == 65536\n assert tileMetadata['levels'] == 9\n assert tileMetadata['bounds']['xmax'] == pytest.approx(-7837888, 1)\n assert tileMetadata['bounds']['xmin'] == pytest.approx(-8909162, 1)\n assert tileMetadata['bounds']['ymax'] == pytest.approx(5755717, 1)\n assert tileMetadata['bounds']['ymin'] == pytest.approx(4876273, 1)\n assert tileMetadata['bounds']['srs'] == '+init=epsg:3857'\n assert tileMetadata['geospatial']\n image = source.getTile(37, 46, 7)\n image = PIL.Image.open(six.BytesIO(image))\n image = numpy.asarray(image)\n assert list(image[0, 0, :]) == [0, 0, 0, 0]\n assert list(image[255, 0, :]) == [221, 201, 201, 255]\n\n\ndef testRetileProjection():\n imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')\n ts = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')\n ti = ts.getSingleTile(tile_size=dict(width=1000, height=1000), tile_position=1000)\n assert 
ti['tile'].size == 3000000\n tile = ts.getTile(1178, 1507, 12)\n assert len(tile) > 1000\n\n\ndef testInternalMetadata():\n testDir = os.path.dirname(os.path.realpath(__file__))\n imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')\n source = large_image_source_gdal.GDALFileTileSource(imagePath)\n metadata = source.getInternalMetadata()\n assert metadata['driverShortName'] == 'GTiff'\n\n\ndef testGetRegionWithProjection():\n imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')\n ts = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')\n region, _ = ts.getRegion(output=dict(maxWidth=1024, maxHeight=1024),\n format=constants.TILE_FORMAT_NUMPY)\n assert region.shape == (1024, 1024, 4)\n"
] | [
[
"numpy.asarray"
]
] |
shikisawamura/nnabla-examples | [
"070d25078ad3d5458744dbfd390cdd926e20e573",
"baf4e4cc620dedbf4368683325c0fb868676850d",
"baf4e4cc620dedbf4368683325c0fb868676850d"
] | [
"GANs/stargan/generate.py",
"mnist-collection/dcgan.py",
"shape-reconstruction/implicit-geometric-regularization/datasets.py"
] | [
"# Copyright (c) 2019 Sony Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport argparse\nimport nnabla as nn\nfrom nnabla.ext_utils import get_extension_context\nimport numpy as np\nimport json\nimport glob\nimport model\nfrom nnabla.utils.image_utils import imread, imsave, imresize\nimport functools\n\n\ndef saveimage(path, img):\n img = (img * 0.5) + 0.5\n imsave(path, img, channel_first=True)\n\n\ndef save_results(i, args, used_config, img_trg, lbl_trg):\n target_attr_flags = lbl_trg.d[0].reshape(lbl_trg.d[0].size)\n target_domain = \"_\".join([attr for idx, attr in zip(\n target_attr_flags, used_config[\"selected_attrs\"]) if bool(idx) is True])\n result_x = img_trg.d[0]\n filename = os.path.join(args.result_save_path,\n \"generated_{}_{}.png\".format(i, target_domain))\n saveimage(filename, result_x)\n print(\"Saved {}.\".format(filename))\n return\n\n\ndef img_preprocess(img_paths, used_config):\n\n image_size = used_config[\"image_size\"]\n images = list()\n image_names = list()\n\n for img_path in img_paths:\n # Load (and resize) image and labels.\n image = imread(img_path, num_channels=3, channel_first=True)\n if image.dtype == np.uint8:\n # Clip image's value from [0, 255] -> [0.0, 1.0]\n image = image / 255.0\n image = (image - 0.5) / 0.5 # Normalize\n image = imresize(image, (image_size, image_size),\n interpolate='bilinear', channel_first=True)\n images.append(image)\n image_names.append(img_path.split(\"/\")[-1])\n\n return np.asarray(images), np.asarray(image_names)\n\n\ndef get_user_input(used_config):\n label = [0 for _ in range(used_config[\"c_dim\"])]\n choice = used_config[\"selected_attrs\"]\n for i, c in enumerate(choice):\n print(\"Use '{}'?\".format(c))\n while 1:\n ans = input(\"type yes or no: \")\n if ans in [\"yes\", \"no\"]:\n label[i] = 1 if ans == \"yes\" else 0\n break\n else:\n print(\"type 'yes' or 'no'.\")\n #label[i] = int(bool(input(\"if yes, type 1, if not, just press enter:\")))\n return np.array(label)\n\n\ndef generate(args):\n\n # Load the config data used for training.\n with open(args.config, \"r\") as f:\n used_config = json.load(f)\n\n paramfile = args.pretrained_params\n img_paths = glob.glob(os.path.join(args.test_image_path, \"*.png\"))\n assert os.path.isfile(paramfile) and paramfile.split(\n \"/\")[-1] == used_config[\"pretrained_params\"], \"Corresponding parameter file not found.\"\n\n print(\"Learned attributes choice: {}\".format(\n used_config[\"selected_attrs\"]))\n\n # Prepare Generator and Discriminator based on user config.\n generator = functools.partial(\n model.generator, conv_dim=used_config[\"g_conv_dim\"], c_dim=used_config[\"c_dim\"], repeat_num=used_config[\"g_repeat_num\"])\n\n x_real = nn.Variable(\n [1, 3, used_config[\"image_size\"], used_config[\"image_size\"]])\n label_trg = nn.Variable([1, used_config[\"c_dim\"], 1, 1])\n with nn.parameter_scope(\"gen\"):\n x_fake = generator(x_real, label_trg)\n x_fake.persistent = True\n\n nn.load_parameters(paramfile) # load 
learned parameters.\n\n images, image_names = img_preprocess(img_paths, used_config)\n\n for i, (image, image_name) in enumerate(zip(images, image_names)):\n # Get real images.\n print(\"Source image: {}\".format(image_name))\n x_real.d = image\n\n # Generate target domain based on user input.\n label_trg.d = np.reshape(get_user_input(used_config), label_trg.shape)\n\n # Execute image translation.\n x_fake.forward(clear_no_need_grad=True)\n\n save_results(i, args, used_config, x_fake, label_trg)\n\n\ndef get_args():\n\n parser = argparse.ArgumentParser()\n\n # Generation\n parser.add_argument('--context', '-c', type=str,\n default='cudnn', help=\"Extension path. ex) cpu, cudnn.\")\n parser.add_argument(\"--device-id\", \"-d\", type=str, default='0',\n help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')\n parser.add_argument(\"--type-config\", \"-t\", type=str, default='float',\n help='Type of computation. e.g. \"float\", \"half\".')\n parser.add_argument('--test-image-path', type=str,\n help='a directory containing images used for image translation')\n parser.add_argument('--result-save-path', type=str,\n default=\"tmp.results\", help='a directory to save generated images')\n parser.add_argument('--pretrained-params', type=str, required=True,\n help='path to the parameters used for generation.')\n parser.add_argument('--config', type=str, required=True,\n help='path to the config file used for generation.')\n\n args = parser.parse_args()\n if not os.path.isdir(args.result_save_path):\n os.makedirs(args.result_save_path)\n\n return args\n\n\ndef main():\n args = get_args()\n ctx = get_extension_context(\n args.context, device_id=args.device_id, type_config=args.type_config)\n nn.set_default_context(ctx)\n generate(args)\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) 2017 Sony Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom six.moves import range\n\nimport numpy as np\n\nimport nnabla as nn\nimport nnabla.logger as logger\nimport nnabla.functions as F\nimport nnabla.parametric_functions as PF\nimport nnabla.solvers as S\nimport nnabla.utils.save as save\n\nfrom args import get_args\nfrom mnist_data import data_iterator_mnist\nfrom _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp\n\nimport os\n\n\ndef generator(z, maxh=256, test=False, output_hidden=False):\n \"\"\"\n Building generator network which takes (B, Z, 1, 1) inputs and generates\n (B, 1, 28, 28) outputs.\n \"\"\"\n # Define shortcut functions\n def bn(x):\n # Batch normalization\n return PF.batch_normalization(x, batch_stat=not test)\n\n def upsample2(x, c):\n # Twice upsampling with deconvolution.\n return PF.deconvolution(x, c, kernel=(4, 4), pad=(1, 1), stride=(2, 2), with_bias=False)\n\n assert maxh / 4 > 0\n with nn.parameter_scope(\"gen\"):\n # (Z, 1, 1) --> (256, 4, 4)\n with nn.parameter_scope(\"deconv1\"):\n d1 = F.elu(bn(PF.deconvolution(z, maxh, (4, 4), with_bias=False)))\n # (256, 4, 4) --> (128, 8, 8)\n with nn.parameter_scope(\"deconv2\"):\n d2 = F.elu(bn(upsample2(d1, maxh / 2)))\n # (128, 8, 8) --> (64, 16, 16)\n with nn.parameter_scope(\"deconv3\"):\n d3 = F.elu(bn(upsample2(d2, maxh / 4)))\n # (64, 16, 16) --> (32, 28, 28)\n with nn.parameter_scope(\"deconv4\"):\n # Convolution with kernel=4, pad=3 and stride=2 transforms a 28 x 28 map\n # to a 16 x 16 map. Deconvolution with those parameters behaves like an\n # inverse operation, i.e. 
maps 16 x 16 to 28 x 28.\n d4 = F.elu(bn(PF.deconvolution(\n d3, maxh / 8, (4, 4), pad=(3, 3), stride=(2, 2), with_bias=False)))\n # (32, 28, 28) --> (1, 28, 28)\n with nn.parameter_scope(\"conv5\"):\n x = F.tanh(PF.convolution(d4, 1, (3, 3), pad=(1, 1)))\n if output_hidden:\n return x, [d1, d2, d3, d4]\n return x\n\n\ndef discriminator(x, maxh=256, test=False, output_hidden=False):\n \"\"\"\n Building discriminator network which maps a (B, 1, 28, 28) input to\n a (B, 1).\n \"\"\"\n # Define shortcut functions\n def bn(xx):\n # Batch normalization\n return PF.batch_normalization(xx, batch_stat=not test)\n\n def downsample2(xx, c):\n return PF.convolution(xx, c, (3, 3), pad=(1, 1), stride=(2, 2), with_bias=False)\n\n assert maxh / 8 > 0\n with nn.parameter_scope(\"dis\"):\n # (1, 28, 28) --> (32, 16, 16)\n with nn.parameter_scope(\"conv1\"):\n c1 = F.elu(bn(PF.convolution(x, maxh / 8,\n (3, 3), pad=(3, 3), stride=(2, 2), with_bias=False)))\n # (32, 16, 16) --> (64, 8, 8)\n with nn.parameter_scope(\"conv2\"):\n c2 = F.elu(bn(downsample2(c1, maxh / 4)))\n # (64, 8, 8) --> (128, 4, 4)\n with nn.parameter_scope(\"conv3\"):\n c3 = F.elu(bn(downsample2(c2, maxh / 2)))\n # (128, 4, 4) --> (256, 4, 4)\n with nn.parameter_scope(\"conv4\"):\n c4 = bn(PF.convolution(c3, maxh, (3, 3),\n pad=(1, 1), with_bias=False))\n # (256, 4, 4) --> (1,)\n with nn.parameter_scope(\"fc1\"):\n f = PF.affine(c4, 1)\n if output_hidden:\n return f, [c1, c2, c3, c4]\n return f\n\n\ndef train(args):\n \"\"\"\n Main script.\n \"\"\"\n\n # Get context.\n from nnabla.ext_utils import get_extension_context\n logger.info(\"Running in %s\" % args.context)\n ctx = get_extension_context(\n args.context, device_id=args.device_id, type_config=args.type_config)\n nn.set_default_context(ctx)\n\n # Create CNN network for both training and testing.\n # TRAIN\n\n # Fake path\n z = nn.Variable([args.batch_size, 100, 1, 1])\n fake = generator(z)\n fake.persistent = True # Not to clear at backward\n pred_fake = discriminator(fake)\n loss_gen = F.mean(F.sigmoid_cross_entropy(\n pred_fake, F.constant(1, pred_fake.shape)))\n fake_dis = fake.get_unlinked_variable(need_grad=True)\n fake_dis.need_grad = True # TODO: Workaround until v1.0.2\n pred_fake_dis = discriminator(fake_dis)\n loss_dis = F.mean(F.sigmoid_cross_entropy(\n pred_fake_dis, F.constant(0, pred_fake_dis.shape)))\n\n # Real path\n x = nn.Variable([args.batch_size, 1, 28, 28])\n pred_real = discriminator(x)\n loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,\n F.constant(1, pred_real.shape)))\n\n # Create Solver.\n solver_gen = S.Adam(args.learning_rate, beta1=0.5)\n solver_dis = S.Adam(args.learning_rate, beta1=0.5)\n with nn.parameter_scope(\"gen\"):\n solver_gen.set_parameters(nn.get_parameters())\n with nn.parameter_scope(\"dis\"):\n solver_dis.set_parameters(nn.get_parameters())\n start_point = 0\n\n if args.checkpoint is not None:\n # load weights and solver state info from specified checkpoint files.\n start_point = load_checkpoint(\n args.checkpoint, {\"gen\": solver_gen, \"dis\": solver_dis})\n\n # Create monitor.\n import nnabla.monitor as M\n monitor = M.Monitor(args.monitor_path)\n monitor_loss_gen = M.MonitorSeries(\"Generator loss\", monitor, interval=10)\n monitor_loss_dis = M.MonitorSeries(\n \"Discriminator loss\", monitor, interval=10)\n monitor_time = M.MonitorTimeElapsed(\"Time\", monitor, interval=100)\n monitor_fake = M.MonitorImageTile(\n \"Fake images\", monitor, normalize_method=lambda x: (x + 1) / 2.)\n\n data = data_iterator_mnist(args.batch_size, 
True)\n\n # Save_nnp\n contents = save_nnp({'x': z}, {'y': fake}, args.batch_size)\n save.save(os.path.join(args.model_save_path,\n 'Generator_result_epoch0.nnp'), contents)\n contents = save_nnp({'x': x}, {'y': pred_real}, args.batch_size)\n save.save(os.path.join(args.model_save_path,\n 'Discriminator_result_epoch0.nnp'), contents)\n\n # Training loop.\n for i in range(start_point, args.max_iter):\n if i % args.model_save_interval == 0:\n save_checkpoint(args.model_save_path, i, {\n \"gen\": solver_gen, \"dis\": solver_dis})\n\n # Training forward\n image, _ = data.next()\n x.d = image / 255. - 0.5 # [0, 255] to [-1, 1]\n z.d = np.random.randn(*z.shape)\n\n # Generator update.\n solver_gen.zero_grad()\n loss_gen.forward(clear_no_need_grad=True)\n loss_gen.backward(clear_buffer=True)\n solver_gen.weight_decay(args.weight_decay)\n solver_gen.update()\n monitor_fake.add(i, fake)\n monitor_loss_gen.add(i, loss_gen.d.copy())\n\n # Discriminator update.\n solver_dis.zero_grad()\n loss_dis.forward(clear_no_need_grad=True)\n loss_dis.backward(clear_buffer=True)\n solver_dis.weight_decay(args.weight_decay)\n solver_dis.update()\n monitor_loss_dis.add(i, loss_dis.d.copy())\n monitor_time.add(i)\n\n with nn.parameter_scope(\"gen\"):\n nn.save_parameters(os.path.join(\n args.model_save_path, \"generator_param_%06d.h5\" % i))\n with nn.parameter_scope(\"dis\"):\n nn.save_parameters(os.path.join(\n args.model_save_path, \"discriminator_param_%06d.h5\" % i))\n\n # Save_nnp\n contents = save_nnp({'x': z}, {'y': fake}, args.batch_size)\n save.save(os.path.join(args.model_save_path,\n 'Generator_result.nnp'), contents)\n contents = save_nnp({'x': x}, {'y': pred_real}, args.batch_size)\n save.save(os.path.join(args.model_save_path,\n 'Discriminator_result.nnp'), contents)\n\n\nif __name__ == '__main__':\n monitor_path = 'tmp.monitor.dcgan'\n args = get_args(monitor_path=monitor_path, model_save_path=monitor_path,\n max_iter=20000, learning_rate=0.0002, batch_size=64,\n weight_decay=0.0001)\n train(args)\n",
"# Copyright (c) 2017-2020 Sony Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\n\nimport glob\nimport os\nimport numpy as np\nfrom scipy import spatial\nimport open3d as o3d\n\nimport nnabla as nn\nimport nnabla.logger as logger\nimport nnabla.monitor as M\nimport nnabla.functions as F\nimport nnabla.parametric_functions as PF\nfrom nnabla.utils.data_iterator import data_iterator, data_iterator_simple\nfrom nnabla.utils.data_source import DataSource\nimport utils\n\nfrom args import get_args\n\n\nclass PointCloudDataSource(DataSource):\n\n def __init__(self, fpath, knn=50, test_rate=0.25, test=False, shuffle=True, rng=None):\n super(PointCloudDataSource, self).__init__(shuffle=shuffle)\n self.knn = knn\n self.test_rate = 0.25\n self.rng = np.random.RandomState(313) if rng is None else rng\n\n # Split info\n pcd = self._read_dataset(fpath)\n total_size = len(pcd.points)\n test_size = int(total_size * test_rate)\n indices = self.rng.permutation(total_size)\n test_indices = indices[:test_size]\n train_indices = indices[test_size:]\n indices = test_indices if test else train_indices\n self._size = test_size if test else total_size - test_size\n # Points\n points = np.asarray(pcd.points)\n self._points = self._preprocess(points)[indices]\n # Normals\n normals = np.asarray(pcd.normals)\n self._normals = normals[indices] if self.has_normals(\n normals) else normals\n # Radius\n self._radius = self._compute_radius(self._points, self.knn)\n self._variables = ('points', 'normals', 'radius')\n self.reset()\n\n logger.info(\"Data size = {}\".format(self._size))\n\n def has_normals(self, normals):\n return False if normals.shape[0] == 0 else True\n\n def _preprocess(self, points):\n return utils.normalize(points)\n\n def _compute_radius(self, points, knn):\n if knn < 0:\n logger.info(\"Radius is not computed.\")\n return\n # KDTree\n logger.info(\n \"Constructing KDTree and querying {}-nearest neighbors\".format(self.knn))\n tree = spatial.cKDTree(points, compact_nodes=True)\n # exclude self by adding 1\n dists, indices = tree.query(points, k=knn + 1)\n return dists[:, -1].reshape(dists.shape[0], 1)\n\n def _read_dataset(self, fpath):\n pcd = utils.read_pcd(fpath)\n return pcd\n\n def _get_data(self, position):\n points = self._points[self._indices[position]]\n normals = self._normals[self._indices[position]\n ] if self.has_normals(self._normals) else [0.0]\n radius = self._radius[self._indices[position]]\n return points, normals, radius\n\n @property\n def points(self):\n return self._points\n\n @property\n def normals(self):\n return self._normals\n\n @property\n def radius(self):\n return self._radius\n\n def reset(self):\n self._indices = self.rng.permutation(self._size) \\\n if self._shuffle else np.arange(self._size)\n return super(PointCloudDataSource, self).reset()\n\n\ndef point_cloud_data_source(fpath, knn=50, test_rate=0.25, test=False, shuffle=True, rng=None):\n return PointCloudDataSource(fpath, knn, test_rate, test, 
shuffle, rng)\n\n\ndef point_cloud_data_iterator(data_source, batch_size):\n return data_iterator(data_source, batch_size=batch_size,\n with_memory_cache=False,\n with_file_cache=False)\n\n\ndef create_pcd_dataset_from_mesh(fpath):\n mesh = utils.read_mesh(fpath)\n pcd = mesh.sample_points_poisson_disk(len(mesh.vertices),\n use_triangle_normal=True,\n seed=412)\n dpath = \"/\".join(fpath.split(\"/\")[:-1])\n fname = fpath.split(\"/\")[-1]\n fname = \"{}_pcd.ply\".format(os.path.splitext(fname)[0])\n fpath = os.path.join(dpath, fname)\n logger.info(\"PointCloud data ({}) is being created.\".format(fpath))\n utils.write_pcd(fpath, pcd)\n\n\ndef main():\n args = get_args()\n if args.command == \"create\":\n create_pcd_dataset_from_mesh(args.mesh_data_path)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.asarray"
],
[
"numpy.random.randn"
],
[
"numpy.random.RandomState",
"numpy.arange",
"scipy.spatial.cKDTree",
"numpy.asarray"
]
] |
OminiaVincit/qphase-trans | [
"40e0c078dcd74282e8d8f44690433bf670bff8cb"
] | [
"source/draw_ising_ph.py"
] | [
"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nimport time\nimport argparse\nfrom visual_utils import generate_listcol\nimport seaborn as sns\n\ndef calculate_npent(death_scales):\n sd = np.sum(death_scales)\n npent = 0\n for d in death_scales:\n dr = d/sd\n npent -= dr*np.log(dr)\n npent = npent/np.log(sd)\n return npent\n\nif __name__ == '__main__':\n # Check for command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--basename', type=str, default='exp_20200217_ising')\n parser.add_argument('--res', type=str, default='results')\n parser.add_argument('--dim', type=int, default=0)\n args = parser.parse_args()\n print(args)\n resname, basename, d = args.res, args.basename, args.dim\n plt.style.use('seaborn-colorblind')\n #cycles = plt.rcParams['axes.prop_cycle'].by_key()['color']\n cycles = generate_listcol(option=3)\n print(cycles)\n\n plt.rc('font', family='serif')\n plt.rc('mathtext', fontset='cm')\n plt.rcParams['font.size'] = 16\n \n\n gs = [0.2, 0.8, 1.0, 1.2, 1.8]\n N = len(gs)\n fig, axs = plt.subplots(1, N, figsize=(3*N, 2.8), squeeze=False, sharey=True)\n axs = axs.ravel()\n #ax.set_xlabel(r\"Transverse Field \" r\"$g$\", fontsize=24)\n\n mk = '_'\n lstyle = 'dashed'\n sz=80\n alpha=1.0\n Ls = [32, 64, 128, 256, 512, 1024]\n for j in range(len(gs)):\n ax = axs[j]\n g = gs[j]\n gidx = int((g - 0.1) / 0.05)\n for i in range(len(Ls)):\n L = Ls[i]\n phfile = '{}_L_{}_ph_dim_{}.txt'.format(basename, L, d)\n phfile = os.path.join(resname, phfile)\n print(phfile)\n if os.path.isfile(phfile):\n arr = np.loadtxt(phfile)\n death_scales, nlist = arr[:, 1], arr[:, 3]\n ids1 = (death_scales != np.inf)\n ids2 = (nlist == gidx)\n ids = ids1 * ids2\n death_scales = death_scales[ids]\n npent = calculate_npent(death_scales)\n print(arr.shape, gidx, len(death_scales), npent)\n sns.kdeplot(death_scales, legend=False, shade=True, color=cycles[i], ax=ax, label='$L$={}'.format(L))\n #sns.displot(death_scales[ids], bins=20, ax=ax)\n \n #ax.plot(glist, npent_list, linestyle=lstyle, label = 'e-{}'.format(L))\n #ax.plot(glist, pnorm_list, linestyle=lstyle, label = 'p-{}'.format(L))\n \n #ax.plot(glist, vals_list, linestyle='solid', marker='o', color=cols[i], alpha=alpha, linewidth=1.0, markersize=8, label='L={}'.format(L))\n #ax.scatter(glist, vals_list, s=sz, alpha=alpha, edgecolor='k', linewidths='1', label = 'L-{}'.format(L))\n #ax.scatter(glist, pnorm_list, s=sz, alpha=alpha, label = 'p-{}'.format(L))\n #ax.set_xlabel('Birth-scale')\n ax.set_ylabel('')\n ax.set_xticks([0.0, 0.5])\n ax.tick_params(direction='out', length=8)\n ax.set_xlim([0.0, 0.6])\n ax.set_ylim([0, 60])\n ax.set_title('$g$={},E={:.3f}'.format(g, npent))\n\n axs[0].legend(fontsize=10)\n #axs[0].set_ylabel('Density')\n \n for figtype in ['png', 'pdf', 'svg']:\n fig_ofile = os.path.join(resname, '{}_diagram_d_{}.{}'.format(basename,d, figtype))\n plt.savefig(fig_ofile, bbox_inches='tight', format=figtype)\n plt.show()\n \n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.loadtxt"
]
] |
blutooth/dgp | [
"bedbbc3595fbe124d7a06c3d6d64f9009304491e"
] | [
"GPflow/test_gplvm.py"
] | [
"from __future__ import print_function\nimport kernels\nimport numpy as np\nimport unittest\nimport gplvm\n\nclass TestBayesianGPLVM(unittest.TestCase):\n def setUp(self):\n N = 10 # number of data points\n D = 1 # latent dimensions\n M = 5 # inducings points\n R = 2 # data dimension\n k = kernels.RBF(D)\n Z = np.linspace(0,1,M)\n Z = np.expand_dims(Z, D)\n rng = np.random.RandomState(1)\n Y = rng.randn(N,R)\n self.m = gplvm.BayesianGPLVM(X_mean = np.zeros((N,D)),\n X_var=np.ones((N,D)), Y=Y, kern=k, Z=Z)\n\n def test_linearSolution(self):\n # You could implement a standard GPLVM, and show that it recovers PCA when the kernel is linear -> \n # How to deal with rotations and linear rescalings.\n pass\n\n def test_GPLVM_BGPLVM_Equivalence(self):\n\n # You could set the variance of the BGPLVM to zero and show that it's the same as the GPLVM\n # BGPLVM with variance to 0 is same as GPLVM\n N = 10 # number of data points\n Q = 1 # latent dimensions\n M = 5 # inducing points\n D = 2 # data dimension\n k = kernels.RBF(Q)\n Z = np.linspace(0, 1, M)\n Z = np.expand_dims(Z, Q)\n rng = np.random.RandomState(1)\n Y = rng.randn(N, Q)\n XInit = rng.rand(N, Q)\n # use 0 variance for BGPLVM\n m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z)\n print(m)\n m.X_var.fixed = True\n\n ll = m.compute_log_likelihood()\n print(ll)\n m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z, X_prior_mean=np.zeros((N,Q)), X_prior_var = np.ones((N,Q)))\n llprior = m.compute_log_likelihood()\n print(m) \n print(llprior)\n assert ll == llprior\n \n Z = np.linspace(0, 1, M*2)\n Z = np.expand_dims(Z, Q)\n m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z, X_prior_mean=np.zeros((N,Q)), X_prior_var = np.ones((N,Q)))\n llmoreZ = m.compute_log_likelihood()\n print(llmoreZ)\n assert llmoreZ > ll\n \n# m.optimize()\n# mGPLVM = GPflow.gplvm.GPLVM(Y=Y, Q=Q, kern=k, XInit=XInit)\n# mGPLVM.optimize()\n# assert np.allclose(m.X_mean.value, mGPLVM.X.value)\n # this does not work - f= +Infinity!\n\n def test_gplvmOptimization(self):\n print('Run optimisation')\n# self.m.optimize()\n \n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.expand_dims",
"numpy.linspace"
]
] |
astromaddie/pywavelets-py3 | [
"9d434929cb748eb44be86a4b712d8f3009326693"
] | [
"demo/wp_scalogram.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pywt\n\n\nx = np.linspace(0, 1, num=512)\ndata = np.sin(250 * np.pi * x**2)\n\nwavelet = 'db2'\nlevel = 4\norder = \"freq\" # other option is \"normal\"\ninterpolation = 'nearest'\ncmap = plt.cm.cool\n\n# Construct wavelet packet\nwp = pywt.WaveletPacket(data, wavelet, 'sym', maxlevel=level)\nnodes = wp.get_level(level, order=order)\nlabels = [n.path for n in nodes]\nvalues = np.array([n.data for n in nodes], 'd')\nvalues = abs(values)\n\n# Show signal and wavelet packet coefficients\nfig = plt.figure()\nfig.subplots_adjust(hspace=0.2, bottom=.03, left=.07, right=.97, top=.92)\nax = fig.add_subplot(2, 1, 1)\nax.set_title(\"linchirp signal\")\nax.plot(x, data, 'b')\nax.set_xlim(0, x[-1])\n\nax = fig.add_subplot(2, 1, 2)\nax.set_title(\"Wavelet packet coefficients at level %d\" % level)\nax.imshow(values, interpolation=interpolation, cmap=cmap, aspect=\"auto\",\n origin=\"lower\", extent=[0, 1, 0, len(values)])\nax.set_yticks(np.arange(0.5, len(labels) + 0.5), labels)\n\n# Show spectrogram and wavelet packet coefficients\nfig2 = plt.figure()\nax2 = fig2.add_subplot(211)\nax2.specgram(data, NFFT=64, noverlap=32, cmap=cmap)\nax2.set_title(\"Spectrogram of signal\")\nax3 = fig2.add_subplot(212)\nax3.imshow(values, origin='upper', extent=[-1,1,-1,1],\n interpolation='nearest')\nax3.set_title(\"Wavelet packet coefficients\")\n\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sin",
"numpy.linspace"
]
] |
Pronton2001/carla_pilotnet | [
"813ca14e04eccd405fde5fff350fe23c6ada5657"
] | [
"pilotnet/viz_gamepy.py"
] | [
"#!/usr/bin/env python\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\n# Allows controlling a vehicle with a keyboard. For a simpler and more\n# documented example, please take a look at tutorial.py.\n\n\"\"\"\nWelcome to CARLA manual control.\n\nUse ARROWS or WASD keys for control.\n\n W : throttle\n S : brake\n A/D : steer left/right\n Q : toggle reverse\n Space : hand-brake\n P : toggle autopilot\n M : toggle manual transmission\n ,/. : gear up/down\n CTRL + W : toggle constant velocity mode at 60 km/h\n\n L : toggle next light type\n SHIFT + L : toggle high beam\n Z/X : toggle right/left blinker\n I : toggle interior light\n\n TAB : change sensor position\n ` or N : next sensor\n [1-9] : change to sensor [1-9]\n G : toggle radar visualization\n C : change weather (Shift+C reverse)\n Backspace : change vehicle\n\n V : Select next map layer (Shift+V reverse)\n B : Load current selected map layer (Shift+B to unload)\n\n R : toggle recording images to disk\n T : toggle vehicle's telemetry\n\n CTRL + R : toggle recording of simulation (replacing any previous)\n CTRL + P : start replaying last recorded simulation\n CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds)\n CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds)\n\n F1 : toggle HUD\n H/? : toggle help\n ESC : quit\n\"\"\"\n\nfrom __future__ import print_function\n\n\n# ==============================================================================\n# -- find carla module ---------------------------------------------------------\n# ==============================================================================\n\n\nimport glob\nimport os\nimport sys\n\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\n\n# ==============================================================================\n# -- imports -------------------------------------------------------------------\n# ==============================================================================\n\n\nimport carla\n\nfrom carla import ColorConverter as cc\n\nimport argparse\nimport collections\nimport datetime\nimport logging\nimport math\nimport random\nimport re\nimport weakref\n\n##################### Added by me #####################\nimport tensorflow as tf\nfrom tensorflow import expand_dims\nfrom tensorflow.image import resize\nimport cv2\nimport model as md\nmodel = md.getPilotNetModel()\nmodel.load_weights('model/model-weights.h5')\n\ntry:\n import pygame\n from pygame.locals import KMOD_CTRL\n from pygame.locals import KMOD_SHIFT\n from pygame.locals import K_0\n from pygame.locals import K_9\n from pygame.locals import K_BACKQUOTE\n from pygame.locals import K_BACKSPACE\n from pygame.locals import K_COMMA\n from pygame.locals import K_DOWN\n from pygame.locals import K_ESCAPE\n from pygame.locals import K_F1\n from pygame.locals import K_LEFT\n from pygame.locals import K_PERIOD\n from pygame.locals import K_RIGHT\n from pygame.locals import K_SLASH\n from pygame.locals import K_SPACE\n from pygame.locals import K_TAB\n from pygame.locals import K_UP\n from pygame.locals import K_a\n from pygame.locals import K_b\n from pygame.locals import K_c\n from pygame.locals import 
K_d\n from pygame.locals import K_g\n from pygame.locals import K_h\n from pygame.locals import K_i\n from pygame.locals import K_l\n from pygame.locals import K_m\n from pygame.locals import K_n\n from pygame.locals import K_p\n from pygame.locals import K_q\n from pygame.locals import K_r\n from pygame.locals import K_s\n from pygame.locals import K_t\n from pygame.locals import K_v\n from pygame.locals import K_w\n from pygame.locals import K_x\n from pygame.locals import K_z\n from pygame.locals import K_MINUS\n from pygame.locals import K_EQUALS\nexcept ImportError:\n raise RuntimeError('cannot import pygame, make sure pygame package is installed')\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\n\n# ==============================================================================\n# -- Global functions ----------------------------------------------------------\n# ==============================================================================\n\n\ndef find_weather_presets():\n rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))\n presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]\n return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]\n\n\ndef get_actor_display_name(actor, truncate=250):\n name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])\n return (name[:truncate - 1] + u'\\u2026') if len(name) > truncate else name\n\ndef get_actor_blueprints(world, filter, generation):\n bps = world.get_blueprint_library().filter(filter)\n\n if generation.lower() == \"all\":\n return bps\n\n # If the filter returns only one bp, we assume that this one needed\n # and therefore, we ignore the generation\n if len(bps) == 1:\n return bps\n\n try:\n int_generation = int(generation)\n # Check if generation is in available generations\n if int_generation in [1, 2]:\n bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation]\n return bps\n else:\n print(\" Warning! Actor Generation is not valid. No actor will be spawned.\")\n return []\n except:\n print(\" Warning! Actor Generation is not valid. 
No actor will be spawned.\")\n return []\n\n\n# ==============================================================================\n# -- World ---------------------------------------------------------------------\n# ==============================================================================\n\n\nclass World(object):\n def __init__(self, carla_world, hud, args):\n self.world = carla_world\n self.sync = args.sync\n self.actor_role_name = args.rolename\n try:\n self.map = self.world.get_map()\n except RuntimeError as error:\n print('RuntimeError: {}'.format(error))\n print(' The server could not send the OpenDRIVE (.xodr) file:')\n print(' Make sure it exists, has the same name of your town, and is correct.')\n sys.exit(1)\n self.hud = hud\n self.player = None\n self.collision_sensor = None\n self.lane_invasion_sensor = None\n self.gnss_sensor = None\n self.imu_sensor = None\n self.radar_sensor = None\n self.camera_manager = None\n self._weather_presets = find_weather_presets()\n self._weather_index = 0\n self._actor_filter = args.filter\n self._actor_generation = args.generation\n self._gamma = args.gamma\n self.restart()\n self.world.on_tick(hud.on_world_tick)\n self.recording_enabled = False\n self.recording_start = 0\n self.constant_velocity_enabled = False\n self.show_vehicle_telemetry = False\n self.current_map_layer = 0\n self.map_layer_names = [\n carla.MapLayer.NONE,\n carla.MapLayer.Buildings,\n carla.MapLayer.Decals,\n carla.MapLayer.Foliage,\n carla.MapLayer.Ground,\n carla.MapLayer.ParkedVehicles,\n carla.MapLayer.Particles,\n carla.MapLayer.Props,\n carla.MapLayer.StreetLights,\n carla.MapLayer.Walls,\n carla.MapLayer.All\n ]\n\n def restart(self):\n self.player_max_speed = 1.589\n self.player_max_speed_fast = 3.713\n # Keep same camera config if the camera manager exists.\n cam_index = self.camera_manager.index if self.camera_manager is not None else 0\n cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0\n # Get a random blueprint.\n blueprint = random.choice(get_actor_blueprints(self.world, self._actor_filter, self._actor_generation))\n blueprint.set_attribute('role_name', self.actor_role_name)\n if blueprint.has_attribute('color'):\n color = random.choice(blueprint.get_attribute('color').recommended_values)\n blueprint.set_attribute('color', color)\n if blueprint.has_attribute('driver_id'):\n driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)\n blueprint.set_attribute('driver_id', driver_id)\n if blueprint.has_attribute('is_invincible'):\n blueprint.set_attribute('is_invincible', 'true')\n # set the max speed\n if blueprint.has_attribute('speed'):\n self.player_max_speed = float(blueprint.get_attribute('speed').recommended_values[1])\n self.player_max_speed_fast = float(blueprint.get_attribute('speed').recommended_values[2])\n\n # Spawn the player.\n if self.player is not None:\n spawn_point = self.player.get_transform()\n spawn_point.location.z += 2.0\n spawn_point.rotation.roll = 0.0\n spawn_point.rotation.pitch = 0.0\n self.destroy()\n self.player = self.world.try_spawn_actor(blueprint, spawn_point)\n self.modify_vehicle_physics(self.player)\n while self.player is None:\n if not self.map.get_spawn_points():\n print('There are no spawn points available in your map/town.')\n print('Please add some Vehicle Spawn Point to your UE4 scene.')\n sys.exit(1)\n spawn_points = self.map.get_spawn_points()\n spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()\n self.player = 
self.world.try_spawn_actor(blueprint, spawn_point)\n self.modify_vehicle_physics(self.player)\n # Set up the sensors.\n self.collision_sensor = CollisionSensor(self.player, self.hud)\n self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)\n self.gnss_sensor = GnssSensor(self.player)\n self.imu_sensor = IMUSensor(self.player)\n self.camera_manager = CameraManager(self.player, self.hud, self._gamma)\n self.camera_manager.transform_index = cam_pos_index\n self.camera_manager.set_sensor(cam_index, notify=False)\n actor_type = get_actor_display_name(self.player)\n self.hud.notification(actor_type)\n\n if self.sync:\n self.world.tick()\n else:\n self.world.wait_for_tick()\n\n def next_weather(self, reverse=False):\n self._weather_index += -1 if reverse else 1\n self._weather_index %= len(self._weather_presets)\n preset = self._weather_presets[self._weather_index]\n self.hud.notification('Weather: %s' % preset[1])\n self.player.get_world().set_weather(preset[0])\n\n def next_map_layer(self, reverse=False):\n self.current_map_layer += -1 if reverse else 1\n self.current_map_layer %= len(self.map_layer_names)\n selected = self.map_layer_names[self.current_map_layer]\n self.hud.notification('LayerMap selected: %s' % selected)\n\n def load_map_layer(self, unload=False):\n selected = self.map_layer_names[self.current_map_layer]\n if unload:\n self.hud.notification('Unloading map layer: %s' % selected)\n self.world.unload_map_layer(selected)\n else:\n self.hud.notification('Loading map layer: %s' % selected)\n self.world.load_map_layer(selected)\n\n def toggle_radar(self):\n if self.radar_sensor is None:\n self.radar_sensor = RadarSensor(self.player)\n elif self.radar_sensor.sensor is not None:\n self.radar_sensor.sensor.destroy()\n self.radar_sensor = None\n\n def modify_vehicle_physics(self, actor):\n #If actor is not a vehicle, we cannot use the physics control\n try:\n physics_control = actor.get_physics_control()\n physics_control.use_sweep_wheel_collision = True\n actor.apply_physics_control(physics_control)\n except Exception:\n pass\n\n def tick(self, clock):\n self.hud.tick(self, clock)\n\n def render(self, display):\n self.camera_manager.render(display)\n self.hud.render(display)\n\n def destroy_sensors(self):\n self.camera_manager.sensor.destroy()\n self.camera_manager.sensor = None\n self.camera_manager.index = None\n\n def destroy(self):\n if self.radar_sensor is not None:\n self.toggle_radar()\n sensors = [\n self.camera_manager.sensor,\n self.collision_sensor.sensor,\n self.lane_invasion_sensor.sensor,\n self.gnss_sensor.sensor,\n self.imu_sensor.sensor]\n for sensor in sensors:\n if sensor is not None:\n sensor.stop()\n sensor.destroy()\n if self.player is not None:\n self.player.destroy()\n\n\n# ==============================================================================\n# -- KeyboardControl -----------------------------------------------------------\n# ==============================================================================\n\n\nclass KeyboardControl(object):\n \"\"\"Class that handles keyboard input.\"\"\"\n def __init__(self, world, start_in_autopilot):\n self._autopilot_enabled = start_in_autopilot\n if isinstance(world.player, carla.Vehicle):\n self._control = carla.VehicleControl()\n self._lights = carla.VehicleLightState.NONE\n world.player.set_autopilot(self._autopilot_enabled)\n world.player.set_light_state(self._lights)\n elif isinstance(world.player, carla.Walker):\n self._control = carla.WalkerControl()\n self._autopilot_enabled = False\n 
self._rotation = world.player.get_transform().rotation\n else:\n raise NotImplementedError(\"Actor type not supported\")\n self._steer_cache = 0.0\n world.hud.notification(\"Press 'H' or '?' for help.\", seconds=4.0)\n\n def parse_events(self, client, world, clock, sync_mode):\n if isinstance(self._control, carla.VehicleControl):\n current_lights = self._lights\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if self._is_quit_shortcut(event.key):\n return True\n elif event.key == K_BACKSPACE:\n if self._autopilot_enabled:\n world.player.set_autopilot(False)\n world.restart()\n world.player.set_autopilot(True)\n else:\n world.restart()\n elif event.key == K_F1:\n world.hud.toggle_info()\n elif event.key == K_v and pygame.key.get_mods() & KMOD_SHIFT:\n world.next_map_layer(reverse=True)\n elif event.key == K_v:\n world.next_map_layer()\n elif event.key == K_b and pygame.key.get_mods() & KMOD_SHIFT:\n world.load_map_layer(unload=True)\n elif event.key == K_b:\n world.load_map_layer(unload = True) # i changed False -> True\n elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):\n world.hud.help.toggle()\n elif event.key == K_TAB:\n world.camera_manager.toggle_camera()\n elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:\n world.next_weather(reverse=True)\n elif event.key == K_c:\n world.next_weather()\n elif event.key == K_g:\n world.toggle_radar()\n elif event.key == K_BACKQUOTE:\n world.camera_manager.next_sensor()\n elif event.key == K_n:\n world.camera_manager.next_sensor()\n elif event.key == K_w and (pygame.key.get_mods() & KMOD_CTRL):\n if world.constant_velocity_enabled:\n world.player.disable_constant_velocity()\n world.constant_velocity_enabled = False\n world.hud.notification(\"Disabled Constant Velocity Mode\")\n else:\n world.player.enable_constant_velocity(carla.Vector3D(17, 0, 0))\n world.constant_velocity_enabled = True\n world.hud.notification(\"Enabled Constant Velocity Mode at 60 km/h\")\n elif event.key == K_t:\n if world.show_vehicle_telemetry:\n world.player.show_debug_telemetry(False)\n world.show_vehicle_telemetry = False\n world.hud.notification(\"Disabled Vehicle Telemetry\")\n else:\n try:\n world.player.show_debug_telemetry(True)\n world.show_vehicle_telemetry = True\n world.hud.notification(\"Enabled Vehicle Telemetry\")\n except Exception:\n pass\n elif event.key > K_0 and event.key <= K_9:\n index_ctrl = 0\n if pygame.key.get_mods() & KMOD_CTRL:\n index_ctrl = 9\n world.camera_manager.set_sensor(event.key - 1 - K_0 + index_ctrl)\n elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):\n world.camera_manager.toggle_recording()\n elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):\n if (world.recording_enabled):\n client.stop_recorder()\n world.recording_enabled = False\n world.hud.notification(\"Recorder is OFF\")\n else:\n client.start_recorder(\"manual_recording.rec\")\n world.recording_enabled = True\n world.hud.notification(\"Recorder is ON\")\n elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):\n # stop recorder\n client.stop_recorder()\n world.recording_enabled = False\n # work around to fix camera at start of replaying\n current_index = world.camera_manager.index\n world.destroy_sensors()\n # disable autopilot\n self._autopilot_enabled = False\n world.player.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\"Replaying file 'manual_recording.rec'\")\n # replayer\n 
client.replay_file(\"manual_recording.rec\", world.recording_start, 0, 0)\n world.camera_manager.set_sensor(current_index)\n elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):\n if pygame.key.get_mods() & KMOD_SHIFT:\n world.recording_start -= 10\n else:\n world.recording_start -= 1\n world.hud.notification(\"Recording start time is %d\" % (world.recording_start))\n elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):\n if pygame.key.get_mods() & KMOD_SHIFT:\n world.recording_start += 10\n else:\n world.recording_start += 1\n world.hud.notification(\"Recording start time is %d\" % (world.recording_start))\n if isinstance(self._control, carla.VehicleControl):\n if event.key == K_q:\n self._control.gear = 1 if self._control.reverse else -1\n elif event.key == K_m:\n self._control.manual_gear_shift = not self._control.manual_gear_shift\n self._control.gear = world.player.get_control().gear\n world.hud.notification('%s Transmission' %\n ('Manual' if self._control.manual_gear_shift else 'Automatic'))\n elif self._control.manual_gear_shift and event.key == K_COMMA:\n self._control.gear = max(-1, self._control.gear - 1)\n elif self._control.manual_gear_shift and event.key == K_PERIOD:\n self._control.gear = self._control.gear + 1\n elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:\n if not self._autopilot_enabled and not sync_mode:\n print(\"WARNING: You are currently in asynchronous mode and could \"\n \"experience some issues with the traffic simulation\")\n self._autopilot_enabled = not self._autopilot_enabled\n world.player.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\n 'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))\n elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:\n current_lights ^= carla.VehicleLightState.Special1\n elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:\n current_lights ^= carla.VehicleLightState.HighBeam\n elif event.key == K_l:\n # Use 'L' key to switch between lights:\n # closed -> position -> low beam -> fog\n if not self._lights & carla.VehicleLightState.Position:\n world.hud.notification(\"Position lights\")\n current_lights |= carla.VehicleLightState.Position\n else:\n world.hud.notification(\"Low beam lights\")\n current_lights |= carla.VehicleLightState.LowBeam\n if self._lights & carla.VehicleLightState.LowBeam:\n world.hud.notification(\"Fog lights\")\n current_lights |= carla.VehicleLightState.Fog\n if self._lights & carla.VehicleLightState.Fog:\n world.hud.notification(\"Lights off\")\n current_lights ^= carla.VehicleLightState.Position\n current_lights ^= carla.VehicleLightState.LowBeam\n current_lights ^= carla.VehicleLightState.Fog\n elif event.key == K_i:\n current_lights ^= carla.VehicleLightState.Interior\n elif event.key == K_z:\n current_lights ^= carla.VehicleLightState.LeftBlinker\n elif event.key == K_x:\n current_lights ^= carla.VehicleLightState.RightBlinker\n\n if not self._autopilot_enabled:\n if isinstance(self._control, carla.VehicleControl):\n self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n self._control.reverse = self._control.gear < 0\n # Set automatic control-related vehicle lights\n if self._control.brake:\n current_lights |= carla.VehicleLightState.Brake\n else: # Remove the Brake flag\n current_lights &= ~carla.VehicleLightState.Brake\n if self._control.reverse:\n current_lights |= carla.VehicleLightState.Reverse\n else: # Remove the Reverse flag\n current_lights &= ~carla.VehicleLightState.Reverse\n if 
current_lights != self._lights: # Change the light state only if necessary\n self._lights = current_lights\n world.player.set_light_state(carla.VehicleLightState(self._lights))\n elif isinstance(self._control, carla.WalkerControl):\n self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)\n world.player.apply_control(self._control)\n\n def _parse_vehicle_keys(self, keys, milliseconds):\n if keys[K_UP] or keys[K_w]:\n self._control.throttle = min(self._control.throttle + 0.01, 1.00)\n else:\n self._control.throttle = 0.0\n\n if keys[K_DOWN] or keys[K_s]:\n self._control.brake = min(self._control.brake + 0.2, 1)\n else:\n self._control.brake = 0\n\n steer_increment = 5e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n if self._steer_cache > 0:\n self._steer_cache = 0\n else:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n if self._steer_cache < 0:\n self._steer_cache = 0\n else:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.7, max(-0.7, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.hand_brake = keys[K_SPACE]\n\n def _parse_walker_keys(self, keys, milliseconds, world):\n self._control.speed = 0.0\n if keys[K_DOWN] or keys[K_s]:\n self._control.speed = 0.0\n if keys[K_LEFT] or keys[K_a]:\n self._control.speed = .01\n self._rotation.yaw -= 0.08 * milliseconds\n if keys[K_RIGHT] or keys[K_d]:\n self._control.speed = .01\n self._rotation.yaw += 0.08 * milliseconds\n if keys[K_UP] or keys[K_w]:\n self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed\n self._control.jump = keys[K_SPACE]\n self._rotation.yaw = round(self._rotation.yaw, 1)\n self._control.direction = self._rotation.get_forward_vector()\n\n @staticmethod\n def _is_quit_shortcut(key):\n return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)\n\n\n# ==============================================================================\n# -- HUD -----------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HUD(object):\n def __init__(self, width, height):\n self.dim = (width, height)\n font = pygame.font.Font(pygame.font.get_default_font(), 20)\n font_name = 'courier' if os.name == 'nt' else 'mono'\n fonts = [x for x in pygame.font.get_fonts() if font_name in x]\n default_font = 'ubuntumono'\n mono = default_font if default_font in fonts else fonts[0]\n mono = pygame.font.match_font(mono)\n self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)\n self._notifications = FadingText(font, (width, 40), (0, height - 40))\n self.help = HelpText(pygame.font.Font(mono, 16), width, height)\n self.server_fps = 0\n self.frame = 0\n self.simulation_time = 0\n self._show_info = True\n self._info_text = []\n self._server_clock = pygame.time.Clock()\n\n def on_world_tick(self, timestamp):\n self._server_clock.tick()\n self.server_fps = self._server_clock.get_fps()\n self.frame = timestamp.frame\n self.simulation_time = timestamp.elapsed_seconds\n\n def tick(self, world, clock):\n self._notifications.tick(world, clock)\n if not self._show_info:\n return\n t = world.player.get_transform()\n v = world.player.get_velocity()\n c = world.player.get_control()\n compass = world.imu_sensor.compass\n heading = 'N' if compass > 270.5 or compass < 89.5 else ''\n heading += 'S' if 90.5 < compass < 269.5 else ''\n heading += 'E' 
if 0.5 < compass < 179.5 else ''\n heading += 'W' if 180.5 < compass < 359.5 else ''\n colhist = world.collision_sensor.get_collision_history()\n collision = [colhist[x + self.frame - 200] for x in range(0, 200)]\n max_col = max(1.0, max(collision))\n collision = [x / max_col for x in collision]\n vehicles = world.world.get_actors().filter('vehicle.*')\n self._info_text = [\n 'Server: % 16.0f FPS' % self.server_fps,\n 'Client: % 16.0f FPS' % clock.get_fps(),\n '',\n 'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),\n 'Map: % 20s' % world.map.name.split('/')[-1],\n 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),\n '',\n 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),\n u'Compass:% 17.0f\\N{DEGREE SIGN} % 2s' % (compass, heading),\n 'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),\n 'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),\n 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),\n 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),\n 'Height: % 18.0f m' % t.location.z,\n '']\n if isinstance(c, carla.VehicleControl):\n self._info_text += [\n ('Throttle:', c.throttle, 0.0, 1.0),\n ('Steer:', c.steer, -1.0, 1.0),\n ('Brake:', c.brake, 0.0, 1.0),\n ('Reverse:', c.reverse),\n ('Hand brake:', c.hand_brake),\n ('Manual:', c.manual_gear_shift),\n 'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]\n elif isinstance(c, carla.WalkerControl):\n self._info_text += [\n ('Speed:', c.speed, 0.0, 5.556),\n ('Jump:', c.jump)]\n self._info_text += [\n '',\n 'Collision:',\n collision,\n '',\n 'Number of vehicles: % 8d' % len(vehicles)]\n if len(vehicles) > 1:\n self._info_text += ['Nearby vehicles:']\n distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)\n vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id]\n for d, vehicle in sorted(vehicles, key=lambda vehicles: vehicles[0]):\n if d > 200.0:\n break\n vehicle_type = get_actor_display_name(vehicle, truncate=22)\n self._info_text.append('% 4dm %s' % (d, vehicle_type))\n\n def toggle_info(self):\n self._show_info = not self._show_info\n\n def notification(self, text, seconds=2.0):\n self._notifications.set_text(text, seconds=seconds)\n\n def error(self, text):\n self._notifications.set_text('Error: %s' % text, (255, 0, 0))\n\n def render(self, display):\n if self._show_info:\n info_surface = pygame.Surface((220, self.dim[1]))\n info_surface.set_alpha(100)\n display.blit(info_surface, (0, 0))\n v_offset = 4\n bar_h_offset = 100\n bar_width = 106\n for item in self._info_text:\n if v_offset + 18 > self.dim[1]:\n break\n if isinstance(item, list):\n if len(item) > 1:\n points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]\n pygame.draw.lines(display, (255, 136, 0), False, points, 2)\n item = None\n v_offset += 18\n elif isinstance(item, tuple):\n if isinstance(item[1], bool):\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))\n pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)\n else:\n rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect_border, 1)\n f = (item[1] - item[2]) / (item[3] - item[2])\n if item[2] < 0.0:\n rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))\n else:\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))\n 
pygame.draw.rect(display, (255, 255, 255), rect)\n item = item[0]\n if item: # At this point has to be a str.\n surface = self._font_mono.render(item, True, (255, 255, 255))\n display.blit(surface, (8, v_offset))\n v_offset += 18\n self._notifications.render(display)\n self.help.render(display)\n\n\n# ==============================================================================\n# -- FadingText ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass FadingText(object):\n def __init__(self, font, dim, pos):\n self.font = font\n self.dim = dim\n self.pos = pos\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n\n def set_text(self, text, color=(255, 255, 255), seconds=2.0):\n text_texture = self.font.render(text, True, color)\n self.surface = pygame.Surface(self.dim)\n self.seconds_left = seconds\n self.surface.fill((0, 0, 0, 0))\n self.surface.blit(text_texture, (10, 11))\n\n def tick(self, _, clock):\n delta_seconds = 1e-3 * clock.get_time()\n self.seconds_left = max(0.0, self.seconds_left - delta_seconds)\n self.surface.set_alpha(500.0 * self.seconds_left)\n\n def render(self, display):\n display.blit(self.surface, self.pos)\n\n\n# ==============================================================================\n# -- HelpText ------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HelpText(object):\n \"\"\"Helper class to handle text output using pygame\"\"\"\n def __init__(self, font, width, height):\n lines = __doc__.split('\\n')\n self.font = font\n self.line_space = 18\n self.dim = (780, len(lines) * self.line_space + 12)\n self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n self.surface.fill((0, 0, 0, 0))\n for n, line in enumerate(lines):\n text_texture = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text_texture, (22, n * self.line_space))\n self._render = False\n self.surface.set_alpha(220)\n\n def toggle(self):\n self._render = not self._render\n\n def render(self, display):\n if self._render:\n display.blit(self.surface, self.pos)\n\n\n# ==============================================================================\n# -- CollisionSensor -----------------------------------------------------------\n# ==============================================================================\n\n\nclass CollisionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self.history = []\n self._parent = parent_actor\n self.hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.collision')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))\n\n def get_collision_history(self):\n history = collections.defaultdict(int)\n for frame, intensity in self.history:\n history[frame] += intensity\n return history\n\n @staticmethod\n def _on_collision(weak_self, event):\n self = weak_self()\n if not self:\n return\n actor_type = get_actor_display_name(event.other_actor)\n self.hud.notification('Collision with %r' % actor_type)\n impulse = event.normal_impulse\n intensity = 
math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)\n self.history.append((event.frame, intensity))\n if len(self.history) > 4000:\n self.history.pop(0)\n\n\n# ==============================================================================\n# -- LaneInvasionSensor --------------------------------------------------------\n# ==============================================================================\n\n\nclass LaneInvasionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n\n # If the spawn object is not a vehicle, we cannot use the Lane Invasion Sensor\n if parent_actor.type_id.startswith(\"vehicle.\"):\n self._parent = parent_actor\n self.hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.lane_invasion')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))\n\n @staticmethod\n def _on_invasion(weak_self, event):\n self = weak_self()\n if not self:\n return\n lane_types = set(x.type for x in event.crossed_lane_markings)\n text = ['%r' % str(x).split()[-1] for x in lane_types]\n self.hud.notification('Crossed line %s' % ' and '.join(text))\n\n\n# ==============================================================================\n# -- GnssSensor ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass GnssSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.lat = 0.0\n self.lon = 0.0\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.gnss')\n self.sensor = world.spawn_actor(bp, carla.Transform(carla.Location(x=1.0, z=2.8)), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event))\n\n @staticmethod\n def _on_gnss_event(weak_self, event):\n self = weak_self()\n if not self:\n return\n self.lat = event.latitude\n self.lon = event.longitude\n\n\n# ==============================================================================\n# -- IMUSensor -----------------------------------------------------------------\n# ==============================================================================\n\n\nclass IMUSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.accelerometer = (0.0, 0.0, 0.0)\n self.gyroscope = (0.0, 0.0, 0.0)\n self.compass = 0.0\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.imu')\n self.sensor = world.spawn_actor(\n bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(\n lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))\n\n @staticmethod\n def _IMU_callback(weak_self, sensor_data):\n self = weak_self()\n if not self:\n return\n limits = (-99.9, 99.9)\n self.accelerometer = (\n max(limits[0], min(limits[1], sensor_data.accelerometer.x)),\n max(limits[0], min(limits[1], sensor_data.accelerometer.y)),\n max(limits[0], min(limits[1], sensor_data.accelerometer.z)))\n 
self.gyroscope = (\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.x))),\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.y))),\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.z))))\n self.compass = math.degrees(sensor_data.compass)\n\n\n# ==============================================================================\n# -- RadarSensor ---------------------------------------------------------------\n# ==============================================================================\n\n\nclass RadarSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n bound_x = 0.5 + self._parent.bounding_box.extent.x\n bound_y = 0.5 + self._parent.bounding_box.extent.y\n bound_z = 0.5 + self._parent.bounding_box.extent.z\n\n self.velocity_range = 7.5 # m/s\n world = self._parent.get_world()\n self.debug = world.debug\n bp = world.get_blueprint_library().find('sensor.other.radar')\n bp.set_attribute('horizontal_fov', str(35))\n bp.set_attribute('vertical_fov', str(20))\n self.sensor = world.spawn_actor(\n bp,\n carla.Transform(\n carla.Location(x=bound_x + 0.05, z=bound_z+0.05),\n carla.Rotation(pitch=5)),\n attach_to=self._parent)\n # We need a weak reference to self to avoid circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(\n lambda radar_data: RadarSensor._Radar_callback(weak_self, radar_data))\n\n @staticmethod\n def _Radar_callback(weak_self, radar_data):\n self = weak_self()\n if not self:\n return\n # To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:\n # points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))\n # points = np.reshape(points, (len(radar_data), 4))\n\n current_rot = radar_data.transform.rotation\n for detect in radar_data:\n azi = math.degrees(detect.azimuth)\n alt = math.degrees(detect.altitude)\n # The 0.25 adjusts a bit the distance so the dots can\n # be properly seen\n fw_vec = carla.Vector3D(x=detect.depth - 0.25)\n carla.Transform(\n carla.Location(),\n carla.Rotation(\n pitch=current_rot.pitch + alt,\n yaw=current_rot.yaw + azi,\n roll=current_rot.roll)).transform(fw_vec)\n\n def clamp(min_v, max_v, value):\n return max(min_v, min(value, max_v))\n\n norm_velocity = detect.velocity / self.velocity_range # range [-1, 1]\n r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)\n g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)\n b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)\n self.debug.draw_point(\n radar_data.transform.location + fw_vec,\n size=0.075,\n life_time=0.06,\n persistent_lines=False,\n color=carla.Color(r, g, b))\n\n# ==============================================================================\n# -- CameraManager -------------------------------------------------------------\n# ==============================================================================\n\n\nclass CameraManager(object):\n def __init__(self, parent_actor, hud, gamma_correction):\n self.sensor = None\n self.surface = None\n self._parent = parent_actor\n self.hud = hud\n self.recording = False\n bound_x = 0.5 + self._parent.bounding_box.extent.x\n bound_y = 0.5 + self._parent.bounding_box.extent.y\n bound_z = 0.5 + self._parent.bounding_box.extent.z\n Attachment = carla.AttachmentType\n\n if not self._parent.type_id.startswith(\"walker.pedestrian\"):\n self._camera_transforms = [\n (carla.Transform(carla.Location(x=-2.0*bound_x, y=+0.0*bound_y, z=2.0*bound_z), carla.Rotation(pitch=8.0)), Attachment.SpringArm),\n 
(carla.Transform(carla.Location(x=+0.8*bound_x, y=+0.0*bound_y, z=1.3*bound_z)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=+1.9*bound_x, y=+1.0*bound_y, z=1.2*bound_z)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=-2.8*bound_x, y=+0.0*bound_y, z=4.6*bound_z), carla.Rotation(pitch=6.0)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=-1.0, y=-1.0*bound_y, z=0.4*bound_z)), Attachment.Rigid)]\n else:\n self._camera_transforms = [\n (carla.Transform(carla.Location(x=-2.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=2.5, y=0.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=-4.0, z=2.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=0, y=-2.5, z=-0.0), carla.Rotation(yaw=90.0)), Attachment.Rigid)]\n\n self.transform_index = 1\n self.sensors = [\n ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],\n ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],\n ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],\n ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)', {}],\n ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)', {}],\n ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,\n 'Camera Semantic Segmentation (CityScapes Palette)', {}],\n ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', {'range': '50'}],\n ['sensor.camera.dvs', cc.Raw, 'Dynamic Vision Sensor', {}],\n ['sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted',\n {'lens_circle_multiplier': '3.0',\n 'lens_circle_falloff': '3.0',\n 'chromatic_aberration_intensity': '0.5',\n 'chromatic_aberration_offset': '0'}],\n ['sensor.camera.optical_flow', cc.Raw, 'Optical Flow', {}],\n ]\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self.sensors:\n bp = bp_library.find(item[0])\n if item[0].startswith('sensor.camera'):\n bp.set_attribute('image_size_x', str(hud.dim[0]))\n bp.set_attribute('image_size_y', str(hud.dim[1]))\n if bp.has_attribute('gamma'):\n bp.set_attribute('gamma', str(gamma_correction))\n for attr_name, attr_value in item[3].items():\n bp.set_attribute(attr_name, attr_value)\n elif item[0].startswith('sensor.lidar'):\n self.lidar_range = 50\n\n for attr_name, attr_value in item[3].items():\n bp.set_attribute(attr_name, attr_value)\n if attr_name == 'range':\n self.lidar_range = float(attr_value)\n\n item.append(bp)\n self.index = None\n\n def toggle_camera(self):\n self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)\n self.set_sensor(self.index, notify=False, force_respawn=True)\n\n def set_sensor(self, index, notify=True, force_respawn=False):\n index = index % len(self.sensors)\n needs_respawn = True if self.index is None else \\\n (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))\n if needs_respawn:\n if self.sensor is not None:\n self.sensor.destroy()\n self.surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self.sensors[index][-1],\n self._camera_transforms[self.transform_index][0],\n attach_to=self._parent,\n attachment_type=self._camera_transforms[self.transform_index][1])\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, 
image))\n if notify:\n self.hud.notification(self.sensors[index][2])\n self.index = index\n\n def next_sensor(self):\n self.set_sensor(self.index + 1)\n\n def toggle_recording(self):\n self.recording = not self.recording\n self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))\n\n def render(self, display):\n if self.surface is not None:\n display.blit(self.surface, (0, 0))\n\n @staticmethod\n def scale_steer(steer):\n if (steer >= 1):\n return 1\n elif (steer <= -1):\n return -1\n else:\n return steer\n\n @staticmethod\n def predict_steering(img_rgb):\n img_size = (66, 200, 3)\n input_img = resize(img_rgb, img_size[:2])\n input_img = expand_dims(input_img, 0) # Create batch axis\n steering_pred= model.predict(input_img)[0][0]\n return steering_pred\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n if self.sensors[self.index][0].startswith('sensor.lidar'):\n points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))\n points = np.reshape(points, (int(points.shape[0] / 4), 4))\n lidar_data = np.array(points[:, :2])\n lidar_data *= min(self.hud.dim) / (2.0 * self.lidar_range)\n lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])\n lidar_data = np.fabs(lidar_data) # pylint: disable=E1111\n lidar_data = lidar_data.astype(np.int32)\n lidar_data = np.reshape(lidar_data, (-1, 2))\n lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)\n lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)\n lidar_img[tuple(lidar_data.T)] = (255, 255, 255)\n self.surface = pygame.surfarray.make_surface(lidar_img)\n elif self.sensors[self.index][0].startswith('sensor.camera.dvs'):\n # Example of converting the raw_data from a carla.DVSEventArray\n # sensor into a NumPy array and using it as an image\n dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([\n ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool)]))\n dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)\n # Blue is positive, red is negative\n dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255\n self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))\n elif self.sensors[self.index][0].startswith('sensor.camera.optical_flow'):\n image = image.get_color_coded_flow()\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n else:\n image.convert(self.sensors[self.index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4)) # RGBA\n array = array[:, :, :3] # RGBA -> RGB\n # array = array[:, :, ::-1]\n img_rgb = cv2.resize(np.float32(array), (320, 180))\n pred_steering = CameraManager.predict_steering(img_rgb)\n print('before scale', pred_steering)\n pred_steering /=70\n print('after scale', pred_steering)\n pred_steering = CameraManager.scale_steer(pred_steering)\n print(\"Predicted steering: \", pred_steering)\n self._parent.apply_control(carla.VehicleControl(throttle=0.9, steer=1))\n self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self.recording:\n image.save_to_disk('_out/%08d' % image.frame)\n\n\n# ==============================================================================\n# -- game_loop() ---------------------------------------------------------------\n# 
==============================================================================\n\n\ndef game_loop(args):\n pygame.init()\n pygame.font.init()\n world = None\n original_settings = None\n\n try:\n client = carla.Client(args.host, args.port)\n client.set_timeout(20.0)\n\n sim_world = client.get_world()\n if args.sync:\n original_settings = sim_world.get_settings()\n settings = sim_world.get_settings()\n if not settings.synchronous_mode:\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = 0.05\n sim_world.apply_settings(settings)\n\n traffic_manager = client.get_trafficmanager()\n traffic_manager.set_synchronous_mode(True)\n\n if args.autopilot and not sim_world.get_settings().synchronous_mode:\n print(\"WARNING: You are currently in asynchronous mode and could \"\n \"experience some issues with the traffic simulation\")\n\n # Remove all layer for fast rendering\n sim_world.unload_map_layer(carla.MapLayer.All)\n # settings = sim_world.get_settings()\n # settings.fixed_delta_seconds = None # Set a variable time-step\n # sim_world.apply_settings(settings)\n\n display = pygame.display.set_mode(\n (args.width, args.height),\n pygame.HWSURFACE | pygame.DOUBLEBUF)\n display.fill((0,0,0))\n pygame.display.flip()\n\n hud = HUD(args.width, args.height)\n world = World(sim_world, hud, args)\n controller = KeyboardControl(world, args.autopilot)\n\n\n if args.sync:\n sim_world.tick()\n else:\n sim_world.wait_for_tick()\n\n clock = pygame.time.Clock()\n while True:\n if args.sync:\n sim_world.tick()\n clock.tick_busy_loop(60)\n if controller.parse_events(client, world, clock, args.sync):\n return\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n\n finally:\n\n if original_settings:\n sim_world.apply_settings(original_settings)\n\n if (world and world.recording_enabled):\n client.stop_recorder()\n\n if world is not None:\n world.destroy()\n\n pygame.quit()\n\n\n# ==============================================================================\n# -- main() --------------------------------------------------------------------\n# ==============================================================================\n\n\ndef main():\n argparser = argparse.ArgumentParser(\n description='CARLA Manual Control Client')\n argparser.add_argument(\n '-v', '--verbose',\n action='store_true',\n dest='debug',\n help='print debug information')\n argparser.add_argument(\n '--host',\n metavar='H',\n default='127.0.0.1',\n help='IP of the host server (default: 127.0.0.1)')\n argparser.add_argument(\n '-p', '--port',\n metavar='P',\n default=2000,\n type=int,\n help='TCP port to listen to (default: 2000)')\n argparser.add_argument(\n '-a', '--autopilot',\n action='store_true',\n help='enable autopilot')\n argparser.add_argument(\n '--res',\n metavar='WIDTHxHEIGHT',\n default='1280x720',\n help='window resolution (default: 1280x720)')\n argparser.add_argument(\n '--filter',\n metavar='PATTERN',\n default='vehicle.*',\n help='actor filter (default: \"vehicle.*\")')\n argparser.add_argument(\n '--generation',\n metavar='G',\n default='2',\n help='restrict to certain actor generation (values: \"1\",\"2\",\"All\" - default: \"2\")')\n argparser.add_argument(\n '--rolename',\n metavar='NAME',\n default='hero',\n help='actor role name (default: \"hero\")')\n argparser.add_argument(\n '--gamma',\n default=2.2,\n type=float,\n help='Gamma correction of the camera (default: 2.2)')\n argparser.add_argument(\n '--sync',\n action='store_true',\n help='Activate synchronous mode execution')\n args = 
argparser.parse_args()\n\n args.width, args.height = [int(x) for x in args.res.split('x')]\n\n log_level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)\n\n logging.info('listening to server %s:%s', args.host, args.port)\n\n print(__doc__)\n\n try:\n\n game_loop(args)\n\n except KeyboardInterrupt:\n print('\\nCancelled by user. Bye!')\n\n\nif __name__ == '__main__':\n\n main()\n\n"
] | [
[
"numpy.fabs",
"numpy.zeros",
"tensorflow.image.resize",
"numpy.dtype",
"numpy.reshape",
"tensorflow.expand_dims",
"numpy.float32",
"numpy.array"
]
] |
hkennyv/pandas | [
"31875eb3d8a56f359c2f529f86b867572d5dfeb1"
] | [
"pandas/tests/indexes/timedeltas/test_timedelta.py"
] | [
"from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n Int64Index,\n Series,\n Timedelta,\n TimedeltaIndex,\n array,\n date_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\nfrom ..datetimelike import DatetimeLike\n\nrandn = np.random.randn\n\n\nclass TestTimedeltaIndex(DatetimeLike):\n _holder = TimedeltaIndex\n\n @pytest.fixture\n def indices(self):\n return tm.makeTimedeltaIndex(10)\n\n def create_index(self) -> TimedeltaIndex:\n index = pd.to_timedelta(range(5), unit=\"d\")._with_freq(\"infer\")\n assert index.freq == \"D\"\n return index + pd.offsets.Hour(1)\n\n def test_numeric_compat(self):\n # Dummy method to override super's version; this test is now done\n # in test_arithmetic.py\n pass\n\n def test_shift(self):\n pass # this is handled in test_arithmetic.py\n\n def test_pickle_compat_construction(self):\n pass\n\n def test_isin(self):\n\n index = tm.makeTimedeltaIndex(4)\n result = index.isin(index)\n assert result.all()\n\n result = index.isin(list(index))\n assert result.all()\n\n tm.assert_almost_equal(\n index.isin([index[2], 5]), np.array([False, False, True, False])\n )\n\n def test_factorize(self):\n idx1 = TimedeltaIndex([\"1 day\", \"1 day\", \"2 day\", \"2 day\", \"3 day\", \"3 day\"])\n\n exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)\n exp_idx = TimedeltaIndex([\"1 day\", \"2 day\", \"3 day\"])\n\n arr, idx = idx1.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n arr, idx = idx1.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n # freq must be preserved\n idx3 = timedelta_range(\"1 day\", periods=4, freq=\"s\")\n exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)\n arr, idx = idx3.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, idx3)\n\n def test_sort_values(self):\n\n idx = TimedeltaIndex([\"4d\", \"1d\", \"2d\"])\n\n ordered = idx.sort_values()\n assert ordered.is_monotonic\n\n ordered = idx.sort_values(ascending=False)\n assert ordered[::-1].is_monotonic\n\n ordered, dexer = idx.sort_values(return_indexer=True)\n assert ordered.is_monotonic\n\n tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]), check_dtype=False)\n\n ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)\n assert ordered[::-1].is_monotonic\n\n tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]), check_dtype=False)\n\n @pytest.mark.parametrize(\"klass\", [list, np.array, array, Series])\n def test_searchsorted_different_argument_classes(self, klass):\n idx = TimedeltaIndex([\"1 day\", \"2 days\", \"3 days\"])\n result = idx.searchsorted(klass(idx))\n expected = np.arange(len(idx), dtype=result.dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx._data.searchsorted(klass(idx))\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"arg\",\n [[1, 2], [\"a\", \"b\"], [pd.Timestamp(\"2020-01-01\", tz=\"Europe/London\")] * 2],\n )\n def test_searchsorted_invalid_argument_dtype(self, arg):\n idx = TimedeltaIndex([\"1 day\", \"2 days\", \"3 days\"])\n msg = \"searchsorted requires compatible dtype\"\n with pytest.raises(TypeError, match=msg):\n idx.searchsorted(arg)\n\n def test_argmin_argmax(self):\n idx = TimedeltaIndex([\"1 day 00:00:05\", \"1 day 00:00:01\", \"1 day 00:00:02\"])\n assert idx.argmin() == 1\n assert idx.argmax() == 0\n\n def test_misc_coverage(self):\n\n rng = timedelta_range(\"1 
day\", periods=5)\n result = rng.groupby(rng.days)\n assert isinstance(list(result.values())[0][0], Timedelta)\n\n idx = TimedeltaIndex([\"3d\", \"1d\", \"2d\"])\n assert not idx.equals(list(idx))\n\n non_td = Index(list(\"abc\"))\n assert not idx.equals(list(non_td))\n\n def test_map(self):\n # test_map_dictlike generally tests\n\n rng = timedelta_range(\"1 day\", periods=10)\n\n f = lambda x: x.days\n result = rng.map(f)\n exp = Int64Index([f(x) for x in rng])\n tm.assert_index_equal(result, exp)\n\n def test_pass_TimedeltaIndex_to_index(self):\n\n rng = timedelta_range(\"1 days\", \"10 days\")\n idx = Index(rng, dtype=object)\n\n expected = Index(rng.to_pytimedelta(), dtype=object)\n\n tm.assert_numpy_array_equal(idx.values, expected.values)\n\n def test_append_numpy_bug_1681(self):\n\n td = timedelta_range(\"1 days\", \"10 days\", freq=\"2D\")\n a = DataFrame()\n c = DataFrame({\"A\": \"foo\", \"B\": td}, index=td)\n str(c)\n\n result = a.append(c)\n assert (result[\"B\"] == td).all()\n\n def test_fields(self):\n rng = timedelta_range(\"1 days, 10:11:12.100123456\", periods=2, freq=\"s\")\n tm.assert_index_equal(rng.days, Index([1, 1], dtype=\"int64\"))\n tm.assert_index_equal(\n rng.seconds,\n Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype=\"int64\"),\n )\n tm.assert_index_equal(\n rng.microseconds, Index([100 * 1000 + 123, 100 * 1000 + 123], dtype=\"int64\")\n )\n tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype=\"int64\"))\n\n msg = \"'TimedeltaIndex' object has no attribute '{}'\"\n with pytest.raises(AttributeError, match=msg.format(\"hours\")):\n rng.hours\n with pytest.raises(AttributeError, match=msg.format(\"minutes\")):\n rng.minutes\n with pytest.raises(AttributeError, match=msg.format(\"milliseconds\")):\n rng.milliseconds\n\n # with nat\n s = Series(rng)\n s[1] = np.nan\n\n tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))\n tm.assert_series_equal(\n s.dt.seconds, Series([10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1])\n )\n\n # preserve name (GH15589)\n rng.name = \"name\"\n assert rng.days.name == \"name\"\n\n def test_freq_conversion(self):\n\n # doc example\n\n # series\n td = Series(date_range(\"20130101\", periods=4)) - Series(\n date_range(\"20121201\", periods=4)\n )\n td[2] += timedelta(minutes=5, seconds=3)\n td[3] = np.nan\n\n result = td / np.timedelta64(1, \"D\")\n expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])\n tm.assert_series_equal(result, expected)\n\n result = td.astype(\"timedelta64[D]\")\n expected = Series([31, 31, 31, np.nan])\n tm.assert_series_equal(result, expected)\n\n result = td / np.timedelta64(1, \"s\")\n expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])\n tm.assert_series_equal(result, expected)\n\n result = td.astype(\"timedelta64[s]\")\n tm.assert_series_equal(result, expected)\n\n # tdi\n td = TimedeltaIndex(td)\n\n result = td / np.timedelta64(1, \"D\")\n expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])\n tm.assert_index_equal(result, expected)\n\n result = td.astype(\"timedelta64[D]\")\n expected = Index([31, 31, 31, np.nan])\n tm.assert_index_equal(result, expected)\n\n result = td / np.timedelta64(1, \"s\")\n expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])\n tm.assert_index_equal(result, expected)\n\n result = td.astype(\"timedelta64[s]\")\n tm.assert_index_equal(result, expected)\n"
] | [
[
"pandas._testing.assert_numpy_array_equal",
"pandas.timedelta_range",
"pandas.Series",
"pandas.offsets.Hour",
"pandas.date_range",
"numpy.timedelta64",
"pandas._testing.makeTimedeltaIndex",
"pandas.DataFrame",
"pandas.TimedeltaIndex",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_series_equal",
"numpy.array",
"pandas.Index",
"pandas.Timestamp"
]
] |
adipasquale/frontmatter-analysis | [
"068b8870ee35569a81600f637569ad589087e2a8"
] | [
"fma/analyze.py"
] | [
"import pandas as pd\nimport os\nfrom pathlib import Path\nimport frontmatter\nimport argparse\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\", help=\"path containing .md files\")\n args = parser.parse_args()\n data = [frontmatter.load(path).metadata for path in Path(args.path).glob('*.md')]\n df = pd.DataFrame(data)\n with pd.option_context('display.width', 100):\n print(df.describe().transpose())\n"
] | [
[
"pandas.DataFrame",
"pandas.option_context"
]
] |
kevinmtian/pytorchvideo | [
"168e16859a6029ef8ebeb476f9163bebb6c6b87d",
"168e16859a6029ef8ebeb476f9163bebb6c6b87d"
] | [
"pytorchvideo/models/memory_bank.py",
"tests/test_layers_convolutions.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport math\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytorchvideo.layers.utils import set_attributes\n\n\nclass MemoryBank(nn.Module):\n \"\"\"\n Performs Non-Parametric Instance Discrimination for self supervised learning on\n video. A memory bank is built to keep and update the historical feature embedding\n and use them for contrastive learning.\n\n The original paper is:\n Unsupervised Feature Learning via Non-Parametric Instance Discrimination\n https://arxiv.org/pdf/1805.01978.pdf\n\n More details can be found from the memory bank part in the following paper:\n Momentum Contrast for Unsupervised Visual Representation Learning\n https://arxiv.org/pdf/1911.05722.pdf\n \"\"\"\n\n def __init__(\n self,\n backbone: nn.Module,\n mlp: Optional[nn.Module] = None,\n neg_size: int = 4096,\n temperature: float = 0.07,\n bank_size: int = 1280000,\n dim: int = 2048,\n mmt: float = 0.999,\n ) -> None:\n \"\"\"\n Args:\n backbone (nn.Module): backbone used to forward the input.\n mlp (nn.Module): multi-layer perception used in memory bank instance\n discrimination model.\n neg_size (int): size of negative samples per instance.\n temperature (float): temperature to use for contrastive learning.\n bank_size (int): size of the memory bank, expected to be the same size as\n the training set.\n dim (int): dimension of the channel.\n mmt (float): momentum to use.\n \"\"\"\n super().__init__()\n set_attributes(self, locals())\n self._init_mem_bank(bank_size, dim)\n\n def _init_mem_bank(self, bank_size: int, dim: int) -> None:\n \"\"\"\n Given the memory bank size and the channel dimension, initialize the memory\n bank.\n Args:\n bank_size (int): size of the memory bank, expected to be the same size as\n the training set.\n dim (int): dimension of the channel.\n \"\"\"\n stdv = 1.0 / math.sqrt(dim / 3)\n self.register_buffer(\n \"memory\",\n torch.rand(\n bank_size,\n dim,\n )\n .mul_(2 * stdv)\n .add_(-stdv)\n .to(next(self.backbone.parameters()).device),\n )\n\n def forward(self, x: torch.Tensor, x_ind: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Perform contrastive learning with random sampled negative instance from the\n memory bank. During training, update the memory bank with latest feature\n embedding.\n Args:\n x (torch.tensor): a batch of image with augmentation. The input tensor\n shape should able to be feed into the backbone.\n x_ind (torch.tensor): the index of the image x from the dataset. 
Expected\n shape is B.\n \"\"\"\n batch_size = x.shape[0]\n x = self.backbone(x)\n if self.mlp is not None:\n x = self.mlp(x)\n # Normalize the output embedding before multiplication.\n x = F.normalize(x, p=2, dim=1)\n # Random sample negative instances from the memory bank.\n idx = torch.randint(0, self.bank_size, size=(batch_size, self.neg_size + 1)).to(\n x.device\n )\n # Fill the first with positive instances.\n idx.select(1, 0).copy_(x_ind.data)\n weight = torch.index_select(self.memory, 0, idx.view(-1)).detach()\n weight = weight.view(batch_size, self.neg_size + 1, self.dim)\n # Multiplication for contrastive learning.\n out = torch.einsum(\"bkc,bc->bk\", weight, x)\n out = torch.div(out, self.temperature)\n gt = torch.zeros((batch_size,), device=x.device, dtype=torch.long)\n loss = torch.nn.functional.cross_entropy(out, gt)\n # Update memory during training.\n if self.training:\n with torch.no_grad():\n pos = torch.index_select(self.memory, 0, x_ind.view(-1))\n pos.mul_(self.mmt)\n pos.add_(torch.mul(x, 1 - self.mmt))\n norm = pos.pow(2).sum(1, keepdim=True).pow(0.5)\n updated = pos.div(norm)\n self.memory.index_copy_(0, x_ind, updated)\n return loss\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport itertools\nimport unittest\n\nimport numpy as np\nimport torch\nfrom pytorchvideo.layers.convolutions import (\n Conv2plus1d,\n ConvReduce3D,\n create_conv_2plus1d,\n)\nfrom torch import nn\n\n\nclass TestConvReduce3D(unittest.TestCase):\n def setUp(self):\n super().setUp()\n torch.set_rng_state(torch.manual_seed(42).get_state())\n\n def test_create_stack_conv(self):\n \"\"\"\n Test ConvReduce3D.\n \"\"\"\n for input_dim, output_dim in itertools.product((2, 4), (4, 8, 16)):\n model = ConvReduce3D(\n in_channels=input_dim,\n out_channels=output_dim,\n kernel_size=((1, 1, 1), (3, 3, 3), (1, 3, 3)),\n stride=((1, 1, 1), (1, 1, 1), None),\n padding=((0, 0, 0), (1, 1, 1), (0, 1, 1)),\n dilation=((2, 2, 2), (1, 1, 1), None),\n groups=(1, 2, None),\n bias=(True, False, None),\n )\n model_gt_list = [\n nn.Conv3d(\n in_channels=input_dim,\n out_channels=output_dim,\n kernel_size=(1, 1, 1),\n stride=(1, 1, 1),\n padding=(0, 0, 0),\n dilation=(2, 2, 2),\n groups=1,\n bias=True,\n ),\n nn.Conv3d(\n in_channels=input_dim,\n out_channels=output_dim,\n kernel_size=(3, 3, 3),\n stride=(1, 1, 1),\n padding=(1, 1, 1),\n dilation=(1, 1, 1),\n groups=2,\n bias=False,\n ),\n nn.Conv3d(\n in_channels=input_dim,\n out_channels=output_dim,\n kernel_size=(1, 3, 3),\n padding=(0, 1, 1),\n ),\n ]\n model.convs[0].load_state_dict(\n model_gt_list[0].state_dict(), strict=True\n ) # explicitly use strict mode.\n model.convs[1].load_state_dict(\n model_gt_list[1].state_dict(), strict=True\n ) # explicitly use strict mode.\n model.convs[2].load_state_dict(\n model_gt_list[2].state_dict(), strict=True\n ) # explicitly use strict mode.\n\n # Test forwarding.\n for tensor in TestConvReduce3D._get_inputs(input_dim):\n if tensor.shape[1] != input_dim:\n with self.assertRaises(RuntimeError):\n output_tensor = model(tensor)\n continue\n else:\n output_tensor = model(tensor)\n output_gt = []\n for ind in range(3):\n output_gt.append(model_gt_list[ind](tensor))\n output_tensor_gt = torch.stack(output_gt, dim=0).sum(\n dim=0, keepdim=False\n )\n\n self.assertEqual(\n output_tensor.shape,\n output_tensor_gt.shape,\n \"Output shape {} is different from expected shape {}\".format(\n output_tensor.shape, output_tensor_gt.shape\n ),\n )\n\n @staticmethod\n def _get_inputs(input_dim: int = 3) -> torch.tensor:\n \"\"\"\n Provide different tensors as test cases.\n\n Yield:\n (torch.tensor): tensor as test case input.\n \"\"\"\n # Prepare random tensor as test cases.\n shapes = (\n # Forward succeeded.\n (1, input_dim, 3, 7, 7),\n (1, input_dim, 5, 7, 7),\n (1, input_dim, 7, 7, 7),\n (2, input_dim, 3, 7, 7),\n (4, input_dim, 3, 7, 7),\n (8, input_dim, 3, 7, 7),\n (2, input_dim, 3, 7, 14),\n (2, input_dim, 3, 14, 7),\n (2, input_dim, 3, 14, 14),\n # Forward failed.\n (8, input_dim * 2, 3, 7, 7),\n (8, input_dim * 4, 5, 7, 7),\n )\n for shape in shapes:\n yield torch.rand(shape)\n\n\nclass TestConv2plus1d(unittest.TestCase):\n def setUp(self):\n super().setUp()\n torch.set_rng_state(torch.manual_seed(42).get_state())\n\n def test_create_2plus1d_conv(self):\n \"\"\"\n Test Conv2plus1d.\n \"\"\"\n for input_dim, output_dim in itertools.product((2, 4), (4, 8, 16)):\n model = Conv2plus1d(\n conv_t=nn.Conv3d(\n in_channels=input_dim,\n out_channels=output_dim,\n kernel_size=(3, 1, 1),\n stride=(2, 1, 1),\n padding=(1, 0, 0),\n bias=False,\n ),\n norm=nn.BatchNorm3d(output_dim),\n activation=nn.ReLU(),\n conv_xy=nn.Conv3d(\n in_channels=output_dim,\n 
out_channels=output_dim,\n kernel_size=(1, 3, 3),\n stride=(1, 2, 2),\n padding=(0, 1, 1),\n bias=False,\n ),\n )\n\n model_gt = create_conv_2plus1d(\n in_channels=input_dim,\n out_channels=output_dim,\n kernel_size=(3, 3, 3),\n stride=(2, 2, 2),\n padding=(1, 1, 1),\n bias=False,\n norm=nn.BatchNorm3d,\n norm_eps=1e-5,\n norm_momentum=0.1,\n activation=nn.ReLU,\n )\n\n model.load_state_dict(\n model_gt.state_dict(), strict=True\n ) # explicitly use strict mode.\n\n # Test forwarding.\n for input_tensor in TestConv2plus1d._get_inputs():\n with torch.no_grad():\n if input_tensor.shape[1] != input_dim:\n with self.assertRaises(RuntimeError):\n output_tensor = model(input_tensor)\n continue\n else:\n output_tensor = model(input_tensor)\n output_tensor_gt = model_gt(input_tensor)\n self.assertEqual(\n output_tensor.shape,\n output_tensor_gt.shape,\n \"Output shape {} is different from expected shape {}\".format(\n output_tensor.shape, output_tensor_gt.shape\n ),\n )\n self.assertTrue(\n np.allclose(output_tensor.numpy(), output_tensor_gt.numpy())\n )\n\n @staticmethod\n def _get_inputs(input_dim: int = 3) -> torch.tensor:\n \"\"\"\n Provide different tensors as test cases.\n\n Yield:\n (torch.tensor): tensor as test case input.\n \"\"\"\n # Prepare random tensor as test cases.\n shapes = (\n # Forward succeeded.\n (1, input_dim, 3, 7, 7),\n (1, input_dim, 5, 7, 7),\n (1, input_dim, 7, 7, 7),\n (2, input_dim, 3, 7, 7),\n (4, input_dim, 3, 7, 7),\n (8, input_dim, 3, 7, 7),\n (2, input_dim, 3, 7, 14),\n (2, input_dim, 3, 14, 7),\n (2, input_dim, 3, 14, 14),\n # Forward failed.\n (8, input_dim * 2, 3, 7, 7),\n (8, input_dim * 4, 5, 7, 7),\n )\n for shape in shapes:\n yield torch.rand(shape)\n"
] | [
[
"torch.randint",
"torch.nn.functional.normalize",
"torch.rand",
"torch.no_grad",
"torch.div",
"torch.mul",
"torch.nn.functional.cross_entropy",
"torch.zeros",
"torch.einsum"
],
[
"torch.stack",
"torch.nn.BatchNorm3d",
"torch.rand",
"torch.manual_seed",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.Conv3d"
]
] |
fxdmhtt/airflow | [
"cf88f7bc7bbd3e9bf110e98f025759a96c130235"
] | [
"tests/contrib/operators/test_hive_to_dynamodb_operator.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport json\nimport unittest\nfrom unittest import mock\nimport datetime\n\nimport pandas as pd\n\nfrom airflow import DAG\nfrom airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook\n\nimport airflow.contrib.operators.hive_to_dynamodb\n\nDEFAULT_DATE = datetime.datetime(2015, 1, 1)\nDEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()\nDEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]\n\ntry:\n from moto import mock_dynamodb2\nexcept ImportError:\n mock_dynamodb2 = None\n\n\nclass HiveToDynamoDBTransferOperatorTest(unittest.TestCase):\n\n def setUp(self):\n args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n dag = DAG('test_dag_id', default_args=args)\n self.dag = dag\n self.sql = 'SELECT 1'\n self.hook = AwsDynamoDBHook(\n aws_conn_id='aws_default', region_name='us-east-1')\n\n @staticmethod\n def process_data(data, *args, **kwargs):\n return json.loads(data.to_json(orient='records'))\n\n @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')\n @mock_dynamodb2\n def test_get_conn_returns_a_boto3_connection(self):\n hook = AwsDynamoDBHook(aws_conn_id='aws_default')\n self.assertIsNotNone(hook.get_conn())\n\n @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',\n return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))\n @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')\n @mock_dynamodb2\n def test_get_records_with_schema(self, get_results_mock):\n # this table needs to be created in production\n self.hook.get_conn().create_table(\n TableName='test_airflow',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'name',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(\n sql=self.sql,\n table_name=\"test_airflow\",\n task_id='hive_to_dynamodb_check',\n table_keys=['id'],\n dag=self.dag)\n\n operator.execute(None)\n\n table = self.hook.get_conn().Table('test_airflow')\n table.meta.client.get_waiter(\n 'table_exists').wait(TableName='test_airflow')\n self.assertEqual(table.item_count, 1)\n\n @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',\n return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))\n @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')\n @mock_dynamodb2\n def test_pre_process_records_with_schema(self, get_results_mock):\n # this table needs to be created in production\n self.hook.get_conn().create_table(\n TableName='test_airflow',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 
'KeyType': 'HASH'\n },\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'name',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(\n sql=self.sql,\n table_name='test_airflow',\n task_id='hive_to_dynamodb_check',\n table_keys=['id'],\n pre_process=self.process_data,\n dag=self.dag)\n\n operator.execute(None)\n\n table = self.hook.get_conn().Table('test_airflow')\n table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')\n self.assertEqual(table.item_count, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"pandas.DataFrame"
]
] |
ziko1305/Hidden-Markov-Based-Mathematical-Model | [
"0ad906e6c4f99ad91d4047aed78df49399447633"
] | [
"Ingredient_Extractor/with_unknown_words_concideration/with_various_accuracies_on_first_layer/mean_values_1.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 20 09:49:40 2020\r\n\r\n@author: Mehdi\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\na1=np.nanmean([table_11.loc['A'].accuracy,table_12.loc['A'].accuracy,table_13.loc['A'].accuracy,table_14.loc['A'].accuracy,\r\n table_15.loc['A'].accuracy,table_16.loc['A'].accuracy,table_17.loc['A'].accuracy,table_18.loc['A'].accuracy,\r\n table_19.loc['A'].accuracy,table_110.loc['A'].accuracy])\r\n\r\n\r\na2=np.nanmean([table_11.loc['A'].f1_score,table_12.loc['A'].f1_score,table_13.loc['A'].f1_score,table_14.loc['A'].f1_score,\r\n table_15.loc['A'].f1_score,table_16.loc['A'].f1_score,table_17.loc['A'].f1_score,table_18.loc['A'].f1_score,\r\n table_19.loc['A'].f1_score,table_110.loc['A'].f1_score])\r\n\r\n\r\na3=np.nanmean([table_11.loc['A'][2],table_12.loc['A'][2],table_13.loc['A'][2],table_14.loc['A'][2],\r\n table_15.loc['A'][2],table_16.loc['A'][2],table_17.loc['A'][2],table_18.loc['A'][2],\r\n table_19.loc['A'][2],table_110.loc['A'][2]])\r\n\r\n\r\na4=np.nanmean([table_11.loc['A'][3],table_12.loc['A'][3],table_13.loc['A'][3],table_14.loc['A'][3],\r\n table_15.loc['A'][3],table_16.loc['A'][3],table_17.loc['A'][3],table_18.loc['A'][3],\r\n table_19.loc['A'][3],table_110.loc['A'][3]])\r\n\r\na5=np.nanmean([table_11.loc['A'][4],table_12.loc['A'][4],table_13.loc['A'][4],table_14.loc['A'][4],\r\n table_15.loc['A'][4],table_16.loc['A'][4],table_17.loc['A'][4],table_18.loc['A'][4],\r\n table_19.loc['A'][4],table_110.loc['A'][4]])\r\n\r\na6=np.nanmean([table_11.loc['A'][5],table_12.loc['A'][5],table_13.loc['A'][5],table_14.loc['A'][5],\r\n table_15.loc['A'][5],table_16.loc['A'][5],table_17.loc['A'][5],table_18.loc['A'][5],\r\n table_19.loc['A'][5],table_110.loc['A'][5]])\r\n\r\na7=np.nanmean([table_11.loc['B'].accuracy,table_12.loc['B'].accuracy,table_13.loc['B'].accuracy,table_14.loc['B'].accuracy,\r\n table_15.loc['B'].accuracy,table_16.loc['B'].accuracy,table_17.loc['B'].accuracy,table_18.loc['B'].accuracy,\r\n table_19.loc['B'].accuracy,table_110.loc['B'].accuracy])\r\n\r\n\r\na8=np.nanmean([table_11.loc['B'].f1_score,table_12.loc['B'].f1_score,table_13.loc['B'].f1_score,table_14.loc['B'].f1_score,\r\n table_15.loc['B'].f1_score,table_16.loc['B'].f1_score,table_17.loc['B'].f1_score,table_18.loc['B'].f1_score,\r\n table_19.loc['B'].f1_score,table_110.loc['B'].f1_score])\r\n\r\n\r\na9=np.nanmean([table_11.loc['B'][2],table_12.loc['B'][2],table_13.loc['B'][2],table_14.loc['B'][2],\r\n table_15.loc['B'][2],table_16.loc['B'][2],table_17.loc['B'][2],table_18.loc['B'][2],\r\n table_19.loc['B'][2],table_110.loc['B'][2]])\r\n\r\n\r\na10=np.nanmean([table_11.loc['B'][3],table_12.loc['B'][3],table_13.loc['B'][3],table_14.loc['B'][3],\r\n table_15.loc['B'][3],table_16.loc['B'][3],table_17.loc['B'][3],table_18.loc['B'][3],\r\n table_19.loc['B'][3],table_110.loc['B'][3]])\r\n\r\na11=np.nanmean([table_11.loc['B'][4],table_12.loc['B'][4],table_13.loc['B'][4],table_14.loc['B'][4],\r\n table_15.loc['B'][4],table_16.loc['B'][4],table_17.loc['B'][4],table_18.loc['B'][4],\r\n table_19.loc['B'][4],table_110.loc['B'][4]])\r\n\r\na12=np.nanmean([table_11.loc['B'][5],table_12.loc['B'][5],table_13.loc['B'][5],table_14.loc['B'][5],\r\n table_15.loc['B'][5],table_16.loc['B'][5],table_17.loc['B'][5],table_18.loc['B'][5],\r\n table_19.loc['B'][5],table_110.loc['B'][5]])\r\n\r\n\r\na13=np.nanmean([table_11.loc['C'].accuracy,table_12.loc['C'].accuracy,table_13.loc['C'].accuracy,table_14.loc['C'].accuracy,\r\n 
table_15.loc['C'].accuracy,table_16.loc['C'].accuracy,table_17.loc['C'].accuracy,table_18.loc['C'].accuracy,\r\n table_19.loc['C'].accuracy,table_110.loc['C'].accuracy])\r\n\r\n\r\na14=np.nanmean([table_11.loc['C'].f1_score,table_12.loc['C'].f1_score,table_13.loc['C'].f1_score,table_14.loc['C'].f1_score,\r\n table_15.loc['C'].f1_score,table_16.loc['C'].f1_score,table_17.loc['C'].f1_score,table_18.loc['C'].f1_score,\r\n table_19.loc['C'].f1_score,table_110.loc['C'].f1_score])\r\n\r\n\r\na15=np.nanmean([table_11.loc['C'][2],table_12.loc['C'][2],table_13.loc['C'][2],table_14.loc['C'][2],\r\n table_15.loc['C'][2],table_16.loc['C'][2],table_17.loc['C'][2],table_18.loc['C'][2],\r\n table_19.loc['C'][2],table_110.loc['C'][2]])\r\n\r\n\r\na16=np.nanmean([table_11.loc['C'][3],table_12.loc['C'][3],table_13.loc['C'][3],table_14.loc['C'][3],\r\n table_15.loc['C'][3],table_16.loc['C'][3],table_17.loc['C'][3],table_18.loc['C'][3],\r\n table_19.loc['C'][3],table_110.loc['C'][3]])\r\n\r\na17=np.nanmean([table_11.loc['C'][4],table_12.loc['C'][4],table_13.loc['C'][4],table_14.loc['C'][4],\r\n table_15.loc['C'][4],table_16.loc['C'][4],table_17.loc['C'][4],table_18.loc['C'][4],\r\n table_19.loc['C'][4],table_110.loc['C'][4]])\r\n\r\na18=np.nanmean([table_11.loc['C'][5],table_12.loc['C'][5],table_13.loc['C'][5],table_14.loc['C'][5],\r\n table_15.loc['C'][5],table_16.loc['C'][5],table_17.loc['C'][5],table_18.loc['C'][5],\r\n table_19.loc['C'][5],table_110.loc['C'][5]])\r\n\r\na19=np.nanmean([table_11.loc['D'].accuracy,table_12.loc['D'].accuracy,table_13.loc['D'].accuracy,table_14.loc['D'].accuracy,\r\n table_15.loc['D'].accuracy,table_16.loc['D'].accuracy,table_17.loc['D'].accuracy,table_18.loc['D'].accuracy,\r\n table_19.loc['D'].accuracy,table_110.loc['D'].accuracy])\r\n\r\n\r\na20=np.nanmean([table_11.loc['D'].f1_score,table_12.loc['D'].f1_score,table_13.loc['D'].f1_score,table_14.loc['D'].f1_score,\r\n table_15.loc['D'].f1_score,table_16.loc['D'].f1_score,table_17.loc['D'].f1_score,table_18.loc['D'].f1_score,\r\n table_19.loc['D'].f1_score,table_110.loc['D'].f1_score])\r\n\r\n\r\na21=np.nanmean([table_11.loc['D'][2],table_12.loc['D'][2],table_13.loc['D'][2],table_14.loc['D'][2],\r\n table_15.loc['D'][2],table_16.loc['D'][2],table_17.loc['D'][2],table_18.loc['D'][2],\r\n table_19.loc['D'][2],table_110.loc['D'][2]])\r\n\r\n\r\na22=np.nanmean([table_11.loc['D'][3],table_12.loc['D'][3],table_13.loc['D'][3],table_14.loc['D'][3],\r\n table_15.loc['D'][3],table_16.loc['D'][3],table_17.loc['D'][3],table_18.loc['D'][3],\r\n table_19.loc['D'][3],table_110.loc['D'][3]])\r\n\r\na23=np.nanmean([table_11.loc['D'][4],table_12.loc['D'][4],table_13.loc['D'][4],table_14.loc['D'][4],\r\n table_15.loc['D'][4],table_16.loc['D'][4],table_17.loc['D'][4],table_18.loc['D'][4],\r\n table_19.loc['D'][4],table_110.loc['D'][4]])\r\n\r\na24=np.nanmean([table_11.loc['D'][5],table_12.loc['D'][5],table_13.loc['D'][5],table_14.loc['D'][5],\r\n table_15.loc['D'][5],table_16.loc['D'][5],table_17.loc['D'][5],table_18.loc['D'][5],\r\n table_19.loc['D'][5],table_110.loc['D'][5]])\r\n\r\n\r\na25=np.nanmean([table_11.loc['E'].accuracy,table_12.loc['E'].accuracy,table_13.loc['E'].accuracy,table_14.loc['E'].accuracy,\r\n table_15.loc['E'].accuracy,table_16.loc['E'].accuracy,table_17.loc['E'].accuracy,table_18.loc['E'].accuracy,\r\n table_19.loc['E'].accuracy,table_110.loc['E'].accuracy])\r\n\r\n\r\na26=np.nanmean([table_11.loc['E'].f1_score,table_12.loc['E'].f1_score,table_13.loc['E'].f1_score,table_14.loc['E'].f1_score,\r\n 
table_15.loc['E'].f1_score,table_16.loc['E'].f1_score,table_17.loc['E'].f1_score,table_18.loc['E'].f1_score,\r\n table_19.loc['E'].f1_score,table_110.loc['E'].f1_score])\r\n\r\n\r\na27=np.nanmean([table_11.loc['E'][2],table_12.loc['E'][2],table_13.loc['E'][2],table_14.loc['E'][2],\r\n table_15.loc['E'][2],table_16.loc['E'][2],table_17.loc['E'][2],table_18.loc['E'][2],\r\n table_19.loc['E'][2],table_110.loc['E'][2]])\r\n\r\n\r\na28=np.nanmean([table_11.loc['E'][3],table_12.loc['E'][3],table_13.loc['E'][3],table_14.loc['E'][3],\r\n table_15.loc['E'][3],table_16.loc['E'][3],table_17.loc['E'][3],table_18.loc['E'][3],\r\n table_19.loc['E'][3],table_110.loc['E'][3]])\r\n\r\na29=np.nanmean([table_11.loc['E'][4],table_12.loc['E'][4],table_13.loc['E'][4],table_14.loc['E'][4],\r\n table_15.loc['E'][4],table_16.loc['E'][4],table_17.loc['E'][4],table_18.loc['E'][4],\r\n table_19.loc['E'][4],table_110.loc['E'][4]])\r\n\r\na30=np.nanmean([table_11.loc['E'][5],table_12.loc['E'][5],table_13.loc['E'][5],table_14.loc['E'][5],\r\n table_15.loc['E'][5],table_16.loc['E'][5],table_17.loc['E'][5],table_18.loc['E'][5],\r\n table_19.loc['E'][5],table_110.loc['E'][5]])\r\n\r\n\r\na31=np.nanmean([table_11.loc['F'].accuracy,table_12.loc['F'].accuracy,table_13.loc['F'].accuracy,table_14.loc['F'].accuracy,\r\n table_15.loc['F'].accuracy,table_16.loc['F'].accuracy,table_17.loc['F'].accuracy,table_18.loc['F'].accuracy,\r\n table_19.loc['F'].accuracy,table_110.loc['F'].accuracy])\r\n\r\n\r\na32=np.nanmean([table_11.loc['F'].f1_score,table_12.loc['F'].f1_score,table_13.loc['F'].f1_score,table_14.loc['F'].f1_score,\r\n table_15.loc['F'].f1_score,table_16.loc['F'].f1_score,table_17.loc['F'].f1_score,table_18.loc['F'].f1_score,\r\n table_19.loc['F'].f1_score,table_110.loc['F'].f1_score])\r\n\r\n\r\na33=np.nanmean([table_11.loc['F'][2],table_12.loc['F'][2],table_13.loc['F'][2],table_14.loc['F'][2],\r\n table_15.loc['F'][2],table_16.loc['F'][2],table_17.loc['F'][2],table_18.loc['F'][2],\r\n table_19.loc['F'][2],table_110.loc['F'][2]])\r\n\r\n\r\na34=np.nanmean([table_11.loc['F'][3],table_12.loc['F'][3],table_13.loc['F'][3],table_14.loc['F'][3],\r\n table_15.loc['F'][3],table_16.loc['F'][3],table_17.loc['F'][3],table_18.loc['F'][3],\r\n table_19.loc['F'][3],table_110.loc['F'][3]])\r\n\r\na35=np.nanmean([table_11.loc['F'][4],table_12.loc['F'][4],table_13.loc['F'][4],table_14.loc['F'][4],\r\n table_15.loc['F'][4],table_16.loc['F'][4],table_17.loc['F'][4],table_18.loc['F'][4],\r\n table_19.loc['F'][4],table_110.loc['F'][4]])\r\n\r\na36=np.nanmean([table_11.loc['F'][5],table_12.loc['F'][5],table_13.loc['F'][5],table_14.loc['F'][5],\r\n table_15.loc['F'][5],table_16.loc['F'][5],table_17.loc['F'][5],table_18.loc['F'][5],\r\n table_19.loc['F'][5],table_110.loc['F'][5]])\r\n\r\na37=np.nanmean([table_11.loc['G'].accuracy,table_12.loc['G'].accuracy,table_13.loc['G'].accuracy,table_14.loc['G'].accuracy,\r\n table_15.loc['G'].accuracy,table_16.loc['G'].accuracy,table_17.loc['G'].accuracy,table_18.loc['G'].accuracy,\r\n table_19.loc['G'].accuracy,table_110.loc['G'].accuracy])\r\n\r\n\r\na38=np.nanmean([table_11.loc['G'].f1_score,table_12.loc['G'].f1_score,table_13.loc['G'].f1_score,table_14.loc['G'].f1_score,\r\n table_15.loc['G'].f1_score,table_16.loc['G'].f1_score,table_17.loc['G'].f1_score,table_18.loc['G'].f1_score,\r\n table_19.loc['G'].f1_score,table_110.loc['G'].f1_score])\r\n\r\n\r\na39=np.nanmean([table_11.loc['G'][2],table_12.loc['G'][2],table_13.loc['G'][2],table_14.loc['G'][2],\r\n 
table_15.loc['G'][2],table_16.loc['G'][2],table_17.loc['G'][2],table_18.loc['G'][2],\r\n table_19.loc['G'][2],table_110.loc['G'][2]])\r\n\r\n\r\na40=np.nanmean([table_11.loc['G'][3],table_12.loc['G'][3],table_13.loc['G'][3],table_14.loc['G'][3],\r\n table_15.loc['G'][3],table_16.loc['G'][3],table_17.loc['G'][3],table_18.loc['G'][3],\r\n table_19.loc['G'][3],table_110.loc['G'][3]])\r\n\r\na41=np.nanmean([table_11.loc['G'][4],table_12.loc['G'][4],table_13.loc['G'][4],table_14.loc['G'][4],\r\n table_15.loc['G'][4],table_16.loc['G'][4],table_17.loc['G'][4],table_18.loc['G'][4],\r\n table_19.loc['G'][4],table_110.loc['G'][4]])\r\n\r\na42=np.nanmean([table_11.loc['G'][5],table_12.loc['G'][5],table_13.loc['G'][5],table_14.loc['G'][5],\r\n table_15.loc['G'][5],table_16.loc['G'][5],table_17.loc['G'][5],table_18.loc['G'][5],\r\n table_19.loc['G'][5],table_110.loc['G'][5]])\r\n\r\n\r\na43=np.nanmean([table_11.loc['H'].accuracy,table_12.loc['H'].accuracy,table_13.loc['H'].accuracy,table_14.loc['H'].accuracy,\r\n table_15.loc['H'].accuracy,table_16.loc['H'].accuracy,table_17.loc['H'].accuracy,table_18.loc['H'].accuracy,\r\n table_19.loc['H'].accuracy,table_110.loc['H'].accuracy])\r\n\r\n\r\na44=np.nanmean([table_11.loc['H'].f1_score,table_12.loc['H'].f1_score,table_13.loc['H'].f1_score,table_14.loc['H'].f1_score,\r\n table_15.loc['H'].f1_score,table_16.loc['H'].f1_score,table_17.loc['H'].f1_score,table_18.loc['H'].f1_score,\r\n table_19.loc['H'].f1_score,table_110.loc['H'].f1_score])\r\n\r\n\r\na45=np.nanmean([table_11.loc['H'][2],table_12.loc['H'][2],table_13.loc['H'][2],table_14.loc['H'][2],\r\n table_15.loc['H'][2],table_16.loc['H'][2],table_17.loc['H'][2],table_18.loc['H'][2],\r\n table_19.loc['H'][2],table_110.loc['H'][2]])\r\n\r\n\r\na46=np.nanmean([table_11.loc['H'][3],table_12.loc['H'][3],table_13.loc['H'][3],table_14.loc['H'][3],\r\n table_15.loc['H'][3],table_16.loc['H'][3],table_17.loc['H'][3],table_18.loc['H'][3],\r\n table_19.loc['H'][3],table_110.loc['H'][3]])\r\n\r\na47=np.nanmean([table_11.loc['H'][4],table_12.loc['H'][4],table_13.loc['H'][4],table_14.loc['H'][4],\r\n table_15.loc['H'][4],table_16.loc['H'][4],table_17.loc['H'][4],table_18.loc['H'][4],\r\n table_19.loc['H'][4],table_110.loc['H'][4]])\r\n\r\na48=np.nanmean([table_11.loc['H'][5],table_12.loc['H'][5],table_13.loc['H'][5],table_14.loc['H'][5],\r\n table_15.loc['H'][5],table_16.loc['H'][5],table_17.loc['H'][5],table_18.loc['H'][5],\r\n table_19.loc['H'][5],table_110.loc['H'][5]])\r\n\r\n\r\na49=np.nanmean([table_11.loc['I'].accuracy,table_12.loc['I'].accuracy,table_13.loc['I'].accuracy,table_14.loc['I'].accuracy,\r\n table_15.loc['I'].accuracy,table_16.loc['I'].accuracy,table_17.loc['I'].accuracy,table_18.loc['I'].accuracy,\r\n table_19.loc['I'].accuracy,table_110.loc['I'].accuracy])\r\n\r\n\r\na50=np.nanmean([table_11.loc['I'].f1_score,table_12.loc['I'].f1_score,table_13.loc['I'].f1_score,table_14.loc['I'].f1_score,\r\n table_15.loc['I'].f1_score,table_16.loc['I'].f1_score,table_17.loc['I'].f1_score,table_18.loc['I'].f1_score,\r\n table_19.loc['I'].f1_score,table_110.loc['I'].f1_score])\r\n\r\n\r\na51=np.nanmean([table_11.loc['I'][2],table_12.loc['I'][2],table_13.loc['I'][2],table_14.loc['I'][2],\r\n table_15.loc['I'][2],table_16.loc['I'][2],table_17.loc['I'][2],table_18.loc['I'][2],\r\n table_19.loc['I'][2],table_110.loc['I'][2]])\r\n\r\n\r\na52=np.nanmean([table_11.loc['I'][3],table_12.loc['I'][3],table_13.loc['I'][3],table_14.loc['I'][3],\r\n 
table_15.loc['I'][3],table_16.loc['I'][3],table_17.loc['I'][3],table_18.loc['I'][3],\r\n table_19.loc['I'][3],table_110.loc['I'][3]])\r\n\r\na53=np.nanmean([table_11.loc['I'][4],table_12.loc['I'][4],table_13.loc['I'][4],table_14.loc['I'][4],\r\n table_15.loc['I'][4],table_16.loc['I'][4],table_17.loc['I'][4],table_18.loc['I'][4],\r\n table_19.loc['I'][4],table_110.loc['I'][4]])\r\n\r\na54=np.nanmean([table_11.loc['I'][5],table_12.loc['I'][5],table_13.loc['I'][5],table_14.loc['I'][5],\r\n table_15.loc['I'][5],table_16.loc['I'][5],table_17.loc['I'][5],table_18.loc['I'][5],\r\n table_19.loc['I'][5],table_110.loc['I'][5]])\r\n\r\n\r\na55=np.nanmean([table_11.loc['J'].accuracy,table_12.loc['J'].accuracy,table_13.loc['J'].accuracy,table_14.loc['J'].accuracy,\r\n table_15.loc['J'].accuracy,table_16.loc['J'].accuracy,table_17.loc['J'].accuracy,table_18.loc['J'].accuracy,\r\n table_19.loc['J'].accuracy,table_110.loc['J'].accuracy])\r\n\r\n\r\na56=np.nanmean([table_11.loc['J'].f1_score,table_12.loc['J'].f1_score,table_13.loc['J'].f1_score,table_14.loc['J'].f1_score,\r\n table_15.loc['J'].f1_score,table_16.loc['J'].f1_score,table_17.loc['J'].f1_score,table_18.loc['J'].f1_score,\r\n table_19.loc['J'].f1_score,table_110.loc['J'].f1_score])\r\n\r\n\r\na57=np.nanmean([table_11.loc['J'][2],table_12.loc['J'][2],table_13.loc['J'][2],table_14.loc['J'][2],\r\n table_15.loc['J'][2],table_16.loc['J'][2],table_17.loc['J'][2],table_18.loc['J'][2],\r\n table_19.loc['J'][2],table_110.loc['J'][2]])\r\n\r\n\r\na58=np.nanmean([table_11.loc['J'][3],table_12.loc['J'][3],table_13.loc['J'][3],table_14.loc['J'][3],\r\n table_15.loc['J'][3],table_16.loc['J'][3],table_17.loc['J'][3],table_18.loc['J'][3],\r\n table_19.loc['J'][3],table_110.loc['J'][3]])\r\n\r\na59=np.nanmean([table_11.loc['J'][4],table_12.loc['J'][4],table_13.loc['J'][4],table_14.loc['J'][4],\r\n table_15.loc['J'][4],table_16.loc['J'][4],table_17.loc['J'][4],table_18.loc['J'][4],\r\n table_19.loc['J'][4],table_110.loc['J'][4]])\r\n\r\na60=np.nanmean([table_11.loc['J'][5],table_12.loc['J'][5],table_13.loc['J'][5],table_14.loc['J'][5],\r\n table_15.loc['J'][5],table_16.loc['J'][5],table_17.loc['J'][5],table_18.loc['J'][5],\r\n table_19.loc['J'][5],table_110.loc['J'][5]])\r\n\r\na61=np.nanmean([table_11.loc['K'].accuracy,table_12.loc['K'].accuracy,table_13.loc['K'].accuracy,table_14.loc['K'].accuracy,\r\n table_15.loc['K'].accuracy,table_16.loc['K'].accuracy,table_17.loc['K'].accuracy,table_18.loc['K'].accuracy,\r\n table_19.loc['K'].accuracy,table_110.loc['K'].accuracy])\r\n\r\n\r\na62=np.nanmean([table_11.loc['K'].f1_score,table_12.loc['K'].f1_score,table_13.loc['K'].f1_score,table_14.loc['K'].f1_score,\r\n table_15.loc['K'].f1_score,table_16.loc['K'].f1_score,table_17.loc['K'].f1_score,table_18.loc['K'].f1_score,\r\n table_19.loc['K'].f1_score,table_110.loc['K'].f1_score])\r\n\r\n\r\na63=np.nanmean([table_11.loc['K'][2],table_12.loc['K'][2],table_13.loc['K'][2],table_14.loc['K'][2],\r\n table_15.loc['K'][2],table_16.loc['K'][2],table_17.loc['K'][2],table_18.loc['K'][2],\r\n table_19.loc['K'][2],table_110.loc['K'][2]])\r\n\r\n\r\na64=np.nanmean([table_11.loc['K'][3],table_12.loc['K'][3],table_13.loc['K'][3],table_14.loc['K'][3],\r\n table_15.loc['K'][3],table_16.loc['K'][3],table_17.loc['K'][3],table_18.loc['K'][3],\r\n table_19.loc['K'][3],table_110.loc['K'][3]])\r\n\r\na65=np.nanmean([table_11.loc['K'][4],table_12.loc['K'][4],table_13.loc['K'][4],table_14.loc['K'][4],\r\n table_15.loc['K'][4],table_16.loc['K'][4],table_17.loc['K'][4],table_18.loc['K'][4],\r\n 
table_19.loc['K'][4],table_110.loc['K'][4]])\r\n\r\na66=np.nanmean([table_11.loc['K'][5],table_12.loc['K'][5],table_13.loc['K'][5],table_14.loc['K'][5],\r\n table_15.loc['K'][5],table_16.loc['K'][5],table_17.loc['K'][5],table_18.loc['K'][5],\r\n table_19.loc['K'][5],table_110.loc['K'][5]])\r\n\r\na67=np.nanmean([table_11.loc['L'].accuracy,table_12.loc['L'].accuracy,table_13.loc['L'].accuracy,table_14.loc['L'].accuracy,\r\n table_15.loc['L'].accuracy,table_16.loc['L'].accuracy,table_17.loc['L'].accuracy,table_18.loc['L'].accuracy,\r\n table_19.loc['L'].accuracy,table_110.loc['L'].accuracy])\r\n\r\n\r\na68=np.nanmean([table_11.loc['L'].f1_score,table_12.loc['L'].f1_score,table_13.loc['L'].f1_score,table_14.loc['L'].f1_score,\r\n table_15.loc['L'].f1_score,table_16.loc['L'].f1_score,table_17.loc['L'].f1_score,table_18.loc['L'].f1_score,\r\n table_19.loc['L'].f1_score,table_110.loc['L'].f1_score])\r\n\r\n\r\na69=np.nanmean([table_11.loc['L'][2],table_12.loc['L'][2],table_13.loc['L'][2],table_14.loc['L'][2],\r\n table_15.loc['L'][2],table_16.loc['L'][2],table_17.loc['L'][2],table_18.loc['L'][2],\r\n table_19.loc['L'][2],table_110.loc['L'][2]])\r\n\r\n\r\na70=np.nanmean([table_11.loc['L'][3],table_12.loc['L'][3],table_13.loc['L'][3],table_14.loc['L'][3],\r\n table_15.loc['L'][3],table_16.loc['L'][3],table_17.loc['L'][3],table_18.loc['L'][3],\r\n table_19.loc['L'][3],table_110.loc['L'][3]])\r\n\r\na71=np.nanmean([table_11.loc['L'][4],table_12.loc['L'][4],table_13.loc['L'][4],table_14.loc['L'][4],\r\n table_15.loc['L'][4],table_16.loc['L'][4],table_17.loc['L'][4],table_18.loc['L'][4],\r\n table_19.loc['L'][4],table_110.loc['L'][4]])\r\n\r\na72=np.nanmean([table_11.loc['L'][5],table_12.loc['L'][5],table_13.loc['L'][5],table_14.loc['L'][5],\r\n table_15.loc['L'][5],table_16.loc['L'][5],table_17.loc['L'][5],table_18.loc['L'][5],\r\n table_19.loc['L'][5],table_110.loc['L'][5]])\r\n\r\n\r\na73=np.nanmean([table_11.loc['M'].accuracy,table_12.loc['M'].accuracy,table_13.loc['M'].accuracy,table_14.loc['M'].accuracy,\r\n table_15.loc['M'].accuracy,table_16.loc['M'].accuracy,table_17.loc['M'].accuracy,table_18.loc['M'].accuracy,\r\n table_19.loc['M'].accuracy,table_110.loc['M'].accuracy])\r\n\r\n\r\na74=np.nanmean([table_11.loc['M'].f1_score,table_12.loc['M'].f1_score,table_13.loc['M'].f1_score,table_14.loc['M'].f1_score,\r\n table_15.loc['M'].f1_score,table_16.loc['M'].f1_score,table_17.loc['M'].f1_score,table_18.loc['M'].f1_score,\r\n table_19.loc['M'].f1_score,table_110.loc['M'].f1_score])\r\n\r\n\r\na75=np.nanmean([table_11.loc['M'][2],table_12.loc['M'][2],table_13.loc['M'][2],table_14.loc['M'][2],\r\n table_15.loc['M'][2],table_16.loc['M'][2],table_17.loc['M'][2],table_18.loc['M'][2],\r\n table_19.loc['M'][2],table_110.loc['M'][2]])\r\n\r\n\r\na76=np.nanmean([table_11.loc['M'][3],table_12.loc['M'][3],table_13.loc['M'][3],table_14.loc['M'][3],\r\n table_15.loc['M'][3],table_16.loc['M'][3],table_17.loc['M'][3],table_18.loc['M'][3],\r\n table_19.loc['M'][3],table_110.loc['M'][3]])\r\n\r\na77=np.nanmean([table_11.loc['M'][4],table_12.loc['M'][4],table_13.loc['M'][4],table_14.loc['M'][4],\r\n table_15.loc['M'][4],table_16.loc['M'][4],table_17.loc['M'][4],table_18.loc['M'][4],\r\n table_19.loc['M'][4],table_110.loc['M'][4]])\r\n\r\na78=np.nanmean([table_11.loc['M'][5],table_12.loc['M'][5],table_13.loc['M'][5],table_14.loc['M'][5],\r\n table_15.loc['M'][5],table_16.loc['M'][5],table_17.loc['M'][5],table_18.loc['M'][5],\r\n 
table_19.loc['M'][5],table_110.loc['M'][5]])\r\n\r\n\r\na79=np.nanmean([table_11.loc['.'].accuracy,table_12.loc['.'].accuracy,table_13.loc['.'].accuracy,table_14.loc['.'].accuracy,\r\n table_15.loc['.'].accuracy,table_16.loc['.'].accuracy,table_17.loc['.'].accuracy,table_18.loc['.'].accuracy,\r\n table_19.loc['.'].accuracy,table_110.loc['.'].accuracy])\r\n\r\n\r\na80=np.nanmean([table_11.loc['.'].f1_score,table_12.loc['.'].f1_score,table_13.loc['.'].f1_score,table_14.loc['.'].f1_score,\r\n table_15.loc['.'].f1_score,table_16.loc['.'].f1_score,table_17.loc['.'].f1_score,table_18.loc['.'].f1_score,\r\n table_19.loc['.'].f1_score,table_110.loc['.'].f1_score])\r\n\r\n\r\na81=np.nanmean([table_11.loc['.'][2],table_12.loc['.'][2],table_13.loc['.'][2],table_14.loc['.'][2],\r\n table_15.loc['.'][2],table_16.loc['.'][2],table_17.loc['.'][2],table_18.loc['.'][2],\r\n table_19.loc['.'][2],table_110.loc['.'][2]])\r\n\r\n\r\na82=np.nanmean([table_11.loc['.'][3],table_12.loc['.'][3],table_13.loc['.'][3],table_14.loc['.'][3],\r\n table_15.loc['.'][3],table_16.loc['.'][3],table_17.loc['.'][3],table_18.loc['.'][3],\r\n table_19.loc['.'][3],table_110.loc['.'][3]])\r\n\r\na83=np.nanmean([table_11.loc['.'][4],table_12.loc['.'][4],table_13.loc['.'][4],table_14.loc['.'][4],\r\n table_15.loc['.'][4],table_16.loc['.'][4],table_17.loc['.'][4],table_18.loc['.'][4],\r\n table_19.loc['.'][4],table_110.loc['.'][4]])\r\n\r\na84=np.nanmean([table_11.loc['.'][5],table_12.loc['.'][5],table_13.loc['.'][5],table_14.loc['.'][5],\r\n table_15.loc['.'][5],table_16.loc['.'][5],table_17.loc['.'][5],table_18.loc['.'][5],\r\n table_19.loc['.'][5],table_110.loc['.'][5]])\r\n\r\n\r\nA=[[a1,a2,a3,round(a4),a5,round(a6)],[a7,a8,a9,round(a10),a11,round(a12)],[a13,a14,a15,round(a16),a17,round(a18)],\r\n [a19,a20,a21,round(a22),a23,round(a24)]\r\n,[a25,a26,a27,round(a28),a29,round(a30)],[a31,a32,a33,round(a34),a35,round(a36)],\r\n[a37,a38,a39,round(a40),a41,round(a42)],[a43,a44,a45,round(a46),a47,round(a48)],\r\n[a49,a50,a51,round(a52),a53,round(a54)],[a55,a56,a57,round(a58),a59,round(a60)],\r\n[a61,a62,a63,round(a64),a65,round(a66)],[a67,a68,a69,round(a70),a71,round(a72)],\r\n[a73,a74,a75,round(a76),a77,round(a78)],[a79,a80,a81,round(a82),a83,round(a84)]]\r\n\r\nvv1=np.mean([v1[0],v2[0],v3[0],v4[0],v5[0],v6[0],v7[0],v8[0],v9[0],v10[0]])\r\nvv2=np.mean([v1[1],v2[1],v3[1],v4[1],v5[1],v6[1],v7[1],v8[1],v9[1],v10[1]])\r\nvv3=np.mean([v1[2],v2[2],v3[2],v4[2],v5[2],v6[2],v7[2],v8[2],v9[2],v10[2]])\r\nvv4=np.mean([v1[3],v2[3],v3[3],v4[3],v5[3],v6[3],v7[3],v8[3],v9[3],v10[3]])\r\nvv5=np.mean([v1[4],v2[4],v3[4],v4[4],v5[4],v6[4],v7[4],v8[4],v9[4],v10[4]])\r\nvv6=np.mean([v1[5],v2[5],v3[5],v4[5],v5[5],v6[5],v7[5],v8[5],v9[5],v10[5]])\r\ntable_111= pd.DataFrame(A,columns=['accuracy', 'f1_score', 'accuracy for unknown words',\r\n'number of unknown words','accuracy for known words','number of known words']\r\n,index=['A','B','C','D','E','F','G','H','I','J','K','L','M','.'])\r\n\r\n#table_10= pd.DataFrame(A,\r\n#columns=['accuracy', 'f1_score', 'accuracy for unknown words',\r\n# 'number of unknown words','accuracy for known words','number of known words']\r\n#,index=[list(tag2idx.keys())[0], list(tag2idx.keys())[1], list(tag2idx.keys())[2] , list(tag2idx.keys())[3] \r\n#, list(tag2idx.keys())[4] , 
list(tag2idx.keys())[5],list(tag2idx.keys())[6],list(tag2idx.keys())[7]\r\n#,list(tag2idx.keys())[8],list(tag2idx.keys())[9],list(tag2idx.keys())[10],list(tag2idx.keys())[11],\r\n#list(tag2idx.keys())[12],list(tag2idx.keys())[13]])\r\n\r\nstr_pythontex=[float(\"{0:.2f}\".format(list(table_111.loc[\"A\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"A\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"A\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"A\"])[4]*100)),\r\nround(list(table_111.loc[\"A\"])[3]),round(list(table_111.loc[\"A\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"B\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"B\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"B\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"B\"])[4]*100)),\r\nround(list(table_111.loc[\"B\"])[3]),round(list(table_111.loc[\"B\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"C\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"C\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"C\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"C\"])[4]*100)),\r\nround(list(table_111.loc[\"C\"])[3]),round(list(table_111.loc[\"C\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"D\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"D\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"D\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"D\"])[4]*100)),\r\nround(list(table_111.loc[\"D\"])[3]),round(list(table_111.loc[\"D\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"E\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"E\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"E\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"E\"])[4]*100)),\r\nround(list(table_111.loc[\"E\"])[3]),round(list(table_111.loc[\"E\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"F\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"F\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"F\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"F\"])[4]*100)),\r\nround(list(table_111.loc[\"F\"])[3]),round(list(table_111.loc[\"F\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"G\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"G\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"G\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"G\"])[4]*100)),\r\nround(list(table_111.loc[\"G\"])[3]),round(list(table_111.loc[\"G\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"H\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"H\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"H\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"H\"])[4]*100)),\r\nround(list(table_111.loc[\"H\"])[3]),round(list(table_111.loc[\"H\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"I\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"I\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"I\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"I\"])[4]*100)),\r\nround(list(table_111.loc[\"I\"])[3]),round(list(table_111.loc[\"I\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"J\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"J\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"J\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"J\"])[4]*100)),\r\nround(list(table_111.loc[\"J\"])[3]),round(list(table_111.loc[\"J\"])[5]),\r\nf
loat(\"{0:.2f}\".format(list(table_111.loc[\"K\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"K\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"K\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"K\"])[4]*100)),\r\nround(list(table_111.loc[\"K\"])[3]),round(list(table_111.loc[\"K\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"L\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"L\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"L\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"L\"])[4]*100)),\r\nround(list(table_111.loc[\"L\"])[3]),round(list(table_111.loc[\"L\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"M\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"M\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\"M\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\"M\"])[4]*100)),\r\nround(list(table_111.loc[\"M\"])[3]),round(list(table_111.loc[\"M\"])[5]),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\".\"])[0]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\".\"])[1]*100)),\r\nfloat(\"{0:.2f}\".format(list(table_111.loc[\".\"])[2]*100)),float(\"{0:.2f}\".format(list(table_111.loc[\".\"])[4]*100)),\r\nround(list(table_111.loc[\".\"])[3]),round(list(table_111.loc[\".\"])[5]),float(\"{0:.2f}\".format(vv1))\r\n,float(\"{0:.2f}\".format(vv2))\r\n,float(\"{0:.2f}\".format(vv3))\r\n,float(\"{0:.2f}\".format(vv4)),round(vv5)\r\n,float(\"{0:.2f}\".format(vv6))\r\n]\r\n\r\n\r\nL=[]\r\nfor x in str_pythontex:\r\n if math.isnan(x):\r\n L.append('NULL')\r\n else:\r\n L.append(str(x))\r\n\r\nL1=[]\r\ni=0\r\nfor x in L:\r\n i=i+1\r\n if i!=5 and i!=6 and x!=\"NULL\":\r\n L1.append(x+\" \\%\")\r\n elif x==\"NULL\":\r\n L1.append(x)\r\n elif i==5:\r\n L1.append(x)\r\n else:\r\n L1.append(x)\r\n i=0\r\n\r\nL1[-1]=L1[-1]+\" \\%\"\r\n\r\n"
] | [
[
"numpy.nanmean",
"numpy.mean"
]
] |
emarche/Value-based-DeepRL | [
"8b6458d4b82f293b401fc9e9c81cc482e0948830"
] | [
"DuelingDQN/agent.py"
] | [
"\"\"\"DuelingDQN agent script \n\nThis manages the training phase of the off-policy DuelingDQN.\n\"\"\"\n\nimport random\nfrom collections import deque\n\nimport yaml\nimport numpy as np\n\nwith open('config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n seed = cfg['setup']['seed']\n ymlfile.close()\n \nrandom.seed(seed)\nnp.random.seed(seed)\n\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Adam\ntf.random.set_seed(seed)\n\nfrom utils.deepnetwork import DeepNetwork\nfrom utils.memorybuffer import Buffer\n\nclass DuelingDQN:\n \"\"\"\n Class for the DuelingDQN agent\n \"\"\"\n\n def __init__(self, env, params):\n \"\"\"Initialize the agent, its network, optimizer and buffer\n\n Args:\n env (gym): gym environment\n params (dict): agent parameters (e.g.,dnn structure)\n\n Returns:\n None\n \"\"\"\n\n self.env = env\n\n self.model = DeepNetwork.build(env, params['dnn'])\n self.model_opt = Adam()\n\n self.buffer = Buffer(params['buffer']['size'])\n \n def get_action(self, state, eps):\n \"\"\"Get the action to perform\n\n Args:\n state (list): agent current state\n eps (float): random action probability\n\n Returns:\n action (float): sampled actions to perform\n \"\"\"\n\n if np.random.uniform() <= eps:\n return np.random.randint(0, self.env.action_space.n)\n \n q_values = self.model(np.array([state])).numpy()[0]\n return np.argmax(q_values)\n\n\n def update(self, gamma, batch_size):\n \"\"\"Prepare the samples to update the network\n\n Args:\n gamma (float): discount factor\n batch_size (int): batch size for the off-policy A2C\n\n Returns:\n None\n \"\"\"\n\n batch_size = min(self.buffer.size, batch_size)\n states, actions, rewards, obs_states, dones = self.buffer.sample(batch_size)\n\n # The updates require shape (n° samples, len(metric))\n rewards = rewards.reshape(-1, 1)\n dones = dones.reshape(-1, 1)\n\n self.fit(gamma, states, actions, rewards, obs_states, dones)\n\n def fit(self, gamma, states, actions, rewards, obs_states, dones):\n \"\"\"We want to minimizing mse of the temporal difference error given by Q(s,a|θ) and the target y = r + γ max_a' Q(s', a'|θ). This version is based on vanilla DQN, so it presents the non-stationary targets (i.e., the same network estimates its values and its targets).\n The dueling follows the idea that the advantage A(s, a) = Q(s, a) - V(s). Hence it splits the network into two streams, one that estimates V(S) and one that estimates A(s, a). It then recomposes the two stream into the output layer that forms Q(s, a). The idea is that is that if a state is bad, it is bad regardless of the actions. 
This way we reduce the exploration and the requirement to evaluate all the actions to converge.\n\n Args:\n gamma (float): discount factor\n states (list): episode's states for the update\n actions (list): episode's actions for the update\n rewards (list): episode's rewards for the update\n obs_states (list): episode's obs_states for the update\n dones (list): episode's dones for the update\n\n Returns:\n None\n \"\"\"\n \n with tf.GradientTape() as tape:\n # Compute the target y = r + γ max_a' Q(s', a'|θ)\n obs_qvalues = self.model(obs_states)\n obs_action = tf.math.argmax(obs_qvalues, axis=-1).numpy()\n idxs = np.array([[int(i), int(action)] for i, action in enumerate(obs_action)])\n\n max_obs_qvalues = tf.expand_dims(tf.gather_nd(obs_qvalues, idxs), axis=-1)\n y = rewards + gamma * max_obs_qvalues * dones\n\n # Compute values Q(s,a|θ)\n qvalues = self.model(states)\n idxs = np.array([[int(i), int(action)] for i, action in enumerate(actions)])\n qvalues = tf.expand_dims(tf.gather_nd(qvalues, idxs), axis=-1)\n\n # Compute the loss as mse of Q(s, a) - y\n td_errors = tf.math.subtract(qvalues, y)\n td_errors = 0.5 * tf.math.square(td_errors)\n loss = tf.math.reduce_mean(td_errors)\n\n # Compute the model gradient and update the network\n grad = tape.gradient(loss, self.model.trainable_variables)\n self.model_opt.apply_gradients(zip(grad, self.model.trainable_variables))\n\n def train(self, tracker, n_episodes, verbose, params, hyperp):\n \"\"\"Main loop for the agent's training phase\n\n Args:\n tracker (object): used to store and save the training stats\n n_episodes (int): n° of episodes to perform\n verbose (int): how frequent we save the training stats\n params (dict): agent parameters (e.g., the critic's gamma)\n hyperp (dict): algorithmic specific values (e.g., tau)\n\n Returns:\n None\n \"\"\"\n\n mean_reward = deque(maxlen=100)\n\n eps, eps_min = params['eps'], params['eps_min']\n eps_decay = hyperp['eps_d'] \n\n for e in range(n_episodes):\n ep_reward, steps = 0, 0 \n \n state = self.env.reset()\n\n while True:\n action = self.get_action(state, eps)\n obs_state, obs_reward, done, _ = self.env.step(action)\n\n self.buffer.store(state, \n action, \n obs_reward, \n obs_state, \n 1 - int(done)\n )\n\n ep_reward += obs_reward\n steps += 1\n\n state = obs_state\n \n if e > params['update_start']: \n self.update(\n params['gamma'], \n params['buffer']['batch']\n ) \n \n if done: break \n\n eps = max(eps_min, eps * eps_decay)\n\n mean_reward.append(ep_reward)\n tracker.update([e, ep_reward])\n\n if e % verbose == 0: tracker.save_metrics()\n\n print(f'Ep: {e}, Ep_Rew: {ep_reward}, Mean_Rew: {np.mean(mean_reward)}')\n \n\n\n "
] | [
[
"numpy.random.uniform",
"tensorflow.keras.optimizers.Adam",
"tensorflow.math.subtract",
"tensorflow.gather_nd",
"numpy.random.seed",
"numpy.argmax",
"tensorflow.math.argmax",
"tensorflow.GradientTape",
"tensorflow.math.reduce_mean",
"numpy.array",
"tensorflow.math.square",
"numpy.random.randint",
"tensorflow.random.set_seed",
"numpy.mean"
]
] |
JakobGM/polars | [
"fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8"
] | [
"py-polars/polars/utils.py"
] | [
"import ctypes\nimport os\nimport sys\nfrom datetime import date, datetime, timedelta, timezone\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union\n\nimport numpy as np\n\nfrom polars.datatypes import DataType, Date, Datetime\n\nif sys.version_info >= (3, 10):\n from typing import TypeGuard\nelse:\n from typing_extensions import TypeGuard # pragma: no cover\n\n\ndef _process_null_values(\n null_values: Union[None, str, List[str], Dict[str, str]] = None,\n) -> Union[None, str, List[str], List[Tuple[str, str]]]:\n if isinstance(null_values, dict):\n return list(null_values.items())\n else:\n return null_values\n\n\n# https://stackoverflow.com/questions/4355524/getting-data-from-ctypes-array-into-numpy\ndef _ptr_to_numpy(ptr: int, len: int, ptr_type: Any) -> np.ndarray:\n \"\"\"\n\n Parameters\n ----------\n ptr\n C/Rust ptr casted to usize.\n len\n Length of the array values.\n ptr_type\n Example:\n f32: ctypes.c_float)\n\n Returns\n -------\n View of memory block as numpy array.\n\n \"\"\"\n ptr_ctype = ctypes.cast(ptr, ctypes.POINTER(ptr_type))\n return np.ctypeslib.as_array(ptr_ctype, (len,))\n\n\ndef _timedelta_to_pl_duration(td: timedelta) -> str:\n return f\"{td.days}d{td.seconds}s{td.microseconds}us\"\n\n\ndef in_nanoseconds_window(dt: datetime) -> bool:\n return 1386 < dt.year < 2554\n\n\ndef timedelta_in_nanoseconds_window(td: timedelta) -> bool:\n return in_nanoseconds_window(datetime(1970, 1, 1) + td)\n\n\ndef _datetime_to_pl_timestamp(dt: datetime, tu: Optional[str]) -> int:\n \"\"\"\n Converts a python datetime to a timestamp in nanoseconds\n \"\"\"\n if tu == \"ns\":\n return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e9)\n elif tu == \"us\":\n return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)\n elif tu == \"ms\":\n return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e3)\n if tu is None:\n # python has us precision\n return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)\n else:\n raise ValueError(\"expected on of {'ns', 'ms'}\")\n\n\ndef _timedelta_to_pl_timedelta(td: timedelta, tu: Optional[str] = None) -> int:\n if tu == \"ns\":\n return int(td.total_seconds() * 1e9)\n elif tu == \"us\":\n return int(td.total_seconds() * 1e6)\n elif tu == \"ms\":\n return int(td.total_seconds() * 1e3)\n if tu is None:\n if timedelta_in_nanoseconds_window(td):\n return int(td.total_seconds() * 1e9)\n else:\n return int(td.total_seconds() * 1e3)\n else:\n raise ValueError(\"expected one of {'ns', 'us, 'ms'}\")\n\n\ndef _date_to_pl_date(d: date) -> int:\n dt = datetime.combine(d, datetime.min.time()).replace(tzinfo=timezone.utc)\n return int(dt.timestamp()) // (3600 * 24)\n\n\ndef is_str_sequence(\n val: Sequence[object], allow_str: bool = False\n) -> TypeGuard[Sequence[str]]:\n \"\"\"\n Checks that `val` is a sequence of strings. 
Note that a single string is a sequence of strings\n by definition, use `allow_str=False` to return False on a single string\n \"\"\"\n if (not allow_str) and isinstance(val, str):\n return False\n return _is_iterable_of(val, Sequence, str)\n\n\ndef is_int_sequence(val: Sequence[object]) -> TypeGuard[Sequence[int]]:\n return _is_iterable_of(val, Sequence, int)\n\n\ndef _is_iterable_of(val: Iterable, itertype: Type, eltype: Type) -> bool:\n return isinstance(val, itertype) and all(isinstance(x, eltype) for x in val)\n\n\ndef range_to_slice(rng: range) -> slice:\n step: Optional[int]\n # maybe we can slice instead of take by indices\n if rng.step != 1:\n step = rng.step\n else:\n step = None\n return slice(rng.start, rng.stop, step)\n\n\ndef handle_projection_columns(\n columns: Optional[Union[List[str], List[int]]]\n) -> Tuple[Optional[List[int]], Optional[List[str]]]:\n projection: Optional[List[int]] = None\n if columns:\n if is_int_sequence(columns):\n projection = columns # type: ignore\n columns = None\n elif not is_str_sequence(columns):\n raise ValueError(\n \"columns arg should contain a list of all integers or all strings values.\"\n )\n return projection, columns # type: ignore\n\n\ndef _to_python_timedelta(\n value: Union[int, float], tu: Optional[str] = \"ns\"\n) -> timedelta:\n if tu == \"ns\":\n return timedelta(microseconds=value // 1e3)\n elif tu == \"us\":\n return timedelta(microseconds=value)\n elif tu == \"ms\":\n return timedelta(milliseconds=value)\n else:\n raise ValueError(f\"time unit: {tu} not expected\")\n\n\ndef _prepare_row_count_args(\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n) -> Optional[Tuple[str, int]]:\n\n if row_count_name is not None:\n return (row_count_name, row_count_offset)\n else:\n return None\n\n\nEPOCH = datetime(1970, 1, 1).replace(tzinfo=None)\n\n\ndef _to_python_datetime(\n value: Union[int, float], dtype: Type[DataType], tu: Optional[str] = \"ns\"\n) -> Union[date, datetime]:\n if dtype == Date:\n # days to seconds\n # important to create from utc. Not doing this leads\n # to inconsistencies dependent on the timezone you are in.\n return datetime.utcfromtimestamp(value * 3600 * 24).date()\n elif dtype == Datetime:\n if tu == \"ns\":\n # nanoseconds to seconds\n return EPOCH + timedelta(microseconds=value / 1000)\n if tu == \"us\":\n return EPOCH + timedelta(microseconds=value)\n elif tu == \"ms\":\n # milliseconds to seconds\n return datetime.utcfromtimestamp(value / 1_000)\n else:\n raise ValueError(f\"time unit: {tu} not expected\")\n else:\n raise NotImplementedError # pragma: no cover\n\n\ndef _in_notebook() -> bool:\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n return False\n except ImportError:\n return False\n except AttributeError:\n return False\n return True\n\n\ndef format_path(path: Union[str, Path]) -> str:\n \"\"\"\n Returnsa string path, expanding the home directory if present.\n \"\"\"\n return os.path.expanduser(path)\n"
] | [
[
"numpy.ctypeslib.as_array"
]
] |
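The polars utilities in the entry above all reduce Python date/time objects to an integer count of a chosen unit ('ns', 'us', 'ms') since the UNIX epoch. A small stdlib-only sketch of that conversion follows, independent of polars; the function name and unit table are assumptions for illustration only.

    from datetime import datetime, timezone

    def datetime_to_epoch_int(dt: datetime, tu: str = "us") -> int:
        # Treat the naive datetime as UTC, then scale epoch seconds to the requested unit.
        seconds = dt.replace(tzinfo=timezone.utc).timestamp()
        scale = {"ns": 1_000_000_000, "us": 1_000_000, "ms": 1_000}[tu]
        return int(seconds * scale)

    print(datetime_to_epoch_int(datetime(1970, 1, 2), "ms"))  # 86400000

Dates follow the same pattern with a day-sized unit, which is why the entry's _date_to_pl_date divides the epoch seconds by 3600 * 24.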
Dapid/scipy | [
"dde07a64407ffaa9442b3d8298c6c26ff91fb384"
] | [
"scipy/stats/stats.py"
] | [
"# Copyright (c) Gary Strangman. All rights reserved\n#\n# Disclaimer\n#\n# This software is provided \"as-is\". There are no expressed or implied\n# warranties of any kind, including, but not limited to, the warranties\n# of merchantability and fitness for a given application. In no event\n# shall Gary Strangman be liable for any direct, indirect, incidental,\n# special, exemplary or consequential damages (including, but not limited\n# to, loss of use, data or profits, or business interruption) however\n# caused and on any theory of liability, whether in contract, strict\n# liability or tort (including negligence or otherwise) arising in any way\n# out of the use of this software, even if advised of the possibility of\n# such damage.\n#\n\n#\n# Heavily adapted for use by SciPy 2002 by Travis Oliphant\n\"\"\"\nA collection of basic statistical functions for python. The function\nnames appear below.\n\n Some scalar functions defined here are also available in the scipy.special\n package where they work on arbitrary sized arrays.\n\nDisclaimers: The function list is obviously incomplete and, worse, the\nfunctions are not optimized. All functions have been tested (some more\nso than others), but they are far from bulletproof. Thus, as with any\nfree software, no warranty or guarantee is expressed or implied. :-) A\nfew extra functions that don't appear in the list below can be found by\ninterested treasure-hunters. These functions don't necessarily have\nboth list and array versions but were deemed useful.\n\nCentral Tendency\n----------------\n.. autosummary::\n :toctree: generated/\n\n gmean\n hmean\n mode\n\nMoments\n-------\n.. autosummary::\n :toctree: generated/\n\n moment\n variation\n skew\n kurtosis\n normaltest\n\nMoments Handling NaN:\n\n.. autosummary::\n :toctree: generated/\n\n nanmean\n nanmedian\n nanstd\n\nAltered Versions\n----------------\n.. autosummary::\n :toctree: generated/\n\n tmean\n tvar\n tstd\n tsem\n describe\n\nFrequency Stats\n---------------\n.. autosummary::\n :toctree: generated/\n\n itemfreq\n scoreatpercentile\n percentileofscore\n histogram\n cumfreq\n relfreq\n\nVariability\n-----------\n.. autosummary::\n :toctree: generated/\n\n obrientransform\n signaltonoise\n sem\n\nTrimming Functions\n------------------\n.. autosummary::\n :toctree: generated/\n\n threshold\n trimboth\n trim1\n\nCorrelation Functions\n---------------------\n.. autosummary::\n :toctree: generated/\n\n pearsonr\n fisher_exact\n spearmanr\n pointbiserialr\n kendalltau\n linregress\n theilslopes\n\nInferential Stats\n-----------------\n.. autosummary::\n :toctree: generated/\n\n ttest_1samp\n ttest_ind\n ttest_ind_from_stats\n ttest_rel\n chisquare\n power_divergence\n ks_2samp\n mannwhitneyu\n ranksums\n wilcoxon\n kruskal\n friedmanchisquare\n combine_pvalues\n\nProbability Calculations\n------------------------\n.. autosummary::\n :toctree: generated/\n\n chisqprob\n betai\n\nANOVA Functions\n---------------\n.. autosummary::\n :toctree: generated/\n\n f_oneway\n f_value\n\nSupport Functions\n-----------------\n.. autosummary::\n :toctree: generated/\n\n ss\n square_of_sums\n rankdata\n\nReferences\n----------\n.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 
2000.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport math\nfrom collections import namedtuple\n\nfrom scipy._lib.six import xrange\n\n# Scipy imports.\nfrom scipy._lib.six import callable, string_types\nfrom numpy import array, asarray, ma, zeros\nimport scipy.special as special\nimport scipy.linalg as linalg\nimport numpy as np\nfrom . import futil\nfrom . import distributions\n\nfrom ._rank import rankdata, tiecorrect\n\n__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',\n 'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',\n 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',\n 'normaltest', 'jarque_bera', 'itemfreq',\n 'scoreatpercentile', 'percentileofscore', 'histogram',\n 'histogram2', 'cumfreq', 'relfreq', 'obrientransform',\n 'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',\n 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',\n 'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',\n 'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',\n 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',\n 'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',\n 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',\n 'chisqprob', 'betai',\n 'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',\n 'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',\n 'nanstd', 'nanmedian', 'combine_pvalues', ]\n\n\ndef _chk_asarray(a, axis):\n if axis is None:\n a = np.ravel(a)\n outaxis = 0\n else:\n a = np.asarray(a)\n outaxis = axis\n return a, outaxis\n\n\ndef _chk2_asarray(a, b, axis):\n if axis is None:\n a = np.ravel(a)\n b = np.ravel(b)\n outaxis = 0\n else:\n a = np.asarray(a)\n b = np.asarray(b)\n outaxis = axis\n return a, b, outaxis\n\n\ndef find_repeats(arr):\n \"\"\"\n Find repeats and repeat counts.\n\n Parameters\n ----------\n arr : array_like\n Input array\n\n Returns\n -------\n find_repeats : tuple\n Returns a tuple of two 1-D ndarrays. The first ndarray are the repeats\n as sorted, unique values that are repeated in `arr`. The second\n ndarray are the counts mapped one-to-one of the repeated values\n in the first ndarray.\n\n Examples\n --------\n >>> from scipy import stats\n >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])\n (array([ 2. ]), array([ 4 ], dtype=int32)\n\n >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])\n (array([ 4., 5.]), array([2, 2], dtype=int32))\n\n \"\"\"\n v1, v2, n = futil.dfreps(arr)\n return v1[:n], v2[:n]\n\n#######\n# NAN friendly functions\n########\n\n\[email protected](message=\"scipy.stats.nanmean is deprecated in scipy 0.15.0 \"\n \"in favour of numpy.nanmean.\")\ndef nanmean(x, axis=0):\n \"\"\"\n Compute the mean over the given axis ignoring nans.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n axis : int or None, optional\n Axis along which the mean is computed. 
Default is 0.\n If None, compute over the whole array `x`.\n\n Returns\n -------\n m : float\n The mean of `x`, ignoring nans.\n\n See Also\n --------\n nanstd, nanmedian\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.linspace(0, 4, 3)\n >>> a\n array([ 0., 2., 4.])\n >>> a[-1] = np.nan\n >>> stats.nanmean(a)\n 1.0\n\n \"\"\"\n x, axis = _chk_asarray(x, axis)\n x = x.copy()\n Norig = x.shape[axis]\n mask = np.isnan(x)\n factor = 1.0 - np.sum(mask, axis) / Norig\n\n x[mask] = 0.0\n return np.mean(x, axis) / factor\n\n\[email protected](message=\"scipy.stats.nanstd is deprecated in scipy 0.15 \"\n \"in favour of numpy.nanstd.\\nNote that numpy.nanstd \"\n \"has a different signature.\")\ndef nanstd(x, axis=0, bias=False):\n \"\"\"\n Compute the standard deviation over the given axis, ignoring nans.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axis : int or None, optional\n Axis along which the standard deviation is computed. Default is 0.\n If None, compute over the whole array `x`.\n bias : bool, optional\n If True, the biased (normalized by N) definition is used. If False\n (default), the unbiased definition is used.\n\n Returns\n -------\n s : float\n The standard deviation.\n\n See Also\n --------\n nanmean, nanmedian\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(10, dtype=float)\n >>> a[1:3] = np.nan\n >>> np.std(a)\n nan\n >>> stats.nanstd(a)\n 2.9154759474226504\n >>> stats.nanstd(a.reshape(2, 5), axis=1)\n array([ 2.0817, 1.5811])\n >>> stats.nanstd(a.reshape(2, 5), axis=None)\n 2.9154759474226504\n\n \"\"\"\n x, axis = _chk_asarray(x, axis)\n x = x.copy()\n Norig = x.shape[axis]\n\n mask = np.isnan(x)\n Nnan = np.sum(mask, axis) * 1.0\n n = Norig - Nnan\n\n x[mask] = 0.0\n m1 = np.sum(x, axis) / n\n\n if axis:\n d = x - np.expand_dims(m1, axis)\n else:\n d = x - m1\n\n d *= d\n\n m2 = np.sum(d, axis) - m1 * m1 * Nnan\n\n if bias:\n m2c = m2 / n\n else:\n m2c = m2 / (n - 1.0)\n\n return np.sqrt(m2c)\n\n\ndef _nanmedian(arr1d): # This only works on 1d arrays\n \"\"\"Private function for rank a arrays. Compute the median ignoring Nan.\n\n Parameters\n ----------\n arr1d : ndarray\n Input array, of rank 1.\n\n Results\n -------\n m : float\n The median.\n \"\"\"\n x = arr1d.copy()\n c = np.isnan(x)\n s = np.where(c)[0]\n if s.size == x.size:\n warnings.warn(\"All-NaN slice encountered\", RuntimeWarning)\n return np.nan\n elif s.size != 0:\n # select non-nans at end of array\n enonan = x[-s.size:][~c[-s.size:]]\n # fill nans in beginning of array with non-nans of end\n x[s[:enonan.size]] = enonan\n # slice nans away\n x = x[:-s.size]\n return np.median(x, overwrite_input=True)\n\[email protected](message=\"scipy.stats.nanmedian is deprecated in scipy 0.15 \"\n \"in favour of numpy.nanmedian.\")\ndef nanmedian(x, axis=0):\n \"\"\"\n Compute the median along the given axis ignoring nan values.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axis : int or None, optional\n Axis along which the median is computed. 
Default is 0.\n If None, compute over the whole array `x`.\n\n Returns\n -------\n m : float\n The median of `x` along `axis`.\n\n See Also\n --------\n nanstd, nanmean, numpy.nanmedian\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([0, 3, 1, 5, 5, np.nan])\n >>> stats.nanmedian(a)\n array(3.0)\n\n >>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])\n >>> stats.nanmedian(b)\n array(4.0)\n\n Example with axis:\n\n >>> c = np.arange(30.).reshape(5,6)\n >>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)\n >>> c[idx] = np.nan\n >>> c\n array([[ 0., 1., 2., nan, 4., 5.],\n [ 6., 7., nan, 9., 10., 11.],\n [ 12., nan, 14., 15., 16., 17.],\n [ nan, 19., 20., 21., 22., nan],\n [ 24., 25., 26., 27., nan, 29.]])\n >>> stats.nanmedian(c, axis=1)\n array([ 2. , 9. , 15. , 20.5, 26. ])\n\n \"\"\"\n x, axis = _chk_asarray(x, axis)\n if x.ndim == 0:\n return float(x.item())\n if hasattr(np, 'nanmedian'): # numpy 1.9 faster for some cases\n return np.nanmedian(x, axis)\n x = np.apply_along_axis(_nanmedian, axis, x)\n if x.ndim == 0:\n x = float(x.item())\n return x\n\n\n#####################################\n# CENTRAL TENDENCY #\n#####################################\n\n\ndef gmean(a, axis=0, dtype=None):\n \"\"\"\n Compute the geometric mean along the specified axis.\n\n Returns the geometric average of the array elements.\n That is: n-th root of (x1 * x2 * ... * xn)\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. In that case, the default\n platform integer is used.\n\n Returns\n -------\n gmean : ndarray\n see dtype parameter above\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n\n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n Use masked arrays to ignore any non-finite values in the input or that\n arise in the calculations such as Not a Number and infinity because masked\n arrays automatically mask any non-finite values.\n\n \"\"\"\n if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it\n log_a = np.log(np.array(a, dtype=dtype))\n elif dtype: # Must change the default dtype allowing array type\n if isinstance(a, np.ma.MaskedArray):\n log_a = np.log(np.ma.asarray(a, dtype=dtype))\n else:\n log_a = np.log(np.asarray(a, dtype=dtype))\n else:\n log_a = np.log(a)\n return np.exp(log_a.mean(axis=axis))\n\n\ndef hmean(a, axis=0, dtype=None):\n \"\"\"\n Calculates the harmonic mean along the specified axis.\n\n That is: n / (1/x1 + 1/x2 + ... + 1/xn)\n\n Parameters\n ----------\n a : array_like\n Input array, masked array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the harmonic mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. 
If `dtype` is not specified, it defaults to the\n dtype of `a`, unless `a` has an integer `dtype` with a precision less\n than that of the default platform integer. In that case, the default\n platform integer is used.\n\n Returns\n -------\n hmean : ndarray\n see `dtype` parameter above\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n gmean : Geometric mean\n\n Notes\n -----\n The harmonic mean is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n Use masked arrays to ignore any non-finite values in the input or that\n arise in the calculations such as Not a Number and infinity.\n\n \"\"\"\n if not isinstance(a, np.ndarray):\n a = np.array(a, dtype=dtype)\n if np.all(a > 0): # Harmonic mean only defined if greater than zero\n if isinstance(a, np.ma.MaskedArray):\n size = a.count(axis)\n else:\n if axis is None:\n a = a.ravel()\n size = a.shape[0]\n else:\n size = a.shape[axis]\n return size / np.sum(1.0/a, axis=axis, dtype=dtype)\n else:\n raise ValueError(\"Harmonic mean only defined if all elements greater than zero\")\n\n\ndef mode(a, axis=0):\n \"\"\"\n Returns an array of the modal (most common) value in the passed array.\n\n If there is more than one such value, only the first is returned.\n The bin-count for the modal bins is also returned.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n\n Returns\n -------\n vals : ndarray\n Array of modal values.\n counts : ndarray\n Array of counts for each mode.\n\n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n [3, 2, 1, 7],\n [8, 1, 8, 4],\n [5, 3, 0, 5],\n [4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))\n\n To get mode of whole array, specify axis=None:\n\n >>> stats.mode(a, axis=None)\n (array([3]), array([3]))\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n if a.size == 0:\n return np.array([]), np.array([])\n\n scores = np.unique(np.ravel(a)) # get ALL unique values\n testshape = list(a.shape)\n testshape[axis] = 1\n oldmostfreq = np.zeros(testshape, dtype=a.dtype)\n oldcounts = np.zeros(testshape, dtype=int)\n for score in scores:\n template = (a == score)\n counts = np.expand_dims(np.sum(template, axis), axis)\n mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)\n oldcounts = np.maximum(counts, oldcounts)\n oldmostfreq = mostfrequent\n return mostfrequent, oldcounts\n\n\ndef mask_to_limits(a, limits, inclusive):\n \"\"\"Mask an array for values outside of given limits.\n\n This is primarily a utility function.\n\n Parameters\n ----------\n a : array\n limits : (float or None, float or None)\n A tuple consisting of the (lower limit, upper limit). Values in the\n input array less than the lower limit or greater than the upper limit\n will be masked out. None implies no limit.\n inclusive : (bool, bool)\n A tuple consisting of the (lower flag, upper flag). 
These flags\n determine whether values exactly equal to lower or upper are allowed.\n\n Returns\n -------\n A MaskedArray.\n\n Raises\n ------\n A ValueError if there are no values within the given limits.\n \"\"\"\n lower_limit, upper_limit = limits\n lower_include, upper_include = inclusive\n am = ma.MaskedArray(a)\n if lower_limit is not None:\n if lower_include:\n am = ma.masked_less(am, lower_limit)\n else:\n am = ma.masked_less_equal(am, lower_limit)\n\n if upper_limit is not None:\n if upper_include:\n am = ma.masked_greater(am, upper_limit)\n else:\n am = ma.masked_greater_equal(am, upper_limit)\n\n if am.count() == 0:\n raise ValueError(\"No array values within given limits\")\n\n return am\n\n\ndef tmean(a, limits=None, inclusive=(True, True)):\n \"\"\"\n Compute the trimmed mean.\n\n This function finds the arithmetic mean of given values, ignoring values\n outside the given `limits`.\n\n Parameters\n ----------\n a : array_like\n Array of values.\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None (default), then all\n values are used. Either of the limit values in the tuple can also be\n None representing a half-open interval.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n\n Returns\n -------\n tmean : float\n\n \"\"\"\n a = asarray(a)\n if limits is None:\n return np.mean(a, None)\n\n am = mask_to_limits(a.ravel(), limits, inclusive)\n return am.mean()\n\n\ndef masked_var(am):\n m = am.mean()\n s = ma.add.reduce((am - m)**2)\n n = am.count() - 1.0\n return s / n\n\n\ndef tvar(a, limits=None, inclusive=(True, True)):\n \"\"\"\n Compute the trimmed variance\n\n This function computes the sample variance of an array of values,\n while ignoring values which are outside of given `limits`.\n\n Parameters\n ----------\n a : array_like\n Array of values.\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None, then all values are\n used. Either of the limit values in the tuple can also be None\n representing a half-open interval. The default value is None.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n\n Returns\n -------\n tvar : float\n Trimmed variance.\n\n Notes\n -----\n `tvar` computes the unbiased sample variance, i.e. it uses a correction\n factor ``n / (n - 1)``.\n\n \"\"\"\n a = asarray(a)\n a = a.astype(float).ravel()\n if limits is None:\n n = len(a)\n return a.var() * n/(n-1.)\n am = mask_to_limits(a, limits, inclusive)\n return masked_var(am)\n\n\ndef tmin(a, lowerlimit=None, axis=0, inclusive=True):\n \"\"\"\n Compute the trimmed minimum\n\n This function finds the miminum value of an array `a` along the\n specified axis, but only considering values greater than a specified\n lower limit.\n\n Parameters\n ----------\n a : array_like\n array of values\n lowerlimit : None or float, optional\n Values in the input array less than the given limit will be ignored.\n When lowerlimit is None, then all values are used. 
The default value\n is None.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the whole\n array `a`.\n inclusive : {True, False}, optional\n This flag determines whether values exactly equal to the lower limit\n are included. The default value is True.\n\n Returns\n -------\n tmin : float\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n am = mask_to_limits(a, (lowerlimit, None), (inclusive, False))\n return ma.minimum.reduce(am, axis)\n\n\ndef tmax(a, upperlimit=None, axis=0, inclusive=True):\n \"\"\"\n Compute the trimmed maximum\n\n This function computes the maximum value of an array along a given axis,\n while ignoring values larger than a specified upper limit.\n\n Parameters\n ----------\n a : array_like\n array of values\n upperlimit : None or float, optional\n Values in the input array greater than the given limit will be ignored.\n When upperlimit is None, then all values are used. The default value\n is None.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the\n whole array `a`.\n inclusive : {True, False}, optional\n This flag determines whether values exactly equal to the upper limit\n are included. The default value is True.\n\n Returns\n -------\n tmax : float\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n am = mask_to_limits(a, (None, upperlimit), (False, inclusive))\n return ma.maximum.reduce(am, axis)\n\n\ndef tstd(a, limits=None, inclusive=(True, True)):\n \"\"\"\n Compute the trimmed sample standard deviation\n\n This function finds the sample standard deviation of given values,\n ignoring values outside the given `limits`.\n\n Parameters\n ----------\n a : array_like\n array of values\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None, then all values are\n used. Either of the limit values in the tuple can also be None\n representing a half-open interval. The default value is None.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n\n Returns\n -------\n tstd : float\n\n Notes\n -----\n `tstd` computes the unbiased sample standard deviation, i.e. it uses a\n correction factor ``n / (n - 1)``.\n\n \"\"\"\n return np.sqrt(tvar(a, limits, inclusive))\n\n\ndef tsem(a, limits=None, inclusive=(True, True)):\n \"\"\"\n Compute the trimmed standard error of the mean.\n\n This function finds the standard error of the mean for given\n values, ignoring values outside the given `limits`.\n\n Parameters\n ----------\n a : array_like\n array of values\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None, then all values are\n used. Either of the limit values in the tuple can also be None\n representing a half-open interval. The default value is None.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n\n Returns\n -------\n tsem : float\n\n Notes\n -----\n `tsem` uses unbiased sample standard deviation, i.e. 
it uses a\n correction factor ``n / (n - 1)``.\n\n \"\"\"\n a = np.asarray(a).ravel()\n if limits is None:\n return a.std(ddof=1) / np.sqrt(a.size)\n\n am = mask_to_limits(a, limits, inclusive)\n sd = np.sqrt(masked_var(am))\n return sd / np.sqrt(am.count())\n\n\n#####################################\n# MOMENTS #\n#####################################\n\ndef moment(a, moment=1, axis=0):\n \"\"\"\n Calculates the nth moment about the mean for a sample.\n\n Generally used to calculate coefficients of skewness and\n kurtosis.\n\n Parameters\n ----------\n a : array_like\n data\n moment : int, optional\n order of central moment that is returned\n axis : int or None, optional\n Axis along which the central moment is computed. Default is 0.\n If None, compute over the whole array `a`.\n\n Returns\n -------\n n-th central moment : ndarray or float\n The appropriate moment along the given axis or over all values if axis\n is None. The denominator for the moment calculation is the number of\n observations, no degrees of freedom correction is done.\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n if moment == 1:\n # By definition the first moment about the mean is 0.\n shape = list(a.shape)\n del shape[axis]\n if shape:\n # return an actual array of the appropriate shape\n return np.zeros(shape, dtype=float)\n else:\n # the input was 1D, so return a scalar instead of a rank-0 array\n return np.float64(0.0)\n else:\n # Exponentiation by squares: form exponent sequence\n n_list = [moment]\n current_n = moment\n while current_n > 2:\n if current_n % 2:\n current_n = (current_n-1)/2\n else:\n current_n /= 2\n n_list.append(current_n)\n \n # Starting point for exponentiation by squares\n a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)\n if n_list[-1] == 1:\n s = a_zero_mean.copy()\n else:\n s = a_zero_mean**2\n \n # Perform multiplications\n for n in n_list[-2::-1]:\n s = s**2\n if n % 2:\n s *= a_zero_mean\n return np.mean(s, axis)\n\n\ndef variation(a, axis=0):\n \"\"\"\n Computes the coefficient of variation, the ratio of the biased standard\n deviation to the mean.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate the coefficient of variation. Default\n is 0. If None, compute over the whole array `a`.\n\n References\n ----------\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 2000.\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n return a.std(axis) / a.mean(axis)\n\n\ndef skew(a, axis=0, bias=True):\n \"\"\"\n Computes the skewness of a data set.\n\n For normally distributed data, the skewness should be about 0. A skewness\n value > 0 means that there is more weight in the left tail of the\n distribution. The function `skewtest` can be used to determine if the\n skewness value is close enough to 0, statistically speaking.\n\n Parameters\n ----------\n a : ndarray\n data\n axis : int or None, optional\n Axis along which skewness is calculated. Default is 0.\n If None, compute over the whole array `a`.\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n\n Returns\n -------\n skewness : ndarray\n The skewness of values along an axis, returning 0 where all values are\n equal.\n\n References\n ----------\n\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 
2000.\n Section 2.2.24.1\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n n = a.shape[axis]\n m2 = moment(a, 2, axis)\n m3 = moment(a, 3, axis)\n zero = (m2 == 0)\n vals = np.where(zero, 0, m3 / m2**1.5)\n if not bias:\n can_correct = (n > 2) & (m2 > 0)\n if can_correct.any():\n m2 = np.extract(can_correct, m2)\n m3 = np.extract(can_correct, m3)\n nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5\n np.place(vals, can_correct, nval)\n\n if vals.ndim == 0:\n return vals.item()\n\n return vals\n\n\ndef kurtosis(a, axis=0, fisher=True, bias=True):\n \"\"\"\n Computes the kurtosis (Fisher or Pearson) of a dataset.\n\n Kurtosis is the fourth central moment divided by the square of the\n variance. If Fisher's definition is used, then 3.0 is subtracted from\n the result to give 0.0 for a normal distribution.\n\n If bias is False then the kurtosis is calculated using k statistics to\n eliminate bias coming from biased moment estimators\n\n Use `kurtosistest` to see if result is close enough to normal.\n\n Parameters\n ----------\n a : array\n data for which the kurtosis is calculated\n axis : int or None, optional\n Axis along which the kurtosis is calculated. Default is 0.\n If None, compute over the whole array `a`.\n fisher : bool, optional\n If True, Fisher's definition is used (normal ==> 0.0). If False,\n Pearson's definition is used (normal ==> 3.0).\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n\n Returns\n -------\n kurtosis : array\n The kurtosis of values along an axis. If all values are equal,\n return -3 for Fisher's definition and 0 for Pearson's definition.\n\n References\n ----------\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 2000.\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n n = a.shape[axis]\n m2 = moment(a, 2, axis)\n m4 = moment(a, 4, axis)\n zero = (m2 == 0)\n olderr = np.seterr(all='ignore')\n try:\n vals = np.where(zero, 0, m4 / m2**2.0)\n finally:\n np.seterr(**olderr)\n\n if not bias:\n can_correct = (n > 3) & (m2 > 0)\n if can_correct.any():\n m2 = np.extract(can_correct, m2)\n m4 = np.extract(can_correct, m4)\n nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)\n np.place(vals, can_correct, nval + 3.0)\n\n if vals.ndim == 0:\n vals = vals.item() # array scalar\n\n if fisher:\n return vals - 3\n else:\n return vals\n\n\n_DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',\n 'variance', 'skewness',\n 'kurtosis'))\n\n\ndef describe(a, axis=0, ddof=1):\n \"\"\"\n Computes several descriptive statistics of the passed array.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : int or None, optional\n Axis along which statistics are calculated. Default is 0.\n If None, compute over the whole array `a`.\n ddof : int, optional\n Delta degrees of freedom. Default is 1.\n\n Returns\n -------\n nobs : int\n Number of observations (length of data along `axis`).\n minmax: tuple of ndarrays or floats\n Minimum and maximum value of data array.\n mean : ndarray or float\n Arithmetic mean of data along axis.\n variance : ndarray or float\n Unbiased variance of the data along axis, denominator is number of\n observations minus one.\n skewness : ndarray or float\n Biased skewness, based on moment calculations with denominator equal to\n the number of observations, i.e. no degrees of freedom correction.\n kurtosis : ndarray or float\n Biased kurtosis (Fisher). 
The kurtosis is normalized so that it is\n zero for the normal distribution. No degrees of freedom or bias\n correction is used.\n\n See Also\n --------\n skew, kurtosis\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n n = a.shape[axis]\n mm = (np.min(a, axis=axis), np.max(a, axis=axis))\n m = np.mean(a, axis=axis)\n v = np.var(a, axis=axis, ddof=ddof)\n sk = skew(a, axis)\n kurt = kurtosis(a, axis)\n\n # Return namedtuple for clarity\n return _DescribeResult(n, mm, m, v, sk, kurt)\n\n#####################################\n# NORMALITY TESTS #\n#####################################\n\n\ndef skewtest(a, axis=0):\n \"\"\"\n Tests whether the skew is different from the normal distribution.\n\n This function tests the null hypothesis that the skewness of\n the population that the sample was drawn from is the same\n as that of a corresponding normal distribution.\n\n Parameters\n ----------\n a : array\n The data to be tested\n axis : int or None, optional\n Axis along which statistics are calculated. Default is 0.\n If None, compute over the whole array `a`.\n\n Returns\n -------\n z-score : float\n The computed z-score for this test.\n p-value : float\n a 2-sided p-value for the hypothesis test\n\n Notes\n -----\n The sample size must be at least 8.\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n if axis is None:\n a = np.ravel(a)\n axis = 0\n b2 = skew(a, axis)\n n = float(a.shape[axis])\n if n < 8:\n raise ValueError(\n \"skewtest is not valid with less than 8 samples; %i samples\"\n \" were given.\" % int(n))\n y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))\n beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /\n ((n-2.0) * (n+5) * (n+7) * (n+9)))\n W2 = -1 + math.sqrt(2 * (beta2 - 1))\n delta = 1 / math.sqrt(0.5 * math.log(W2))\n alpha = math.sqrt(2.0 / (W2 - 1))\n y = np.where(y == 0, 1, y)\n Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))\n return Z, 2 * distributions.norm.sf(np.abs(Z))\n\n\ndef kurtosistest(a, axis=0):\n \"\"\"\n Tests whether a dataset has normal kurtosis\n\n This function tests the null hypothesis that the kurtosis\n of the population from which the sample was drawn is that\n of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.\n\n Parameters\n ----------\n a : array\n array of the sample data\n axis : int or None, optional\n Axis along which to compute test. Default is 0. If None,\n compute over the whole array `a`.\n\n Returns\n -------\n z-score : float\n The computed z-score for this test.\n p-value : float\n The 2-sided p-value for the hypothesis test\n\n Notes\n -----\n Valid only for n>20. The Z-score is set to 0 for bad entries.\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n n = float(a.shape[axis])\n if n < 5:\n raise ValueError(\n \"kurtosistest requires at least 5 observations; %i observations\"\n \" were given.\" % int(n))\n if n < 20:\n warnings.warn(\"kurtosistest only valid for n>=20 ... 
continuing \"\n \"anyway, n=%i\" % int(n))\n b2 = kurtosis(a, axis, fisher=False)\n\n E = 3.0*(n-1) / (n+1)\n varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))\n x = (b2-E) / np.sqrt(varb2)\n sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /\n (n*(n-2)*(n-3)))\n A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))\n term1 = 1 - 2/(9.0*A)\n denom = 1 + x*np.sqrt(2/(A-4.0))\n denom = np.where(denom < 0, 99, denom)\n term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))\n Z = (term1 - term2) / np.sqrt(2/(9.0*A))\n Z = np.where(denom == 99, 0, Z)\n if Z.ndim == 0:\n Z = Z[()]\n\n # zprob uses upper tail, so Z needs to be positive\n return Z, 2 * distributions.norm.sf(np.abs(Z))\n\n\ndef normaltest(a, axis=0):\n \"\"\"\n Tests whether a sample differs from a normal distribution.\n\n This function tests the null hypothesis that a sample comes\n from a normal distribution. It is based on D'Agostino and\n Pearson's [1]_, [2]_ test that combines skew and kurtosis to\n produce an omnibus test of normality.\n\n\n Parameters\n ----------\n a : array_like\n The array containing the data to be tested.\n axis : int or None, optional\n Axis along which to compute test. Default is 0. If None,\n compute over the whole array `a`.\n\n Returns\n -------\n k2 : float or array\n `s^2 + k^2`, where `s` is the z-score returned by `skewtest` and\n `k` is the z-score returned by `kurtosistest`.\n p-value : float or array\n A 2-sided chi squared probability for the hypothesis test.\n\n References\n ----------\n .. [1] D'Agostino, R. B. (1971), \"An omnibus test of normality for\n moderate and large sample size,\" Biometrika, 58, 341-348\n\n .. [2] D'Agostino, R. and Pearson, E. S. (1973), \"Testing for\n departures from normality,\" Biometrika, 60, 613-622\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n s, _ = skewtest(a, axis)\n k, _ = kurtosistest(a, axis)\n k2 = s*s + k*k\n return k2, chisqprob(k2, 2)\n\n\ndef jarque_bera(x):\n \"\"\"\n Perform the Jarque-Bera goodness of fit test on sample data.\n\n The Jarque-Bera test tests whether the sample data has the skewness and\n kurtosis matching a normal distribution.\n\n Note that this test only works for a large enough number of data samples\n (>2000) as the test statistic asymptotically has a Chi-squared distribution\n with 2 degrees of freedom.\n\n Parameters\n ----------\n x : array_like\n Observations of a random variable.\n\n Returns\n -------\n jb_value : float\n The test statistic.\n p : float\n The p-value for the hypothesis test.\n\n References\n ----------\n .. [1] Jarque, C. and Bera, A. 
(1980) \"Efficient tests for normality,\n homoscedasticity and serial independence of regression residuals\",\n 6 Econometric Letters 255-259.\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(987654321)\n >>> x = np.random.normal(0, 1, 100000)\n >>> y = np.random.rayleigh(1, 100000)\n >>> stats.jarque_bera(x)\n (4.7165707989581342, 0.09458225503041906)\n >>> stats.jarque_bera(y)\n (6713.7098548143422, 0.0)\n\n \"\"\"\n x = np.asarray(x)\n n = float(x.size)\n if n == 0:\n raise ValueError('At least one observation is required.')\n\n mu = x.mean()\n diffx = x - mu\n skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)\n kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2\n jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)\n p = 1 - distributions.chi2.cdf(jb_value, 2)\n\n return jb_value, p\n\n\n#####################################\n# FREQUENCY FUNCTIONS #\n#####################################\n\ndef itemfreq(a):\n \"\"\"\n Returns a 2-D array of item frequencies.\n\n Parameters\n ----------\n a : (N,) array_like\n Input array.\n\n Returns\n -------\n itemfreq : (K, 2) ndarray\n A 2-D frequency table. Column 1 contains sorted, unique values from\n `a`, column 2 contains their respective counts.\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])\n >>> stats.itemfreq(a)\n array([[ 0., 2.],\n [ 1., 4.],\n [ 2., 2.],\n [ 4., 1.],\n [ 5., 1.]])\n >>> np.bincount(a)\n array([2, 4, 2, 0, 1, 1])\n\n >>> stats.itemfreq(a/10.)\n array([[ 0. , 2. ],\n [ 0.1, 4. ],\n [ 0.2, 2. ],\n [ 0.4, 1. ],\n [ 0.5, 1. ]])\n\n \"\"\"\n items, inv = np.unique(a, return_inverse=True)\n freq = np.bincount(inv)\n return np.array([items, freq]).T\n\n\ndef scoreatpercentile(a, per, limit=(), interpolation_method='fraction',\n axis=None):\n \"\"\"\n Calculate the score at a given percentile of the input sequence.\n\n For example, the score at `per=50` is the median. If the desired quantile\n lies between two data points, we interpolate between them, according to\n the value of `interpolation`. If the parameter `limit` is provided, it\n should be a tuple (lower, upper) of two values.\n\n Parameters\n ----------\n a : array_like\n A 1-D array of values from which to extract score.\n per : array_like\n Percentile(s) at which to extract score. Values should be in range\n [0,100].\n limit : tuple, optional\n Tuple of two scalars, the lower and upper limits within which to\n compute the percentile. Values of `a` outside\n this (closed) interval will be ignored.\n interpolation_method : {'fraction', 'lower', 'higher'}, optional\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`\n\n - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the\n fractional part of the index surrounded by ``i`` and ``j``.\n - lower: ``i``.\n - higher: ``j``.\n\n axis : int, optional\n Axis along which the percentiles are computed. Default is None. If\n None, compute over the whole array `a`.\n\n Returns\n -------\n score : float or ndarray\n Score at percentile(s).\n\n See Also\n --------\n percentileofscore, numpy.percentile\n\n Notes\n -----\n This function will become obsolete in the future.\n For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality\n that `scoreatpercentile` provides. 
And it's significantly faster.\n Therefore it's recommended to use `numpy.percentile` for users that have\n numpy >= 1.9.\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(100)\n >>> stats.scoreatpercentile(a, 50)\n 49.5\n\n \"\"\"\n # adapted from NumPy's percentile function. When we require numpy >= 1.8,\n # the implementation of this function can be replaced by np.percentile.\n a = np.asarray(a)\n if a.size == 0:\n # empty array, return nan(s) with shape matching `per`\n if np.isscalar(per):\n return np.nan\n else:\n return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan\n\n if limit:\n a = a[(limit[0] <= a) & (a <= limit[1])]\n\n sorted = np.sort(a, axis=axis)\n if axis is None:\n axis = 0\n\n return _compute_qth_percentile(sorted, per, interpolation_method, axis)\n\n\n# handle sequence of per's without calling sort multiple times\ndef _compute_qth_percentile(sorted, per, interpolation_method, axis):\n if not np.isscalar(per):\n score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)\n for i in per]\n return np.array(score)\n\n if (per < 0) or (per > 100):\n raise ValueError(\"percentile must be in the range [0, 100]\")\n\n indexer = [slice(None)] * sorted.ndim\n idx = per / 100. * (sorted.shape[axis] - 1)\n\n if int(idx) != idx:\n # round fractional indices according to interpolation method\n if interpolation_method == 'lower':\n idx = int(np.floor(idx))\n elif interpolation_method == 'higher':\n idx = int(np.ceil(idx))\n elif interpolation_method == 'fraction':\n pass # keep idx as fraction and interpolate\n else:\n raise ValueError(\"interpolation_method can only be 'fraction', \"\n \"'lower' or 'higher'\")\n\n i = int(idx)\n if i == idx:\n indexer[axis] = slice(i, i + 1)\n weights = array(1)\n sumval = 1.0\n else:\n indexer[axis] = slice(i, i + 2)\n j = i + 1\n weights = array([(j - idx), (idx - i)], float)\n wshape = [1] * sorted.ndim\n wshape[axis] = 2\n weights.shape = wshape\n sumval = weights.sum()\n\n # Use np.add.reduce (== np.sum but a little faster) to coerce data type\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n\n\ndef percentileofscore(a, score, kind='rank'):\n \"\"\"\n The percentile rank of a score relative to a list of scores.\n\n A `percentileofscore` of, for example, 80% means that 80% of the\n scores in `a` are below the given score. In the case of gaps or\n ties, the exact definition depends on the optional keyword, `kind`.\n\n Parameters\n ----------\n a : array_like\n Array of scores to which `score` is compared.\n score : int or float\n Score that is compared to the elements in `a`.\n kind : {'rank', 'weak', 'strict', 'mean'}, optional\n This optional parameter specifies the interpretation of the\n resulting score:\n\n - \"rank\": Average percentage ranking of score. In case of\n multiple matches, average the percentage rankings of\n all matching scores.\n - \"weak\": This kind corresponds to the definition of a cumulative\n distribution function. A percentileofscore of 80%\n means that 80% of values are less than or equal\n to the provided score.\n - \"strict\": Similar to \"weak\", except that only values that are\n strictly less than the given score are counted.\n - \"mean\": The average of the \"weak\" and \"strict\" scores, often used in\n testing. 
See\n\n http://en.wikipedia.org/wiki/Percentile_rank\n\n Returns\n -------\n pcos : float\n Percentile-position of score (0-100) relative to `a`.\n\n See Also\n --------\n numpy.percentile\n\n Examples\n --------\n Three-quarters of the given values lie below a given score:\n\n >>> from scipy import stats\n >>> stats.percentileofscore([1, 2, 3, 4], 3)\n 75.0\n\n With multiple matches, note how the scores of the two matches, 0.6\n and 0.8 respectively, are averaged:\n\n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)\n 70.0\n\n Only 2/5 values are strictly less than 3:\n\n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')\n 40.0\n\n But 4/5 values are less than or equal to 3:\n\n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')\n 80.0\n\n The average between the weak and the strict scores is\n\n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')\n 60.0\n\n \"\"\"\n a = np.array(a)\n n = len(a)\n\n if kind == 'rank':\n if not np.any(a == score):\n a = np.append(a, score)\n a_len = np.array(list(range(len(a))))\n else:\n a_len = np.array(list(range(len(a)))) + 1.0\n\n a = np.sort(a)\n idx = [a == score]\n pct = (np.mean(a_len[idx]) / n) * 100.0\n return pct\n\n elif kind == 'strict':\n return np.sum(a < score) / float(n) * 100\n elif kind == 'weak':\n return np.sum(a <= score) / float(n) * 100\n elif kind == 'mean':\n return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)\n else:\n raise ValueError(\"kind can only be 'rank', 'strict', 'weak' or 'mean'\")\n\n\ndef histogram2(a, bins):\n \"\"\"\n Compute histogram using divisions in bins.\n\n Count the number of times values from array `a` fall into\n numerical ranges defined by `bins`. Range x is given by\n bins[x] <= range_x < bins[x+1] where x =0,N and N is the\n length of the `bins` array. The last range is given by\n bins[N] <= range_N < infinity. Values less than bins[0] are\n not included in the histogram.\n\n Parameters\n ----------\n a : array_like of rank 1\n The array of values to be assigned into bins\n bins : array_like of rank 1\n Defines the ranges of values to use during histogramming.\n\n Returns\n -------\n histogram2 : ndarray of rank 1\n Each value represents the occurrences for a given bin (range) of\n values.\n\n \"\"\"\n # comment: probably obsoleted by numpy.histogram()\n n = np.searchsorted(np.sort(a), bins)\n n = np.concatenate([n, [len(a)]])\n return n[1:] - n[:-1]\n\n\ndef histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):\n \"\"\"\n Separates the range into several bins and returns the number of instances\n in each bin.\n\n Parameters\n ----------\n a : array_like\n Array of scores which will be put into bins.\n numbins : int, optional\n The number of bins to use for the histogram. Default is 10.\n defaultlimits : tuple (lower, upper), optional\n The lower and upper values for the range of the histogram.\n If no value is given, a range slightly larger than the range of the\n values in a is used. Specifically ``(a.min() - s, a.max() + s)``,\n where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.\n weights : array_like, optional\n The weights for each value in `a`. Default is None, which gives each\n value a weight of 1.0\n printextras : bool, optional\n If True, if there are extra points (i.e. the points that fall outside\n the bin limits) a warning is raised saying how many of those points\n there are. 
Default is False.\n\n Returns\n -------\n histogram : ndarray\n Number of points (or sum of weights) in each bin.\n low_range : float\n Lowest value of histogram, the lower limit of the first bin.\n binsize : float\n The size of the bins (all bins have the same size).\n extrapoints : int\n The number of points outside the range of the histogram.\n\n See Also\n --------\n numpy.histogram\n\n Notes\n -----\n This histogram is based on numpy's histogram but has a larger range by\n default if default limits is not set.\n\n \"\"\"\n a = np.ravel(a)\n if defaultlimits is None:\n # no range given, so use values in `a`\n data_min = a.min()\n data_max = a.max()\n # Have bins extend past min and max values slightly\n s = (data_max - data_min) / (2. * (numbins - 1.))\n defaultlimits = (data_min - s, data_max + s)\n\n # use numpy's histogram method to compute bins\n hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,\n weights=weights)\n # hist are not always floats, convert to keep with old output\n hist = np.array(hist, dtype=float)\n # fixed width for bins is assumed, as numpy's histogram gives\n # fixed width bins for int values for 'bins'\n binsize = bin_edges[1] - bin_edges[0]\n # calculate number of extra points\n extrapoints = len([v for v in a\n if defaultlimits[0] > v or v > defaultlimits[1]])\n if extrapoints > 0 and printextras:\n warnings.warn(\"Points outside given histogram range = %s\"\n % extrapoints)\n\n return hist, defaultlimits[0], binsize, extrapoints\n\n\ndef cumfreq(a, numbins=10, defaultreallimits=None, weights=None):\n \"\"\"\n Returns a cumulative frequency histogram, using the histogram function.\n\n Parameters\n ----------\n a : array_like\n Input array.\n numbins : int, optional\n The number of bins to use for the histogram. Default is 10.\n defaultreallimits : tuple (lower, upper), optional\n The lower and upper values for the range of the histogram.\n If no value is given, a range slightly larger than the range of the\n values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,\n where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.\n weights : array_like, optional\n The weights for each value in `a`. Default is None, which gives each\n value a weight of 1.0\n\n Returns\n -------\n cumfreq : ndarray\n Binned values of cumulative frequency.\n lowerreallimit : float\n Lower real limit\n binsize : float\n Width of each bin.\n extrapoints : int\n Extra points.\n\n Examples\n --------\n >>> from scipy import stats\n >>> x = [1, 4, 2, 1, 3, 1]\n >>> cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)\n >>> cumfreqs\n array([ 3., 4., 5., 6.])\n >>> cumfreqs, lowlim, binsize, extrapoints = \\\n ... stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))\n >>> cumfreqs\n array([ 1., 2., 3., 3.])\n >>> extrapoints\n 3\n\n \"\"\"\n h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)\n cumhist = np.cumsum(h * 1, axis=0)\n return cumhist, l, b, e\n\n\ndef relfreq(a, numbins=10, defaultreallimits=None, weights=None):\n \"\"\"\n Returns a relative frequency histogram, using the histogram function.\n\n Parameters\n ----------\n a : array_like\n Input array.\n numbins : int, optional\n The number of bins to use for the histogram. Default is 10.\n defaultreallimits : tuple (lower, upper), optional\n The lower and upper values for the range of the histogram.\n If no value is given, a range slightly larger than the range of the\n values in a is used. 
Specifically ``(a.min() - s, a.max() + s)``,\n where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.\n weights : array_like, optional\n The weights for each value in `a`. Default is None, which gives each\n value a weight of 1.0\n\n Returns\n -------\n relfreq : ndarray\n Binned values of relative frequency.\n lowerreallimit : float\n Lower real limit\n binsize : float\n Width of each bin.\n extrapoints : int\n Extra points.\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([1, 4, 2, 1, 3, 1])\n >>> relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)\n >>> relfreqs\n array([ 0.5 , 0.16666667, 0.16666667, 0.16666667])\n >>> np.sum(relfreqs) # relative frequencies should add up to 1\n 0.99999999999999989\n\n \"\"\"\n h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)\n h = np.array(h / float(np.array(a).shape[0]))\n return h, l, b, e\n\n\n#####################################\n# VARIABILITY FUNCTIONS #\n#####################################\n\ndef obrientransform(*args):\n \"\"\"\n Computes the O'Brien transform on input data (any number of arrays).\n\n Used to test for homogeneity of variance prior to running one-way stats.\n Each array in ``*args`` is one level of a factor.\n If `f_oneway` is run on the transformed data and found significant,\n the variances are unequal. From Maxwell and Delaney [1]_, p.112.\n\n Parameters\n ----------\n args : tuple of array_like\n Any number of arrays.\n\n Returns\n -------\n obrientransform : ndarray\n Transformed data for use in an ANOVA. The first dimension\n of the result corresponds to the sequence of transformed\n arrays. If the arrays given are all 1-D of the same length,\n the return value is a 2-D array; otherwise it is a 1-D array\n of type object, with each element being an ndarray.\n\n References\n ----------\n .. [1] S. E. Maxwell and H. D. Delaney, \"Designing Experiments and\n Analyzing Data: A Model Comparison Perspective\", Wadsworth, 1990.\n\n Examples\n --------\n We'll test the following data sets for differences in their variance.\n\n >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]\n >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]\n\n Apply the O'Brien transform to the data.\n\n >>> tx, ty = obrientransform(x, y)\n\n Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the\n transformed data.\n\n >>> from scipy.stats import f_oneway\n >>> F, p = f_oneway(tx, ty)\n >>> p\n 0.1314139477040335\n\n If we require that ``p < 0.05`` for significance, we cannot conclude\n that the variances are different.\n \"\"\"\n TINY = np.sqrt(np.finfo(float).eps)\n\n # `arrays` will hold the transformed arguments.\n arrays = []\n\n for arg in args:\n a = np.asarray(arg)\n n = len(a)\n mu = np.mean(a)\n sq = (a - mu)**2\n sumsq = sq.sum()\n\n # The O'Brien transform.\n t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))\n\n # Check that the mean of the transformed data is equal to the\n # original variance.\n var = sumsq / (n - 1)\n if abs(var - np.mean(t)) > TINY:\n raise ValueError('Lack of convergence in obrientransform.')\n\n arrays.append(t)\n\n # If the arrays are not all the same shape, calling np.array(arrays)\n # creates a 1-D array with dtype `object` in numpy 1.6+. In numpy\n # 1.5.x, it raises an exception. 
To work around this, we explicitly\n # set the dtype to `object` when the arrays are not all the same shape.\n if len(arrays) < 2 or all(x.shape == arrays[0].shape for x in arrays[1:]):\n dt = None\n else:\n dt = object\n return np.array(arrays, dtype=dt)\n\n\ndef signaltonoise(a, axis=0, ddof=0):\n \"\"\"\n The signal-to-noise ratio of the input data.\n\n Returns the signal-to-noise ratio of `a`, here defined as the mean\n divided by the standard deviation.\n\n Parameters\n ----------\n a : array_like\n An array_like object containing the sample data.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n ddof : int, optional\n Degrees of freedom correction for standard deviation. Default is 0.\n\n Returns\n -------\n s2n : ndarray\n The mean to standard deviation ratio(s) along `axis`, or 0 where the\n standard deviation is 0.\n\n \"\"\"\n a = np.asanyarray(a)\n m = a.mean(axis)\n sd = a.std(axis=axis, ddof=ddof)\n return np.where(sd == 0, 0, m/sd)\n\n\ndef sem(a, axis=0, ddof=1):\n \"\"\"\n Calculates the standard error of the mean (or standard error of\n measurement) of the values in the input array.\n\n Parameters\n ----------\n a : array_like\n An array containing the values for which the standard error is\n returned.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n ddof : int, optional\n Delta degrees-of-freedom. How many degrees of freedom to adjust\n for bias in limited samples relative to the population estimate\n of variance. Defaults to 1.\n\n Returns\n -------\n s : ndarray or float\n The standard error of the mean in the sample(s), along the input axis.\n\n Notes\n -----\n The default value for `ddof` is different to the default (0) used by other\n ddof containing routines, such as np.std nd stats.nanstd.\n\n Examples\n --------\n Find standard error along the first axis:\n\n >>> from scipy import stats\n >>> a = np.arange(20).reshape(5,4)\n >>> stats.sem(a)\n array([ 2.8284, 2.8284, 2.8284, 2.8284])\n\n Find standard error across the whole array, using n degrees of freedom:\n\n >>> stats.sem(a, axis=None, ddof=0)\n 1.2893796958227628\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n n = a.shape[axis]\n s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)\n return s\n\n\ndef zscore(a, axis=0, ddof=0):\n \"\"\"\n Calculates the z score of each value in the sample, relative to the sample\n mean and standard deviation.\n\n Parameters\n ----------\n a : array_like\n An array like object containing the sample data.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n ddof : int, optional\n Degrees of freedom correction in the calculation of the\n standard deviation. 
Default is 0.\n\n Returns\n -------\n zscore : array_like\n The z-scores, standardized by mean and standard deviation of input\n array `a`.\n\n Notes\n -----\n This function preserves ndarray subclasses, and works also with\n matrices and masked arrays (it uses `asanyarray` instead of `asarray`\n for parameters).\n\n Examples\n --------\n >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954,\n 0.6307, 0.6599, 0.1065, 0.0508])\n >>> from scipy import stats\n >>> stats.zscore(a)\n array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,\n 0.6748, -1.1488, -1.3324])\n\n Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``)\n to calculate the standard deviation:\n\n >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],\n [ 0.7149, 0.0775, 0.6072, 0.9656],\n [ 0.6341, 0.1403, 0.9759, 0.4064],\n [ 0.5918, 0.6948, 0.904 , 0.3721],\n [ 0.0921, 0.2481, 0.1188, 0.1366]])\n >>> stats.zscore(b, axis=1, ddof=1)\n array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],\n [ 0.33048416, -1.37380874, 0.04251374, 1.00081084],\n [ 0.26796377, -1.12598418, 1.23283094, -0.37481053],\n [-0.22095197, 0.24468594, 1.19042819, -1.21416216],\n [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])\n \"\"\"\n a = np.asanyarray(a)\n mns = a.mean(axis=axis)\n sstd = a.std(axis=axis, ddof=ddof)\n if axis and mns.ndim < a.ndim:\n return ((a - np.expand_dims(mns, axis=axis)) /\n np.expand_dims(sstd, axis=axis))\n else:\n return (a - mns) / sstd\n\n\ndef zmap(scores, compare, axis=0, ddof=0):\n \"\"\"\n Calculates the relative z-scores.\n\n Returns an array of z-scores, i.e., scores that are standardized to zero\n mean and unit variance, where mean and variance are calculated from the\n comparison array.\n\n Parameters\n ----------\n scores : array_like\n The input for which z-scores are calculated.\n compare : array_like\n The input from which the mean and standard deviation of the\n normalization are taken; assumed to have the same dimension as\n `scores`.\n axis : int or None, optional\n Axis over which mean and variance of `compare` are calculated.\n Default is 0. If None, compute over the whole array `scores`.\n ddof : int, optional\n Degrees of freedom correction in the calculation of the\n standard deviation. Default is 0.\n\n Returns\n -------\n zscore : array_like\n Z-scores, in the same shape as `scores`.\n\n Notes\n -----\n This function preserves ndarray subclasses, and works also with\n matrices and masked arrays (it uses `asanyarray` instead of `asarray`\n for parameters).\n\n Examples\n --------\n >>> a = [0.5, 2.0, 2.5, 3]\n >>> b = [0, 1, 2, 3, 4]\n >>> zmap(a, b)\n array([-1.06066017, 0. 
, 0.35355339, 0.70710678])\n \"\"\"\n scores, compare = map(np.asanyarray, [scores, compare])\n mns = compare.mean(axis=axis)\n sstd = compare.std(axis=axis, ddof=ddof)\n if axis and mns.ndim < compare.ndim:\n return ((scores - np.expand_dims(mns, axis=axis)) /\n np.expand_dims(sstd, axis=axis))\n else:\n return (scores - mns) / sstd\n\n\n#####################################\n# TRIMMING FUNCTIONS #\n#####################################\n\ndef threshold(a, threshmin=None, threshmax=None, newval=0):\n \"\"\"\n Clip array to a given value.\n\n Similar to numpy.clip(), except that values less than `threshmin` or\n greater than `threshmax` are replaced by `newval`, instead of by\n `threshmin` and `threshmax` respectively.\n\n Parameters\n ----------\n a : array_like\n Data to threshold.\n threshmin : float, int or None, optional\n Minimum threshold, defaults to None.\n threshmax : float, int or None, optional\n Maximum threshold, defaults to None.\n newval : float or int, optional\n Value to put in place of values in `a` outside of bounds.\n Defaults to 0.\n\n Returns\n -------\n out : ndarray\n The clipped input array, with values less than `threshmin` or\n greater than `threshmax` replaced with `newval`.\n\n Examples\n --------\n >>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])\n >>> from scipy import stats\n >>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)\n array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])\n\n \"\"\"\n a = asarray(a).copy()\n mask = zeros(a.shape, dtype=bool)\n if threshmin is not None:\n mask |= (a < threshmin)\n if threshmax is not None:\n mask |= (a > threshmax)\n a[mask] = newval\n return a\n\n\ndef sigmaclip(a, low=4., high=4.):\n \"\"\"\n Iterative sigma-clipping of array elements.\n\n The output array contains only those elements of the input array `c`\n that satisfy the conditions ::\n\n mean(c) - std(c)*low < c < mean(c) + std(c)*high\n\n Starting from the full sample, all elements outside the critical range are\n removed. The iteration continues with a new critical range until no\n elements are outside the range.\n\n Parameters\n ----------\n a : array_like\n Data array, will be raveled if not 1-D.\n low : float, optional\n Lower bound factor of sigma clipping. Default is 4.\n high : float, optional\n Upper bound factor of sigma clipping. Default is 4.\n\n Returns\n -------\n c : ndarray\n Input array with clipped elements removed.\n critlower : float\n Lower threshold value use for clipping.\n critlupper : float\n Upper threshold value use for clipping.\n\n Examples\n --------\n >>> a = np.concatenate((np.linspace(9.5,10.5,31), np.linspace(0,20,5)))\n >>> fact = 1.5\n >>> c, low, upp = sigmaclip(a, fact, fact)\n >>> c\n array([ 9.96666667, 10. , 10.03333333, 10. 
])\n >>> c.var(), c.std()\n (0.00055555555555555165, 0.023570226039551501)\n >>> low, c.mean() - fact*c.std(), c.min()\n (9.9646446609406727, 9.9646446609406727, 9.9666666666666668)\n >>> upp, c.mean() + fact*c.std(), c.max()\n (10.035355339059327, 10.035355339059327, 10.033333333333333)\n\n >>> a = np.concatenate((np.linspace(9.5,10.5,11),\n np.linspace(-100,-50,3)))\n >>> c, low, upp = sigmaclip(a, 1.8, 1.8)\n >>> (c == np.linspace(9.5,10.5,11)).all()\n True\n\n \"\"\"\n c = np.asarray(a).ravel()\n delta = 1\n while delta:\n c_std = c.std()\n c_mean = c.mean()\n size = c.size\n critlower = c_mean - c_std*low\n critupper = c_mean + c_std*high\n c = c[(c > critlower) & (c < critupper)]\n delta = size - c.size\n return c, critlower, critupper\n\n\ndef trimboth(a, proportiontocut, axis=0):\n \"\"\"\n Slices off a proportion of items from both ends of an array.\n\n Slices off the passed proportion of items from both ends of the passed\n array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**\n rightmost 10% of scores). You must pre-sort the array if you want\n 'proper' trimming. Slices off less if proportion results in a\n non-integer slice index (i.e., conservatively slices off\n `proportiontocut`).\n\n Parameters\n ----------\n a : array_like\n Data to trim.\n proportiontocut : float\n Proportion (in range 0-1) of total data set to trim of each end.\n axis : int or None, optional\n Axis along which to trim data. Default is 0. If None, compute over\n the whole array `a`.\n\n Returns\n -------\n out : ndarray\n Trimmed version of array `a`.\n\n See Also\n --------\n trim_mean\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(20)\n >>> b = stats.trimboth(a, 0.1)\n >>> b.shape\n (16,)\n\n \"\"\"\n a = np.asarray(a)\n if axis is None:\n a = a.ravel()\n axis = 0\n\n nobs = a.shape[axis]\n lowercut = int(proportiontocut * nobs)\n uppercut = nobs - lowercut\n if (lowercut >= uppercut):\n raise ValueError(\"Proportion too big.\")\n\n sl = [slice(None)] * a.ndim\n sl[axis] = slice(lowercut, uppercut)\n return a[sl]\n\n\ndef trim1(a, proportiontocut, tail='right'):\n \"\"\"\n Slices off a proportion of items from ONE end of the passed array\n distribution.\n\n If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'\n 10% of scores. Slices off LESS if proportion results in a non-integer\n slice index (i.e., conservatively slices off `proportiontocut` ).\n\n Parameters\n ----------\n a : array_like\n Input array\n proportiontocut : float\n Fraction to cut off of 'left' or 'right' of distribution\n tail : {'left', 'right'}, optional\n Defaults to 'right'.\n\n Returns\n -------\n trim1 : ndarray\n Trimmed version of array `a`\n\n \"\"\"\n a = asarray(a)\n if tail.lower() == 'right':\n lowercut = 0\n uppercut = len(a) - int(proportiontocut * len(a))\n elif tail.lower() == 'left':\n lowercut = int(proportiontocut * len(a))\n uppercut = len(a)\n\n return a[lowercut:uppercut]\n\n\ndef trim_mean(a, proportiontocut, axis=0):\n \"\"\"\n Return mean of array after trimming distribution from both lower and upper\n tails.\n\n If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of\n scores. Slices off LESS if proportion results in a non-integer slice\n index (i.e., conservatively slices off `proportiontocut` ).\n\n Parameters\n ----------\n a : array_like\n Input array\n proportiontocut : float\n Fraction to cut off of both tails of the distribution\n axis : int or None, optional\n Axis along which the trimmed means are computed. 
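# --- Illustrative sketch (not part of the original source): the "conservative"
# slicing used by `trimboth` rounds the cut size down with int(), so trimming
# 10% from each end of 25 sorted observations removes 2 values per side, not
# 2.5. The data below are arbitrary.
import numpy as np
from scipy import stats

a = np.arange(25)                       # already sorted
lowercut = int(0.1 * len(a))            # 2, rounded down from 2.5
uppercut = len(a) - lowercut            # 23
manual = a[lowercut:uppercut]           # 21 values remain
print(np.array_equal(manual, stats.trimboth(a, 0.1)))   # True for sorted input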
Default is 0.\n If None, compute over the whole array `a`.\n\n Returns\n -------\n trim_mean : ndarray\n Mean of trimmed array.\n\n See Also\n --------\n trimboth\n\n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.trim_mean(x, 0.1)\n 9.5\n >>> x2 = x.reshape(5, 4)\n >>> x2\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15],\n [16, 17, 18, 19]])\n >>> stats.trim_mean(x2, 0.25)\n array([ 8., 9., 10., 11.])\n >>> stats.trim_mean(x2, 0.25, axis=1)\n array([ 1.5, 5.5, 9.5, 13.5, 17.5])\n\n \"\"\"\n a = np.asarray(a)\n if axis is None:\n nobs = a.size\n else:\n nobs = a.shape[axis]\n lowercut = int(proportiontocut * nobs)\n uppercut = nobs - lowercut - 1\n if (lowercut > uppercut):\n raise ValueError(\"Proportion too big.\")\n\n try:\n atmp = np.partition(a, (lowercut, uppercut), axis)\n except AttributeError:\n atmp = np.sort(a, axis)\n\n newa = trimboth(atmp, proportiontocut, axis=axis)\n return np.mean(newa, axis=axis)\n\n\ndef f_oneway(*args):\n \"\"\"\n Performs a 1-way ANOVA.\n\n The one-way ANOVA tests the null hypothesis that two or more groups have\n the same population mean. The test is applied to samples from two or\n more groups, possibly with differing sizes.\n\n Parameters\n ----------\n sample1, sample2, ... : array_like\n The sample measurements for each group.\n\n Returns\n -------\n F-value : float\n The computed F-value of the test.\n p-value : float\n The associated p-value from the F-distribution.\n\n Notes\n -----\n The ANOVA test has important assumptions that must be satisfied in order\n for the associated p-value to be valid.\n\n 1. The samples are independent.\n 2. Each sample is from a normally distributed population.\n 3. The population standard deviations of the groups are all equal. This\n property is known as homoscedasticity.\n\n If these assumptions are not true for a given set of data, it may still be\n possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although\n with some loss of power.\n\n The algorithm is from Heiman[2], pp.394-7.\n\n\n References\n ----------\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 14.\n http://faculty.vassar.edu/lowry/ch14pt1.html\n\n .. [2] Heiman, G.W. Research Methods in Statistics. 2002.\n\n \"\"\"\n args = [np.asarray(arg, dtype=float) for arg in args]\n na = len(args) # ANOVA on 'na' groups, each in it's own array\n alldata = np.concatenate(args)\n bign = len(alldata)\n sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))\n ssbn = 0\n for a in args:\n ssbn += square_of_sums(a) / float(len(a))\n\n ssbn -= (square_of_sums(alldata) / float(bign))\n sswn = sstot - ssbn\n dfbn = na - 1\n dfwn = bign - na\n msb = ssbn / float(dfbn)\n msw = sswn / float(dfwn)\n f = msb / msw\n prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf\n return f, prob\n\n\ndef pearsonr(x, y):\n \"\"\"\n Calculates a Pearson correlation coefficient and the p-value for testing\n non-correlation.\n\n The Pearson correlation coefficient measures the linear relationship\n between two datasets. Strictly speaking, Pearson's correlation requires\n that each dataset be normally distributed. Like other correlation\n coefficients, this one varies between -1 and +1 with 0 implying no\n correlation. Correlations of -1 or +1 imply an exact linear\n relationship. Positive correlations imply that as x increases, so does\n y. 
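# --- Illustrative sketch (not part of the original source): the coefficient
# returned by `pearsonr` is the usual normalized cross-product of the centered
# data, the same value np.corrcoef reports; the p-value then comes from a t
# statistic with n - 2 degrees of freedom. The data below are arbitrary.
import numpy as np
from scipy import stats

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([1.2, 1.9, 3.2, 4.1, 4.8])
xm, ym = x - x.mean(), y - y.mean()
r_manual = np.sum(xm * ym) / np.sqrt(np.sum(xm ** 2) * np.sum(ym ** 2))
print(np.isclose(r_manual, np.corrcoef(x, y)[0, 1]))    # True
print(np.isclose(r_manual, stats.pearsonr(x, y)[0]))    # True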
Negative correlations imply that as x increases, y decreases.\n\n The p-value roughly indicates the probability of an uncorrelated system\n producing datasets that have a Pearson correlation at least as extreme\n as the one computed from these datasets. The p-values are not entirely\n reliable but are probably reasonable for datasets larger than 500 or so.\n\n Parameters\n ----------\n x : (N,) array_like\n Input\n y : (N,) array_like\n Input\n\n Returns\n -------\n (Pearson's correlation coefficient,\n 2-tailed p-value)\n\n References\n ----------\n http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation\n\n \"\"\"\n # x and y should have same length.\n x = np.asarray(x)\n y = np.asarray(y)\n n = len(x)\n mx = x.mean()\n my = y.mean()\n xm, ym = x - mx, y - my\n r_num = np.add.reduce(xm * ym)\n r_den = np.sqrt(ss(xm) * ss(ym))\n r = r_num / r_den\n\n # Presumably, if abs(r) > 1, then it is only some small artifact of floating\n # point arithmetic.\n r = max(min(r, 1.0), -1.0)\n df = n - 2\n if abs(r) == 1.0:\n prob = 0.0\n else:\n t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))\n prob = betai(0.5*df, 0.5, df/(df+t_squared))\n\n return r, prob\n\n\ndef fisher_exact(table, alternative='two-sided'):\n \"\"\"Performs a Fisher exact test on a 2x2 contingency table.\n\n Parameters\n ----------\n table : array_like of ints\n A 2x2 contingency table. Elements should be non-negative integers.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Which alternative hypothesis to the null hypothesis the test uses.\n Default is 'two-sided'.\n\n Returns\n -------\n oddsratio : float\n This is prior odds ratio and not a posterior estimate.\n p_value : float\n P-value, the probability of obtaining a distribution at least as\n extreme as the one that was actually observed, assuming that the\n null hypothesis is true.\n\n See Also\n --------\n chi2_contingency : Chi-square test of independence of variables in a\n contingency table.\n\n Notes\n -----\n The calculated odds ratio is different from the one R uses. In R language,\n this implementation returns the (more common) \"unconditional Maximum\n Likelihood Estimate\", while R uses the \"conditional Maximum Likelihood\n Estimate\".\n\n For tables with large numbers the (inexact) chi-square test implemented\n in the function `chi2_contingency` can also be used.\n\n Examples\n --------\n Say we spend a few days counting whales and sharks in the Atlantic and\n Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the\n Indian ocean 2 whales and 5 sharks. Then our contingency table is::\n\n Atlantic Indian\n whales 8 2\n sharks 1 5\n\n We use this table to find the p-value:\n\n >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])\n >>> pvalue\n 0.0349...\n\n The probability that we would observe this or an even more imbalanced ratio\n by chance is about 3.5%. 
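# --- Illustrative sketch (not part of the original source): the odds ratio
# returned by `fisher_exact` is the plain cross-product ratio of the 2x2
# table, i.e. the unconditional maximum likelihood estimate mentioned in the
# Notes, computed here for the whales/sharks table from the example.
import numpy as np
from scipy import stats

table = np.array([[8, 2], [1, 5]])
odds_manual = (table[0, 0] * table[1, 1]) / float(table[1, 0] * table[0, 1])
oddsratio, pvalue = stats.fisher_exact(table)
print(odds_manual, oddsratio)        # 20.0 20.0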
A commonly used significance level is 5%, if we\n adopt that we can therefore conclude that our observed imbalance is\n statistically significant; whales prefer the Atlantic while sharks prefer\n the Indian ocean.\n\n \"\"\"\n hypergeom = distributions.hypergeom\n c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm\n if not c.shape == (2, 2):\n raise ValueError(\"The input `table` must be of shape (2, 2).\")\n\n if np.any(c < 0):\n raise ValueError(\"All values in `table` must be nonnegative.\")\n\n if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):\n # If both values in a row or column are zero, the p-value is 1 and\n # the odds ratio is NaN.\n return np.nan, 1.0\n\n if c[1,0] > 0 and c[0,1] > 0:\n oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])\n else:\n oddsratio = np.inf\n\n n1 = c[0,0] + c[0,1]\n n2 = c[1,0] + c[1,1]\n n = c[0,0] + c[1,0]\n\n def binary_search(n, n1, n2, side):\n \"\"\"Binary search for where to begin lower/upper halves in two-sided\n test.\n \"\"\"\n if side == \"upper\":\n minval = mode\n maxval = n\n else:\n minval = 0\n maxval = mode\n guess = -1\n while maxval - minval > 1:\n if maxval == minval + 1 and guess == minval:\n guess = maxval\n else:\n guess = (maxval + minval) // 2\n pguess = hypergeom.pmf(guess, n1 + n2, n1, n)\n if side == \"upper\":\n ng = guess - 1\n else:\n ng = guess + 1\n if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):\n break\n elif pguess < pexact:\n maxval = guess\n else:\n minval = guess\n if guess == -1:\n guess = minval\n if side == \"upper\":\n while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:\n guess -= 1\n while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:\n guess += 1\n else:\n while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:\n guess += 1\n while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:\n guess -= 1\n return guess\n\n if alternative == 'less':\n pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)\n elif alternative == 'greater':\n # Same formula as the 'less' case, but with the second column.\n pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])\n elif alternative == 'two-sided':\n mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))\n pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)\n pmode = hypergeom.pmf(mode, n1 + n2, n1, n)\n\n epsilon = 1 - 1e-4\n if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:\n return oddsratio, 1.\n\n elif c[0,0] < mode:\n plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)\n if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:\n return oddsratio, plower\n\n guess = binary_search(n, n1, n2, \"upper\")\n pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)\n else:\n pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)\n if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:\n return oddsratio, pupper\n\n guess = binary_search(n, n1, n2, \"lower\")\n pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)\n else:\n msg = \"`alternative` should be one of {'two-sided', 'less', 'greater'}\"\n raise ValueError(msg)\n\n if pvalue > 1.0:\n pvalue = 1.0\n\n return oddsratio, pvalue\n\n\ndef spearmanr(a, b=None, axis=0):\n \"\"\"\n Calculates a Spearman rank-order correlation coefficient and the p-value\n to test for non-correlation.\n\n The Spearman correlation is a nonparametric measure of the monotonicity\n of the relationship between two datasets. Unlike the Pearson correlation,\n the Spearman correlation does not assume that both datasets are normally\n distributed. 
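# --- Illustrative sketch (not part of the original source): the Spearman
# coefficient is the Pearson correlation of the ranks of the data, which is
# how this function proceeds below (rankdata followed by np.corrcoef). The
# data are the small example used later in this docstring.
import numpy as np
from scipy import stats
from scipy.stats import rankdata

x = [1, 2, 3, 4, 5]
y = [5, 6, 7, 8, 7]
rho_manual = np.corrcoef(rankdata(x), rankdata(y))[0, 1]
print(np.isclose(rho_manual, stats.spearmanr(x, y)[0]))   # True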
Like other correlation coefficients, this one varies\n between -1 and +1 with 0 implying no correlation. Correlations of -1 or\n +1 imply an exact monotonic relationship. Positive correlations imply that\n as x increases, so does y. Negative correlations imply that as x\n increases, y decreases.\n\n The p-value roughly indicates the probability of an uncorrelated system\n producing datasets that have a Spearman correlation at least as extreme\n as the one computed from these datasets. The p-values are not entirely\n reliable but are probably reasonable for datasets larger than 500 or so.\n\n Parameters\n ----------\n a, b : 1D or 2D array_like, b is optional\n One or two 1-D or 2-D arrays containing multiple variables and\n observations. Each column of `a` and `b` represents a variable, and\n each row entry a single observation of those variables. See also\n `axis`. Both arrays need to have the same length in the `axis`\n dimension.\n axis : int or None, optional\n If axis=0 (default), then each column represents a variable, with\n observations in the rows. If axis=0, the relationship is transposed:\n each row represents a variable, while the columns contain observations.\n If axis=None, then both arrays will be raveled.\n\n Returns\n -------\n rho : float or ndarray (2-D square)\n Spearman correlation matrix or correlation coefficient (if only 2\n variables are given as parameters. Correlation matrix is square with\n length equal to total number of variables (columns or rows) in a and b\n combined.\n p-value : float\n The two-sided p-value for a hypothesis test whose null hypothesis is\n that two sets of data are uncorrelated, has same dimension as rho.\n\n Notes\n -----\n Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.\n\n References\n ----------\n\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 2000.\n Section 14.7\n\n Examples\n --------\n >>> from scipy import stats\n >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])\n (0.82078268166812329, 0.088587005313543798)\n >>> np.random.seed(1234321)\n >>> x2n = np.random.randn(100, 2)\n >>> y2n = np.random.randn(100, 2)\n >>> stats.spearmanr(x2n)\n (0.059969996999699973, 0.55338590803773591)\n >>> stats.spearmanr(x2n[:,0], x2n[:,1])\n (0.059969996999699973, 0.55338590803773591)\n >>> rho, pval = stats.spearmanr(x2n, y2n)\n >>> rho\n array([[ 1. , 0.05997 , 0.18569457, 0.06258626],\n [ 0.05997 , 1. , 0.110003 , 0.02534653],\n [ 0.18569457, 0.110003 , 1. , 0.03488749],\n [ 0.06258626, 0.02534653, 0.03488749, 1. ]])\n >>> pval\n array([[ 0. , 0.55338591, 0.06435364, 0.53617935],\n [ 0.55338591, 0. , 0.27592895, 0.80234077],\n [ 0.06435364, 0.27592895, 0. , 0.73039992],\n [ 0.53617935, 0.80234077, 0.73039992, 0. ]])\n >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)\n >>> rho\n array([[ 1. , 0.05997 , 0.18569457, 0.06258626],\n [ 0.05997 , 1. , 0.110003 , 0.02534653],\n [ 0.18569457, 0.110003 , 1. , 0.03488749],\n [ 0.06258626, 0.02534653, 0.03488749, 1. 
]])\n >>> stats.spearmanr(x2n, y2n, axis=None)\n (0.10816770419260482, 0.1273562188027364)\n >>> stats.spearmanr(x2n.ravel(), y2n.ravel())\n (0.10816770419260482, 0.1273562188027364)\n\n >>> xint = np.random.randint(10, size=(100, 2))\n >>> stats.spearmanr(xint)\n (0.052760927029710199, 0.60213045837062351)\n\n \"\"\"\n a, axisout = _chk_asarray(a, axis)\n ar = np.apply_along_axis(rankdata, axisout, a)\n\n br = None\n if b is not None:\n b, axisout = _chk_asarray(b, axis)\n br = np.apply_along_axis(rankdata, axisout, b)\n n = a.shape[axisout]\n rs = np.corrcoef(ar, br, rowvar=axisout)\n\n olderr = np.seterr(divide='ignore') # rs can have elements equal to 1\n try:\n t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))\n finally:\n np.seterr(**olderr)\n\n prob = 2 * distributions.t.sf(np.abs(t), n-2)\n if rs.shape == (2, 2):\n return rs[1,0], prob[1,0]\n else:\n return rs, prob\n\n\ndef pointbiserialr(x, y):\n \"\"\"Calculates a point biserial correlation coefficient and the associated\n p-value.\n\n The point biserial correlation is used to measure the relationship\n between a binary variable, x, and a continuous variable, y. Like other\n correlation coefficients, this one varies between -1 and +1 with 0\n implying no correlation. Correlations of -1 or +1 imply a determinative\n relationship.\n\n This function uses a shortcut formula but produces the same result as\n `pearsonr`.\n\n Parameters\n ----------\n x : array_like of bools\n Input array.\n y : array_like\n Input array.\n\n Returns\n -------\n r : float\n R value\n p-value : float\n 2-tailed p-value\n\n References\n ----------\n http://en.wikipedia.org/wiki/Point-biserial_correlation_coefficient\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([0, 0, 0, 1, 1, 1, 1])\n >>> b = np.arange(7)\n >>> stats.pointbiserialr(a, b)\n (0.8660254037844386, 0.011724811003954652)\n >>> stats.pearsonr(a, b)\n (0.86602540378443871, 0.011724811003954626)\n >>> np.corrcoef(a, b)\n array([[ 1. , 0.8660254],\n [ 0.8660254, 1. ]])\n\n \"\"\"\n x = np.asarray(x, dtype=bool)\n y = np.asarray(y, dtype=float)\n n = len(x)\n\n # phat is the fraction of x values that are True\n phat = x.sum() / float(len(x))\n y0 = y[~x] # y-values where x is False\n y1 = y[x] # y-values where x is True\n y0m = y0.mean()\n y1m = y1.mean()\n\n # phat - phat**2 is more stable than phat*(1-phat)\n rpb = (y1m - y0m) * np.sqrt(phat - phat**2) / y.std()\n\n df = n - 2\n # fixme: see comment about TINY in pearsonr()\n TINY = 1e-20\n t = rpb * np.sqrt(df / ((1.0 - rpb + TINY)*(1.0 + rpb + TINY)))\n prob = betai(0.5*df, 0.5, df/(df+t*t))\n return rpb, prob\n\n\ndef kendalltau(x, y, initial_lexsort=True):\n \"\"\"\n Calculates Kendall's tau, a correlation measure for ordinal data.\n\n Kendall's tau is a measure of the correspondence between two rankings.\n Values close to 1 indicate strong agreement, values close to -1 indicate\n strong disagreement. This is the tau-b version of Kendall's tau which\n accounts for ties.\n\n Parameters\n ----------\n x, y : array_like\n Arrays of rankings, of the same shape. If arrays are not 1-D, they will\n be flattened to 1-D.\n initial_lexsort : bool, optional\n Whether to use lexsort or quicksort as the sorting method for the\n initial sort of the inputs. Default is lexsort (True), for which\n `kendalltau` is of complexity O(n log(n)). 
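# --- Illustrative sketch (not part of the original source): the tau-b value
# defined in the Notes can be checked with a brute-force O(n^2) count of
# concordant pairs, discordant pairs, and ties; `kendalltau` itself obtains
# the same quantity via sort-and-count, just more efficiently. The data reuse
# the docstring example.
import numpy as np
from itertools import combinations

x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, 0]
P = Q = T = U = 0
for i, j in combinations(range(len(x)), 2):
    dx, dy = np.sign(x[i] - x[j]), np.sign(y[i] - y[j])
    if dx and dy:
        P += dx == dy            # concordant pair
        Q += dx != dy            # discordant pair
    elif dy:
        T += 1                   # tie in x only
    elif dx:
        U += 1                   # tie in y only
tau_manual = (P - Q) / np.sqrt((P + Q + T) * (P + Q + U))
print(tau_manual)                # -0.4714045..., matching kendalltau(x, y)[0]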
If False, the complexity is\n O(n^2), but with a smaller pre-factor (so quicksort may be faster for\n small arrays).\n\n Returns\n -------\n Kendall's tau : float\n The tau statistic.\n p-value : float\n The two-sided p-value for a hypothesis test whose null hypothesis is\n an absence of association, tau = 0.\n\n Notes\n -----\n The definition of Kendall's tau that is used is::\n\n tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))\n\n where P is the number of concordant pairs, Q the number of discordant\n pairs, T the number of ties only in `x`, and U the number of ties only in\n `y`. If a tie occurs for the same pair in both `x` and `y`, it is not\n added to either T or U.\n\n References\n ----------\n W.R. Knight, \"A Computer Method for Calculating Kendall's Tau with\n Ungrouped Data\", Journal of the American Statistical Association, Vol. 61,\n No. 314, Part 1, pp. 436-439, 1966.\n\n Examples\n --------\n >>> from scipy import stats\n >>> x1 = [12, 2, 1, 12, 2]\n >>> x2 = [1, 4, 7, 1, 0]\n >>> tau, p_value = stats.kendalltau(x1, x2)\n >>> tau\n -0.47140452079103173\n >>> p_value\n 0.24821309157521476\n\n \"\"\"\n x = np.asarray(x).ravel()\n y = np.asarray(y).ravel()\n\n if not x.size or not y.size:\n return (np.nan, np.nan) # Return NaN if arrays are empty\n\n n = np.int64(len(x))\n temp = list(range(n)) # support structure used by mergesort\n # this closure recursively sorts sections of perm[] by comparing\n # elements of y[perm[]] using temp[] as support\n # returns the number of swaps required by an equivalent bubble sort\n\n def mergesort(offs, length):\n exchcnt = 0\n if length == 1:\n return 0\n if length == 2:\n if y[perm[offs]] <= y[perm[offs+1]]:\n return 0\n t = perm[offs]\n perm[offs] = perm[offs+1]\n perm[offs+1] = t\n return 1\n length0 = length // 2\n length1 = length - length0\n middle = offs + length0\n exchcnt += mergesort(offs, length0)\n exchcnt += mergesort(middle, length1)\n if y[perm[middle - 1]] < y[perm[middle]]:\n return exchcnt\n\n # merging\n i = j = k = 0\n while j < length0 or k < length1:\n if k >= length1 or (j < length0 and y[perm[offs + j]] <=\n y[perm[middle + k]]):\n temp[i] = perm[offs + j]\n d = i - j\n j += 1\n else:\n temp[i] = perm[middle + k]\n d = (offs + i) - (middle + k)\n k += 1\n if d > 0:\n exchcnt += d\n i += 1\n perm[offs:offs+length] = temp[0:length]\n return exchcnt\n\n # initial sort on values of x and, if tied, on values of y\n if initial_lexsort:\n # sort implemented as mergesort, worst case: O(n log(n))\n perm = np.lexsort((y, x))\n else:\n # sort implemented as quicksort, 30% faster but with worst case: O(n^2)\n perm = list(range(n))\n perm.sort(key=lambda a: (x[a], y[a]))\n\n # compute joint ties\n first = 0\n t = 0\n for i in xrange(1, n):\n if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:\n t += ((i - first) * (i - first - 1)) // 2\n first = i\n t += ((n - first) * (n - first - 1)) // 2\n\n # compute ties in x\n first = 0\n u = 0\n for i in xrange(1, n):\n if x[perm[first]] != x[perm[i]]:\n u += ((i - first) * (i - first - 1)) // 2\n first = i\n u += ((n - first) * (n - first - 1)) // 2\n\n # count exchanges\n exchanges = mergesort(0, n)\n # compute ties in y after mergesort with counting\n first = 0\n v = 0\n for i in xrange(1, n):\n if y[perm[first]] != y[perm[i]]:\n v += ((i - first) * (i - first - 1)) // 2\n first = i\n v += ((n - first) * (n - first - 1)) // 2\n\n tot = (n * (n - 1)) // 2\n if tot == u or tot == v:\n return np.nan, np.nan # Special case for all ties in both ranks\n\n # Prevent overflow; equal 
to np.sqrt((tot - u) * (tot - v))\n denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))\n tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom\n\n # what follows reproduces the ending of Gary Strangman's original\n # stats.kendalltau() in SciPy\n svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))\n z = tau / np.sqrt(svar)\n prob = special.erfc(np.abs(z) / 1.4142136)\n\n return tau, prob\n\n\ndef linregress(x, y=None):\n \"\"\"\n Calculate a regression line\n\n This computes a least-squares regression for two sets of measurements.\n\n Parameters\n ----------\n x, y : array_like\n two sets of measurements. Both arrays should have the same length.\n If only x is given (and y=None), then it must be a two-dimensional\n array where one dimension has length 2. The two sets of measurements\n are then found by splitting the array along the length-2 dimension.\n\n Returns\n -------\n slope : float\n slope of the regression line\n intercept : float\n intercept of the regression line\n r-value : float\n correlation coefficient\n p-value : float\n two-sided p-value for a hypothesis test whose null hypothesis is\n that the slope is zero.\n stderr : float\n Standard error of the estimate\n\n\n Examples\n --------\n >>> from scipy import stats\n >>> x = np.random.random(10)\n >>> y = np.random.random(10)\n >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\n\n # To get coefficient of determination (r_squared)\n\n >>> print(\"r-squared:\", r_value**2)\n r-squared: 0.15286643777\n\n \"\"\"\n TINY = 1.0e-20\n if y is None: # x is a (2, N) or (N, 2) shaped array_like\n x = asarray(x)\n if x.shape[0] == 2:\n x, y = x\n elif x.shape[1] == 2:\n x, y = x.T\n else:\n msg = (\"If only `x` is given as input, it has to be of shape \"\n \"(2, N) or (N, 2), provided shape was %s\" % str(x.shape))\n raise ValueError(msg)\n else:\n x = asarray(x)\n y = asarray(y)\n n = len(x)\n xmean = np.mean(x, None)\n ymean = np.mean(y, None)\n\n # average sum of squares:\n ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat\n r_num = ssxym\n r_den = np.sqrt(ssxm * ssym)\n if r_den == 0.0:\n r = 0.0\n else:\n r = r_num / r_den\n # test for numerical error propagation\n if r > 1.0:\n r = 1.0\n elif r < -1.0:\n r = -1.0\n\n df = n - 2\n t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))\n prob = 2 * distributions.t.sf(np.abs(t), df)\n slope = r_num / ssxm\n intercept = ymean - slope*xmean\n sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)\n return slope, intercept, r, prob, sterrest\n\n\ndef theilslopes(y, x=None, alpha=0.95):\n r\"\"\"\n Computes the Theil-Sen estimator for a set of points (x, y).\n\n `theilslopes` implements a method for robust linear regression. It\n computes the slope as the median of all slopes between paired values.\n\n Parameters\n ----------\n y : array_like\n Dependent variable.\n x : array_like or None, optional\n Independent variable. If None, use ``arange(len(y))`` instead.\n alpha : float, optional\n Confidence degree between 0 and 1. Default is 95% confidence.\n Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are\n interpreted as \"find the 90% confidence interval\".\n\n Returns\n -------\n medslope : float\n Theil slope.\n medintercept : float\n Intercept of the Theil line, as ``median(y) - medslope*median(x)``.\n lo_slope : float\n Lower bound of the confidence interval on `medslope`.\n up_slope : float\n Upper bound of the confidence interval on `medslope`.\n\n Notes\n -----\n The implementation of `theilslopes` follows [1]_. 
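# --- Illustrative sketch (not part of the original source): the point
# estimates returned by `theilslopes` are the median of all pairwise slopes
# and median(y) - slope * median(x), as described in these Notes. The data
# below are arbitrary.
import numpy as np
from scipy import stats

x = np.array([0., 1., 2., 3., 4.])
y = np.array([0., 2., 1., 4., 5.])
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
pairwise = deltay[deltax > 0] / deltax[deltax > 0]     # slope of every distinct pair
slope_manual = np.median(pairwise)
intercept_manual = np.median(y) - slope_manual * np.median(x)
res = stats.theilslopes(y, x)
print(np.isclose(slope_manual, res[0]), np.isclose(intercept_manual, res[1]))   # True True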
The intercept is\n not defined in [1]_, and here it is defined as ``median(y) -\n medslope*median(x)``, which is given in [3]_. Other definitions of\n the intercept exist in the literature. A confidence interval for\n the intercept is not given as this question is not addressed in\n [1]_.\n\n References\n ----------\n .. [1] P.K. Sen, \"Estimates of the regression coefficient based on Kendall's tau\",\n J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.\n .. [2] H. Theil, \"A rank-invariant method of linear and polynomial\n regression analysis I, II and III\", Nederl. Akad. Wetensch., Proc.\n 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.\n .. [3] W.L. Conover, \"Practical nonparametric statistics\", 2nd ed.,\n John Wiley and Sons, New York, pp. 493.\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n >>> x = np.linspace(-5, 5, num=150)\n >>> y = x + np.random.normal(size=x.size)\n >>> y[11:15] += 10 # add outliers\n >>> y[-5:] -= 7\n\n Compute the slope, intercept and 90% confidence interval. For comparison,\n also compute the least-squares fit with `linregress`:\n\n >>> res = stats.theilslopes(y, x, 0.90)\n >>> lsq_res = stats.linregress(x, y)\n\n Plot the results. The Theil-Sen regression line is shown in red, with the\n dashed red lines illustrating the confidence interval of the slope (note\n that the dashed red lines are not the confidence interval of the regression\n as the confidence interval of the intercept is not included). The green\n line shows the least-squares fit for comparison.\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(x, y, 'b.')\n >>> ax.plot(x, res[1] + res[0] * x, 'r-')\n >>> ax.plot(x, res[1] + res[2] * x, 'r--')\n >>> ax.plot(x, res[1] + res[3] * x, 'r--')\n >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')\n >>> plt.show()\n\n \"\"\"\n y = np.asarray(y).flatten()\n if x is None:\n x = np.arange(len(y), dtype=float)\n else:\n x = np.asarray(x, dtype=float).flatten()\n if len(x) != len(y):\n raise ValueError(\"Incompatible lengths ! (%s<>%s)\" % (len(y), len(x)))\n\n # Compute sorted slopes only when deltax > 0\n deltax = x[:, np.newaxis] - x\n deltay = y[:, np.newaxis] - y\n slopes = deltay[deltax > 0] / deltax[deltax > 0]\n slopes.sort()\n medslope = np.median(slopes)\n medinter = np.median(y) - medslope * np.median(x)\n # Now compute confidence intervals\n if alpha > 0.5:\n alpha = 1. - alpha\n\n z = distributions.norm.ppf(alpha / 2.)\n # This implements (2.6) from Sen (1968)\n _, nxreps = find_repeats(x)\n _, nyreps = find_repeats(y)\n nt = len(slopes) # N in Sen (1968)\n ny = len(y) # n in Sen (1968)\n # Equation 2.6 in Sen (1968):\n sigsq = 1/18. 
* (ny * (ny-1) * (2*ny+5) -\n np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -\n np.sum(k * (k-1) * (2*k + 5) for k in nyreps))\n # Find the confidence interval indices in `slopes`\n sigma = np.sqrt(sigsq)\n Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)\n Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)\n delta = slopes[[Rl, Ru]]\n return medslope, medinter, delta[0], delta[1]\n\n\n#####################################\n# INFERENTIAL STATISTICS #\n#####################################\n\ndef ttest_1samp(a, popmean, axis=0):\n \"\"\"\n Calculates the T-test for the mean of ONE group of scores.\n\n This is a two-sided test for the null hypothesis that the expected value\n (mean) of a sample of independent observations `a` is equal to the given\n population mean, `popmean`.\n\n Parameters\n ----------\n a : array_like\n sample observation\n popmean : float or array_like\n expected value in null hypothesis, if array_like than it must have the\n same shape as `a` excluding the axis dimension\n axis : int or None, optional\n Axis along which to compute test. If None, compute over the whole\n array `a`.\n\n Returns\n -------\n t : float or array\n t-statistic\n prob : float or array\n two-tailed p-value\n\n Examples\n --------\n >>> from scipy import stats\n\n >>> np.random.seed(7654567) # fix seed to get the same result\n >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))\n\n Test if mean of random sample is equal to true mean, and different mean.\n We reject the null hypothesis in the second case and don't reject it in\n the first case.\n\n >>> stats.ttest_1samp(rvs,5.0)\n (array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))\n >>> stats.ttest_1samp(rvs,0.0)\n (array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))\n\n Examples using axis and non-scalar dimension for population mean.\n\n >>> stats.ttest_1samp(rvs,[5.0,0.0])\n (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))\n >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)\n (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))\n >>> stats.ttest_1samp(rvs,[[5.0],[0.0]])\n (array([[-0.68014479, -0.04323899],\n [ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],\n [ 7.89094663e-03, 1.49986458e-04]]))\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n n = a.shape[axis]\n df = n - 1\n\n d = np.mean(a, axis) - popmean\n v = np.var(a, axis, ddof=1)\n denom = np.sqrt(v / float(n))\n\n t = np.divide(d, denom)\n t, prob = _ttest_finish(df, t)\n\n return t, prob\n\n\ndef _ttest_finish(df, t):\n \"\"\"Common code between all 3 t-test functions.\"\"\"\n prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail\n if t.ndim == 0:\n t = t[()]\n\n return t, prob\n\n\ndef _ttest_ind_from_stats(mean1, mean2, denom, df):\n\n d = mean1 - mean2\n t = np.divide(d, denom)\n t, prob = _ttest_finish(df, t)\n\n return t, prob\n\n\ndef _unequal_var_ttest_denom(v1, n1, v2, n2):\n vn1 = v1 / n1\n vn2 = v2 / n2\n df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))\n\n # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).\n # Hence it doesn't matter what df is as long as it's not NaN.\n df = np.where(np.isnan(df), 1, df)\n denom = np.sqrt(vn1 + vn2)\n return df, denom\n\n\ndef _equal_var_ttest_denom(v1, n1, v2, n2):\n df = n1 + n2 - 2\n svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)\n denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))\n return df, denom\n\n\ndef ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, 
nobs2,\n equal_var=True):\n \"\"\"\n T-test for means of two independent samples from descriptive statistics.\n\n This is a two-sided test for the null hypothesis that 2 independent samples\n have identical average (expected) values.\n\n Parameters\n ----------\n mean1 : array_like\n The mean(s) of sample 1.\n std1 : array_like\n The standard deviation(s) of sample 1.\n nobs1 : array_like\n The number(s) of observations of sample 1.\n mean2 : array_like\n The mean(s) of sample 2\n std2 : array_like\n The standard deviations(s) of sample 2.\n nobs2 : array_like\n The number(s) of observations of sample 2.\n equal_var : bool, optional\n If True (default), perform a standard independent 2 sample test\n that assumes equal population variances [1]_.\n If False, perform Welch's t-test, which does not assume equal\n population variance [2]_.\n\n Returns\n -------\n t : float or array\n The calculated t-statistics\n prob : float or array\n The two-tailed p-value.\n\n See also\n --------\n scipy.stats.ttest_ind\n\n Notes\n -----\n\n .. versionadded:: 0.16.0\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test\n\n .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test\n \"\"\"\n if equal_var:\n df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)\n else:\n df, denom = _unequal_var_ttest_denom(std1**2, nobs1,\n std2**2, nobs2)\n return _ttest_ind_from_stats(mean1, mean2, denom, df)\n\n\ndef ttest_ind(a, b, axis=0, equal_var=True):\n \"\"\"\n Calculates the T-test for the means of TWO INDEPENDENT samples of scores.\n\n This is a two-sided test for the null hypothesis that 2 independent samples\n have identical average (expected) values. This test assumes that the\n populations have identical variances by default.\n\n Parameters\n ----------\n a, b : array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int or None, optional\n Axis along which to compute test. If None, compute over the whole\n arrays, `a`, and `b`.\n equal_var : bool, optional\n If True (default), perform a standard independent 2 sample test\n that assumes equal population variances [1]_.\n If False, perform Welch's t-test, which does not assume equal\n population variance [2]_.\n .. versionadded:: 0.11.0\n\n\n Returns\n -------\n t : float or array\n The calculated t-statistic.\n prob : float or array\n The two-tailed p-value.\n\n Notes\n -----\n We can use this test, if we observe two independent samples from\n the same or different population, e.g. exam scores of boys and\n girls or of two ethnic groups. The test measures whether the\n average (expected) value differs significantly across samples. If\n we observe a large p-value, for example larger than 0.05 or 0.1,\n then we cannot reject the null hypothesis of identical average scores.\n If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,\n then we reject the null hypothesis of equal averages.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test\n\n .. 
[2] http://en.wikipedia.org/wiki/Welch%27s_t_test\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678)\n\n Test with sample with identical means:\n\n >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)\n >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)\n >>> stats.ttest_ind(rvs1,rvs2)\n (0.26833823296239279, 0.78849443369564776)\n >>> stats.ttest_ind(rvs1,rvs2, equal_var = False)\n (0.26833823296239279, 0.78849452749500748)\n\n `ttest_ind` underestimates p for unequal variances:\n\n >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)\n >>> stats.ttest_ind(rvs1, rvs3)\n (-0.46580283298287162, 0.64145827413436174)\n >>> stats.ttest_ind(rvs1, rvs3, equal_var = False)\n (-0.46580283298287162, 0.64149646246569292)\n\n When n1 != n2, the equal variance t-statistic is no longer equal to the\n unequal variance t-statistic:\n\n >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)\n >>> stats.ttest_ind(rvs1, rvs4)\n (-0.99882539442782481, 0.3182832709103896)\n >>> stats.ttest_ind(rvs1, rvs4, equal_var = False)\n (-0.69712570584654099, 0.48716927725402048)\n\n T-test with different means, variance, and n:\n\n >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)\n >>> stats.ttest_ind(rvs1, rvs5)\n (-1.4679669854490653, 0.14263895620529152)\n >>> stats.ttest_ind(rvs1, rvs5, equal_var = False)\n (-0.94365973617132992, 0.34744170334794122)\n\n \"\"\"\n a, b, axis = _chk2_asarray(a, b, axis)\n if a.size == 0 or b.size == 0:\n return (np.nan, np.nan)\n\n v1 = np.var(a, axis, ddof=1)\n v2 = np.var(b, axis, ddof=1)\n n1 = a.shape[axis]\n n2 = b.shape[axis]\n\n if equal_var:\n df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)\n else:\n df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)\n return _ttest_ind_from_stats(np.mean(a, axis),\n np.mean(b, axis),\n denom, df)\n\n\ndef ttest_rel(a, b, axis=0):\n \"\"\"\n Calculates the T-test on TWO RELATED samples of scores, a and b.\n\n This is a two-sided test for the null hypothesis that 2 related or\n repeated samples have identical average (expected) values.\n\n Parameters\n ----------\n a, b : array_like\n The arrays must have the same shape.\n axis : int or None, optional\n Axis along which to compute test. If None, compute over the whole\n arrays, `a`, and `b`.\n\n Returns\n -------\n t : float or array\n t-statistic\n prob : float or array\n two-tailed p-value\n\n Notes\n -----\n Examples for the use are scores of the same set of student in\n different exams, or repeated sampling from the same units. The\n test measures whether the average score differs significantly\n across samples (e.g. exams). If we observe a large p-value, for\n example greater than 0.05 or 0.1 then we cannot reject the null\n hypothesis of identical average scores. If the p-value is smaller\n than the threshold, e.g. 1%, 5% or 10%, then we reject the null\n hypothesis of equal averages. Small p-values are associated with\n large t-statistics.\n\n References\n ----------\n http://en.wikipedia.org/wiki/T-test#Dependent_t-test\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678) # fix random seed to get same numbers\n\n >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)\n >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +\n ... stats.norm.rvs(scale=0.2,size=500))\n >>> stats.ttest_rel(rvs1,rvs2)\n (0.24101764965300962, 0.80964043445811562)\n >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +\n ... 
stats.norm.rvs(scale=0.2,size=500))\n >>> stats.ttest_rel(rvs1,rvs3)\n (-3.9995108708727933, 7.3082402191726459e-005)\n\n \"\"\"\n a, b, axis = _chk2_asarray(a, b, axis)\n if a.shape[axis] != b.shape[axis]:\n raise ValueError('unequal length arrays')\n\n if a.size == 0 or b.size == 0:\n return np.nan, np.nan\n\n n = a.shape[axis]\n df = float(n - 1)\n\n d = (a - b).astype(np.float64)\n v = np.var(d, axis, ddof=1)\n dm = np.mean(d, axis)\n denom = np.sqrt(v / float(n))\n\n t = np.divide(dm, denom)\n t, prob = _ttest_finish(df, t)\n\n return t, prob\n\n\ndef kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):\n \"\"\"\n Perform the Kolmogorov-Smirnov test for goodness of fit.\n\n This performs a test of the distribution G(x) of an observed\n random variable against a given distribution F(x). Under the null\n hypothesis the two distributions are identical, G(x)=F(x). The\n alternative hypothesis can be either 'two-sided' (default), 'less'\n or 'greater'. The KS test is only valid for continuous distributions.\n\n Parameters\n ----------\n rvs : str, array or callable\n If a string, it should be the name of a distribution in `scipy.stats`.\n If an array, it should be a 1-D array of observations of random\n variables.\n If a callable, it should be a function to generate random variables;\n it is required to have a keyword argument `size`.\n cdf : str or callable\n If a string, it should be the name of a distribution in `scipy.stats`.\n If `rvs` is a string then `cdf` can be False or the same as `rvs`.\n If a callable, that callable is used to calculate the cdf.\n args : tuple, sequence, optional\n Distribution parameters, used if `rvs` or `cdf` are strings.\n N : int, optional\n Sample size if `rvs` is string or callable. Default is 20.\n alternative : {'two-sided', 'less','greater'}, optional\n Defines the alternative hypothesis (see explanation above).\n Default is 'two-sided'.\n mode : 'approx' (default) or 'asymp', optional\n Defines the distribution used for calculating the p-value.\n\n - 'approx' : use approximation to exact distribution of test statistic\n - 'asymp' : use asymptotic distribution of test statistic\n\n Returns\n -------\n D : float\n KS test statistic, either D, D+ or D-.\n p-value : float\n One-tailed or two-tailed p-value.\n\n Notes\n -----\n In the one-sided test, the alternative is that the empirical\n cumulative distribution function of the random variable is \"less\"\n or \"greater\" than the cumulative distribution function F(x) of the\n hypothesis, ``G(x)<=F(x)``, resp. 
``G(x)>=F(x)``.\n\n Examples\n --------\n >>> from scipy import stats\n\n >>> x = np.linspace(-15, 15, 9)\n >>> stats.kstest(x, 'norm')\n (0.44435602715924361, 0.038850142705171065)\n\n >>> np.random.seed(987654321) # set random seed to get the same result\n >>> stats.kstest('norm', False, N=100)\n (0.058352892479417884, 0.88531190944151261)\n\n The above lines are equivalent to:\n\n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.norm.rvs(size=100), 'norm')\n (0.058352892479417884, 0.88531190944151261)\n\n *Test against one-sided alternative hypothesis*\n\n Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:\n\n >>> np.random.seed(987654321)\n >>> x = stats.norm.rvs(loc=0.2, size=100)\n >>> stats.kstest(x,'norm', alternative = 'less')\n (0.12464329735846891, 0.040989164077641749)\n\n Reject equal distribution against alternative hypothesis: less\n\n >>> stats.kstest(x,'norm', alternative = 'greater')\n (0.0072115233216311081, 0.98531158590396395)\n\n Don't reject equal distribution against alternative hypothesis: greater\n\n >>> stats.kstest(x,'norm', mode='asymp')\n (0.12464329735846891, 0.08944488871182088)\n\n *Testing t distributed random variables against normal distribution*\n\n With 100 degrees of freedom the t distribution looks close to the normal\n distribution, and the K-S test does not reject the hypothesis that the\n sample came from the normal distribution:\n\n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.t.rvs(100,size=100),'norm')\n (0.072018929165471257, 0.67630062862479168)\n\n With 3 degrees of freedom the t distribution looks sufficiently different\n from the normal distribution, that we can reject the hypothesis that the\n sample came from the normal distribution at the 10% level:\n\n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.t.rvs(3,size=100),'norm')\n (0.131016895759829, 0.058826222555312224)\n\n \"\"\"\n if isinstance(rvs, string_types):\n if (not cdf) or (cdf == rvs):\n cdf = getattr(distributions, rvs).cdf\n rvs = getattr(distributions, rvs).rvs\n else:\n raise AttributeError(\"if rvs is string, cdf has to be the \"\n \"same distribution\")\n\n if isinstance(cdf, string_types):\n cdf = getattr(distributions, cdf).cdf\n if callable(rvs):\n kwds = {'size': N}\n vals = np.sort(rvs(*args, **kwds))\n else:\n vals = np.sort(rvs)\n N = len(vals)\n cdfvals = cdf(vals, *args)\n\n # to not break compatibility with existing code\n if alternative == 'two_sided':\n alternative = 'two-sided'\n\n if alternative in ['two-sided', 'greater']:\n Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()\n if alternative == 'greater':\n return Dplus, distributions.ksone.sf(Dplus, N)\n\n if alternative in ['two-sided', 'less']:\n Dmin = (cdfvals - np.arange(0.0, N)/N).max()\n if alternative == 'less':\n return Dmin, distributions.ksone.sf(Dmin, N)\n\n if alternative == 'two-sided':\n D = np.max([Dplus, Dmin])\n if mode == 'asymp':\n return D, distributions.kstwobign.sf(D * np.sqrt(N))\n if mode == 'approx':\n pval_two = distributions.kstwobign.sf(D * np.sqrt(N))\n if N > 2666 or pval_two > 0.80 - N*0.3/1000:\n return D, distributions.kstwobign.sf(D * np.sqrt(N))\n else:\n return D, 2 * distributions.ksone.sf(D, N)\n\n\n# Map from names to lambda_ values used in power_divergence().\n_power_div_lambda_names = {\n \"pearson\": 1,\n \"log-likelihood\": 0,\n \"freeman-tukey\": -0.5,\n \"mod-log-likelihood\": -1,\n \"neyman\": -2,\n \"cressie-read\": 2/3,\n}\n\n\ndef _count(a, axis=None):\n \"\"\"\n Count the number of non-masked elements of an 
array.\n\n This function behaves like np.ma.count(), but is much faster\n for ndarrays.\n \"\"\"\n if hasattr(a, 'count'):\n num = a.count(axis=axis)\n if isinstance(num, np.ndarray) and num.ndim == 0:\n # In some cases, the `count` method returns a scalar array (e.g.\n # np.array(3)), but we want a plain integer.\n num = int(num)\n else:\n if axis is None:\n num = a.size\n else:\n num = a.shape[axis]\n return num\n\n\ndef power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):\n \"\"\"\n Cressie-Read power divergence statistic and goodness of fit test.\n\n This function tests the null hypothesis that the categorical data\n has the given frequencies, using the Cressie-Read power divergence\n statistic.\n\n Parameters\n ----------\n f_obs : array_like\n Observed frequencies in each category.\n f_exp : array_like, optional\n Expected frequencies in each category. By default the categories are\n assumed to be equally likely.\n ddof : int, optional\n \"Delta degrees of freedom\": adjustment to the degrees of freedom\n for the p-value. The p-value is computed using a chi-squared\n distribution with ``k - 1 - ddof`` degrees of freedom, where `k`\n is the number of observed frequencies. The default value of `ddof`\n is 0.\n axis : int or None, optional\n The axis of the broadcast result of `f_obs` and `f_exp` along which to\n apply the test. If axis is None, all values in `f_obs` are treated\n as a single data set. Default is 0.\n lambda_ : float or str, optional\n `lambda_` gives the power in the Cressie-Read power divergence\n statistic. The default is 1. For convenience, `lambda_` may be\n assigned one of the following strings, in which case the\n corresponding numerical value is used::\n\n String Value Description\n \"pearson\" 1 Pearson's chi-squared statistic.\n In this case, the function is\n equivalent to `stats.chisquare`.\n \"log-likelihood\" 0 Log-likelihood ratio. Also known as\n the G-test [3]_.\n \"freeman-tukey\" -1/2 Freeman-Tukey statistic.\n \"mod-log-likelihood\" -1 Modified log-likelihood ratio.\n \"neyman\" -2 Neyman's statistic.\n \"cressie-read\" 2/3 The power recommended in [5]_.\n\n Returns\n -------\n stat : float or ndarray\n The Cressie-Read power divergence test statistic. The value is\n a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D.\n p : float or ndarray\n The p-value of the test. The value is a float if `ddof` and the\n return value `stat` are scalars.\n\n See Also\n --------\n chisquare\n\n Notes\n -----\n This test is invalid when the observed or expected frequencies in each\n category are too small. A typical rule is that all of the observed\n and expected frequencies should be at least 5.\n\n When `lambda_` is less than zero, the formula for the statistic involves\n dividing by `f_obs`, so a warning or error may be generated if any value\n in `f_obs` is 0.\n\n Similarly, a warning or error may be generated if any value in `f_exp` is\n zero when `lambda_` >= 0.\n\n The default degrees of freedom, k-1, are for the case when no parameters\n of the distribution are estimated. If p parameters are estimated by\n efficient maximum likelihood then the correct degrees of freedom are\n k-1-p. If the parameters are estimated in a different way, then the\n dof can be between k-1-p and k-1. However, it is also possible that\n the asymptotic distribution is not a chisquare, in which case this\n test is not appropriate.\n\n This function handles masked arrays. 
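# --- Illustrative sketch (not part of the original source): with the default
# lambda_ = 1 the statistic reduces to Pearson's chi-squared,
# sum((obs - exp)**2 / exp), which is what `chisquare` returns. The observed
# frequencies reuse the docstring example; the expected values default to the
# mean of the observed frequencies.
import numpy as np
from scipy import stats

f_obs = np.array([16., 18., 16., 14., 12., 12.])
f_exp = np.full_like(f_obs, f_obs.mean())                  # equally likely categories
stat_manual = np.sum((f_obs - f_exp) ** 2 / f_exp)         # 2.0
print(np.isclose(stat_manual, stats.chisquare(f_obs)[0]))  # True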
If an element of `f_obs` or `f_exp`\n is masked, then data at that position is ignored, and does not count\n towards the size of the data set.\n\n .. versionadded:: 0.13.0\n\n References\n ----------\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html\n .. [2] \"Chi-squared test\", http://en.wikipedia.org/wiki/Chi-squared_test\n .. [3] \"G-test\", http://en.wikipedia.org/wiki/G-test\n .. [4] Sokal, R. R. and Rohlf, F. J. \"Biometry: the principles and\n practice of statistics in biological research\", New York: Freeman\n (1981)\n .. [5] Cressie, N. and Read, T. R. C., \"Multinomial Goodness-of-Fit\n Tests\", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),\n pp. 440-464.\n\n Examples\n --------\n\n (See `chisquare` for more examples.)\n\n When just `f_obs` is given, it is assumed that the expected frequencies\n are uniform and given by the mean of the observed frequencies. Here we\n perform a G-test (i.e. use the log-likelihood ratio statistic):\n\n >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')\n (2.006573162632538, 0.84823476779463769)\n\n The expected frequencies can be given with the `f_exp` argument:\n\n >>> power_divergence([16, 18, 16, 14, 12, 12],\n ... f_exp=[16, 16, 16, 16, 16, 8],\n ... lambda_='log-likelihood')\n (3.5, 0.62338762774958223)\n\n When `f_obs` is 2-D, by default the test is applied to each column.\n\n >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T\n >>> obs.shape\n (6, 2)\n >>> power_divergence(obs, lambda_=\"log-likelihood\")\n (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))\n\n By setting ``axis=None``, the test is applied to all data in the array,\n which is equivalent to applying the test to the flattened array.\n\n >>> power_divergence(obs, axis=None)\n (23.31034482758621, 0.015975692534127565)\n >>> power_divergence(obs.ravel())\n (23.31034482758621, 0.015975692534127565)\n\n `ddof` is the change to make to the default degrees of freedom.\n\n >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)\n (2.0, 0.73575888234288467)\n\n The calculation of the p-values is done by broadcasting the\n test statistic with `ddof`.\n\n >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])\n (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))\n\n `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has\n shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting\n `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared\n statistics, we must use ``axis=1``:\n\n >>> power_divergence([16, 18, 16, 14, 12, 12],\n ... f_exp=[[16, 16, 16, 16, 16, 8],\n ... [8, 20, 20, 16, 12, 12]],\n ... axis=1)\n (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))\n\n \"\"\"\n # Convert the input argument `lambda_` to a numerical value.\n if isinstance(lambda_, string_types):\n if lambda_ not in _power_div_lambda_names:\n names = repr(list(_power_div_lambda_names.keys()))[1:-1]\n raise ValueError(\"invalid string for lambda_: {0!r}. 
Valid strings \"\n \"are {1}\".format(lambda_, names))\n lambda_ = _power_div_lambda_names[lambda_]\n elif lambda_ is None:\n lambda_ = 1\n\n f_obs = np.asanyarray(f_obs)\n\n if f_exp is not None:\n f_exp = np.atleast_1d(np.asanyarray(f_exp))\n else:\n # Compute the equivalent of\n # f_exp = f_obs.mean(axis=axis, keepdims=True)\n # Older versions of numpy do not have the 'keepdims' argument, so\n # we have to do a little work to achieve the same result.\n # Ignore 'invalid' errors so the edge case of a data set with length 0\n # is handled without spurious warnings.\n with np.errstate(invalid='ignore'):\n f_exp = np.atleast_1d(f_obs.mean(axis=axis))\n if axis is not None:\n reduced_shape = list(f_obs.shape)\n reduced_shape[axis] = 1\n f_exp.shape = reduced_shape\n\n # `terms` is the array of terms that are summed along `axis` to create\n # the test statistic. We use some specialized code for a few special\n # cases of lambda_.\n if lambda_ == 1:\n # Pearson's chi-squared statistic\n terms = (f_obs - f_exp)**2 / f_exp\n elif lambda_ == 0:\n # Log-likelihood ratio (i.e. G-test)\n terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)\n elif lambda_ == -1:\n # Modified log-likelihood ratio\n terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)\n else:\n # General Cressie-Read power divergence.\n terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)\n terms /= 0.5 * lambda_ * (lambda_ + 1)\n\n stat = terms.sum(axis=axis)\n\n num_obs = _count(terms, axis=axis)\n ddof = asarray(ddof)\n p = chisqprob(stat, num_obs - 1 - ddof)\n\n return stat, p\n\n\ndef chisquare(f_obs, f_exp=None, ddof=0, axis=0):\n \"\"\"\n Calculates a one-way chi square test.\n\n The chi square test tests the null hypothesis that the categorical data\n has the given frequencies.\n\n Parameters\n ----------\n f_obs : array_like\n Observed frequencies in each category.\n f_exp : array_like, optional\n Expected frequencies in each category. By default the categories are\n assumed to be equally likely.\n ddof : int, optional\n \"Delta degrees of freedom\": adjustment to the degrees of freedom\n for the p-value. The p-value is computed using a chi-squared\n distribution with ``k - 1 - ddof`` degrees of freedom, where `k`\n is the number of observed frequencies. The default value of `ddof`\n is 0.\n axis : int or None, optional\n The axis of the broadcast result of `f_obs` and `f_exp` along which to\n apply the test. If axis is None, all values in `f_obs` are treated\n as a single data set. Default is 0.\n\n Returns\n -------\n chisq : float or ndarray\n The chi-squared test statistic. The value is a float if `axis` is\n None or `f_obs` and `f_exp` are 1-D.\n p : float or ndarray\n The p-value of the test. The value is a float if `ddof` and the\n return value `chisq` are scalars.\n\n See Also\n --------\n power_divergence\n mstats.chisquare\n\n Notes\n -----\n This test is invalid when the observed or expected frequencies in each\n category are too small. A typical rule is that all of the observed\n and expected frequencies should be at least 5.\n\n The default degrees of freedom, k-1, are for the case when no parameters\n of the distribution are estimated. If p parameters are estimated by\n efficient maximum likelihood then the correct degrees of freedom are\n k-1-p. If the parameters are estimated in a different way, then the\n dof can be between k-1-p and k-1. However, it is also possible that\n the asymptotic distribution is not a chisquare, in which case this\n test is not appropriate.\n\n References\n ----------\n .. [1] Lowry, Richard. 
\"Concepts and Applications of Inferential\n Statistics\". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html\n .. [2] \"Chi-squared test\", http://en.wikipedia.org/wiki/Chi-squared_test\n\n Examples\n --------\n When just `f_obs` is given, it is assumed that the expected frequencies\n are uniform and given by the mean of the observed frequencies.\n\n >>> chisquare([16, 18, 16, 14, 12, 12])\n (2.0, 0.84914503608460956)\n\n With `f_exp` the expected frequencies can be given.\n\n >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])\n (3.5, 0.62338762774958223)\n\n When `f_obs` is 2-D, by default the test is applied to each column.\n\n >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T\n >>> obs.shape\n (6, 2)\n >>> chisquare(obs)\n (array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))\n\n By setting ``axis=None``, the test is applied to all data in the array,\n which is equivalent to applying the test to the flattened array.\n\n >>> chisquare(obs, axis=None)\n (23.31034482758621, 0.015975692534127565)\n >>> chisquare(obs.ravel())\n (23.31034482758621, 0.015975692534127565)\n\n `ddof` is the change to make to the default degrees of freedom.\n\n >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)\n (2.0, 0.73575888234288467)\n\n The calculation of the p-values is done by broadcasting the\n chi-squared statistic with `ddof`.\n\n >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])\n (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))\n\n `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has\n shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting\n `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared\n statistics, we use ``axis=1``:\n\n >>> chisquare([16, 18, 16, 14, 12, 12],\n ... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],\n ... axis=1)\n (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))\n\n \"\"\"\n return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,\n lambda_=\"pearson\")\n\n\ndef ks_2samp(data1, data2):\n \"\"\"\n Computes the Kolmogorov-Smirnov statistic on 2 samples.\n\n This is a two-sided test for the null hypothesis that 2 independent samples\n are drawn from the same continuous distribution.\n\n Parameters\n ----------\n data1, data2 : sequence of 1-D ndarrays\n two arrays of sample observations assumed to be drawn from a continuous\n distribution, sample sizes can be different\n\n Returns\n -------\n D : float\n KS statistic\n p-value : float\n two-tailed p-value\n\n Notes\n -----\n This tests whether 2 samples are drawn from the same distribution. 
Note\n that, like in the case of the one-sample K-S test, the distribution is\n assumed to be continuous.\n\n This is the two-sided test, one-sided tests are not implemented.\n The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.\n\n If the K-S statistic is small or the p-value is high, then we cannot\n reject the hypothesis that the distributions of the two samples\n are the same.\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678) #fix random seed to get the same result\n >>> n1 = 200 # size of first sample\n >>> n2 = 300 # size of second sample\n\n For a different distribution, we can reject the null hypothesis since the\n pvalue is below 1%:\n\n >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)\n >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)\n >>> stats.ks_2samp(rvs1, rvs2)\n (0.20833333333333337, 4.6674975515806989e-005)\n\n For a slightly different distribution, we cannot reject the null hypothesis\n at a 10% or lower alpha since the p-value at 0.144 is higher than 10%\n\n >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)\n >>> stats.ks_2samp(rvs1, rvs3)\n (0.10333333333333333, 0.14498781825751686)\n\n For an identical distribution, we cannot reject the null hypothesis since\n the p-value is high, 41%:\n\n >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)\n >>> stats.ks_2samp(rvs1, rvs4)\n (0.07999999999999996, 0.41126949729859719)\n\n \"\"\"\n data1 = np.sort(data1)\n data2 = np.sort(data2)\n n1 = data1.shape[0]\n n2 = data2.shape[0]\n data_all = np.concatenate([data1, data2])\n cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)\n cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)\n d = np.max(np.absolute(cdf1 - cdf2))\n # Note: d absolute not signed distance\n en = np.sqrt(n1 * n2 / float(n1 + n2))\n try:\n prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)\n except:\n prob = 1.0\n\n return d, prob\n\n\ndef mannwhitneyu(x, y, use_continuity=True):\n \"\"\"\n Computes the Mann-Whitney rank test on samples x and y.\n\n Parameters\n ----------\n x, y : array_like\n Array of samples, should be one-dimensional.\n use_continuity : bool, optional\n Whether a continuity correction (1/2.) should be taken into\n account. Default is True.\n\n Returns\n -------\n u : float\n The Mann-Whitney statistics.\n prob : float\n One-sided p-value assuming a asymptotic normal distribution.\n\n Notes\n -----\n Use only when the number of observation in each sample is > 20 and\n you have 2 independent samples of ranks. 
Mann-Whitney U is\n significant if the u-obtained is LESS THAN or equal to the critical\n value of U.\n\n This test corrects for ties and by default uses a continuity correction.\n The reported p-value is for a one-sided hypothesis, to get the two-sided\n p-value multiply the returned p-value by 2.\n\n \"\"\"\n x = asarray(x)\n y = asarray(y)\n n1 = len(x)\n n2 = len(y)\n ranked = rankdata(np.concatenate((x, y)))\n rankx = ranked[0:n1] # get the x-ranks\n u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x\n u2 = n1*n2 - u1 # remainder is U for y\n bigu = max(u1, u2)\n smallu = min(u1, u2)\n T = tiecorrect(ranked)\n if T == 0:\n raise ValueError('All numbers are identical in amannwhitneyu')\n sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)\n\n if use_continuity:\n # normal approximation for prob calc with continuity correction\n z = abs((bigu - 0.5 - n1*n2/2.0) / sd)\n else:\n z = abs((bigu - n1*n2/2.0) / sd) # normal approximation for prob calc\n\n return smallu, distributions.norm.sf(z)\n\n\ndef ranksums(x, y):\n \"\"\"\n Compute the Wilcoxon rank-sum statistic for two samples.\n\n The Wilcoxon rank-sum test tests the null hypothesis that two sets\n of measurements are drawn from the same distribution. The alternative\n hypothesis is that values in one sample are more likely to be\n larger than the values in the other sample.\n\n This test should be used to compare two samples from continuous\n distributions. It does not handle ties between measurements\n in x and y. For tie-handling and an optional continuity correction\n see `scipy.stats.mannwhitneyu`.\n\n Parameters\n ----------\n x,y : array_like\n The data from the two samples\n\n Returns\n -------\n z-statistic : float\n The test statistic under the large-sample approximation that the\n rank sum statistic is normally distributed\n p-value : float\n The two-sided p-value of the test\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test\n\n \"\"\"\n x, y = map(np.asarray, (x, y))\n n1 = len(x)\n n2 = len(y)\n alldata = np.concatenate((x, y))\n ranked = rankdata(alldata)\n x = ranked[:n1]\n s = np.sum(x, axis=0)\n expected = n1 * (n1+n2+1) / 2.0\n z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)\n prob = 2 * distributions.norm.sf(abs(z))\n return z, prob\n\n\ndef kruskal(*args):\n \"\"\"\n Compute the Kruskal-Wallis H-test for independent samples\n\n The Kruskal-Wallis H-test tests the null hypothesis that the population\n median of all of the groups are equal. It is a non-parametric version of\n ANOVA. The test works on 2 or more independent samples, which may have\n different sizes. Note that rejecting the null hypothesis does not\n indicate which of the groups differs. Post-hoc comparisons between\n groups are required to determine which groups are different.\n\n Parameters\n ----------\n sample1, sample2, ... : array_like\n Two or more arrays with the sample measurements can be given as\n arguments.\n\n Returns\n -------\n H-statistic : float\n The Kruskal-Wallis H statistic, corrected for ties\n p-value : float\n The p-value for the test using the assumption that H has a chi\n square distribution\n\n Notes\n -----\n Due to the assumption that H has a chi square distribution, the number\n of samples in each group must not be too small. A typical rule is\n that each sample must have at least 5 measurements.\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance\n\n \"\"\"\n args = list(map(np.asarray, args)) # convert to a numpy array\n na = len(args) # Kruskal-Wallis on 'na' groups, each in it's own array\n if na < 2:\n raise ValueError(\"Need at least two groups in stats.kruskal()\")\n n = np.asarray(list(map(len, args)))\n\n alldata = np.concatenate(args)\n ranked = rankdata(alldata) # Rank the data\n ties = tiecorrect(ranked) # Correct for ties\n if ties == 0:\n raise ValueError('All numbers are identical in kruskal')\n\n # Compute sum^2/n for each group and sum\n j = np.insert(np.cumsum(n), 0, 0)\n ssbn = 0\n for i in range(na):\n ssbn += square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])\n\n totaln = np.sum(n)\n h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)\n df = na - 1\n h /= ties\n return h, chisqprob(h, df)\n\n\ndef friedmanchisquare(*args):\n \"\"\"\n Computes the Friedman test for repeated measurements\n\n The Friedman test tests the null hypothesis that repeated measurements of\n the same individuals have the same distribution. It is often used\n to test for consistency among measurements obtained in different ways.\n For example, if two measurement techniques are used on the same set of\n individuals, the Friedman test can be used to determine if the two\n measurement techniques are consistent.\n\n Parameters\n ----------\n measurements1, measurements2, measurements3... : array_like\n Arrays of measurements. All of the arrays must have the same number\n of elements. At least 3 sets of measurements must be given.\n\n Returns\n -------\n friedman chi-square statistic : float\n the test statistic, correcting for ties\n p-value : float\n the associated p-value assuming that the test statistic has a chi\n squared distribution\n\n Notes\n -----\n Due to the assumption that the test statistic has a chi squared\n distribution, the p-value is only reliable for n > 10 and more than\n 6 repeated measurements.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Friedman_test\n\n \"\"\"\n k = len(args)\n if k < 3:\n raise ValueError('\\nLess than 3 levels. Friedman test not appropriate.\\n')\n\n n = len(args[0])\n for i in range(1, k):\n if len(args[i]) != n:\n raise ValueError('Unequal N in friedmanchisquare. Aborting.')\n\n # Rank data\n data = np.vstack(args).T\n data = data.astype(float)\n for i in range(len(data)):\n data[i] = rankdata(data[i])\n\n # Handle ties\n ties = 0\n for i in range(len(data)):\n replist, repnum = find_repeats(array(data[i]))\n for t in repnum:\n ties += t * (t*t - 1)\n c = 1 - ties / float(k*(k*k - 1)*n)\n\n ssbn = np.sum(data.sum(axis=0)**2)\n chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c\n return chisq, chisqprob(chisq, k - 1)\n\n\ndef combine_pvalues(pvalues, method='fisher', weights=None):\n \"\"\"\n Methods for combining the p-values of independent tests bearing upon the\n same hypothesis.\n\n Parameters\n ----------\n pvalues : array_like, 1-D\n Array of p-values assumed to come from independent tests.\n method : {'fisher', 'stouffer'}, optional\n Name of method to use to combine p-values. 
The following methods are\n available:\n - \"fisher\": Fisher's method (Fisher's combined probability test),\n the default.\n - \"stouffer\": Stouffer's Z-score method.\n weights : array_like, 1-D, optional\n Optional array of weights used only for Stouffer's Z-score method.\n\n Returns\n -------\n statistic: float\n The statistic calculated by the specified method:\n - \"fisher\": The chi-squared statistic\n - \"stouffer\": The Z-score\n pval: float\n The combined p-value.\n\n Notes\n -----\n Fisher's method (also known as Fisher's combined probability test) [1]_ uses\n a chi-squared statistic to compute a combined p-value. The closely related\n Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The\n advantage of Stouffer's method is that it is straightforward to introduce\n weights, which can make Stouffer's method more powerful than Fisher's\n method when the p-values are from studies of different size [3]_ [4]_.\n\n Fisher's method may be extended to combine p-values from dependent tests\n [5]_. Extensions such as Brown's method and Kost's method are not currently\n implemented.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method\n .. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method\n .. [3] Whitlock, M. C. \"Combining probability from independent tests: the\n weighted Z-method is superior to Fisher's approach.\" Journal of\n Evolutionary Biology 18, no. 5 (2005): 1368-1373.\n .. [4] Zaykin, Dmitri V. \"Optimally weighted Z-test is a powerful method\n for combining probabilities in meta-analysis.\" Journal of\n Evolutionary Biology 24, no. 8 (2011): 1836-1841.\n .. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method\n\n \"\"\"\n pvalues = np.asarray(pvalues)\n if pvalues.ndim != 1:\n raise ValueError(\"pvalues is not 1-D\")\n\n if method == 'fisher':\n Xsq = -2 * np.sum(np.log(pvalues))\n pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))\n return (Xsq, pval)\n elif method == 'stouffer':\n if weights is None:\n weights = np.ones_like(pvalues)\n elif len(weights) != len(pvalues):\n raise ValueError(\"pvalues and weights must be of the same size.\")\n\n weights = np.asarray(weights)\n if weights.ndim != 1:\n raise ValueError(\"weights is not 1-D\")\n\n Zi = distributions.norm.isf(pvalues)\n Z = np.dot(weights, Zi) / np.linalg.norm(weights)\n pval = distributions.norm.sf(Z)\n\n return (Z, pval)\n else:\n raise ValueError(\n \"Invalid method '%s'. 
Options are 'fisher' or 'stouffer'\", method)\n\n#####################################\n# PROBABILITY CALCULATIONS #\n#####################################\n\ndef chisqprob(chisq, df):\n \"\"\"\n Probability value (1-tail) for the Chi^2 probability distribution.\n\n Broadcasting rules apply.\n\n Parameters\n ----------\n chisq : array_like or float > 0\n\n df : array_like or float, probably int >= 1\n\n Returns\n -------\n chisqprob : ndarray\n The area from `chisq` to infinity under the Chi^2 probability\n distribution with degrees of freedom `df`.\n\n \"\"\"\n return special.chdtrc(df, chisq)\n\n\ndef betai(a, b, x):\n \"\"\"\n Returns the incomplete beta function.\n\n I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)\n\n where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma\n function of a.\n\n The standard broadcasting rules apply to a, b, and x.\n\n Parameters\n ----------\n a : array_like or float > 0\n\n b : array_like or float > 0\n\n x : array_like or float\n x will be clipped to be no greater than 1.0 .\n\n Returns\n -------\n betai : ndarray\n Incomplete beta function.\n\n \"\"\"\n x = np.asarray(x)\n x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0\n return special.betainc(a, b, x)\n\n\n#####################################\n# ANOVA CALCULATIONS #\n#####################################\n\ndef f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):\n \"\"\"Calculation of Wilks lambda F-statistic for multivarite data, per\n Maxwell & Delaney p.657.\n \"\"\"\n if isinstance(ER, (int, float)):\n ER = array([[ER]])\n if isinstance(EF, (int, float)):\n EF = array([[EF]])\n lmbda = linalg.det(EF) / linalg.det(ER)\n if (a-1)**2 + (b-1)**2 == 5:\n q = 1\n else:\n q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))\n\n n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)\n d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)\n return n_um / d_en\n\n\ndef f_value(ER, EF, dfR, dfF):\n \"\"\"\n Returns an F-statistic for a restricted vs. 
unrestricted model.\n\n Parameters\n ----------\n ER : float\n `ER` is the sum of squared residuals for the restricted model\n or null hypothesis\n\n EF : float\n `EF` is the sum of squared residuals for the unrestricted model\n or alternate hypothesis\n\n dfR : int\n `dfR` is the degrees of freedom in the restricted model\n\n dfF : int\n `dfF` is the degrees of freedom in the unrestricted model\n\n Returns\n -------\n F-statistic : float\n\n \"\"\"\n return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))\n\n\ndef f_value_multivariate(ER, EF, dfnum, dfden):\n \"\"\"\n Returns a multivariate F-statistic.\n\n Parameters\n ----------\n ER : ndarray\n Error associated with the null hypothesis (the Restricted model).\n From a multivariate F calculation.\n EF : ndarray\n Error associated with the alternate hypothesis (the Full model)\n From a multivariate F calculation.\n dfnum : int\n Degrees of freedom the Restricted model.\n dfden : int\n Degrees of freedom associated with the Restricted model.\n\n Returns\n -------\n fstat : float\n The computed F-statistic.\n\n \"\"\"\n if isinstance(ER, (int, float)):\n ER = array([[ER]])\n if isinstance(EF, (int, float)):\n EF = array([[EF]])\n n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)\n d_en = linalg.det(EF) / float(dfden)\n return n_um / d_en\n\n\n#####################################\n# SUPPORT FUNCTIONS #\n#####################################\n\ndef ss(a, axis=0):\n \"\"\"\n Squares each element of the input array, and returns the sum(s) of that.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate. Default is 0. If None, compute over\n the whole array `a`.\n\n Returns\n -------\n ss : ndarray\n The sum along the given axis for (a**2).\n\n See also\n --------\n square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([1., 2., 5.])\n >>> stats.ss(a)\n 30.0\n\n And calculating along an axis:\n\n >>> b = np.array([[1., 2., 5.], [2., 5., 6.]])\n >>> stats.ss(b, axis=1)\n array([ 30., 65.])\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n return np.sum(a*a, axis)\n\n\ndef square_of_sums(a, axis=0):\n \"\"\"\n Sums elements of the input array, and returns the square(s) of that sum.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate. Default is 0. If None, compute over\n the whole array `a`.\n\n Returns\n -------\n square_of_sums : float or ndarray\n The square of the sum over `axis`.\n\n See also\n --------\n ss : The sum of squares (the opposite of `square_of_sums`).\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(20).reshape(5,4)\n >>> stats.square_of_sums(a)\n array([ 1600., 2025., 2500., 3025.])\n >>> stats.square_of_sums(a, axis=None)\n 36100.0\n\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n s = np.sum(a, axis)\n if not np.isscalar(s):\n return s.astype(float) * s\n else:\n return float(s) * s\n\n\[email protected](message=\"scipy.stats.fastsort is deprecated in scipy 0.16.0\")\ndef fastsort(a):\n \"\"\"\n Sort an array and provide the argsort.\n\n Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n fastsort : ndarray of type int\n sorted indices into the original array\n\n \"\"\"\n # TODO: the wording in the docstring is nonsense.\n it = np.argsort(a)\n as_ = a[it]\n return as_, it\n"
] | [
[
"numpy.sum",
"numpy.nanmedian",
"numpy.var",
"numpy.histogram",
"numpy.any",
"numpy.argsort",
"numpy.asarray",
"numpy.ones_like",
"scipy.special.fdtrc",
"numpy.ma.asarray",
"numpy.log",
"numpy.deprecate",
"numpy.ma.masked_less_equal",
"numpy.apply_along_axis",
"scipy._lib.six.callable",
"numpy.isscalar",
"numpy.float64",
"numpy.cov",
"numpy.partition",
"scipy.special.chdtrc",
"numpy.ma.minimum.reduce",
"numpy.add.reduce",
"scipy.special.betainc",
"numpy.vstack",
"numpy.extract",
"numpy.append",
"numpy.concatenate",
"numpy.abs",
"numpy.seterr",
"numpy.expand_dims",
"numpy.ma.MaskedArray",
"numpy.absolute",
"numpy.isnan",
"numpy.where",
"numpy.ma.maximum.reduce",
"numpy.ma.masked_greater_equal",
"numpy.unique",
"numpy.mean",
"numpy.corrcoef",
"scipy.linalg.det",
"numpy.ma.masked_less",
"numpy.round",
"numpy.bincount",
"numpy.ceil",
"numpy.zeros",
"numpy.searchsorted",
"numpy.dot",
"scipy.special.xlogy",
"numpy.median",
"numpy.asanyarray",
"numpy.lexsort",
"numpy.ma.masked_greater",
"numpy.arange",
"numpy.all",
"numpy.max",
"numpy.min",
"numpy.power",
"numpy.sort",
"numpy.maximum",
"numpy.std",
"numpy.finfo",
"numpy.array",
"numpy.linalg.norm",
"numpy.cumsum",
"numpy.divide",
"scipy._lib.six.xrange",
"numpy.floor",
"numpy.errstate",
"numpy.ravel",
"numpy.place",
"numpy.sqrt",
"numpy.ma.add.reduce"
]
] |
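The row above embeds a scipy-style statistics module whose docstrings describe `power_divergence`, `chisquare` and `ks_2samp`. Below is a minimal usage sketch of those public entry points via `scipy.stats`, assuming a recent scipy/numpy install; it only mirrors the docstring examples and is not part of the archived file itself.

# Hedged sketch: calling the goodness-of-fit helpers documented in the row above.
import numpy as np
from scipy.stats import chisquare, power_divergence, ks_2samp

f_obs = [16, 18, 16, 14, 12, 12]

# Pearson chi-squared against uniform expected frequencies (default lambda_).
stat, p = chisquare(f_obs)
print(stat, p)  # approx. 2.0, 0.849 as in the docstring example

# G-test (log-likelihood ratio) selected through the lambda_ parameter.
g_stat, g_p = power_divergence(f_obs, lambda_='log-likelihood')

# Two-sample Kolmogorov-Smirnov test on draws from two different normals.
rng = np.random.default_rng(0)
d, ks_p = ks_2samp(rng.normal(0.0, 1.0, 200), rng.normal(0.5, 1.5, 300))
print(d, ks_p)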
cs-gn/tpu | [
"4727594874e8587a60cb088627d46f73a1769823"
] | [
"models/experimental/mnist_keras_ds/mnist.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Experimental Keras MNIST Example.\n\nTo test on CPU:\n python mnist.py --use_tpu=False [--fake_data=true]\n\nTo test on TPU:\n python mnist.py --use_tpu=True [--tpu=$TPU_NAME]\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Standard Imports\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport tensorflow as tf\n\nflags.DEFINE_bool('use_tpu', True, 'Use TPU model instead of CPU.')\nflags.DEFINE_string('tpu', None, 'Name of the TPU to use.')\nflags.DEFINE_string(\n 'model_dir', None,\n ('The directory where the model and training/evaluation summaries '\n 'are stored. If unset, no summaries will be stored.'))\n\nflags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')\n\n# Batch size should satify two properties to be able to run in cloud:\n# num_eval_samples % batch_size == 0\n# batch_size % 8 == 0\nBATCH_SIZE = 200\nNUM_CLASSES = 10\nEPOCHS = 15\n\n# input image dimensions\nIMG_ROWS, IMG_COLS = 28, 28\n\nFLAGS = flags.FLAGS\n\n\ndef mnist_model(input_shape):\n \"\"\"Creates a MNIST model.\"\"\"\n model = tf.keras.models.Sequential()\n model.add(\n tf.keras.layers.Conv2D(\n 32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation='relu'))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n return model\n\n\ndef run():\n \"\"\"Run the model training and return evaluation output.\"\"\"\n use_tpu = FLAGS.use_tpu\n\n strategy = None\n if use_tpu:\n strategy = tf.contrib.distribute.TPUStrategy(\n tf.contrib.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu),\n steps_per_run=100)\n\n print('Mode:', 'TPU' if use_tpu else 'CPU')\n\n if FLAGS.fake_data:\n print('Using fake data')\n x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))\n y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)\n x_test, y_test = x_train, y_train\n else:\n # the data, split between train and test sets\n print('Using real data')\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)\n x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)\n input_shape = (IMG_ROWS, IMG_COLS, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = 
tf.keras.utils.to_categorical(y_train, NUM_CLASSES)\n y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)\n\n model = mnist_model(input_shape)\n model.compile(\n loss=tf.keras.losses.categorical_crossentropy,\n optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),\n metrics=['accuracy'],\n distribute=strategy)\n\n callbacks = []\n if FLAGS.model_dir:\n callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]\n\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n callbacks=callbacks,\n epochs=EPOCHS,\n verbose=1,\n validation_data=(x_test, y_test))\n return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)\n\n\ndef main(unused_dev):\n score = run()\n print('Loss for final step: %s;' % score[0])\n print('Accuracy: %s;' % score[1])\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run(main)\n"
] | [
[
"tensorflow.keras.models.Sequential",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout",
"numpy.zeros",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.logging.set_verbosity",
"numpy.random.random",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D"
]
] |
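The MNIST row above relies on the long-removed `tf.contrib` TPUStrategy/cluster-resolver API (TensorFlow 1.x). As a point of comparison only, here is a hedged sketch of the same convolutional model on CPU/GPU with the current `tf.keras` API; the batch size and layer stack follow the archived script, everything else (single epoch, plain 'sgd' optimizer string) is an illustrative assumption.

# Hedged sketch: the archived Keras MNIST model without the deprecated tf.contrib wiring.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(28, 28, 1)),
    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.Conv2D(64, 3, activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=200, epochs=1, validation_data=(x_test, y_test))
print(model.evaluate(x_test, y_test, batch_size=200))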
byu-dml/d3m-profiler | [
"9a3bc45061267091b0109f2159648785e370a18b"
] | [
"example.py"
] | [
"import numpy as np\nimport multiprocessing as mp\nimport pathlib as pl\nimport pandas as pd\nimport pickle\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC as SupportVectorClassifier\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC as SupportVectorClassifier\n\nfrom d3m_profiler import rebalance, score_results\nfrom d3m_profiler.evaluate_models import run_models, _save_results\nfrom d3m_profiler.embed import embed\n\n_NUM_THREADS = mp.cpu_count()\n\nresults = pd.DataFrame(columns=['data_collection', 'classifier', 'balanced', 'accuracy_score', 'f1_score_micro', 'f1_score_macro', 'f1_score_weighted'])\n\n\n\n#closed_bal_file = 'data/closed_d3m_bal.csv'\n#closed_unbal_file = 'data/closed_d3m_unbal.csv'\n\n#open_bal_file = 'data/open_d3m_bal.csv'\n#open_unbal_file = 'data/open_d3m_unbal.csv'\n\n#files = [closed_unbal_file, closed_bal_file, open_unbal_file, open_bal_file]\n\ntype_column = 'colType'\nmodel_weights_path = 'torontobooks_unigrams.bin'\n\nopen_d3m_file = 'data/open_d3m_data.csv'\nclosed_d3m_file = 'data/closed_d3m_data.csv'\n\nfiles = [open_d3m_file]\n#files = [open_d3m_file, closed_d3m_file]\n#files = [closed_d3m_file, open_d3m_file]\n\nfor _file in files:\n data_collection = _file.split('/')[1]\n print(data_collection)\n\n orig_df = pd.read_csv(_file)\n orig_df = orig_df.applymap(str)\n\n dfs = [embed(orig_df, type_column, model_weights_path)]\n\n class_counts = orig_df[type_column].value_counts().values\n balanced = len(set(class_counts)) == 1\n\n if (not balanced):\n print('rebalancing {} data collection'.format(data_collection))\n rebal_df = rebalance.rebalance_SMOTE(orig_df, type_column, 'smote', model_weights_path)\n dfs.append(rebal_df)\n\n for df in dfs:\n class_counts = df[type_column].value_counts().values\n balanced = len(set(class_counts)) == 1\n print(balanced)\n\n xtrain, xtest, ytrain, ytest = None, None, None, None\n\n if (balanced):\n X_syn = df[df['datasetName'].eq('SYNTHETIC')].drop(['datasetName', type_column], axis=1)\n y_syn = df[df['datasetName'].eq('SYNTHETIC')][type_column]\n\n X_organ = df[df['datasetName'] != 'SYNTHETIC'].drop(['datasetName', type_column], axis=1)\n y_organ = df[df['datasetName'] != 'SYNTHETIC'][type_column]\n\n xtrain, xtest, ytrain, ytest = train_test_split(X_organ, y_organ, test_size=0.33)\n\n xtrain = xtrain.append(X_syn)\n ytrain = ytrain.append(y_syn)\n else:\n X = df.drop(['datasetName', type_column], axis=1)\n y = df[type_column]\n dataset_names = df['datasetName']\n \n xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.33)\n\n #for model_class in [SupportVectorClassifier, RandomForestClassifier]:\n for model_class in [RandomForestClassifier]:\n classifier = model_class.__name__\n print('evaluating model: {}'.format(classifier))\n model = model_class()\n print('fitting model...')\n model.fit(xtrain, ytrain)\n if (balanced):\n filename = 'RF_public_model.sav'\n pickle.dump(model, open(filename, 'wb'))\n yhat = model.predict(xtest)\n\n accuracy = accuracy_score(ytest, yhat)\n f1_micro = f1_score(ytest, yhat, average='micro')\n f1_macro = f1_score(ytest, yhat, average='macro')\n f1_weighted = f1_score(ytest, yhat, average='weighted')\n\n results = results.append({'data_collection': data_collection, 'classifier': classifier, 'balanced': balanced, 'accuracy_score': accuracy, \n 'f1_score_micro': f1_micro, 'f1_score_macro': f1_macro, 'f1_score_weighted': f1_weighted}, 
ignore_index=True)\n\n\nprint(results)\nresults.to_csv('data/results_2.csv', index=False)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split"
]
] |
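The d3m-profiler row above trains a RandomForest on embedded column metadata and reports accuracy and F1 scores. The sketch below isolates that core split/fit/score loop using only scikit-learn; the project-specific `embed()` and `rebalance` helpers are not public knowledge here, so a synthetic feature matrix and label vector stand in for them purely for illustration.

# Hedged sketch of the train/evaluate loop from example.py, with synthetic data
# standing in for the column-name embeddings produced by d3m_profiler.embed.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 16))      # stand-in for embedded column features
y = rng.integers(0, 3, size=300)    # stand-in for the colType labels

xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.33, random_state=0)
model = RandomForestClassifier().fit(xtrain, ytrain)
yhat = model.predict(xtest)

print('accuracy:', accuracy_score(ytest, yhat))
print('f1 macro:', f1_score(ytest, yhat, average='macro'))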
YunongPan/swin_gui | [
"52adc917d3413781e76609d021c6a2579fdf44d1"
] | [
"mmdet/datasets/coco.py"
] | [
"import itertools\nimport logging\nimport os.path as osp\nimport tempfile\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nimport pycocotools\nfrom mmcv.utils import print_log\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom terminaltables import AsciiTable\n\nfrom mmdet.core import eval_recalls\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\[email protected]_module()\nclass CocoDataset(CustomDataset):\n\n CLASSES = ('schwarze_Schraube',)## check mark ##\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n \"\"\"\n if not getattr(pycocotools, '__version__', '0') >= '12.0.2':\n raise AssertionError(\n 'Incompatible version of pycocotools is installed. '\n 'Run pip uninstall pycocotools first. Then run pip '\n 'install mmpycocotools to install open-mmlab forked '\n 'pycocotools.')\n\n self.coco = COCO(ann_file)\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info['filename'] = info['file_name']\n data_infos.append(info)\n ann_ids = self.coco.get_ann_ids(img_ids=[i])\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos\n\n def get_ann_info(self, idx):\n \"\"\"Get COCO annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n ann_info = self.coco.load_anns(ann_ids)\n return self._parse_ann_info(self.data_infos[idx], ann_info)\n\n def get_cat_ids(self, idx):\n \"\"\"Get COCO category ids by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n ann_info = self.coco.load_anns(ann_ids)\n return [ann['category_id'] for ann in ann_info]\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without ground truths.\"\"\"\n valid_inds = []\n # obtain images that contain annotation\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n # obtain images that contain annotations of the required categories\n ids_in_cat = set()\n for i, class_id in enumerate(self.cat_ids):\n ids_in_cat |= set(self.coco.cat_img_map[class_id])\n # merge the image id sets of the two conditions and use the merged set\n # to filter out images if self.filter_empty_gt=True\n ids_in_cat &= ids_with_ann\n\n valid_img_ids = []\n for i, img_info in enumerate(self.data_infos):\n img_id = self.img_ids[i]\n if self.filter_empty_gt and img_id not in ids_in_cat:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n valid_img_ids.append(img_id)\n self.img_ids = valid_img_ids\n return valid_inds\n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, 
bboxes_ignore,\\\n labels, masks, seg_map. \"masks\" are raw annotations and not \\\n decoded into binary masks.\n \"\"\"\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))\n if inter_w * inter_h == 0:\n continue\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann.get('segmentation', None))\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map)\n\n return ann\n\n def xyxy2xywh(self, bbox):\n \"\"\"Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n evaluation.\n\n Args:\n bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n ``xyxy`` order.\n\n Returns:\n list[float]: The converted bounding boxes, in ``xywh`` order.\n \"\"\"\n\n _bbox = bbox.tolist()\n return [\n _bbox[0],\n _bbox[1],\n _bbox[2] - _bbox[0],\n _bbox[3] - _bbox[1],\n ]\n\n def _proposal2json(self, results):\n \"\"\"Convert proposal results to COCO json style.\"\"\"\n json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n bboxes = results[idx]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = 1\n json_results.append(data)\n return json_results\n\n def _det2json(self, results):\n \"\"\"Convert detection results to COCO json style.\"\"\"\n json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n result = results[idx]\n for label in range(len(result)):\n bboxes = result[label]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = self.cat_ids[label]\n json_results.append(data)\n return json_results\n\n def _segm2json(self, results):\n \"\"\"Convert instance segmentation results to COCO json style.\"\"\"\n bbox_json_results = []\n segm_json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n det, seg = results[idx]\n for label in range(len(det)):\n # bbox results\n bboxes = det[label]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = self.cat_ids[label]\n bbox_json_results.append(data)\n\n # segm results\n # some detectors use different scores for bbox and mask\n if isinstance(seg, tuple):\n segms = seg[0][label]\n mask_score = seg[1][label]\n else:\n segms = seg[label]\n mask_score = [bbox[4] for bbox in bboxes]\n for i in 
range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(mask_score[i])\n data['category_id'] = self.cat_ids[label]\n if isinstance(segms[i]['counts'], bytes):\n segms[i]['counts'] = segms[i]['counts'].decode()\n data['segmentation'] = segms[i]\n segm_json_results.append(data)\n return bbox_json_results, segm_json_results\n\n def results2json(self, results, outfile_prefix):\n \"\"\"Dump the detection results to a COCO style json file.\n\n There are 3 types of results: proposals, bbox predictions, mask\n predictions, and they have different data types. This method will\n automatically recognize the type, and dump them to json files.\n\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files. If the\n prefix is \"somepath/xxx\", the json files will be named\n \"somepath/xxx.bbox.json\", \"somepath/xxx.segm.json\",\n \"somepath/xxx.proposal.json\".\n\n Returns:\n dict[str: str]: Possible keys are \"bbox\", \"segm\", \"proposal\", and \\\n values are corresponding filenames.\n \"\"\"\n result_files = dict()\n if isinstance(results[0], list):\n json_results = self._det2json(results)\n result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n mmcv.dump(json_results, result_files['bbox'])\n elif isinstance(results[0], tuple):\n json_results = self._segm2json(results)\n result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n result_files['segm'] = f'{outfile_prefix}.segm.json'\n mmcv.dump(json_results[0], result_files['bbox'])\n mmcv.dump(json_results[1], result_files['segm'])\n elif isinstance(results[0], np.ndarray):\n json_results = self._proposal2json(results)\n result_files['proposal'] = f'{outfile_prefix}.proposal.json'\n mmcv.dump(json_results, result_files['proposal'])\n else:\n raise TypeError('invalid type of results')\n return result_files\n\n def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):\n gt_bboxes = []\n for i in range(len(self.img_ids)):\n ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])\n ann_info = self.coco.load_anns(ann_ids)\n if len(ann_info) == 0:\n gt_bboxes.append(np.zeros((0, 4)))\n continue\n bboxes = []\n for ann in ann_info:\n if ann.get('ignore', False) or ann['iscrowd']:\n continue\n x1, y1, w, h = ann['bbox']\n bboxes.append([x1, y1, x1 + w, y1 + h])\n bboxes = np.array(bboxes, dtype=np.float32)\n if bboxes.shape[0] == 0:\n bboxes = np.zeros((0, 4))\n gt_bboxes.append(bboxes)\n\n recalls = eval_recalls(\n gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)\n ar = recalls.mean(axis=1)\n return ar\n\n def format_results(self, results, jsonfile_prefix=None, **kwargs):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. 
Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n result_files = self.results2json(results, jsonfile_prefix)\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n classwise=False,\n proposal_nums=(100, 300, 1000),\n iou_thrs=None,\n metric_items=None):\n \"\"\"Evaluation in COCO protocol.\n\n Args:\n results (list[list | tuple]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. Options are\n 'bbox', 'segm', 'proposal', 'proposal_fast'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float], optional): IoU threshold used for\n evaluating recalls/mAPs. If set to a list, the average of all\n IoUs will also be computed. If not specified, [0.50, 0.55,\n 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n Default: None.\n metric_items (list[str] | str, optional): Metric items that will\n be returned. 
If not specified, ``['AR@100', 'AR@300',\n 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be\n used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',\n 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when\n ``metric=='bbox' or metric=='segm'``.\n\n Returns:\n dict[str, float]: COCO style evaluation metric.\n \"\"\"\n\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n if iou_thrs is None:\n iou_thrs = np.linspace(\n .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n if metric_items is not None:\n if not isinstance(metric_items, list):\n metric_items = [metric_items]\n\n result_files, tmp_dir = self.format_results(results, jsonfile_prefix)\n\n eval_results = OrderedDict()\n cocoGt = self.coco\n for metric in metrics:\n msg = f'Evaluating {metric}...'\n if logger is None:\n msg = '\\n' + msg\n print_log(msg, logger=logger)\n\n if metric == 'proposal_fast':\n ar = self.fast_eval_recall(\n results, proposal_nums, iou_thrs, logger='silent')\n log_msg = []\n for i, num in enumerate(proposal_nums):\n eval_results[f'AR@{num}'] = ar[i]\n log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n log_msg = ''.join(log_msg)\n print_log(log_msg, logger=logger)\n continue\n\n if metric not in result_files:\n raise KeyError(f'{metric} is not in results')\n try:\n cocoDt = cocoGt.loadRes(result_files[metric])\n except IndexError:\n print_log(\n 'The testing results of the whole dataset is empty.',\n logger=logger,\n level=logging.ERROR)\n break\n\n iou_type = 'bbox' if metric == 'proposal' else metric\n cocoEval = COCOeval(cocoGt, cocoDt, iou_type)\n cocoEval.params.catIds = self.cat_ids\n cocoEval.params.imgIds = self.img_ids\n cocoEval.params.maxDets = list(proposal_nums)\n cocoEval.params.iouThrs = iou_thrs\n # mapping of cocoEval.stats\n coco_metric_names = {\n 'mAP': 0,\n 'mAP_50': 1,\n 'mAP_75': 2,\n 'mAP_s': 3,\n 'mAP_m': 4,\n 'mAP_l': 5,\n 'AR@100': 6,\n 'AR@300': 7,\n 'AR@1000': 8,\n 'AR_s@1000': 9,\n 'AR_m@1000': 10,\n 'AR_l@1000': 11\n }\n if metric_items is not None:\n for metric_item in metric_items:\n if metric_item not in coco_metric_names:\n raise KeyError(\n f'metric item {metric_item} is not supported')\n\n if metric == 'proposal':\n cocoEval.params.useCats = 0\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n if metric_items is None:\n metric_items = [\n 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',\n 'AR_m@1000', 'AR_l@1000'\n ]\n\n for item in metric_items:\n val = float(\n f'{cocoEval.stats[coco_metric_names[item]]:.3f}')\n eval_results[item] = val\n else:\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n if classwise: # Compute per-category AP\n # Compute per-category AP\n # from https://github.com/facebookresearch/detectron2/\n precisions = cocoEval.eval['precision']\n # precision: (iou, recall, cls, area range, max dets)\n assert len(self.cat_ids) == precisions.shape[2]\n\n results_per_category = []\n for idx, catId in enumerate(self.cat_ids):\n # area range index 0: all area ranges\n # max dets index -1: typically 100 per image\n nm = self.coco.loadCats(catId)[0]\n precision = precisions[:, :, idx, 0, -1]\n precision = precision[precision > -1]\n if precision.size:\n ap = np.mean(precision)\n else:\n ap = float('nan')\n results_per_category.append(\n (f'{nm[\"name\"]}', f'{float(ap):0.3f}'))\n\n num_columns = min(6, 
len(results_per_category) * 2)\n results_flatten = list(\n itertools.chain(*results_per_category))\n headers = ['category', 'AP'] * (num_columns // 2)\n results_2d = itertools.zip_longest(*[\n results_flatten[i::num_columns]\n for i in range(num_columns)\n ])\n table_data = [headers]\n table_data += [result for result in results_2d]\n table = AsciiTable(table_data)\n print_log('\\n' + table.table, logger=logger)\n\n if metric_items is None:\n metric_items = [\n 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n ]\n\n for metric_item in metric_items:\n key = f'{metric}_{metric_item}'\n val = float(\n f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'\n )\n eval_results[key] = val\n ap = cocoEval.stats[:6]\n eval_results[f'{metric}_mAP_copypaste'] = (\n f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '\n f'{ap[4]:.3f} {ap[5]:.3f}')\n if tmp_dir is not None:\n tmp_dir.cleanup()\n return eval_results\n"
] | [
[
"numpy.array",
"numpy.mean",
"numpy.round",
"numpy.zeros"
]
] |
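The mmdet `CocoDataset` row above converts detection boxes from ``xyxy`` to COCO's ``xywh`` layout before dumping JSON results for COCOeval. Since a full COCOeval run needs annotation files, the self-contained sketch below only demonstrates that conversion step; the function name is mine, not mmdet's.

# Minimal sketch of the xyxy -> xywh conversion performed by CocoDataset.xyxy2xywh.
import numpy as np

def xyxy_to_xywh(bbox):
    """Convert one [x1, y1, x2, y2] box to COCO's [x, y, width, height]."""
    x1, y1, x2, y2 = np.asarray(bbox, dtype=float).tolist()
    return [x1, y1, x2 - x1, y2 - y1]

print(xyxy_to_xywh([10.0, 20.0, 50.0, 80.0]))  # -> [10.0, 20.0, 40.0, 60.0]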
SmirnovKol/Paddle | [
"a3730dc87bc61593514b830727e36e5d19e753cd"
] | [
"python/paddle/nn/layer/conv.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO: define classes of convolutional neural network\n\nimport numpy as np\n\nfrom paddle import get_flags\nfrom ...device import get_cudnn_version\nfrom .. import Layer\nfrom ..initializer import Normal\nfrom .. import functional as F\nfrom ...fluid.layers import utils\nfrom ..functional.conv import _update_padding_nd\nfrom ...device import is_compiled_with_cuda\nfrom ...device import is_compiled_with_rocm\n\n__all__ = []\n\n\ndef _get_default_param_initializer(num_channels, filter_size):\n filter_elem_num = num_channels * np.prod(filter_size)\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std)\n\n\ndef _reverse_repeat_list(t, n):\n \"\"\"Reverse the order of `t` and repeat each element for `n` times.\n This can be used to translate padding arg used by Conv and Pooling modules\n to the ones used by `F.pad`.\n \"\"\"\n return list(x for x in reversed(t) for _ in range(n))\n\n\nclass _ConvNd(Layer):\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n transposed,\n dims,\n stride=1,\n padding=0,\n padding_mode='zeros',\n output_padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCHW\"):\n super(_ConvNd, self).__init__()\n assert weight_attr is not False, \"weight_attr should not be False in Conv.\"\n self._param_attr = weight_attr\n self._bias_attr = bias_attr\n self._groups = groups\n self._in_channels = in_channels\n self._out_channels = out_channels\n self._data_format = data_format\n\n valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}\n if padding_mode not in valid_padding_modes:\n raise ValueError(\n \"padding_mode must be one of {}, but got padding_mode='{}'\".\n format(valid_padding_modes, padding_mode))\n\n if padding_mode in {'reflect', 'replicate', 'circular'\n } and not isinstance(padding, np.int):\n raise TypeError(\n \"when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int\"\n )\n\n valid_format = {'NHWC', 'NCHW', 'NDHWC', 'NCDHW', 'NLC', 'NCL'}\n if data_format not in valid_format:\n raise ValueError(\n \"data_format must be one of {}, but got data_format='{}'\".\n format(valid_format, data_format))\n\n channel_last = (data_format == \"NHWC\") or (data_format\n == \"NDHWC\") or (data_format\n == \"NLC\")\n if channel_last:\n self._channel_dim = len(data_format) - 1\n else:\n self._channel_dim = 1\n\n self._stride = utils.convert_to_list(stride, dims, 'stride')\n self._dilation = utils.convert_to_list(dilation, dims, 'dilation')\n self._kernel_size = utils.convert_to_list(kernel_size, dims,\n 'kernel_size')\n self._padding = padding\n self._padding_mode = padding_mode\n self.output_padding = output_padding\n if dims != 1:\n self._updated_padding, self._padding_algorithm = _update_padding_nd(\n padding, channel_last, dims)\n\n if transposed:\n filter_shape = [self._in_channels, out_channels // groups\n ] + self._kernel_size\n else:\n if 
in_channels % groups != 0:\n raise ValueError(\"in_channels must be divisible by groups.\")\n\n if padding_mode in {'reflect', 'replicate', 'circular'}:\n _paired_padding = utils.convert_to_list(padding, dims,\n 'padding')\n self._reversed_padding_repeated_twice = _reverse_repeat_list(\n _paired_padding, 2)\n\n self._updated_padding, self._padding_algorithm = _update_padding_nd(\n 0, channel_last, dims)\n\n filter_shape = [out_channels, in_channels // groups\n ] + self._kernel_size\n\n def _get_default_param_initializer():\n if transposed:\n return None\n filter_elem_num = np.prod(self._kernel_size) * self._in_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std)\n\n self.weight = self.create_parameter(\n shape=filter_shape,\n attr=self._param_attr,\n default_initializer=_get_default_param_initializer())\n self.bias = self.create_parameter(attr=self._bias_attr,\n shape=[self._out_channels],\n is_bias=True)\n\n cudnn_version = get_cudnn_version()\n\n self._use_cudnn = True if (is_compiled_with_cuda()\n and cudnn_version is not None) else False\n\n self._op_type = \"conv\" + str(dims) + 'd'\n if self._op_type == 'conv2d' and (in_channels == groups\n and in_channels != 1\n and out_channels % in_channels == 0):\n self._op_type = 'depthwise_conv2d'\n if is_compiled_with_rocm():\n self._use_cudnn = True\n else:\n self._use_cudnn = False\n\n if (is_compiled_with_cuda() and get_flags(\"FLAGS_conv2d_disable_cudnn\")\n [\"FLAGS_conv2d_disable_cudnn\"]):\n self._use_cudnn = False\n\n def extra_repr(self):\n main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'\n if self._stride != [1] * len(self._stride):\n main_str += ', stride={_stride}'\n if self._padding != 0:\n main_str += ', padding={_padding}'\n if self._padding_mode != 'zeros':\n main_str += ', padding_mode={_padding_mode}'\n if self.output_padding != 0:\n main_str += ', output_padding={output_padding}'\n if self._dilation != [1] * len(self._dilation):\n main_str += ', dilation={_dilation}'\n if self._groups != 1:\n main_str += ', groups={_groups}'\n main_str += ', data_format={_data_format}'\n return main_str.format(**self.__dict__)\n\n\nclass Conv1D(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv1D`` class.\n For more details, refer to code examples.\n The convolution1D layer calculates the output based on the input, filter\n and stride, padding, dilation, groups parameters. Input and\n Output are in NCL format or NLC format, where N is batch size, C is the number of\n the feature map, L is the length of the feature map.\n Filter's shape is [MCK] , where M is the number of output feature map,\n C is the number of input feature map, K is the size of the kernel. \n If the groups is greater than 1, C will equal the number of input feature map divided by the groups.\n If bias attribution and activation type are provided, bias is added to the\n output of the convolution, and the corresponding activation function is\n applied to the final result.\n\n For each input :math:`X` , the equation is:\n\n .. 
math::\n\n Out = \\sigma (W \\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with 'NCL' format or 'NLC' format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCK] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 1-D ``Tensor`` with shape [M].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, L_{in})`\n\n Kernel shape: :math:`(C_{out}, C_{in}, K)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, L_{out})`\n\n Where\n\n .. math::\n\n L_{out}&= \\frac{(L_{in} + 2 * padding - (dilation * (L_f - 1) + 1))}{stride} + 1 \\\\\n\n Parameters:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of filter. It is as same as the output\n feature map.\n kernel_size (int|tuple|list): The filter size. If kernel_size is a tuple/list,\n it must contain one integer, (kernel_size).\n stride (int|tuple|list, optional): The stride size. If stride is a tuple/list, it must\n contain one integer, (stride_size). Default: 1.\n padding(int|str|tuple|list, optional): The size of zeros to be padded. It must be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means the feature map is zero paded by size of `padding` on both sides.\n 3. a list[int] or tuple[int] whose length is 1, which means the feature map is zero paded by size of `padding[0]` on both sides.\n The default value is 0.\n dilation (int|tuple|list, optional): The dilation size. If dilation is a tuple/list, it must\n contain one integer, (dilation_size). Default: 1.\n groups (int, optional): The groups number of the conv2d Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. Default: 1.\n padding_mode(str, optional): Four modes: 'zeros', 'reflect', 'replicate', 'circular'.\n When in 'zeros' mode, this op uses zeros to pad the input tensor.\n When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.\n When in 'replicate' mode, uses input boundaries to pad the input tensor.\n When in 'circular' mode, uses circular input to pad the input tensor.\n Default is 'zeros'.\n weight_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of conv1d. If it is set to None or one attribute of ParamAttr, conv1d\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with :math:`Normal(0.0, std)`,\n and the :math:`std` is :math:`(\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. Default: None.\n bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv1d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv1d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. 
Default: None.\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filter of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n - x: 3-D tensor with shape: (batch, in_channels, length) or (batch, length, in_channels).\n - weight: 3-D tensor with shape: (out_channels, in_channels, kernel_size)\n - bias: 1-D tensor with shape: (out_channels)\n - output: 3-D tensor with same shape as input x.\n \n Raises:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.nn import Conv1D\n import numpy as np\n x = np.array([[[4, 8, 1, 9],\n [7, 2, 0, 9],\n [6, 9, 2, 6]]]).astype(np.float32)\n w=np.array(\n [[[9, 3, 4],\n [0, 0, 7],\n [2, 5, 6]],\n [[0, 3, 4],\n [2, 9, 7],\n [5, 6, 8]]]).astype(np.float32)\n x_t = paddle.to_tensor(x)\n conv = Conv1D(3, 2, 3)\n conv.weight.set_value(w)\n y_t = conv(x_t)\n print(y_t)\n # [[[133. 238.]\n # [160. 211.]]]\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCL\"):\n super(Conv1D, self).__init__(in_channels,\n out_channels,\n kernel_size,\n False,\n 1,\n stride=stride,\n padding=padding,\n padding_mode=padding_mode,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x):\n padding = 0\n if self._padding_mode != \"zeros\":\n x = F.pad(x,\n self._reversed_padding_repeated_twice,\n mode=self._padding_mode,\n data_format=self._data_format)\n else:\n padding = self._padding\n\n out = F.conv1d(x,\n self.weight,\n bias=self.bias,\n padding=padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format)\n return out\n\n\nclass Conv1DTranspose(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv1DTranspose`` class.\n For more details, refer to code examples.\n The 1-D convolution transpose layer calculates the output based on the input,\n filter, and dilation, stride, padding. Input(Input) and output(Output)\n are in 'NCL' format or 'NLC' where N is batch size, C is the number of channels,\n L is the length of the feature. The details of convolution transpose\n layer, please refer to the following explanation and references\n `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a 3-D Tensor with 'NCL' format or 'NLC' format.\n * :math:`W`: Kernel value, a 3-D Tensor with 'MCK' format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, a 3-D Tensor with data format 'NCL' of 'NLC', the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, L_{in})`\n\n Filter shape: :math:`(C_{in}, C_{out}, L_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, L_{out})`\n\n Where\n\n .. 
math::\n\n L^\\prime_{out} &= (L_{in} - 1) * stride - pad_top - pad_bottom + dilation * (L_f - 1) + 1 \\\\\\\\\n L_{out} &\\in [ L^\\prime_{out}, L^\\prime_{out} + stride ]\n\n Note:\n The conv1d_transpose can be seen as the backward of the conv1d. For conv1d,\n when stride > 1, conv1d maps multiple input shape to the same output shape,\n so for conv1d_transpose, when stride > 1, input shape maps multiple output shape.\n If output_size is None, :math:`L_{out} = L^\\prime_{out}`;\n else, the :math:`L_{out}` of the output size must between :math:`L^\\prime_{out}`\n and :math:`L^\\prime_{out} + stride`.\n\n Args:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of the filter. It is as same as the output\n feature map.\n kernel_size(int|tuple|list, optional): The filter size. If kernel_size is a tuple/list,\n it must contain one integers, (kernel_size). None if\n use output size to calculate kernel_size. Default: None. kernel_size and\n output_size should not be None at the same time.\n stride(int|tuple|list, optional): The stride size. It means the stride in transposed convolution.\n If stride is a tuple/list, it must contain one integer, (stride_size).\n Default: stride = 1.\n padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds\n `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a\n string, either 'VALID' or 'SAME' supported, which is the padding algorithm.\n If `padding` is a tuple or list, it could be in two forms:\n `[pad]` or `[pad_left, pad_right]`. Default: padding = 0.\n output_padding(int|list|tuple, optional): The count of zeros to be added to tail of each dimension.\n If it is a tuple/list, it must contain one integer. Default: 0.\n groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: groups = 1.\n bias(bool, optional): Whether to use bias. Default: True.\n dilation(int|tuple|list, optional): The dilation size. It means the spacing between the kernel points.\n If dilation is a tuple/list, it must contain one integer, (dilation_size).\n Default: dilation = 1.\n weight_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv1d_transpose. If it is set to None or one attribute of ParamAttr, conv1d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv1d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv1d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. 
Default: None.\n\n Attribute:\n **weight** (Parameter): the learnable weights of filters of this layer.\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n\n - x(Tensor): 3-D tensor with shape (batch, in_channels, length) when data_format is \"NCL\" or shape (batch, length, in_channels) when data_format is \"NLC\".\n - weight(Tensor): 3-D tensor with shape (in_channels, out_channels, kernel_length).\n - bias(Tensor): 1-D tensor with shape (out_channels).\n - output_size(int|tuple|list, optional): The output image size. If output size is a tuple/list, it must contain one integer, (feature_length). None if use kernel_size, padding, output_padding and stride to calculate output_size. If output_size and kernel_size are specified at the same time, They should follow the formula above. Default: None. output_size and kernel_size should not be None at the same time.\n - output(Tensor): 3-D tensor with same shape as input x.\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.nn import Conv1DTranspose\n import numpy as np\n \n # shape: (1, 2, 4)\n x=np.array([[[4, 0, 9, 7],\n [8, 0, 9, 2]]]).astype(np.float32)\n # shape: (2, 1, 2)\n y=np.array([[[7, 0]],\n [[4, 2]]]).astype(np.float32)\n x_t = paddle.to_tensor(x)\n conv = Conv1DTranspose(2, 1, 2)\n conv.weight.set_value(y)\n y_t = conv(x_t)\n print(y_t)\n \n # [[[60. 16. 99. 75. 4.]]]\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n groups=1,\n dilation=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCL\"):\n super(Conv1DTranspose, self).__init__(in_channels,\n out_channels,\n kernel_size,\n True,\n 1,\n stride=stride,\n padding=padding,\n dilation=dilation,\n output_padding=output_padding,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x, output_size=None):\n out = F.conv1d_transpose(x,\n self.weight,\n bias=self.bias,\n output_size=output_size,\n output_padding=self.output_padding,\n padding=self._padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format)\n return out\n\n\nclass Conv2D(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv2D`` class.\n For more details, refer to code examples.\n The convolution2D layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input and\n Output are in NCHW format, where N is batch size, C is the number of\n the feature map, H is the height of the feature map, and W is the width of the feature map.\n Filter's shape is [MCHW] , where M is the number of output feature map,\n C is the number of input feature map, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input feature map divided by the groups.\n Please refer to UFLDL's `convolution\n <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_\n for more details.\n If bias attribution and activation type are provided, bias is added to the\n output of the convolution, and the corresponding activation function is\n applied to the final result.\n For each input :math:`X`, the equation is:\n\n .. 
math::\n\n Out = \\sigma (W \\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with NCHW format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 1-D ``Tensor`` with shape [M].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n \n Parameters:\n in_channels(int): The number of input channels in the input image.\n out_channels(int): The number of output channels produced by the convolution.\n kernel_size(int|list|tuple, optional): The size of the convolving kernel.\n stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must\n contain three integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1.\n padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv2d. If it is set to None or one attribute of ParamAttr, conv2d\n will create ParamAttr as param_attr. If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv2d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCHW\" or \"NHWC\". 
Default: \"NCHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filter of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n - weight: :math:`(C_{out}, C_{in}, K_{h}, K_{w})`\n\n - bias: :math:`(C_{out})`\n\n - output: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\\_size[0] - 1) + 1))}{strides[0]} + 1\n\n W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\\_size[1] - 1) + 1))}{strides[1]} + 1\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n \n x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)\n \n conv = nn.Conv2D(4, 6, (3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 6, 6)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCHW\"):\n super(Conv2D, self).__init__(in_channels,\n out_channels,\n kernel_size,\n False,\n 2,\n stride=stride,\n padding=padding,\n padding_mode=padding_mode,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x):\n if self._padding_mode != 'zeros':\n x = F.pad(x,\n self._reversed_padding_repeated_twice,\n mode=self._padding_mode,\n data_format=self._data_format)\n\n out = F.conv._conv_nd(x,\n self.weight,\n bias=self.bias,\n stride=self._stride,\n padding=self._updated_padding,\n padding_algorithm=self._padding_algorithm,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format,\n channel_dim=self._channel_dim,\n op_type=self._op_type,\n use_cudnn=self._use_cudnn)\n return out\n\n\nclass Conv2DTranspose(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv2DTranspose`` class.\n For more details, refer to code examples.\n The convolution2D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input and output\n are in NCHW format. Where N is batch size, C is the number of feature map,\n H is the height of the feature map, and W is the width of the feature map.\n Filter's shape is [CMHW] , where C is the number of input feature map,\n M is the number of output feature map, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input feature map divided by the groups.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n The details of convolution transpose layer, please refer to the following explanation and references\n `conv2dtranspose <https://arxiv.org/pdf/1603.07285.pdf>`_ .\n For each input :math:`X`, the equation is:\n\n .. 
math::\n\n Out = \\sigma (W \\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with NCHW format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [CMHW] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 1-D ``Tensor`` with shape [M].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n \n Parameters:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of channels produced by the convolution.\n kernel_size(int|list|tuple): The kernel size. If kernel_size is a list/tuple,\n it must contain two integers, (kernel_size_H, kernel_size_W).\n Otherwise, the kernel will be a square.\n stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must\n contain two integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. Default: 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` on both sides \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n output_padding(int|list|tuple, optional): Additional size added to one side\n of each dimension in the output shape. Default: 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must\n contain two integers, (dilation_H, dilation_W). Otherwise, the\n dilation_H = dilation_W = dilation. Default: 1.\n groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: 1.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr(ParamAttr|bool, optional): The attribute for the bias of conv2d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCHW\" or \"NHWC\". 
Default: \"NCHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n - weight: :math:`(C_{in}, C_{out}, K_{h}, K_{w})`\n\n - bias: :math:`(C_{out})`\n\n - output: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H^\\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\\_size[0] - 1) + 1\n\n W^\\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\\_size[1] - 1) + 1\n\n H_{out} &\\in [ H^\\prime_{out}, H^\\prime_{out} + strides[0] )\n\n W_{out} &\\in [ W^\\prime_{out}, W^\\prime_{out} + strides[1] )\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n\n x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)\n\n conv = nn.Conv2DTranspose(4, 6, (3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 10, 10)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCHW\"):\n super(Conv2DTranspose, self).__init__(in_channels,\n out_channels,\n kernel_size,\n True,\n 2,\n stride=stride,\n padding=padding,\n dilation=dilation,\n output_padding=output_padding,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x, output_size=None):\n if output_size is None:\n output_padding = self.output_padding\n else:\n output_padding = 0\n\n out = F.conv2d_transpose(x,\n self.weight,\n bias=self.bias,\n padding=self._padding,\n output_padding=output_padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n output_size=output_size,\n data_format=self._data_format)\n return out\n\n\nclass Conv3D(_ConvNd):\n r\"\"\"\n **Convlution3d Layer**\n The convolution3d layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input(Input) and\n Output(Output) are multidimensional tensors with a shape of \n :math:`[N, C, D, H, W]` . Where N is batch size, C is the number of\n channels, D is the depth of the feature, H is the height of the feature,\n and W is the width of the feature. Convlution3D is similar with Convlution2D\n but adds one dimension(depth). If bias attribution and activation type are\n provided, bias is added to the output of the convolution, and the\n corresponding activation function is applied to the final result.\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 1-D tensor with shape [M].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Parameters:\n in_channels(int): The number of input channels in the input image.\n out_channels(int): The number of output channels produced by the convolution.\n kernel_size(int|list|tuple, optional): The size of the convolving kernel.\n stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). 
Otherwise, the\n stride_D = stride_H = stride_W = stride. The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1.\n padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d. If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as param_attr. If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCDHW\" or \"NDHWC\". Default: \"NCDHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n - weight: :math:`(C_{out}, C_{in}, K_{d}, K_{h}, K_{w})`\n\n - bias: :math:`(C_{out})`\n\n - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\\_size[0] - 1) + 1))}{strides[0]} + 1\n\n H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\\_size[1] - 1) + 1))}{strides[1]} + 1\n\n W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\\_size[2] - 1) + 1))}{strides[2]} + 1\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n\n x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)\n \n conv = nn.Conv3D(4, 6, (3, 3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 6, 6, 6)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCDHW\"):\n super(Conv3D, self).__init__(in_channels,\n out_channels,\n kernel_size,\n False,\n 3,\n stride=stride,\n padding=padding,\n padding_mode=padding_mode,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x):\n if self._padding_mode != 'zeros':\n x = F.pad(x,\n self._reversed_padding_repeated_twice,\n mode=self._padding_mode,\n data_format=self._data_format)\n\n out = F.conv._conv_nd(x,\n self.weight,\n bias=self.bias,\n stride=self._stride,\n padding=self._updated_padding,\n padding_algorithm=self._padding_algorithm,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format,\n channel_dim=self._channel_dim,\n op_type=self._op_type,\n use_cudnn=self._use_cudnn)\n return out\n\n\nclass Conv3DTranspose(_ConvNd):\n r\"\"\"\n **Convlution3D transpose layer**\n The convolution3D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input(Input) and output(Output)\n are in NCDHW format. Where N is batch size, C is the number of channels,\n D is the depth of the feature, H is the height of the feature, and W\n is the width of the feature. Parameters(dilations, strides, paddings) are\n two elements. These two elements represent height and width, respectively.\n The details of convolution transpose layer, please refer to the following\n explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n For each input :math:`X`, the equation is:\n \n .. math::\n\n Out = \\sigma (W \\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW format.\n * :math:`W`: Filter value, a tensor with CMDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 1-D tensor with shape [M].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n **Note**:\n\n The conv3d_transpose can be seen as the backward of the conv3d. 
For conv3d,\n when stride > 1, conv3d maps multiple input shape to the same output shape, \n so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.\n If output_size is None, :math:`H_{out} = H^\\prime_{out}, :math:`H_{out} = \\\n H^\\prime_{out}, W_{out} = W^\\prime_{out}`; else, the :math:`D_{out}` of the output \n size must between :math:`D^\\prime_{out}` and :math:`D^\\prime_{out} + strides[0]`, \n the :math:`H_{out}` of the output size must between :math:`H^\\prime_{out}` \n and :math:`H^\\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must \n between :math:`W^\\prime_{out}` and :math:`W^\\prime_{out} + strides[2]`, \n conv3d_transpose can compute the kernel size automatically.\n\n Parameters:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of channels produced by the convolution.\n kernel_size(int|list|tuple): The kernel size. If kernel_size is a list/tuple,\n it must contain three integers, (kernel_size_D, kernel_size_H, kernel_size_W).\n Otherwise, the kernel will be a square.\n stride(int|list|tuple, optional): The stride size. It means the stride in transposed convolution. \n If stride is a list/tuple, it must contain three integers, (stride_depth, stride_height, \n stride_width). Otherwise, stride_depth = stride_height = stride_width = stride. \n The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n output_padding(int|list|tuple, optional): Additional size added to one side\n of each dimension in the output shape. Default: 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n The default value is 1.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. 
The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCDHW\" or \"NDHWC\". Default: \"NCDHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n - weight: :math:`(C_{in}, C_{out}, K_{d}, K_{h}, K_{w})`\n\n - bias: :math:`(C_{out})`\n\n - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D^\\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\\_size[0] - 1) + 1\n \n H^\\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\\_size[1] - 1) + 1\n \n W^\\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (kernel\\_size[2] - 1) + 1\n \n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n Examples:\n\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n\n x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)\n \n conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 10, 10, 10)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCDHW\"):\n super(Conv3DTranspose, self).__init__(in_channels,\n out_channels,\n kernel_size,\n True,\n 3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n output_padding=output_padding,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x, output_size=None):\n if output_size is None:\n output_padding = self.output_padding\n else:\n output_padding = 0\n\n out = F.conv3d_transpose(x,\n self.weight,\n bias=self.bias,\n padding=self._padding,\n output_padding=output_padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n output_size=output_size,\n data_format=self._data_format)\n return out\n"
] | [
[
"numpy.prod"
]
] |
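The only external API recorded for the Paddle convolution-layer file in the row above is `numpy.prod`, which the file uses to compute the number of elements in one filter when deriving the standard deviation of its default `Normal(0, std)` weight initializer. The sketch below is not part of the dataset row; it reproduces that calculation in plain NumPy. The variable name `filter_elem_num` follows the file's own convention, while the wrapper function `default_init_std` is a hypothetical helper added here for illustration only.

```python
# Illustrative sketch (not from the dataset row above): how the Paddle conv
# layers use numpy.prod to derive the std of the default weight initializer.
# The Normal initializer itself is replaced by returning the std value.
import numpy as np

def default_init_std(num_channels, kernel_size):
    # Elements in one filter: C_in * prod(kernel dims), as in the source file
    filter_elem_num = num_channels * np.prod(kernel_size)
    # He-style scaling used by the file: std = sqrt(2 / fan_in)
    return (2.0 / filter_elem_num) ** 0.5

print(default_init_std(4, (3, 3)))  # ~0.2357 for a 4-channel 3x3 kernel
```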
serazing/xscale | [
"a804866aa6f6a5a0f293a7f6765ea17403159134"
] | [
"xscale/signal/tests/test_fitting.py"
] | [
"# Python 2/3 compatibility\nfrom __future__ import absolute_import, division, print_function\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport xscale.signal.fitting as xfit\n\ndef test_polyfit():\n\tNt, Nx, Ny = 100, 128, 128\n\trand = xr.DataArray(np.random.rand(Nt, Nx, Ny), dims=['time', 'x', 'y'])\n\tslopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * rand.x / Nx), dims=['x'])\n\ttruth = rand + slopes * rand.time\n\ttruth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\tlinfit = xfit.polyfit(truth, dim='time').load()\n\txfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()\n\tassert np.allclose(linfit.sel(degree=1).mean(dim='y').data, slopes.data,\n\t rtol=5e-2, atol=1e-3)\n\ndef test_linreg():\n\tnt, nx, ny = 100, 128, 128\n\toffset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])\n\tslopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])\n\ttruth = offset + slopes * offset.time\n\ttruth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\txfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()\n\tslopes_fitted, offsets_fitted = xfit.linreg(truth, dim='time')\n\tassert np.allclose(slopes, slopes_fitted.mean(dim='y').load())\n\tassert np.allclose(offset, offsets_fitted.mean(dim='y').load())\n\ndef test_trend():\n\tnt, nx, ny = 100, 128, 128\n\toffset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])\n\tslopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])\n\ttruth = offset + slopes * offset.time\n\ttruth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\ttrend_mean = xfit.trend(offset, dim='time', type='constant')\n\ttrend_linear = xfit.trend(truth, dim='time', type='linear')\n\tassert np.allclose(offset, trend_mean.load())\n\tassert np.allclose(truth, trend_linear.load())\n\ndef test_detrend():\n\tnt, nx, ny = 100, 128, 128\n\toffset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])\n\tslopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])\n\ttruth = offset + slopes * offset.time\n\ttruth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\tassert np.allclose(0 * offset, xfit.detrend(offset, dim='time',\n\t type='constant').load())\n\tassert np.allclose(0 * offset, xfit.detrend(truth, dim='time',\n\t type='linear').load())\n\ndef test_sinfit():\n\tNt, Nx, Ny = 100, 128, 128\n\tzeros = xr.DataArray(np.zeros((Nt, Nx, Ny)), dims=['time', 'x', 'y'])\n\tzeros = zeros.assign_coords(time=pd.date_range(start='2011-01-01',\n\t periods=100, freq='H'))\n\toffset = 0.4\n\tamp1, phi1 = 1.2, 0.\n\twave1 = amp1 * np.sin(2 * np.pi * zeros['time.hour'] / 24. +\n\t phi1 * np.pi / 180.)\n\tamp2, phi2 = 1.9, 60.\n\twave2 = amp2 * np.sin(2 * np.pi * zeros['time.hour'] / 12. 
+\n\t phi2 * np.pi / 180.)\n\ttruth = offset + zeros + wave1 + wave2\n\ttruth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\t# Fit both waves\n\tfit2w = xfit.sinfit(truth, dim='time', periods=[24, 12], unit='h').load()\n\tassert np.isclose(fit2w['amplitude'].sel(periods=24).isel(x=10, y=10), amp1)\n\tassert np.isclose(fit2w['phase'].sel(periods=24).isel(x=10, y=10), phi1,\n\t atol=1e-4)\n\tassert np.isclose(fit2w['amplitude'].sel(periods=12).isel(x=10, y=10), amp2)\n\tassert np.isclose(fit2w['phase'].sel(periods=12).isel(x=10, y=10), phi2)\n\tassert np.isclose(fit2w['offset'].isel(x=10, y=10), offset)\n\t# Fit only one wave (wave2)\n\tfit1w = xfit.sinfit(truth, dim='time', periods=12, unit='h').load()\n\t# Compare with 5% relative tolerance (error induced by wave1)\n\tassert np.isclose(fit1w['amplitude'].sel(periods=12).isel(x=10, y=10),\n\t amp2, rtol=5e-2)\n\tassert np.isclose(fit1w['phase'].sel(periods=12).isel(x=10, y=10),\n\t phi2, rtol=5e-2)\n\t# Fit only one dimensional data\n\txfit.sinfit(truth.isel(x=0, y=0), dim='time',\n\t periods=[24, 12],\n\t unit='h').load()\n\n\ndef test_sinval():\n\tNt, Nx, Ny = 100, 128, 128\n\toffset = 0.4\n\tperiods = [24., 12.]\n\tamp1, phi1 = 1.2, 0.\n\tamp2, phi2 = 1.9, 60.\n\ttime = xr.DataArray(pd.date_range(start='2011-01-01',\n\t periods=Nt,\n\t freq='H'),\n\t dims='time')\n\tamp = xr.DataArray([amp1, amp2], dims='periods')\n\tphi = xr.DataArray([phi1, phi2], dims='periods')\n\tones = xr.DataArray(np.ones((Nx, Ny)), dims=['x', 'y'])\n\tvar_dict = {'amplitude': amp * ones,\n\t 'phase': phi * ones,\n\t 'offset': offset * ones}\n\tds = xr.Dataset(var_dict).chunk(chunks={'x': 50, 'y': 50})\n\tds = ds.assign_coords(periods=periods)\n\tds['periods'].attrs['units'] = 'h'\n\txfit.sinval(ds, time)\n\t#One mode reconstruction\n\txfit.sinval(ds.sel(periods=[24,]), time)\n\n\ndef test_order_and_stack():\n\trand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])\n\trand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\trand_stacked = xfit._order_and_stack(rand, 'y')\n\tassert rand_stacked.dims[0] is 'y'\n\tassert rand_stacked.dims[-1] is 'temp_dim'\n\tassert rand_stacked.shape[-1] == 128 * 100\n\t# Test the exception for 1d array\n\trand1d = rand.isel(time=0, x=0)\n\trand1d_stacked = xfit._order_and_stack(rand1d, 'y')\n\tassert np.array_equal(rand1d_stacked, rand1d)\n\n\ndef test_unstack():\n\trand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])\n\trand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})\n\trand_stacked = xfit._order_and_stack(rand, 'y')\n\trand_unstacked = xfit._unstack(rand_stacked.mean(dim='y'))\n\tassert rand_unstacked.dims == ('time', 'x')\n\tassert rand_unstacked.shape == (100, 128)"
] | [
[
"numpy.ones",
"pandas.date_range",
"numpy.zeros",
"numpy.cos",
"numpy.random.rand",
"numpy.array_equal",
"numpy.sin"
]
] |
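The xscale test file in the row above calls only standard NumPy and pandas functions to build synthetic signals (random noise plus a linear trend in time plus sinusoidal waves on an hourly axis), which it then fits with the package's own routines. The sketch below is a minimal reconstruction of that signal-building pattern using the APIs listed in the row; it uses plain ndarrays rather than the xarray DataArrays the tests wrap them in, and the slope (0.02), amplitude (1.2), and 24-hour period follow the test file's values. The fitting functions themselves (`xfit.*`) are intentionally not used here.

```python
# Illustrative sketch (not from the dataset): a trend + diurnal-sine signal of
# the kind the xscale tests fit, built only from the NumPy/pandas calls listed
# in the row above. Shapes: (nt time steps, nx spatial points).
import numpy as np
import pandas as pd

nt, nx = 100, 16
hours = pd.date_range('2011-01-01', periods=nt, freq='H').hour.values
trend = 0.02 * np.cos(2 * np.pi * np.arange(nx) / nx)   # per-point slope
noise = np.random.rand(nt, nx)                          # random background
wave = 1.2 * np.sin(2 * np.pi * hours / 24.0)           # 24 h harmonic

signal = noise + np.arange(nt)[:, None] * trend[None, :] + wave[:, None]
assert np.array_equal(signal.shape, (nt, nx))
```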
OlegBezverhii/python-notebooks | [
"5d4b501173a2f3519bff9a085c3d2190ce6cf808"
] | [
"webcams/eye_status.py"
] | [
"import os\nfrom PIL import Image\nimport numpy as np\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import model_from_json\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom imageio import imread, imwrite\nfrom skimage.transform import resize\n\nIMG_SIZE = 24\n\ndef collect():\n\ttrain_datagen = ImageDataGenerator(\n\t\t\trescale=1./255,\n\t\t\tshear_range=0.2,\n\t\t\thorizontal_flip=True, \n\t\t)\n\n\tval_datagen = ImageDataGenerator(\n\t\t\trescale=1./255,\n\t\t\tshear_range=0.2,\n\t\t\thorizontal_flip=True,\t\t)\n\n\ttrain_generator = train_datagen.flow_from_directory(\n\t directory=\"dataset/train\",\n\t target_size=(IMG_SIZE, IMG_SIZE),\n\t color_mode=\"grayscale\",\n\t batch_size=32,\n\t class_mode=\"binary\",\n\t shuffle=True,\n\t seed=42\n\t)\n\n\tval_generator = val_datagen.flow_from_directory(\n\t directory=\"dataset/val\",\n\t target_size=(IMG_SIZE, IMG_SIZE),\n\t color_mode=\"grayscale\",\n\t batch_size=32,\n\t class_mode=\"binary\",\n\t shuffle=True,\n\t seed=42\n\t)\n\treturn train_generator, val_generator\n\n\ndef save_model(model):\n\tmodel_json = model.to_json()\n\twith open(\"model.json\", \"w\") as json_file:\n\t\tjson_file.write(model_json)\n\t# serialize weights to HDF5\n\tmodel.save_weights(\"model.h5\")\n\ndef load_model():\n\tjson_file = open('model.json', 'r')\n\tloaded_model_json = json_file.read()\n\tjson_file.close()\n\tloaded_model = model_from_json(loaded_model_json)\n\t# load weights into new model\n\tloaded_model.load_weights(\"model.h5\")\n\tloaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn loaded_model\n\ndef train(train_generator, val_generator):\n\tSTEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size\n\tSTEP_SIZE_VALID=val_generator.n//val_generator.batch_size\n\n\tprint('[LOG] Intialize Neural Network')\n\t\n\tmodel = Sequential()\n\n\tmodel.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE,IMG_SIZE,1)))\n\tmodel.add(AveragePooling2D())\n\n\tmodel.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))\n\tmodel.add(AveragePooling2D())\n\n\tmodel.add(Flatten())\n\n\tmodel.add(Dense(units=120, activation='relu'))\n\n\tmodel.add(Dense(units=84, activation='relu'))\n\n\tmodel.add(Dense(units=1, activation = 'sigmoid'))\n\n\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\tmodel.fit_generator(generator=train_generator,\n\t steps_per_epoch=STEP_SIZE_TRAIN,\n\t validation_data=val_generator,\n\t validation_steps=STEP_SIZE_VALID,\n\t epochs=20\n\t)\n\tsave_model(model)\n\ndef predict(img, model):\n\timg = Image.fromarray(img, 'RGB').convert('L')\n\tprint(img)\n\timg = resize(img, (IMG_SIZE,IMG_SIZE)).astype('float32')/255\n\tprint(img)\n\timg = img.reshape(1,IMG_SIZE,IMG_SIZE,1)\n\tprediction = model.predict(img)\n\tif prediction < 0.1:\n\t\tprediction = 'closed'\n\telif prediction > 0.9:\n\t\tprediction = 'open'\n\telse:\n\t\tprediction = 'idk'\n\treturn prediction\n\ndef evaluate(X_test, y_test):\n\tmodel = load_model()\n\tprint('Evaluate model')\n\tloss, acc = model.evaluate(X_test, y_test, verbose = 0)\n\tprint(acc * 100)\n\nif __name__ == '__main__':\t\n\ttrain_generator , val_generator = collect()\n\ttrain(train_generator,val_generator)\n"
] | [
[
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.models.model_from_json"
]
] |
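The eye-status file in the row above assembles a small LeNet-style binary classifier from the Keras APIs listed (Sequential, Conv2D, AveragePooling2D, Flatten, Dense). The sketch below reproduces just that architecture, assuming TensorFlow 2.x and the file's 24x24 grayscale input (`IMG_SIZE = 24`); the data generators, training loop, and JSON weight (de)serialization from the file are omitted, and passing the layer list to `Sequential` is a stylistic variant of the file's repeated `model.add` calls.

```python
# Illustrative sketch (not from the dataset): the LeNet-style open/closed eye
# classifier built with the Keras APIs listed in the row above.
# Assumes TensorFlow 2.x; IMG_SIZE follows the source file's constant.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, AveragePooling2D, Flatten, Dense

IMG_SIZE = 24  # 24x24 grayscale eye crops, as in the source file

model = Sequential([
    Conv2D(filters=6, kernel_size=(3, 3), activation='relu',
           input_shape=(IMG_SIZE, IMG_SIZE, 1)),
    AveragePooling2D(),
    Conv2D(filters=16, kernel_size=(3, 3), activation='relu'),
    AveragePooling2D(),
    Flatten(),
    Dense(120, activation='relu'),
    Dense(84, activation='relu'),
    Dense(1, activation='sigmoid'),  # binary open/closed output
])
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()
```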
KustomApe/ksie | [
"d6f97d0298d04d06788563546c66ff50c6bb2d31"
] | [
".history/spider/pokemon_spider_20201213130808.py"
] | [
"from selenium import webdriver\nimport pandas as pd\nimport time\n\n\"\"\"[注意事項]\nrobot.txtを必ず読んで、ルールに沿った形でクローリングするように気をつけてください。\nあくまで自己責任でお願いできればと思います。\n\"\"\"\n\n\"\"\"[Initial Setting]\n初期設定\n\"\"\"\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headeless')\noptions.add_argument('--disable-gpu')\noptions.add_argument('--lang-ja')\nbrowser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')\ndf = pd.DataFrame(columns=['ranking', 'name', 'image'])\nurl = 'https://swsh.pokedb.tokyo/pokemon/list/'\n\n\"\"\"[CSS Selector Setting]\nCSSセレクターの設定\n\"\"\"\nPAGER_NEXT = \"li.select-page.arrow a[rel='next']\"\nPOSTS = \".product-item-list__item\"\nRANKING = \".pokemon-ranking-rank\"\nNAME = \".product-item-list__item-name\"\nIMAGE = \".product-item-list__item-image img\"\nPRICE = \".product-item-list__item-price\"\nCATEGORY = \".product-item-list__item-category\"\nCAR = \".product-item-list__item-car-name\"\n\n\"\"\"[Activate Section]\n実行部分\n\"\"\"\nbrowser.get(url)\n\nwhile True: #Continue until getting the last page.\n if len(browser.find_elements_by_css_selector(PAGER_NEXT)) > 0:\n print('Starting to get posts...')\n posts = browser.find_elements_by_css_selector(POSTS)\n print(len(posts))\n for post in posts:\n try:\n name = post.find_element_by_css_selector(PRODUCT_NAME).text\n print(name)\n thumbnailURL = post.find_element_by_css_selector(IMAGE).get_attribute('src')\n print(thumbnailURL)\n price = post.find_element_by_css_selector(PRICE).text\n print(price)\n category = post.find_element_by_css_selector(CATEGORY).text\n print(category)\n car = post.find_element_by_css_selector(CAR).text\n print(car)\n se = pd.Series([name, thumbnailURL, price, category, car], ['name', 'image', 'price', 'category', 'car'])\n df = df.append(se, ignore_index=True)\n except Exception as e:\n print(e)\n btn = browser.find_element_by_css_selector(PAGER_NEXT).get_attribute('href')\n print('next url:{}'.format(btn))\n time.sleep(3)\n browser.get(btn)\n print('Moving to next page.')\n else:\n print('No pager exist anymore...')\n break\n\nprint('Finished Crawling. Writing out to CSV file...')\ndf.to_csv('car_parts.csv')\nprint('Done')\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] |
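The scraper in the row above records only `pandas.Series` and `pandas.DataFrame` as external APIs: each scraped post is packed into a Series, appended to a DataFrame, and the result is written to CSV. The sketch below shows that accumulation pattern with the Selenium parts omitted. The column names follow the scraper's own list, the example record is an invented stand-in, and `pd.concat` is used in place of the `DataFrame.append` call in the source, since `append` was removed in pandas 2.0.

```python
# Illustrative sketch (not from the dataset): accumulating scraped rows into a
# DataFrame as the spider above does, but via pd.concat instead of the
# DataFrame.append used in the source (removed in pandas 2.0).
import pandas as pd

columns = ['name', 'image', 'price', 'category', 'car']
df = pd.DataFrame(columns=columns)

scraped_posts = [  # hypothetical stand-ins for the Selenium results
    {'name': 'Front bumper', 'image': 'https://example.com/a.jpg',
     'price': '12000', 'category': 'Exterior', 'car': 'Model A'},
]

for post in scraped_posts:
    se = pd.Series([post[c] for c in columns], index=columns)
    df = pd.concat([df, se.to_frame().T], ignore_index=True)

df.to_csv('car_parts.csv', index=False)
```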
sumanmichael/lightning-flash | [
"4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db"
] | [
"flash/core/data/data_module.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport platform\nfrom typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nfrom pytorch_lightning.trainer.states import RunningStage\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.dataset import IterableDataset, Subset\nfrom torch.utils.data.sampler import Sampler\n\nimport flash\nfrom flash.core.data.auto_dataset import BaseAutoDataset, IterableAutoDataset\nfrom flash.core.data.base_viz import BaseVisualization\nfrom flash.core.data.callback import BaseDataFetcher\nfrom flash.core.data.data_pipeline import DataPipeline, DefaultPreprocess, Postprocess, Preprocess\nfrom flash.core.data.data_source import DataSource, DefaultDataSources\nfrom flash.core.data.splits import SplitDataset\nfrom flash.core.data.utils import _STAGES_PREFIX\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, requires\n\nif _FIFTYONE_AVAILABLE and TYPE_CHECKING:\n from fiftyone.core.collections import SampleCollection\nelse:\n SampleCollection = None\n\n\nclass DataModule(pl.LightningDataModule):\n \"\"\"A basic DataModule class for all Flash tasks. This class includes references to a\n :class:`~flash.core.data.data_source.DataSource`, :class:`~flash.core.data.process.Preprocess`,\n :class:`~flash.core.data.process.Postprocess`, and a :class:`~flash.core.data.callback.BaseDataFetcher`.\n\n Args:\n train_dataset: Dataset for training. Defaults to None.\n val_dataset: Dataset for validating model performance during training. Defaults to None.\n test_dataset: Dataset to test model performance. Defaults to None.\n predict_dataset: Dataset for predicting. Defaults to None.\n data_source: The :class:`~flash.core.data.data_source.DataSource` that was used to create the datasets.\n preprocess: The :class:`~flash.core.data.process.Preprocess` to use when constructing the\n :class:`~flash.core.data.data_pipeline.DataPipeline`. If ``None``, a\n :class:`~flash.core.data.process.DefaultPreprocess` will be used.\n postprocess: The :class:`~flash.core.data.process.Postprocess` to use when constructing the\n :class:`~flash.core.data.data_pipeline.DataPipeline`. If ``None``, a plain\n :class:`~flash.core.data.process.Postprocess` will be used.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to attach to the\n :class:`~flash.core.data.process.Preprocess`. If ``None``, the output from\n :meth:`~flash.core.data.data_module.DataModule.configure_data_fetcher` will be used.\n val_split: An optional float which gives the relative amount of the training dataset to use for the validation\n dataset.\n batch_size: The batch size to be used by the DataLoader. 
Defaults to 1.\n num_workers: The number of workers to use for parallelized loading.\n Defaults to None which equals the number of available CPU threads,\n or 0 for Windows or Darwin platform.\n sampler: A sampler following the :class:`~torch.utils.data.sampler.Sampler` type.\n Will be passed to the DataLoader for the training dataset. Defaults to None.\n \"\"\"\n\n preprocess_cls = DefaultPreprocess\n postprocess_cls = Postprocess\n\n def __init__(\n self,\n train_dataset: Optional[Dataset] = None,\n val_dataset: Optional[Dataset] = None,\n test_dataset: Optional[Dataset] = None,\n predict_dataset: Optional[Dataset] = None,\n data_source: Optional[DataSource] = None,\n preprocess: Optional[Preprocess] = None,\n postprocess: Optional[Postprocess] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n ) -> None:\n\n super().__init__()\n\n if flash._IS_TESTING and torch.cuda.is_available():\n batch_size = 16\n\n self._data_source: DataSource = data_source\n self._preprocess: Optional[Preprocess] = preprocess\n self._postprocess: Optional[Postprocess] = postprocess\n self._viz: Optional[BaseVisualization] = None\n self._data_fetcher: Optional[BaseDataFetcher] = data_fetcher or self.configure_data_fetcher()\n\n # TODO: Preprocess can change\n self.data_fetcher.attach_to_preprocess(self.preprocess)\n\n self._train_ds = train_dataset\n self._val_ds = val_dataset\n self._test_ds = test_dataset\n self._predict_ds = predict_dataset\n\n if self._train_ds is not None and (val_split is not None and self._val_ds is None):\n self._train_ds, self._val_ds = self._split_train_val(self._train_ds, val_split)\n\n if self._train_ds:\n self.train_dataloader = self._train_dataloader\n\n if self._val_ds:\n self.val_dataloader = self._val_dataloader\n\n if self._test_ds:\n self.test_dataloader = self._test_dataloader\n\n if self._predict_ds:\n self.predict_dataloader = self._predict_dataloader\n\n self.batch_size = batch_size\n\n # TODO: figure out best solution for setting num_workers\n if num_workers is None:\n if platform.system() in (\"Darwin\", \"Windows\"):\n num_workers = 0\n else:\n num_workers = os.cpu_count()\n self.num_workers = num_workers\n self.sampler = sampler\n\n self.set_running_stages()\n\n @property\n def train_dataset(self) -> Optional[Dataset]:\n \"\"\"This property returns the train dataset.\"\"\"\n return self._train_ds\n\n @property\n def val_dataset(self) -> Optional[Dataset]:\n \"\"\"This property returns the validation dataset.\"\"\"\n return self._val_ds\n\n @property\n def test_dataset(self) -> Optional[Dataset]:\n \"\"\"This property returns the test dataset.\"\"\"\n return self._test_ds\n\n @property\n def predict_dataset(self) -> Optional[Dataset]:\n \"\"\"This property returns the predict dataset.\"\"\"\n return self._predict_ds\n\n @property\n def viz(self) -> BaseVisualization:\n return self._viz or DataModule.configure_data_fetcher()\n\n @viz.setter\n def viz(self, viz: BaseVisualization) -> None:\n self._viz = viz\n\n @staticmethod\n def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:\n \"\"\"This function is used to configure a :class:`~flash.core.data.callback.BaseDataFetcher`.\n\n Override with your custom one.\n \"\"\"\n return BaseDataFetcher()\n\n @property\n def data_fetcher(self) -> BaseDataFetcher:\n return self._data_fetcher or DataModule.configure_data_fetcher()\n\n @data_fetcher.setter\n def data_fetcher(self, 
data_fetcher: BaseDataFetcher) -> None:\n self._data_fetcher = data_fetcher\n\n def _reset_iterator(self, stage: str) -> Iterable[Any]:\n iter_name = f\"_{stage}_iter\"\n # num_workers has to be set to 0 to work properly\n num_workers = self.num_workers\n self.num_workers = 0\n dataloader_fn = getattr(self, f\"{stage}_dataloader\")\n iterator = iter(dataloader_fn())\n self.num_workers = num_workers\n setattr(self, iter_name, iterator)\n return iterator\n\n def _show_batch(self, stage: str, func_names: Union[str, List[str]], reset: bool = True) -> None:\n \"\"\"This function is used to handle transforms profiling for batch visualization.\"\"\"\n # don't show in CI\n if os.getenv(\"FLASH_TESTING\", \"0\") == \"1\":\n return None\n iter_name = f\"_{stage}_iter\"\n\n if not hasattr(self, iter_name):\n self._reset_iterator(stage)\n\n # list of functions to visualise\n if isinstance(func_names, str):\n func_names = [func_names]\n\n iter_dataloader = getattr(self, iter_name)\n with self.data_fetcher.enable():\n if reset:\n self.data_fetcher.batches[stage] = {}\n try:\n _ = next(iter_dataloader)\n except StopIteration:\n iter_dataloader = self._reset_iterator(stage)\n _ = next(iter_dataloader)\n data_fetcher: BaseVisualization = self.data_fetcher\n data_fetcher._show(stage, func_names)\n if reset:\n self.data_fetcher.batches[stage] = {}\n\n def show_train_batch(self, hooks_names: Union[str, List[str]] = \"load_sample\", reset: bool = True) -> None:\n \"\"\"This function is used to visualize a batch from the train dataloader.\"\"\"\n stage_name: str = _STAGES_PREFIX[RunningStage.TRAINING]\n self._show_batch(stage_name, hooks_names, reset=reset)\n\n def show_val_batch(self, hooks_names: Union[str, List[str]] = \"load_sample\", reset: bool = True) -> None:\n \"\"\"This function is used to visualize a batch from the validation dataloader.\"\"\"\n stage_name: str = _STAGES_PREFIX[RunningStage.VALIDATING]\n self._show_batch(stage_name, hooks_names, reset=reset)\n\n def show_test_batch(self, hooks_names: Union[str, List[str]] = \"load_sample\", reset: bool = True) -> None:\n \"\"\"This function is used to visualize a batch from the test dataloader.\"\"\"\n stage_name: str = _STAGES_PREFIX[RunningStage.TESTING]\n self._show_batch(stage_name, hooks_names, reset=reset)\n\n def show_predict_batch(self, hooks_names: Union[str, List[str]] = \"load_sample\", reset: bool = True) -> None:\n \"\"\"This function is used to visualize a batch from the predict dataloader.\"\"\"\n stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]\n self._show_batch(stage_name, hooks_names, reset=reset)\n\n @staticmethod\n def get_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, default: Optional[Any] = None) -> Any:\n if isinstance(dataset, Subset):\n return getattr(dataset.dataset, attr_name, default)\n\n return getattr(dataset, attr_name, default)\n\n @staticmethod\n def set_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, value: Any) -> None:\n if isinstance(dataset, Subset):\n dataset = dataset.dataset\n if isinstance(dataset, (Dataset, IterableDataset)):\n setattr(dataset, attr_name, value)\n\n def set_running_stages(self):\n if self._train_ds:\n self.set_dataset_attribute(self._train_ds, \"running_stage\", RunningStage.TRAINING)\n\n if self._val_ds:\n self.set_dataset_attribute(self._val_ds, \"running_stage\", RunningStage.VALIDATING)\n\n if self._test_ds:\n self.set_dataset_attribute(self._test_ds, \"running_stage\", RunningStage.TESTING)\n\n if self._predict_ds:\n 
self.set_dataset_attribute(self._predict_ds, \"running_stage\", RunningStage.PREDICTING)\n\n def _resolve_collate_fn(self, dataset: Dataset, running_stage: RunningStage) -> Optional[Callable]:\n if isinstance(dataset, (BaseAutoDataset, SplitDataset)):\n return self.data_pipeline.worker_preprocessor(running_stage)\n\n def _train_dataloader(self) -> DataLoader:\n train_ds: Dataset = self._train_ds() if isinstance(self._train_ds, Callable) else self._train_ds\n shuffle: bool = False\n collate_fn = self._resolve_collate_fn(train_ds, RunningStage.TRAINING)\n if isinstance(train_ds, IterableAutoDataset):\n drop_last = False\n else:\n drop_last = len(train_ds) > self.batch_size\n pin_memory = True\n\n if self.sampler is None:\n shuffle = not isinstance(train_ds, (IterableDataset, IterableAutoDataset))\n\n if isinstance(getattr(self, \"trainer\", None), pl.Trainer):\n return self.trainer.lightning_module.process_train_dataset(\n train_ds,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n shuffle=shuffle,\n drop_last=drop_last,\n collate_fn=collate_fn,\n sampler=self.sampler,\n )\n\n return DataLoader(\n train_ds,\n batch_size=self.batch_size,\n shuffle=shuffle,\n sampler=self.sampler,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n drop_last=drop_last,\n collate_fn=collate_fn,\n )\n\n def _val_dataloader(self) -> DataLoader:\n val_ds: Dataset = self._val_ds() if isinstance(self._val_ds, Callable) else self._val_ds\n collate_fn = self._resolve_collate_fn(val_ds, RunningStage.VALIDATING)\n pin_memory = True\n\n if isinstance(getattr(self, \"trainer\", None), pl.Trainer):\n return self.trainer.lightning_module.process_val_dataset(\n val_ds,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n )\n\n return DataLoader(\n val_ds,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n )\n\n def _test_dataloader(self) -> DataLoader:\n test_ds: Dataset = self._test_ds() if isinstance(self._test_ds, Callable) else self._test_ds\n collate_fn = self._resolve_collate_fn(test_ds, RunningStage.TESTING)\n pin_memory = True\n\n if isinstance(getattr(self, \"trainer\", None), pl.Trainer):\n return self.trainer.lightning_module.process_test_dataset(\n test_ds,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n )\n\n return DataLoader(\n test_ds,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n )\n\n def _predict_dataloader(self) -> DataLoader:\n predict_ds: Dataset = self._predict_ds() if isinstance(self._predict_ds, Callable) else self._predict_ds\n if isinstance(predict_ds, IterableAutoDataset):\n batch_size = self.batch_size\n else:\n batch_size = min(self.batch_size, len(predict_ds) if len(predict_ds) > 0 else 1)\n\n collate_fn = self._resolve_collate_fn(predict_ds, RunningStage.PREDICTING)\n pin_memory = True\n\n if isinstance(getattr(self, \"trainer\", None), pl.Trainer):\n return self.trainer.lightning_module.process_test_dataset(\n predict_ds,\n batch_size=batch_size,\n num_workers=self.num_workers,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n )\n\n return DataLoader(\n predict_ds, batch_size=batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=collate_fn\n )\n\n @property\n def num_classes(self) -> Optional[int]:\n n_cls_train = getattr(self.train_dataset, \"num_classes\", None)\n n_cls_val 
= getattr(self.val_dataset, \"num_classes\", None)\n n_cls_test = getattr(self.test_dataset, \"num_classes\", None)\n return n_cls_train or n_cls_val or n_cls_test\n\n @property\n def multi_label(self) -> Optional[bool]:\n multi_label_train = getattr(self.train_dataset, \"multi_label\", None)\n multi_label_val = getattr(self.val_dataset, \"multi_label\", None)\n multi_label_test = getattr(self.test_dataset, \"multi_label\", None)\n return multi_label_train or multi_label_val or multi_label_test\n\n @property\n def data_source(self) -> Optional[DataSource]:\n return self._data_source\n\n @property\n def preprocess(self) -> Preprocess:\n return self._preprocess or self.preprocess_cls()\n\n @property\n def postprocess(self) -> Postprocess:\n return self._postprocess or self.postprocess_cls()\n\n @property\n def data_pipeline(self) -> DataPipeline:\n return DataPipeline(self.data_source, self.preprocess, self.postprocess)\n\n def available_data_sources(self) -> Sequence[str]:\n \"\"\"Get the list of available data source names for use with this\n :class:`~flash.core.data.data_module.DataModule`.\n\n Returns:\n The list of data source names.\n \"\"\"\n return self.preprocess.available_data_sources()\n\n @staticmethod\n def _split_train_val(\n train_dataset: Dataset,\n val_split: float,\n ) -> Tuple[Any, Any]:\n\n if not isinstance(val_split, float) or (isinstance(val_split, float) and val_split > 1 or val_split < 0):\n raise MisconfigurationException(f\"`val_split` should be a float between 0 and 1. Found {val_split}.\")\n\n if isinstance(train_dataset, IterableAutoDataset):\n raise MisconfigurationException(\n \"`val_split` should be `None` when the dataset is built with an IterableDataset.\"\n )\n\n val_num_samples = int(len(train_dataset) * val_split)\n indices = list(range(len(train_dataset)))\n np.random.shuffle(indices)\n val_indices = indices[:val_num_samples]\n train_indices = indices[val_num_samples:]\n return (\n SplitDataset(train_dataset, train_indices, use_duplicated_indices=True),\n SplitDataset(train_dataset, val_indices, use_duplicated_indices=True),\n )\n\n @classmethod\n def from_data_source(\n cls,\n data_source: str,\n train_data: Any = None,\n val_data: Any = None,\n test_data: Any = None,\n predict_data: Any = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given inputs to\n :meth:`~flash.core.data.data_source.DataSource.load_data` (``train_data``, ``val_data``, ``test_data``,\n ``predict_data``). 
The data source will be resolved from the instantiated\n :class:`~flash.core.data.process.Preprocess`\n using :meth:`~flash.core.data.process.Preprocess.data_source_of_name`.\n\n Args:\n data_source: The name of the data source to use for the\n :meth:`~flash.core.data.data_source.DataSource.load_data`.\n train_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating\n the train dataset.\n val_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating\n the validation dataset.\n test_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating\n the test dataset.\n predict_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating\n the predict dataset.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls`` will be\n constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. 
Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_data_source(\n DefaultDataSources.FOLDERS,\n train_data=\"train_folder\",\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n preprocess = preprocess or cls.preprocess_cls(\n train_transform,\n val_transform,\n test_transform,\n predict_transform,\n **preprocess_kwargs,\n )\n\n data_source = preprocess.data_source_of_name(data_source)\n\n train_dataset, val_dataset, test_dataset, predict_dataset = data_source.to_datasets(\n train_data,\n val_data,\n test_data,\n predict_data,\n )\n\n return cls(\n train_dataset,\n val_dataset,\n test_dataset,\n predict_dataset,\n data_source=data_source,\n preprocess=preprocess,\n data_fetcher=data_fetcher,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n )\n\n @classmethod\n def from_folders(\n cls,\n train_folder: Optional[str] = None,\n val_folder: Optional[str] = None,\n test_folder: Optional[str] = None,\n predict_folder: Optional[str] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given folders using the\n :class:`~flash.core.data.data_source.DataSource` of name\n :attr:`~flash.core.data.data_source.DefaultDataSources.FOLDERS`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n train_folder: The folder containing the train data.\n val_folder: The folder containing the validation data.\n test_folder: The folder containing the test data.\n predict_folder: The folder containing the predict data.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. 
If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_folders(\n train_folder=\"train_folder\",\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.FOLDERS,\n train_folder,\n val_folder,\n test_folder,\n predict_folder,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n def from_files(\n cls,\n train_files: Optional[Sequence[str]] = None,\n train_targets: Optional[Sequence[Any]] = None,\n val_files: Optional[Sequence[str]] = None,\n val_targets: Optional[Sequence[Any]] = None,\n test_files: Optional[Sequence[str]] = None,\n test_targets: Optional[Sequence[Any]] = None,\n predict_files: Optional[Sequence[str]] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given sequences of files\n using the :class:`~flash.core.data.data_source.DataSource` of name\n :attr:`~flash.core.data.data_source.DefaultDataSources.FILES` from the passed or constructed\n :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n train_files: A sequence of files to use as the train inputs.\n train_targets: A sequence of targets (one per train file) to use as the train targets.\n val_files: A sequence of files to use as the validation inputs.\n val_targets: A sequence of targets (one per validation file) to use as the validation targets.\n test_files: A sequence of files to use as the test inputs.\n test_targets: A sequence of targets (one per test file) to use as the test targets.\n predict_files: A sequence of files to use when predicting.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to 
use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_files(\n train_files=[\"image_1.png\", \"image_2.png\", \"image_3.png\"],\n train_targets=[1, 0, 1],\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.FILES,\n (train_files, train_targets),\n (val_files, val_targets),\n (test_files, test_targets),\n predict_files,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n def from_tensors(\n cls,\n train_data: Optional[Collection[torch.Tensor]] = None,\n train_targets: Optional[Collection[Any]] = None,\n val_data: Optional[Collection[torch.Tensor]] = None,\n val_targets: Optional[Sequence[Any]] = None,\n test_data: Optional[Collection[torch.Tensor]] = None,\n test_targets: Optional[Sequence[Any]] = None,\n predict_data: Optional[Collection[torch.Tensor]] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given tensors using the\n :class:`~flash.core.data.data_source.DataSource`\n of name :attr:`~flash.core.data.data_source.DefaultDataSources.TENSOR`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n train_data: A tensor or collection of tensors to use as the train inputs.\n train_targets: A sequence of targets (one per train input) to use as the train targets.\n val_data: A tensor or collection of tensors to use as the validation inputs.\n val_targets: A sequence of targets (one per validation input) to use as the validation targets.\n test_data: A tensor or collection of tensors to use as the test inputs.\n test_targets: A sequence of targets (one per test input) to use as the test targets.\n predict_data: A tensor or collection of 
tensors to use when predicting.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_tensors(\n train_files=torch.rand(3, 128),\n train_targets=[1, 0, 1],\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.TENSORS,\n (train_data, train_targets),\n (val_data, val_targets),\n (test_data, test_targets),\n predict_data,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n def from_numpy(\n cls,\n train_data: Optional[Collection[np.ndarray]] = None,\n train_targets: Optional[Collection[Any]] = None,\n val_data: Optional[Collection[np.ndarray]] = None,\n val_targets: Optional[Sequence[Any]] = None,\n test_data: Optional[Collection[np.ndarray]] = None,\n test_targets: Optional[Sequence[Any]] = None,\n predict_data: Optional[Collection[np.ndarray]] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given numpy array using the\n :class:`~flash.core.data.data_source.DataSource`\n of name :attr:`~flash.core.data.data_source.DefaultDataSources.NUMPY`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n train_data: A 
numpy array to use as the train inputs.\n train_targets: A sequence of targets (one per train input) to use as the train targets.\n val_data: A numpy array to use as the validation inputs.\n val_targets: A sequence of targets (one per validation input) to use as the validation targets.\n test_data: A numpy array to use as the test inputs.\n test_targets: A sequence of targets (one per test input) to use as the test targets.\n predict_data: A numpy array to use when predicting.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. 
Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_numpy(\n train_files=np.random.rand(3, 128),\n train_targets=[1, 0, 1],\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.NUMPY,\n (train_data, train_targets),\n (val_data, val_targets),\n (test_data, test_targets),\n predict_data,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n def from_json(\n cls,\n input_fields: Union[str, Sequence[str]],\n target_fields: Optional[Union[str, Sequence[str]]] = None,\n train_file: Optional[str] = None,\n val_file: Optional[str] = None,\n test_file: Optional[str] = None,\n predict_file: Optional[str] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n field: Optional[str] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given JSON files using the\n :class:`~flash.core.data.data_source.DataSource`\n of name :attr:`~flash.core.data.data_source.DefaultDataSources.JSON`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n input_fields: The field or fields in the JSON objects to use for the input.\n target_fields: The field or fields in the JSON objects to use for the target.\n train_file: The JSON file containing the training data.\n val_file: The JSON file containing the validation data.\n test_file: The JSON file containing the testing data.\n predict_file: The JSON file containing the data to use when predicting.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. 
If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n field: To specify the field that holds the data in the JSON file.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_json(\n \"input\",\n \"target\",\n train_file=\"train_data.json\",\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n\n # In the case where the data is of the form:\n # {\n # \"version\": 0.0.x,\n # \"data\": [\n # {\n # \"input_field\" : \"input_data\",\n # \"target_field\" : \"target_output\"\n # },\n # ...\n # ]\n # }\n\n data_module = DataModule.from_json(\n \"input\",\n \"target\",\n train_file=\"train_data.json\",\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n feild=\"data\"\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.JSON,\n (train_file, input_fields, target_fields, field),\n (val_file, input_fields, target_fields, field),\n (test_file, input_fields, target_fields, field),\n (predict_file, input_fields, target_fields, field),\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n def from_csv(\n cls,\n input_fields: Union[str, Sequence[str]],\n target_fields: Optional[Union[str, Sequence[str]]] = None,\n train_file: Optional[str] = None,\n val_file: Optional[str] = None,\n test_file: Optional[str] = None,\n predict_file: Optional[str] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given CSV files using the\n :class:`~flash.core.data.data_source.DataSource`\n of name :attr:`~flash.core.data.data_source.DefaultDataSources.CSV`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n input_fields: The field or fields (columns) in the CSV file to use for the input.\n target_fields: The field or fields (columns) in the CSV file to use for the target.\n train_file: The CSV file containing the training data.\n val_file: The CSV file containing the validation data.\n test_file: The CSV file containing the testing data.\n predict_file: The CSV file containing the data to use when predicting.\n train_transform: The dictionary of transforms to use during training which maps\n 
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_csv(\n \"input\",\n \"target\",\n train_file=\"train_data.csv\",\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.CSV,\n (train_file, input_fields, target_fields),\n (val_file, input_fields, target_fields),\n (test_file, input_fields, target_fields),\n (predict_file, input_fields, target_fields),\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n def from_datasets(\n cls,\n train_dataset: Optional[Dataset] = None,\n val_dataset: Optional[Dataset] = None,\n test_dataset: Optional[Dataset] = None,\n predict_dataset: Optional[Dataset] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object from the given datasets using the\n :class:`~flash.core.data.data_source.DataSource`\n of name :attr:`~flash.core.data.data_source.DefaultDataSources.DATASETS`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n train_dataset: Dataset used during training.\n val_dataset: Dataset used during validating.\n test_dataset: Dataset used during testing.\n predict_dataset: Dataset used during predicting.\n train_transform: The dictionary of transforms to use during 
training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n data_module = DataModule.from_datasets(\n train_dataset=train_dataset,\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.DATASETS,\n train_dataset,\n val_dataset,\n test_dataset,\n predict_dataset,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n sampler=sampler,\n **preprocess_kwargs,\n )\n\n @classmethod\n @requires(\"fiftyone\")\n def from_fiftyone(\n cls,\n train_dataset: Optional[SampleCollection] = None,\n val_dataset: Optional[SampleCollection] = None,\n test_dataset: Optional[SampleCollection] = None,\n predict_dataset: Optional[SampleCollection] = None,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n data_fetcher: Optional[BaseDataFetcher] = None,\n preprocess: Optional[Preprocess] = None,\n val_split: Optional[float] = None,\n batch_size: int = 4,\n num_workers: Optional[int] = None,\n **preprocess_kwargs: Any,\n ) -> \"DataModule\":\n \"\"\"Creates a :class:`~flash.core.data.data_module.DataModule` object\n from the given FiftyOne Datasets using the\n :class:`~flash.core.data.data_source.DataSource` of name\n :attr:`~flash.core.data.data_source.DefaultDataSources.FIFTYONE`\n from the passed or constructed :class:`~flash.core.data.process.Preprocess`.\n\n Args:\n train_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the train data.\n val_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the validation data.\n test_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the test data.\n predict_dataset: The 
``fiftyone.core.collections.SampleCollection`` containing the predict data.\n train_transform: The dictionary of transforms to use during training which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n val_transform: The dictionary of transforms to use during validation which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n test_transform: The dictionary of transforms to use during testing which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n predict_transform: The dictionary of transforms to use during predicting which maps\n :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.\n data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the\n :class:`~flash.core.data.data_module.DataModule`.\n preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the\n :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``\n will be constructed and used.\n val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.\n preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used\n if ``preprocess = None``.\n\n Returns:\n The constructed data module.\n\n Examples::\n\n train_dataset = fo.Dataset.from_dir(\n \"/path/to/dataset\",\n dataset_type=fo.types.ImageClassificationDirectoryTree,\n )\n data_module = DataModule.from_fiftyone(\n train_data = train_dataset,\n train_transform={\n \"to_tensor_transform\": torch.as_tensor,\n },\n )\n \"\"\"\n return cls.from_data_source(\n DefaultDataSources.FIFTYONE,\n train_dataset,\n val_dataset,\n test_dataset,\n predict_dataset,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_fetcher=data_fetcher,\n preprocess=preprocess,\n val_split=val_split,\n batch_size=batch_size,\n num_workers=num_workers,\n **preprocess_kwargs,\n )\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"torch.cuda.is_available"
]
] |
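
The ``from_*`` constructors in the file above all delegate to ``DataModule.from_data_source``. A minimal usage sketch, mirroring the docstring examples in that file and assuming placeholder folder and file names (it is not part of the stored record):

import torch
from flash.core.data.data_module import DataModule

# Folder-based construction, mirroring the from_folders docstring example;
# "train_folder" is a placeholder path.
dm = DataModule.from_folders(
    train_folder="train_folder",
    train_transform={"to_tensor_transform": torch.as_tensor},
    batch_size=4,    # constructor default
    num_workers=0,   # also the fallback used on Windows/Darwin
)

# File-based construction with explicit targets, mirroring the from_files example.
dm = DataModule.from_files(
    train_files=["image_1.png", "image_2.png", "image_3.png"],
    train_targets=[1, 0, 1],
    train_transform={"to_tensor_transform": torch.as_tensor},
)
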
Webbah/sec-for-reinforcement-learning | [
"19db622dce4963d25cb1b6e4ae12ddf98b6d27d2"
] | [
"OMG/env/random_load.py"
] | [
"import numpy as np\nimport pandas as pd\n\nfrom openmodelica_microgrid_gym.util import RandProcess\n\n\n\nclass RandomLoad:\n def __init__(self, train_episode_length: int, ts: float, rand_process: RandProcess, loadstep_time: int = None,\n load_curve: pd.DataFrame = None, bounds=None, bounds_std=None):\n \"\"\"\n\n :param max_episode_steps: number of steps per training episode (can differ from env.max_episode_steps)\n :param ts: sampletime of env\n :param rand_pocess: Instance of random process defines noise added to load\n :param loadstep_time: number of env step where load step should happen\n :param load_curve: Stored load data to sample from instead of smaple from distribution\n :param bounds: Bounds to clip the sampled load data\n :param bounds_std: Chosen bounds are sampled from a distribution with std=bounds_std and mean=bounds\n\n \"\"\"\n self.train_episode_length = train_episode_length\n self.ts = ts\n self.rand_process = rand_process\n if loadstep_time is None:\n self.loadstep_time = np.random.randint(0, self.train_episode_length)\n else:\n self.loadstep_time = loadstep_time\n self.load_curve = load_curve\n if bounds is None:\n self.bounds = (-np.inf, np.inf)\n else:\n self.bounds = bounds\n if bounds_std is None:\n self.bounds_std = (0, 0)\n else:\n self.bounds_std = bounds_std\n\n self.lowerbound_std = 0\n self.upperbound_std = 0\n\n def reset(self, loadstep_time=None):\n if loadstep_time is None:\n self.loadstep_time = np.random.randint(0, self.train_episode_length)\n else:\n self.loadstep_time = loadstep_time\n\n def load_step(self, t, gain):\n \"\"\"\n Changes the load parameters\n :param t:\n :param gain: device parameter\n :return: Sample from SP\n \"\"\"\n # Defines a load step after 0.01 s\n if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:\n self.rand_process.proc.mean = gain * 0.55\n self.rand_process.reserve = gain * 0.55\n elif t <= self.ts:\n self.rand_process.proc.mean = gain\n\n return self.rand_process.sample(t)\n\n def clipped_step(self, t):\n return np.clip(self.rand_process.sample(t),\n self.bounds[0] + self.lowerbound_std,\n self.bounds[1] + self.upperbound_std\n )\n\n def one_random_loadstep_per_episode(self, t):\n if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:\n # do with 100 percent propability\n self.do_change(1002, 102)\n # else:\n # with 2 permill change drift\n # self.do_change(2, 0)\n\n return np.clip(self.rand_process.sample(t),\n self.bounds[0] + self.lowerbound_std,\n self.bounds[1] + self.upperbound_std\n )\n\n def give_dataframe_value(self, t, col):\n \"\"\"\n Gives load values from a stored dataframe (self.load_curve)\n :parma t: time - represents here the row of the dataframe\n :param col: colon name of the dataframe (typically str)\n \"\"\"\n if t < 0:\n # return None\n return self.load_curve[col][0]\n if self.load_curve is None:\n raise ValueError('No dataframe given! Please feed load class (.load_curve) with data')\n return self.load_curve[col][int(t / self.ts)]\n\n def random_load_step(self, t, event_prob: int = 2, step_prob: int = 50):\n \"\"\"\n Changes the load parameters applying a loadstep with 0.2% probability which is a pure step with 50 %\n probability otherwise a drift. 
In every event the random process variance is drawn randomly from [1, 150].\n :param t: time\n :param event_prob: probability (in per mill) that the step event is triggered in the current step\n :param step_prob: probability (in per cent) that the event is an abrupt step (drift otherwise; random process speed\n not adjustable yet)\n :return: Sample from SP\n \"\"\"\n # Changes rand process data with a probability of ~0.2 % (2 per mill) and sets new values randomly\n if np.random.randint(0, 1001) < 2:\n\n gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])\n\n self.rand_process.proc.mean = gain\n self.rand_process.proc.vol = np.random.randint(1, 150)\n self.rand_process.proc.speed = np.random.randint(10, 1200)\n # define std for clipping once every event\n # np.maximum to not allow negative values\n self.lowerbound_std = np.maximum(np.random.normal(scale=self.bounds_std[0]), 0.0001)\n self.upperbound_std = np.random.normal(scale=self.bounds_std[1])\n\n # With 50 % probability do a step, otherwise a drift\n if np.random.randint(0, 101) < 50:\n # step\n self.rand_process.reserve = gain\n\n else:\n # drift -> lower speed to get a gradual change\n self.rand_process.proc.speed = np.random.randint(10, 100)\n\n return np.clip(self.rand_process.sample(t),\n self.bounds[0] + self.lowerbound_std,\n self.bounds[1] + self.upperbound_std\n )\n\n def do_change(self, event_prob_permill=2, step_prob_percent=50):\n if np.random.randint(0, 1001) < event_prob_permill:\n\n gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])\n\n self.rand_process.proc.mean = gain\n self.rand_process.proc.vol = np.random.randint(1, 150)\n self.rand_process.proc.speed = np.random.randint(10, 1200)\n # define std for clipping once every event\n self.lowerbound_std = np.random.normal(scale=self.bounds_std[0])\n self.upperbound_std = np.random.normal(scale=self.bounds_std[1])\n\n # With step_prob_percent probability do a step, otherwise a drift\n if np.random.randint(0, 101) < step_prob_percent:\n # step\n self.rand_process.reserve = gain\n\n else:\n # drift -> lower speed to get a gradual change\n self.rand_process.proc.speed = np.random.randint(10, 100)\n"
] | [
[
"numpy.random.normal",
"numpy.random.randint"
]
] |
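
The load-step logic in the file above combines a per-mill event trigger with clipping of the sampled load to randomly widened bounds. A self-contained sketch of just that arithmetic, assuming made-up bounds and using ``np.random.uniform`` as a stand-in for ``rand_process.sample(t)``:

import numpy as np

bounds = (14.0, 200.0)       # hypothetical clipping bounds
bounds_std = (2.0, 2.0)      # std of the randomly drawn bound offsets

# An "event" fires with probability 2/1001 (~0.2 %), as in do_change's defaults.
if np.random.randint(0, 1001) < 2:
    lowerbound_std = np.random.normal(scale=bounds_std[0])
    upperbound_std = np.random.normal(scale=bounds_std[1])
else:
    # In the class these offsets persist between calls; here they simply stay zero.
    lowerbound_std = upperbound_std = 0.0

sample = np.random.uniform(*bounds)   # stand-in for rand_process.sample(t)
clipped = np.clip(sample, bounds[0] + lowerbound_std, bounds[1] + upperbound_std)
print(clipped)
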
kurtamohler/numpy | [
"73157efcd17da95ce984d1595ac4907233b9dbf5"
] | [
"numpy/core/tests/test_datetime.py"
] | [
"\nimport numpy\nimport numpy as np\nimport datetime\nimport pytest\nfrom numpy.testing import (\n assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,\n assert_raises_regex,\n )\nfrom numpy.compat import pickle\n\n# Use pytz to test out various time zones if available\ntry:\n from pytz import timezone as tz\n _has_pytz = True\nexcept ImportError:\n _has_pytz = False\n\ntry:\n RecursionError\nexcept NameError:\n RecursionError = RuntimeError # python < 3.5\n\n\nclass TestDateTime:\n def test_datetime_dtype_creation(self):\n for unit in ['Y', 'M', 'W', 'D',\n 'h', 'm', 's', 'ms', 'us',\n 'μs', # alias for us\n 'ns', 'ps', 'fs', 'as']:\n dt1 = np.dtype('M8[750%s]' % unit)\n assert_(dt1 == np.dtype('datetime64[750%s]' % unit))\n dt2 = np.dtype('m8[%s]' % unit)\n assert_(dt2 == np.dtype('timedelta64[%s]' % unit))\n\n # Generic units shouldn't add [] to the end\n assert_equal(str(np.dtype(\"M8\")), \"datetime64\")\n\n # Should be possible to specify the endianness\n assert_equal(np.dtype(\"=M8\"), np.dtype(\"M8\"))\n assert_equal(np.dtype(\"=M8[s]\"), np.dtype(\"M8[s]\"))\n assert_(np.dtype(\">M8\") == np.dtype(\"M8\") or\n np.dtype(\"<M8\") == np.dtype(\"M8\"))\n assert_(np.dtype(\">M8[D]\") == np.dtype(\"M8[D]\") or\n np.dtype(\"<M8[D]\") == np.dtype(\"M8[D]\"))\n assert_(np.dtype(\">M8\") != np.dtype(\"<M8\"))\n\n assert_equal(np.dtype(\"=m8\"), np.dtype(\"m8\"))\n assert_equal(np.dtype(\"=m8[s]\"), np.dtype(\"m8[s]\"))\n assert_(np.dtype(\">m8\") == np.dtype(\"m8\") or\n np.dtype(\"<m8\") == np.dtype(\"m8\"))\n assert_(np.dtype(\">m8[D]\") == np.dtype(\"m8[D]\") or\n np.dtype(\"<m8[D]\") == np.dtype(\"m8[D]\"))\n assert_(np.dtype(\">m8\") != np.dtype(\"<m8\"))\n\n # Check that the parser rejects bad datetime types\n assert_raises(TypeError, np.dtype, 'M8[badunit]')\n assert_raises(TypeError, np.dtype, 'm8[badunit]')\n assert_raises(TypeError, np.dtype, 'M8[YY]')\n assert_raises(TypeError, np.dtype, 'm8[YY]')\n assert_raises(TypeError, np.dtype, 'm4')\n assert_raises(TypeError, np.dtype, 'M7')\n assert_raises(TypeError, np.dtype, 'm7')\n assert_raises(TypeError, np.dtype, 'M16')\n assert_raises(TypeError, np.dtype, 'm16')\n\n def test_datetime_casting_rules(self):\n # Cannot cast safely/same_kind between timedelta and datetime\n assert_(not np.can_cast('m8', 'M8', casting='same_kind'))\n assert_(not np.can_cast('M8', 'm8', casting='same_kind'))\n assert_(not np.can_cast('m8', 'M8', casting='safe'))\n assert_(not np.can_cast('M8', 'm8', casting='safe'))\n\n # Can cast safely/same_kind from integer to timedelta\n assert_(np.can_cast('i8', 'm8', casting='same_kind'))\n assert_(np.can_cast('i8', 'm8', casting='safe'))\n assert_(np.can_cast('i4', 'm8', casting='same_kind'))\n assert_(np.can_cast('i4', 'm8', casting='safe'))\n assert_(np.can_cast('u4', 'm8', casting='same_kind'))\n assert_(np.can_cast('u4', 'm8', casting='safe'))\n\n # Cannot cast safely from unsigned integer of the same size, which\n # could overflow\n assert_(np.can_cast('u8', 'm8', casting='same_kind'))\n assert_(not np.can_cast('u8', 'm8', casting='safe'))\n\n # Cannot cast safely/same_kind from float to timedelta\n assert_(not np.can_cast('f4', 'm8', casting='same_kind'))\n assert_(not np.can_cast('f4', 'm8', casting='safe'))\n\n # Cannot cast safely/same_kind from integer to datetime\n assert_(not np.can_cast('i8', 'M8', casting='same_kind'))\n assert_(not np.can_cast('i8', 'M8', casting='safe'))\n\n # Cannot cast safely/same_kind from bool to datetime\n assert_(not np.can_cast('b1', 'M8', 
casting='same_kind'))\n assert_(not np.can_cast('b1', 'M8', casting='safe'))\n # Can cast safely/same_kind from bool to timedelta\n assert_(np.can_cast('b1', 'm8', casting='same_kind'))\n assert_(np.can_cast('b1', 'm8', casting='safe'))\n\n # Can cast datetime safely from months/years to days\n assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))\n assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))\n # Cannot cast timedelta safely from months/years to days\n assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))\n assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))\n # Can cast datetime same_kind from months/years to days\n assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))\n assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))\n # Can't cast timedelta same_kind from months/years to days\n assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))\n assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))\n # Can cast datetime same_kind across the date/time boundary\n assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))\n # Can cast timedelta same_kind across the date/time boundary\n assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))\n assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))\n\n # Cannot cast safely if the integer multiplier doesn't divide\n assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))\n assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))\n # But can cast same_kind\n assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))\n # Can cast safely if the integer multiplier does divide\n assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))\n\n # We can always cast types with generic units (corresponding to NaT) to\n # more specific types\n assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))\n assert_(np.can_cast('m8', 'm8[h]', casting='safe'))\n assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))\n assert_(np.can_cast('M8', 'M8[h]', casting='safe'))\n # but not the other way around\n assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))\n assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))\n assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))\n assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))\n\n def test_compare_generic_nat(self):\n # regression tests for gh-6452\n assert_(np.datetime64('NaT') !=\n np.datetime64('2000') + np.timedelta64('NaT'))\n assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))\n assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))\n\n @pytest.mark.parametrize(\"size\", [\n 3, 21, 217, 1000])\n def test_datetime_nat_argsort_stability(self, size):\n # NaT < NaT should be False internally for\n # sort stability\n expected = np.arange(size)\n arr = np.tile(np.datetime64('NaT'), size)\n assert_equal(np.argsort(arr, kind='mergesort'), expected)\n \n @pytest.mark.parametrize(\"size\", [\n 3, 21, 217, 1000])\n def test_timedelta_nat_argsort_stability(self, size):\n # NaT < NaT should be False internally for\n # sort stability\n expected = np.arange(size)\n arr = np.tile(np.timedelta64('NaT'), size)\n assert_equal(np.argsort(arr, kind='mergesort'), expected)\n\n @pytest.mark.parametrize(\"arr, expected\", [\n # the example provided in gh-12629\n (['NaT', 1, 2, 3],\n [1, 2, 3, 'NaT']),\n # multiple NaTs\n (['NaT', 9, 'NaT', -707],\n [-707, 9, 'NaT', 'NaT']),\n # this sort explores another code path for NaT\n ([1, -2, 3, 'NaT'],\n [-2, 1, 3, 'NaT']),\n # 2-D array\n ([[51, -220, 'NaT'],\n [-17, 'NaT', -90]],\n [[-220, 
51, 'NaT'],\n [-90, -17, 'NaT']]),\n ])\n @pytest.mark.parametrize(\"dtype\", [\n 'M8[ns]', 'M8[us]',\n 'm8[ns]', 'm8[us]'])\n def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):\n # fix for gh-12629 and gh-15063; NaT sorting to end of array\n arr = np.array(arr, dtype=dtype)\n expected = np.array(expected, dtype=dtype)\n arr.sort()\n assert_equal(arr, expected)\n\n def test_datetime_scalar_construction(self):\n # Construct with different units\n assert_equal(np.datetime64('1950-03-12', 'D'),\n np.datetime64('1950-03-12'))\n assert_equal(np.datetime64('1950-03-12T13', 's'),\n np.datetime64('1950-03-12T13', 'm'))\n\n # Default construction means NaT\n assert_equal(np.datetime64(), np.datetime64('NaT'))\n\n # Some basic strings and repr\n assert_equal(str(np.datetime64('NaT')), 'NaT')\n assert_equal(repr(np.datetime64('NaT')),\n \"numpy.datetime64('NaT')\")\n assert_equal(str(np.datetime64('2011-02')), '2011-02')\n assert_equal(repr(np.datetime64('2011-02')),\n \"numpy.datetime64('2011-02')\")\n\n # None gets constructed as NaT\n assert_equal(np.datetime64(None), np.datetime64('NaT'))\n\n # Default construction of NaT is in generic units\n assert_equal(np.datetime64().dtype, np.dtype('M8'))\n assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))\n\n # Construction from integers requires a specified unit\n assert_raises(ValueError, np.datetime64, 17)\n\n # When constructing from a scalar or zero-dimensional array,\n # it either keeps the units or you can override them.\n a = np.datetime64('2000-03-18T16', 'h')\n b = np.array('2000-03-18T16', dtype='M8[h]')\n\n assert_equal(a.dtype, np.dtype('M8[h]'))\n assert_equal(b.dtype, np.dtype('M8[h]'))\n\n assert_equal(np.datetime64(a), a)\n assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))\n\n assert_equal(np.datetime64(b), a)\n assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))\n\n assert_equal(np.datetime64(a, 's'), a)\n assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))\n\n assert_equal(np.datetime64(b, 's'), a)\n assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))\n\n # Construction from datetime.date\n assert_equal(np.datetime64('1945-03-25'),\n np.datetime64(datetime.date(1945, 3, 25)))\n assert_equal(np.datetime64('2045-03-25', 'D'),\n np.datetime64(datetime.date(2045, 3, 25), 'D'))\n # Construction from datetime.datetime\n assert_equal(np.datetime64('1980-01-25T14:36:22.5'),\n np.datetime64(datetime.datetime(1980, 1, 25,\n 14, 36, 22, 500000)))\n\n # Construction with time units from a date is okay\n assert_equal(np.datetime64('1920-03-13', 'h'),\n np.datetime64('1920-03-13T00'))\n assert_equal(np.datetime64('1920-03', 'm'),\n np.datetime64('1920-03-01T00:00'))\n assert_equal(np.datetime64('1920', 's'),\n np.datetime64('1920-01-01T00:00:00'))\n assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),\n np.datetime64('2045-03-25T00:00:00.000'))\n\n # Construction with date units from a datetime is also okay\n assert_equal(np.datetime64('1920-03-13T18', 'D'),\n np.datetime64('1920-03-13'))\n assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),\n np.datetime64('1920-03'))\n assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),\n np.datetime64('1920'))\n\n def test_datetime_scalar_construction_timezone(self):\n # verify that supplying an explicit timezone works, but is deprecated\n with assert_warns(DeprecationWarning):\n assert_equal(np.datetime64('2000-01-01T00Z'),\n np.datetime64('2000-01-01T00'))\n with assert_warns(DeprecationWarning):\n 
assert_equal(np.datetime64('2000-01-01T00-08'),\n np.datetime64('2000-01-01T08'))\n\n def test_datetime_array_find_type(self):\n dt = np.datetime64('1970-01-01', 'M')\n arr = np.array([dt])\n assert_equal(arr.dtype, np.dtype('M8[M]'))\n\n # at the moment, we don't automatically convert these to datetime64\n\n dt = datetime.date(1970, 1, 1)\n arr = np.array([dt])\n assert_equal(arr.dtype, np.dtype('O'))\n\n dt = datetime.datetime(1970, 1, 1, 12, 30, 40)\n arr = np.array([dt])\n assert_equal(arr.dtype, np.dtype('O'))\n\n # find \"supertype\" for non-dates and dates\n\n b = np.bool_(True)\n dm = np.datetime64('1970-01-01', 'M')\n d = datetime.date(1970, 1, 1)\n dt = datetime.datetime(1970, 1, 1, 12, 30, 40)\n\n arr = np.array([b, dm])\n assert_equal(arr.dtype, np.dtype('O'))\n\n arr = np.array([b, d])\n assert_equal(arr.dtype, np.dtype('O'))\n\n arr = np.array([b, dt])\n assert_equal(arr.dtype, np.dtype('O'))\n\n arr = np.array([d, d]).astype('datetime64')\n assert_equal(arr.dtype, np.dtype('M8[D]'))\n\n arr = np.array([dt, dt]).astype('datetime64')\n assert_equal(arr.dtype, np.dtype('M8[us]'))\n\n @pytest.mark.parametrize(\"unit\", [\n # test all date / time units and use\n # \"generic\" to select generic unit\n (\"Y\"), (\"M\"), (\"W\"), (\"D\"), (\"h\"), (\"m\"),\n (\"s\"), (\"ms\"), (\"us\"), (\"ns\"), (\"ps\"),\n (\"fs\"), (\"as\"), (\"generic\") ])\n def test_timedelta_np_int_construction(self, unit):\n # regression test for gh-7617\n if unit != \"generic\":\n assert_equal(np.timedelta64(np.int64(123), unit),\n np.timedelta64(123, unit))\n else:\n assert_equal(np.timedelta64(np.int64(123)),\n np.timedelta64(123))\n\n def test_timedelta_scalar_construction(self):\n # Construct with different units\n assert_equal(np.timedelta64(7, 'D'),\n np.timedelta64(1, 'W'))\n assert_equal(np.timedelta64(120, 's'),\n np.timedelta64(2, 'm'))\n\n # Default construction means 0\n assert_equal(np.timedelta64(), np.timedelta64(0))\n\n # None gets constructed as NaT\n assert_equal(np.timedelta64(None), np.timedelta64('NaT'))\n\n # Some basic strings and repr\n assert_equal(str(np.timedelta64('NaT')), 'NaT')\n assert_equal(repr(np.timedelta64('NaT')),\n \"numpy.timedelta64('NaT')\")\n assert_equal(str(np.timedelta64(3, 's')), '3 seconds')\n assert_equal(repr(np.timedelta64(-3, 's')),\n \"numpy.timedelta64(-3,'s')\")\n assert_equal(repr(np.timedelta64(12)),\n \"numpy.timedelta64(12)\")\n\n # Construction from an integer produces generic units\n assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))\n\n # When constructing from a scalar or zero-dimensional array,\n # it either keeps the units or you can override them.\n a = np.timedelta64(2, 'h')\n b = np.array(2, dtype='m8[h]')\n\n assert_equal(a.dtype, np.dtype('m8[h]'))\n assert_equal(b.dtype, np.dtype('m8[h]'))\n\n assert_equal(np.timedelta64(a), a)\n assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))\n\n assert_equal(np.timedelta64(b), a)\n assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))\n\n assert_equal(np.timedelta64(a, 's'), a)\n assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))\n\n assert_equal(np.timedelta64(b, 's'), a)\n assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))\n\n # Construction from datetime.timedelta\n assert_equal(np.timedelta64(5, 'D'),\n np.timedelta64(datetime.timedelta(days=5)))\n assert_equal(np.timedelta64(102347621, 's'),\n np.timedelta64(datetime.timedelta(seconds=102347621)))\n assert_equal(np.timedelta64(-10234760000, 'us'),\n np.timedelta64(datetime.timedelta(\n 
microseconds=-10234760000)))\n assert_equal(np.timedelta64(10234760000, 'us'),\n np.timedelta64(datetime.timedelta(\n microseconds=10234760000)))\n assert_equal(np.timedelta64(1023476, 'ms'),\n np.timedelta64(datetime.timedelta(milliseconds=1023476)))\n assert_equal(np.timedelta64(10, 'm'),\n np.timedelta64(datetime.timedelta(minutes=10)))\n assert_equal(np.timedelta64(281, 'h'),\n np.timedelta64(datetime.timedelta(hours=281)))\n assert_equal(np.timedelta64(28, 'W'),\n np.timedelta64(datetime.timedelta(weeks=28)))\n\n # Cannot construct across nonlinear time unit boundaries\n a = np.timedelta64(3, 's')\n assert_raises(TypeError, np.timedelta64, a, 'M')\n assert_raises(TypeError, np.timedelta64, a, 'Y')\n a = np.timedelta64(6, 'M')\n assert_raises(TypeError, np.timedelta64, a, 'D')\n assert_raises(TypeError, np.timedelta64, a, 'h')\n a = np.timedelta64(1, 'Y')\n assert_raises(TypeError, np.timedelta64, a, 'D')\n assert_raises(TypeError, np.timedelta64, a, 'm')\n a = datetime.timedelta(seconds=3)\n assert_raises(TypeError, np.timedelta64, a, 'M')\n assert_raises(TypeError, np.timedelta64, a, 'Y')\n a = datetime.timedelta(weeks=3)\n assert_raises(TypeError, np.timedelta64, a, 'M')\n assert_raises(TypeError, np.timedelta64, a, 'Y')\n a = datetime.timedelta()\n assert_raises(TypeError, np.timedelta64, a, 'M')\n assert_raises(TypeError, np.timedelta64, a, 'Y')\n\n def test_timedelta_object_array_conversion(self):\n # Regression test for gh-11096\n inputs = [datetime.timedelta(28),\n datetime.timedelta(30),\n datetime.timedelta(31)]\n expected = np.array([28, 30, 31], dtype='timedelta64[D]')\n actual = np.array(inputs, dtype='timedelta64[D]')\n assert_equal(expected, actual)\n\n def test_timedelta_0_dim_object_array_conversion(self):\n # Regression test for gh-11151\n test = np.array(datetime.timedelta(seconds=20))\n actual = test.astype(np.timedelta64)\n # expected value from the array constructor workaround\n # described in above issue\n expected = np.array(datetime.timedelta(seconds=20),\n np.timedelta64)\n assert_equal(actual, expected)\n\n def test_timedelta_scalar_construction_units(self):\n # String construction detecting units\n assert_equal(np.datetime64('2010').dtype,\n np.dtype('M8[Y]'))\n assert_equal(np.datetime64('2010-03').dtype,\n np.dtype('M8[M]'))\n assert_equal(np.datetime64('2010-03-12').dtype,\n np.dtype('M8[D]'))\n assert_equal(np.datetime64('2010-03-12T17').dtype,\n np.dtype('M8[h]'))\n assert_equal(np.datetime64('2010-03-12T17:15').dtype,\n np.dtype('M8[m]'))\n assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,\n np.dtype('M8[s]'))\n\n assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,\n np.dtype('M8[ms]'))\n assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,\n np.dtype('M8[ms]'))\n assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,\n np.dtype('M8[ms]'))\n\n assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,\n np.dtype('M8[us]'))\n assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,\n np.dtype('M8[us]'))\n assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,\n np.dtype('M8[us]'))\n\n assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,\n np.dtype('M8[ns]'))\n assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,\n np.dtype('M8[ns]'))\n assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,\n np.dtype('M8[ns]'))\n\n assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,\n np.dtype('M8[ps]'))\n 
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,\n np.dtype('M8[ps]'))\n assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,\n np.dtype('M8[ps]'))\n\n assert_equal(np.datetime64(\n '1970-01-01T00:00:02.1234567890123').dtype,\n np.dtype('M8[fs]'))\n assert_equal(np.datetime64(\n '1970-01-01T00:00:02.12345678901234').dtype,\n np.dtype('M8[fs]'))\n assert_equal(np.datetime64(\n '1970-01-01T00:00:02.123456789012345').dtype,\n np.dtype('M8[fs]'))\n\n assert_equal(np.datetime64(\n '1970-01-01T00:00:02.1234567890123456').dtype,\n np.dtype('M8[as]'))\n assert_equal(np.datetime64(\n '1970-01-01T00:00:02.12345678901234567').dtype,\n np.dtype('M8[as]'))\n assert_equal(np.datetime64(\n '1970-01-01T00:00:02.123456789012345678').dtype,\n np.dtype('M8[as]'))\n\n # Python date object\n assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,\n np.dtype('M8[D]'))\n\n # Python datetime object\n assert_equal(np.datetime64(\n datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,\n np.dtype('M8[us]'))\n\n # 'today' special value\n assert_equal(np.datetime64('today').dtype,\n np.dtype('M8[D]'))\n\n # 'now' special value\n assert_equal(np.datetime64('now').dtype,\n np.dtype('M8[s]'))\n\n def test_datetime_nat_casting(self):\n a = np.array('NaT', dtype='M8[D]')\n b = np.datetime64('NaT', '[D]')\n\n # Arrays\n assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))\n assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))\n assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))\n assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))\n assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))\n\n # Scalars -> Scalars\n assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))\n assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))\n assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))\n assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))\n assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))\n\n # Arrays -> Scalars\n assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))\n assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))\n assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))\n assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))\n assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))\n\n # NaN -> NaT\n nan = np.array([np.nan] * 8)\n fnan = nan.astype('f')\n lnan = nan.astype('g')\n cnan = nan.astype('D')\n cfnan = nan.astype('F')\n clnan = nan.astype('G')\n\n nat = np.array([np.datetime64('NaT')] * 8)\n assert_equal(nan.astype('M8[ns]'), nat)\n assert_equal(fnan.astype('M8[ns]'), nat)\n assert_equal(lnan.astype('M8[ns]'), nat)\n assert_equal(cnan.astype('M8[ns]'), nat)\n assert_equal(cfnan.astype('M8[ns]'), nat)\n assert_equal(clnan.astype('M8[ns]'), nat)\n\n nat = np.array([np.timedelta64('NaT')] * 8)\n assert_equal(nan.astype('timedelta64[ns]'), nat)\n assert_equal(fnan.astype('timedelta64[ns]'), nat)\n assert_equal(lnan.astype('timedelta64[ns]'), nat)\n assert_equal(cnan.astype('timedelta64[ns]'), nat)\n assert_equal(cfnan.astype('timedelta64[ns]'), nat)\n assert_equal(clnan.astype('timedelta64[ns]'), nat)\n\n def test_days_creation(self):\n assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),\n (1600-1970)*365 - (1972-1600)/4 + 3 - 365)\n assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),\n (1600-1970)*365 - (1972-1600)/4 + 3)\n assert_equal(np.array('1601', 
dtype='M8[D]').astype('i8'),\n (1600-1970)*365 - (1972-1600)/4 + 3 + 366)\n assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),\n (1900-1970)*365 - (1970-1900)//4)\n assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),\n (1900-1970)*365 - (1970-1900)//4 + 365)\n assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)\n assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)\n assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)\n assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)\n assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)\n assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)\n assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)\n assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)\n assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),\n (2000 - 1970)*365 + (2000 - 1972)//4)\n assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),\n (2000 - 1970)*365 + (2000 - 1972)//4 + 366)\n assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),\n (2400 - 1970)*365 + (2400 - 1972)//4 - 3)\n assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),\n (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)\n\n assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),\n (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)\n assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),\n (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)\n assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),\n (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)\n assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),\n (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)\n assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),\n (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)\n\n def test_days_to_pydate(self):\n assert_equal(np.array('1599', dtype='M8[D]').astype('O'),\n datetime.date(1599, 1, 1))\n assert_equal(np.array('1600', dtype='M8[D]').astype('O'),\n datetime.date(1600, 1, 1))\n assert_equal(np.array('1601', dtype='M8[D]').astype('O'),\n datetime.date(1601, 1, 1))\n assert_equal(np.array('1900', dtype='M8[D]').astype('O'),\n datetime.date(1900, 1, 1))\n assert_equal(np.array('1901', dtype='M8[D]').astype('O'),\n datetime.date(1901, 1, 1))\n assert_equal(np.array('2000', dtype='M8[D]').astype('O'),\n datetime.date(2000, 1, 1))\n assert_equal(np.array('2001', dtype='M8[D]').astype('O'),\n datetime.date(2001, 1, 1))\n assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),\n datetime.date(1600, 2, 29))\n assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),\n datetime.date(1600, 3, 1))\n assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),\n datetime.date(2001, 3, 22))\n\n def test_dtype_comparison(self):\n assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))\n assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))\n assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))\n assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))\n\n def test_pydatetime_creation(self):\n a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')\n assert_equal(a[0], a[1])\n a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')\n assert_equal(a[0], a[1])\n a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')\n assert_equal(a[0], a[1])\n # Will fail if the date changes during the exact right moment\n a = np.array(['today', datetime.date.today()], dtype='M8[D]')\n assert_equal(a[0], a[1])\n # 
datetime.datetime.now() returns local time, not UTC\n #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')\n #assert_equal(a[0], a[1])\n\n # we can give a datetime.date time units\n assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),\n np.array(np.datetime64('1960-03-12T00:00:00')))\n\n def test_datetime_string_conversion(self):\n a = ['2011-03-16', '1920-01-01', '2013-05-19']\n str_a = np.array(a, dtype='S')\n uni_a = np.array(a, dtype='U')\n dt_a = np.array(a, dtype='M')\n\n # String to datetime\n assert_equal(dt_a, str_a.astype('M'))\n assert_equal(dt_a.dtype, str_a.astype('M').dtype)\n dt_b = np.empty_like(dt_a)\n dt_b[...] = str_a\n assert_equal(dt_a, dt_b)\n\n # Datetime to string\n assert_equal(str_a, dt_a.astype('S0'))\n str_b = np.empty_like(str_a)\n str_b[...] = dt_a\n assert_equal(str_a, str_b)\n\n # Unicode to datetime\n assert_equal(dt_a, uni_a.astype('M'))\n assert_equal(dt_a.dtype, uni_a.astype('M').dtype)\n dt_b = np.empty_like(dt_a)\n dt_b[...] = uni_a\n assert_equal(dt_a, dt_b)\n\n # Datetime to unicode\n assert_equal(uni_a, dt_a.astype('U'))\n uni_b = np.empty_like(uni_a)\n uni_b[...] = dt_a\n assert_equal(uni_a, uni_b)\n\n # Datetime to long string - gh-9712\n assert_equal(str_a, dt_a.astype((np.string_, 128)))\n str_b = np.empty(str_a.shape, dtype=(np.string_, 128))\n str_b[...] = dt_a\n assert_equal(str_a, str_b)\n\n def test_datetime_array_str(self):\n a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')\n assert_equal(str(a), \"['2011-03-16' '1920-01-01' '2013-05-19']\")\n\n a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')\n assert_equal(np.array2string(a, separator=', ',\n formatter={'datetime': lambda x:\n \"'%s'\" % np.datetime_as_string(x, timezone='UTC')}),\n \"['2011-03-16T13:55Z', '1920-01-01T03:12Z']\")\n\n # Check that one NaT doesn't corrupt subsequent entries\n a = np.array(['2010', 'NaT', '2030']).astype('M')\n assert_equal(str(a), \"['2010' 'NaT' '2030']\")\n\n def test_timedelta_array_str(self):\n a = np.array([-1, 0, 100], dtype='m')\n assert_equal(str(a), \"[ -1 0 100]\")\n a = np.array(['NaT', 'NaT'], dtype='m')\n assert_equal(str(a), \"['NaT' 'NaT']\")\n # Check right-alignment with NaTs\n a = np.array([-1, 'NaT', 0], dtype='m')\n assert_equal(str(a), \"[ -1 'NaT' 0]\")\n a = np.array([-1, 'NaT', 1234567], dtype='m')\n assert_equal(str(a), \"[ -1 'NaT' 1234567]\")\n\n # Test with other byteorder:\n a = np.array([-1, 'NaT', 1234567], dtype='>m')\n assert_equal(str(a), \"[ -1 'NaT' 1234567]\")\n a = np.array([-1, 'NaT', 1234567], dtype='<m')\n assert_equal(str(a), \"[ -1 'NaT' 1234567]\")\n\n def test_pickle(self):\n # Check that pickle roundtripping works\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n dt = np.dtype('M8[7D]')\n assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)\n dt = np.dtype('M8[W]')\n assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)\n scalar = np.datetime64('2016-01-01T00:00:00.000000000')\n assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),\n scalar)\n delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')\n assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),\n delta)\n\n # Check that loading pickles from 1.6 works\n pkl = b\"cnumpy\\ndtype\\np0\\n(S'M8'\\np1\\nI0\\nI1\\ntp2\\nRp3\\n\" + \\\n b\"(I4\\nS'<'\\np4\\nNNNI-1\\nI-1\\nI0\\n((dp5\\n(S'D'\\np6\\n\" + \\\n b\"I7\\nI1\\nI1\\ntp7\\ntp8\\ntp9\\nb.\"\n assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))\n pkl = 
b\"cnumpy\\ndtype\\np0\\n(S'M8'\\np1\\nI0\\nI1\\ntp2\\nRp3\\n\" + \\\n b\"(I4\\nS'<'\\np4\\nNNNI-1\\nI-1\\nI0\\n((dp5\\n(S'W'\\np6\\n\" + \\\n b\"I1\\nI1\\nI1\\ntp7\\ntp8\\ntp9\\nb.\"\n assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))\n pkl = b\"cnumpy\\ndtype\\np0\\n(S'M8'\\np1\\nI0\\nI1\\ntp2\\nRp3\\n\" + \\\n b\"(I4\\nS'>'\\np4\\nNNNI-1\\nI-1\\nI0\\n((dp5\\n(S'us'\\np6\\n\" + \\\n b\"I1\\nI1\\nI1\\ntp7\\ntp8\\ntp9\\nb.\"\n assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))\n\n def test_setstate(self):\n \"Verify that datetime dtype __setstate__ can handle bad arguments\"\n dt = np.dtype('>M8[us]')\n assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))\n assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])\n assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))\n assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])\n\n def test_dtype_promotion(self):\n # datetime <op> datetime computes the metadata gcd\n # timedelta <op> timedelta computes the metadata gcd\n for mM in ['m', 'M']:\n assert_equal(\n np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),\n np.dtype(mM+'8[2Y]'))\n assert_equal(\n np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),\n np.dtype(mM+'8[3Y]'))\n assert_equal(\n np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),\n np.dtype(mM+'8[2M]'))\n assert_equal(\n np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),\n np.dtype(mM+'8[1D]'))\n assert_equal(\n np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),\n np.dtype(mM+'8[s]'))\n assert_equal(\n np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),\n np.dtype(mM+'8[7s]'))\n # timedelta <op> timedelta raises when there is no reasonable gcd\n assert_raises(TypeError, np.promote_types,\n np.dtype('m8[Y]'), np.dtype('m8[D]'))\n assert_raises(TypeError, np.promote_types,\n np.dtype('m8[M]'), np.dtype('m8[W]'))\n # timedelta and float cannot be safely cast with each other\n assert_raises(TypeError, np.promote_types, \"float32\", \"m8\")\n assert_raises(TypeError, np.promote_types, \"m8\", \"float32\")\n assert_raises(TypeError, np.promote_types, \"uint64\", \"m8\")\n assert_raises(TypeError, np.promote_types, \"m8\", \"uint64\")\n\n # timedelta <op> timedelta may overflow with big unit ranges\n assert_raises(OverflowError, np.promote_types,\n np.dtype('m8[W]'), np.dtype('m8[fs]'))\n assert_raises(OverflowError, np.promote_types,\n np.dtype('m8[s]'), np.dtype('m8[as]'))\n\n def test_cast_overflow(self):\n # gh-4486\n def cast():\n numpy.datetime64(\"1971-01-01 00:00:00.000000000000000\").astype(\"<M8[D]\")\n assert_raises(OverflowError, cast)\n\n def cast2():\n numpy.datetime64(\"2014\").astype(\"<M8[fs]\")\n assert_raises(OverflowError, cast2)\n\n def test_pyobject_roundtrip(self):\n # All datetime types should be able to roundtrip through object\n a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,\n -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],\n dtype=np.int64)\n # With date units\n for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:\n b = a.copy().view(dtype=unit)\n b[0] = '-0001-01-01'\n b[1] = '-0001-12-31'\n b[2] = '0000-01-01'\n b[3] = '0001-01-01'\n b[4] = '1969-12-31'\n b[5] = '1970-01-01'\n b[6] = '9999-12-31'\n b[7] = '10000-01-01'\n b[8] = 'NaT'\n\n assert_equal(b.astype(object).astype(unit), b,\n \"Error roundtripping unit %s\" % unit)\n # With time units\n for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',\n 'M8[300as]', 'M8[20us]']:\n b = 
a.copy().view(dtype=unit)\n b[0] = '-0001-01-01T00'\n b[1] = '-0001-12-31T00'\n b[2] = '0000-01-01T00'\n b[3] = '0001-01-01T00'\n b[4] = '1969-12-31T23:59:59.999999'\n b[5] = '1970-01-01T00'\n b[6] = '9999-12-31T23:59:59.999999'\n b[7] = '10000-01-01T00'\n b[8] = 'NaT'\n\n assert_equal(b.astype(object).astype(unit), b,\n \"Error roundtripping unit %s\" % unit)\n\n def test_month_truncation(self):\n # Make sure that months are truncating correctly\n assert_equal(np.array('1945-03-01', dtype='M8[M]'),\n np.array('1945-03-31', dtype='M8[M]'))\n assert_equal(np.array('1969-11-01', dtype='M8[M]'),\n np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))\n assert_equal(np.array('1969-12-01', dtype='M8[M]'),\n np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))\n assert_equal(np.array('1970-01-01', dtype='M8[M]'),\n np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))\n assert_equal(np.array('1980-02-01', dtype='M8[M]'),\n np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))\n\n def test_different_unit_comparison(self):\n # Check some years with date units\n for unit1 in ['Y', 'M', 'D']:\n dt1 = np.dtype('M8[%s]' % unit1)\n for unit2 in ['Y', 'M', 'D']:\n dt2 = np.dtype('M8[%s]' % unit2)\n assert_equal(np.array('1945', dtype=dt1),\n np.array('1945', dtype=dt2))\n assert_equal(np.array('1970', dtype=dt1),\n np.array('1970', dtype=dt2))\n assert_equal(np.array('9999', dtype=dt1),\n np.array('9999', dtype=dt2))\n assert_equal(np.array('10000', dtype=dt1),\n np.array('10000-01-01', dtype=dt2))\n assert_equal(np.datetime64('1945', unit1),\n np.datetime64('1945', unit2))\n assert_equal(np.datetime64('1970', unit1),\n np.datetime64('1970', unit2))\n assert_equal(np.datetime64('9999', unit1),\n np.datetime64('9999', unit2))\n assert_equal(np.datetime64('10000', unit1),\n np.datetime64('10000-01-01', unit2))\n # Check some datetimes with time units\n for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:\n dt1 = np.dtype('M8[%s]' % unit1)\n for unit2 in ['h', 'm', 's', 'ms', 'us']:\n dt2 = np.dtype('M8[%s]' % unit2)\n assert_equal(np.array('1945-03-12T18', dtype=dt1),\n np.array('1945-03-12T18', dtype=dt2))\n assert_equal(np.array('1970-03-12T18', dtype=dt1),\n np.array('1970-03-12T18', dtype=dt2))\n assert_equal(np.array('9999-03-12T18', dtype=dt1),\n np.array('9999-03-12T18', dtype=dt2))\n assert_equal(np.array('10000-01-01T00', dtype=dt1),\n np.array('10000-01-01T00', dtype=dt2))\n assert_equal(np.datetime64('1945-03-12T18', unit1),\n np.datetime64('1945-03-12T18', unit2))\n assert_equal(np.datetime64('1970-03-12T18', unit1),\n np.datetime64('1970-03-12T18', unit2))\n assert_equal(np.datetime64('9999-03-12T18', unit1),\n np.datetime64('9999-03-12T18', unit2))\n assert_equal(np.datetime64('10000-01-01T00', unit1),\n np.datetime64('10000-01-01T00', unit2))\n # Check some days with units that won't overflow\n for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:\n dt1 = np.dtype('M8[%s]' % unit1)\n for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:\n dt2 = np.dtype('M8[%s]' % unit2)\n assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),\n np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),\n casting='unsafe'))\n assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),\n np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),\n casting='unsafe'))\n\n # Shouldn't be able to compare datetime and timedelta\n # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by\n # default is needed to properly catch this kind of 
thing...\n a = np.array('2012-12-21', dtype='M8[D]')\n b = np.array(3, dtype='m8[D]')\n #assert_raises(TypeError, np.less, a, b)\n assert_raises(TypeError, np.less, a, b, casting='same_kind')\n\n def test_datetime_like(self):\n a = np.array([3], dtype='m8[4D]')\n b = np.array(['2012-12-21'], dtype='M8[D]')\n\n assert_equal(np.ones_like(a).dtype, a.dtype)\n assert_equal(np.zeros_like(a).dtype, a.dtype)\n assert_equal(np.empty_like(a).dtype, a.dtype)\n assert_equal(np.ones_like(b).dtype, b.dtype)\n assert_equal(np.zeros_like(b).dtype, b.dtype)\n assert_equal(np.empty_like(b).dtype, b.dtype)\n\n def test_datetime_unary(self):\n for tda, tdb, tdzero, tdone, tdmone in \\\n [\n # One-dimensional arrays\n (np.array([3], dtype='m8[D]'),\n np.array([-3], dtype='m8[D]'),\n np.array([0], dtype='m8[D]'),\n np.array([1], dtype='m8[D]'),\n np.array([-1], dtype='m8[D]')),\n # NumPy scalars\n (np.timedelta64(3, '[D]'),\n np.timedelta64(-3, '[D]'),\n np.timedelta64(0, '[D]'),\n np.timedelta64(1, '[D]'),\n np.timedelta64(-1, '[D]'))]:\n # negative ufunc\n assert_equal(-tdb, tda)\n assert_equal((-tdb).dtype, tda.dtype)\n assert_equal(np.negative(tdb), tda)\n assert_equal(np.negative(tdb).dtype, tda.dtype)\n\n # positive ufunc\n assert_equal(np.positive(tda), tda)\n assert_equal(np.positive(tda).dtype, tda.dtype)\n assert_equal(np.positive(tdb), tdb)\n assert_equal(np.positive(tdb).dtype, tdb.dtype)\n\n # absolute ufunc\n assert_equal(np.absolute(tdb), tda)\n assert_equal(np.absolute(tdb).dtype, tda.dtype)\n\n # sign ufunc\n assert_equal(np.sign(tda), tdone)\n assert_equal(np.sign(tdb), tdmone)\n assert_equal(np.sign(tdzero), tdzero)\n assert_equal(np.sign(tda).dtype, tda.dtype)\n\n # The ufuncs always produce native-endian results\n assert_\n\n def test_datetime_add(self):\n for dta, dtb, dtc, dtnat, tda, tdb, tdc in \\\n [\n # One-dimensional arrays\n (np.array(['2012-12-21'], dtype='M8[D]'),\n np.array(['2012-12-24'], dtype='M8[D]'),\n np.array(['2012-12-21T11'], dtype='M8[h]'),\n np.array(['NaT'], dtype='M8[D]'),\n np.array([3], dtype='m8[D]'),\n np.array([11], dtype='m8[h]'),\n np.array([3*24 + 11], dtype='m8[h]')),\n # NumPy scalars\n (np.datetime64('2012-12-21', '[D]'),\n np.datetime64('2012-12-24', '[D]'),\n np.datetime64('2012-12-21T11', '[h]'),\n np.datetime64('NaT', '[D]'),\n np.timedelta64(3, '[D]'),\n np.timedelta64(11, '[h]'),\n np.timedelta64(3*24 + 11, '[h]'))]:\n # m8 + m8\n assert_equal(tda + tdb, tdc)\n assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))\n # m8 + bool\n assert_equal(tdb + True, tdb + 1)\n assert_equal((tdb + True).dtype, np.dtype('m8[h]'))\n # m8 + int\n assert_equal(tdb + 3*24, tdc)\n assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))\n # bool + m8\n assert_equal(False + tdb, tdb)\n assert_equal((False + tdb).dtype, np.dtype('m8[h]'))\n # int + m8\n assert_equal(3*24 + tdb, tdc)\n assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))\n # M8 + bool\n assert_equal(dta + True, dta + 1)\n assert_equal(dtnat + True, dtnat)\n assert_equal((dta + True).dtype, np.dtype('M8[D]'))\n # M8 + int\n assert_equal(dta + 3, dtb)\n assert_equal(dtnat + 3, dtnat)\n assert_equal((dta + 3).dtype, np.dtype('M8[D]'))\n # bool + M8\n assert_equal(False + dta, dta)\n assert_equal(False + dtnat, dtnat)\n assert_equal((False + dta).dtype, np.dtype('M8[D]'))\n # int + M8\n assert_equal(3 + dta, dtb)\n assert_equal(3 + dtnat, dtnat)\n assert_equal((3 + dta).dtype, np.dtype('M8[D]'))\n # M8 + m8\n assert_equal(dta + tda, dtb)\n assert_equal(dtnat + tda, dtnat)\n assert_equal((dta + tda).dtype, 
np.dtype('M8[D]'))\n # m8 + M8\n assert_equal(tda + dta, dtb)\n assert_equal(tda + dtnat, dtnat)\n assert_equal((tda + dta).dtype, np.dtype('M8[D]'))\n\n # In M8 + m8, the result goes to higher precision\n assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)\n assert_equal(np.add(dta, tdb, casting='unsafe').dtype,\n np.dtype('M8[h]'))\n assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)\n assert_equal(np.add(tdb, dta, casting='unsafe').dtype,\n np.dtype('M8[h]'))\n\n # M8 + M8\n assert_raises(TypeError, np.add, dta, dtb)\n\n def test_datetime_subtract(self):\n for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \\\n [\n # One-dimensional arrays\n (np.array(['2012-12-21'], dtype='M8[D]'),\n np.array(['2012-12-24'], dtype='M8[D]'),\n np.array(['1940-12-24'], dtype='M8[D]'),\n np.array(['1940-12-24T00'], dtype='M8[h]'),\n np.array(['1940-12-23T13'], dtype='M8[h]'),\n np.array(['NaT'], dtype='M8[D]'),\n np.array([3], dtype='m8[D]'),\n np.array([11], dtype='m8[h]'),\n np.array([3*24 - 11], dtype='m8[h]')),\n # NumPy scalars\n (np.datetime64('2012-12-21', '[D]'),\n np.datetime64('2012-12-24', '[D]'),\n np.datetime64('1940-12-24', '[D]'),\n np.datetime64('1940-12-24T00', '[h]'),\n np.datetime64('1940-12-23T13', '[h]'),\n np.datetime64('NaT', '[D]'),\n np.timedelta64(3, '[D]'),\n np.timedelta64(11, '[h]'),\n np.timedelta64(3*24 - 11, '[h]'))]:\n # m8 - m8\n assert_equal(tda - tdb, tdc)\n assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))\n assert_equal(tdb - tda, -tdc)\n assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))\n # m8 - bool\n assert_equal(tdc - True, tdc - 1)\n assert_equal((tdc - True).dtype, np.dtype('m8[h]'))\n # m8 - int\n assert_equal(tdc - 3*24, -tdb)\n assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))\n # int - m8\n assert_equal(False - tdb, -tdb)\n assert_equal((False - tdb).dtype, np.dtype('m8[h]'))\n # int - m8\n assert_equal(3*24 - tdb, tdc)\n assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))\n # M8 - bool\n assert_equal(dtb - True, dtb - 1)\n assert_equal(dtnat - True, dtnat)\n assert_equal((dtb - True).dtype, np.dtype('M8[D]'))\n # M8 - int\n assert_equal(dtb - 3, dta)\n assert_equal(dtnat - 3, dtnat)\n assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))\n # M8 - m8\n assert_equal(dtb - tda, dta)\n assert_equal(dtnat - tda, dtnat)\n assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))\n\n # In M8 - m8, the result goes to higher precision\n assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)\n assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,\n np.dtype('M8[h]'))\n\n # M8 - M8 with different goes to higher precision\n assert_equal(np.subtract(dtc, dtd, casting='unsafe'),\n np.timedelta64(0, 'h'))\n assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,\n np.dtype('m8[h]'))\n assert_equal(np.subtract(dtd, dtc, casting='unsafe'),\n np.timedelta64(0, 'h'))\n assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,\n np.dtype('m8[h]'))\n\n # m8 - M8\n assert_raises(TypeError, np.subtract, tda, dta)\n # bool - M8\n assert_raises(TypeError, np.subtract, False, dta)\n # int - M8\n assert_raises(TypeError, np.subtract, 3, dta)\n\n def test_datetime_multiply(self):\n for dta, tda, tdb, tdc in \\\n [\n # One-dimensional arrays\n (np.array(['2012-12-21'], dtype='M8[D]'),\n np.array([6], dtype='m8[h]'),\n np.array([9], dtype='m8[h]'),\n np.array([12], dtype='m8[h]')),\n # NumPy scalars\n (np.datetime64('2012-12-21', '[D]'),\n np.timedelta64(6, '[h]'),\n np.timedelta64(9, '[h]'),\n np.timedelta64(12, '[h]'))]:\n # m8 * int\n assert_equal(tda * 2, tdc)\n 
assert_equal((tda * 2).dtype, np.dtype('m8[h]'))\n # int * m8\n assert_equal(2 * tda, tdc)\n assert_equal((2 * tda).dtype, np.dtype('m8[h]'))\n # m8 * float\n assert_equal(tda * 1.5, tdb)\n assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))\n # float * m8\n assert_equal(1.5 * tda, tdb)\n assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))\n\n # m8 * m8\n assert_raises(TypeError, np.multiply, tda, tdb)\n # m8 * M8\n assert_raises(TypeError, np.multiply, dta, tda)\n # M8 * m8\n assert_raises(TypeError, np.multiply, tda, dta)\n # M8 * int\n assert_raises(TypeError, np.multiply, dta, 2)\n # int * M8\n assert_raises(TypeError, np.multiply, 2, dta)\n # M8 * float\n assert_raises(TypeError, np.multiply, dta, 1.5)\n # float * M8\n assert_raises(TypeError, np.multiply, 1.5, dta)\n\n # NaTs\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in multiply\")\n nat = np.timedelta64('NaT')\n def check(a, b, res):\n assert_equal(a * b, res)\n assert_equal(b * a, res)\n for tp in (int, float):\n check(nat, tp(2), nat)\n check(nat, tp(0), nat)\n for f in (float('inf'), float('nan')):\n check(np.timedelta64(1), f, nat)\n check(np.timedelta64(0), f, nat)\n check(nat, f, nat)\n\n @pytest.mark.parametrize(\"op1, op2, exp\", [\n # m8 same units round down\n (np.timedelta64(7, 's'),\n np.timedelta64(4, 's'),\n 1),\n # m8 same units round down with negative\n (np.timedelta64(7, 's'),\n np.timedelta64(-4, 's'),\n -2),\n # m8 same units negative no round down\n (np.timedelta64(8, 's'),\n np.timedelta64(-4, 's'),\n -2),\n # m8 different units\n (np.timedelta64(1, 'm'),\n np.timedelta64(31, 's'),\n 1),\n # m8 generic units\n (np.timedelta64(1890),\n np.timedelta64(31),\n 60),\n # Y // M works\n (np.timedelta64(2, 'Y'),\n np.timedelta64('13', 'M'),\n 1),\n # handle 1D arrays\n (np.array([1, 2, 3], dtype='m8'),\n np.array([2], dtype='m8'),\n np.array([0, 1, 1], dtype=np.int64)),\n ])\n def test_timedelta_floor_divide(self, op1, op2, exp):\n assert_equal(op1 // op2, exp)\n\n @pytest.mark.parametrize(\"op1, op2\", [\n # div by 0\n (np.timedelta64(10, 'us'),\n np.timedelta64(0, 'us')),\n # div with NaT\n (np.timedelta64('NaT'),\n np.timedelta64(50, 'us')),\n # special case for int64 min\n # in integer floor division\n (np.timedelta64(np.iinfo(np.int64).min),\n np.timedelta64(-1)),\n ])\n def test_timedelta_floor_div_warnings(self, op1, op2):\n with assert_warns(RuntimeWarning):\n actual = op1 // op2\n assert_equal(actual, 0)\n assert_equal(actual.dtype, np.int64)\n\n @pytest.mark.parametrize(\"val1, val2\", [\n # the smallest integer that can't be represented\n # exactly in a double should be preserved if we avoid\n # casting to double in floordiv operation\n (9007199254740993, 1),\n # stress the alternate floordiv code path where\n # operand signs don't match and remainder isn't 0\n (9007199254740999, -2),\n ])\n def test_timedelta_floor_div_precision(self, val1, val2):\n op1 = np.timedelta64(val1)\n op2 = np.timedelta64(val2)\n actual = op1 // op2\n # Python reference integer floor\n expected = val1 // val2\n assert_equal(actual, expected)\n\n @pytest.mark.parametrize(\"val1, val2\", [\n # years and months sometimes can't be unambiguously\n # divided for floor division operation\n (np.timedelta64(7, 'Y'),\n np.timedelta64(3, 's')),\n (np.timedelta64(7, 'M'),\n np.timedelta64(1, 'D')),\n ])\n def test_timedelta_floor_div_error(self, val1, val2):\n with assert_raises_regex(TypeError, \"common metadata divisor\"):\n val1 // val2\n\n @pytest.mark.parametrize(\"op1, op2\", [\n # reuse 
the test cases from floordiv\n (np.timedelta64(7, 's'),\n np.timedelta64(4, 's')),\n # m8 same units round down with negative\n (np.timedelta64(7, 's'),\n np.timedelta64(-4, 's')),\n # m8 same units negative no round down\n (np.timedelta64(8, 's'),\n np.timedelta64(-4, 's')),\n # m8 different units\n (np.timedelta64(1, 'm'),\n np.timedelta64(31, 's')),\n # m8 generic units\n (np.timedelta64(1890),\n np.timedelta64(31)),\n # Y // M works\n (np.timedelta64(2, 'Y'),\n np.timedelta64('13', 'M')),\n # handle 1D arrays\n (np.array([1, 2, 3], dtype='m8'),\n np.array([2], dtype='m8')),\n ])\n def test_timedelta_divmod(self, op1, op2):\n expected = (op1 // op2, op1 % op2)\n assert_equal(divmod(op1, op2), expected)\n\n @pytest.mark.parametrize(\"op1, op2\", [\n # reuse cases from floordiv\n # div by 0\n (np.timedelta64(10, 'us'),\n np.timedelta64(0, 'us')),\n # div with NaT\n (np.timedelta64('NaT'),\n np.timedelta64(50, 'us')),\n # special case for int64 min\n # in integer floor division\n (np.timedelta64(np.iinfo(np.int64).min),\n np.timedelta64(-1)),\n ])\n def test_timedelta_divmod_warnings(self, op1, op2):\n with assert_warns(RuntimeWarning):\n expected = (op1 // op2, op1 % op2)\n with assert_warns(RuntimeWarning):\n actual = divmod(op1, op2)\n assert_equal(actual, expected)\n\n def test_datetime_divide(self):\n for dta, tda, tdb, tdc, tdd in \\\n [\n # One-dimensional arrays\n (np.array(['2012-12-21'], dtype='M8[D]'),\n np.array([6], dtype='m8[h]'),\n np.array([9], dtype='m8[h]'),\n np.array([12], dtype='m8[h]'),\n np.array([6], dtype='m8[m]')),\n # NumPy scalars\n (np.datetime64('2012-12-21', '[D]'),\n np.timedelta64(6, '[h]'),\n np.timedelta64(9, '[h]'),\n np.timedelta64(12, '[h]'),\n np.timedelta64(6, '[m]'))]:\n # m8 / int\n assert_equal(tdc / 2, tda)\n assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))\n # m8 / float\n assert_equal(tda / 0.5, tdc)\n assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))\n # m8 / m8\n assert_equal(tda / tdb, 6.0 / 9.0)\n assert_equal(np.divide(tda, tdb), 6.0 / 9.0)\n assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)\n assert_equal(tdb / tda, 9.0 / 6.0)\n assert_equal((tda / tdb).dtype, np.dtype('f8'))\n assert_equal(tda / tdd, 60.0)\n assert_equal(tdd / tda, 1.0 / 60.0)\n\n # int / m8\n assert_raises(TypeError, np.divide, 2, tdb)\n # float / m8\n assert_raises(TypeError, np.divide, 0.5, tdb)\n # m8 / M8\n assert_raises(TypeError, np.divide, dta, tda)\n # M8 / m8\n assert_raises(TypeError, np.divide, tda, dta)\n # M8 / int\n assert_raises(TypeError, np.divide, dta, 2)\n # int / M8\n assert_raises(TypeError, np.divide, 2, dta)\n # M8 / float\n assert_raises(TypeError, np.divide, dta, 1.5)\n # float / M8\n assert_raises(TypeError, np.divide, 1.5, dta)\n\n # NaTs\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, r\".*encountered in true\\_divide\")\n nat = np.timedelta64('NaT')\n for tp in (int, float):\n assert_equal(np.timedelta64(1) / tp(0), nat)\n assert_equal(np.timedelta64(0) / tp(0), nat)\n assert_equal(nat / tp(0), nat)\n assert_equal(nat / tp(2), nat)\n # Division by inf\n assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))\n assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))\n assert_equal(nat / float('inf'), nat)\n # Division by nan\n assert_equal(np.timedelta64(1) / float('nan'), nat)\n assert_equal(np.timedelta64(0) / float('nan'), nat)\n assert_equal(nat / float('nan'), nat)\n\n def test_datetime_compare(self):\n # Test all the comparison operators\n a = np.datetime64('2000-03-12T18:00:00.000000')\n b = 
np.array(['2000-03-12T18:00:00.000000',\n '2000-03-12T17:59:59.999999',\n '2000-03-12T18:00:00.000001',\n '1970-01-11T12:00:00.909090',\n '2016-01-11T12:00:00.909090'],\n dtype='datetime64[us]')\n assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])\n assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])\n assert_equal(np.less(a, b), [0, 0, 1, 0, 1])\n assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])\n assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])\n assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])\n\n def test_datetime_compare_nat(self):\n dt_nat = np.datetime64('NaT', 'D')\n dt_other = np.datetime64('2000-01-01')\n td_nat = np.timedelta64('NaT', 'h')\n td_other = np.timedelta64(1, 'h')\n\n for op in [np.equal, np.less, np.less_equal,\n np.greater, np.greater_equal]:\n assert_(not op(dt_nat, dt_nat))\n assert_(not op(dt_nat, dt_other))\n assert_(not op(dt_other, dt_nat))\n\n assert_(not op(td_nat, td_nat))\n assert_(not op(td_nat, td_other))\n assert_(not op(td_other, td_nat))\n\n assert_(np.not_equal(dt_nat, dt_nat))\n assert_(np.not_equal(dt_nat, dt_other))\n assert_(np.not_equal(dt_other, dt_nat))\n\n assert_(np.not_equal(td_nat, td_nat))\n assert_(np.not_equal(td_nat, td_other))\n assert_(np.not_equal(td_other, td_nat))\n\n def test_datetime_minmax(self):\n # The metadata of the result should become the GCD\n # of the operand metadata\n a = np.array('1999-03-12T13', dtype='M8[2m]')\n b = np.array('1999-03-12T12', dtype='M8[s]')\n assert_equal(np.minimum(a, b), b)\n assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))\n assert_equal(np.fmin(a, b), b)\n assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))\n assert_equal(np.maximum(a, b), a)\n assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))\n assert_equal(np.fmax(a, b), a)\n assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))\n # Viewed as integers, the comparison is opposite because\n # of the units chosen\n assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))\n\n # Interaction with NaT\n a = np.array('1999-03-12T13', dtype='M8[2m]')\n dtnat = np.array('NaT', dtype='M8[h]')\n assert_equal(np.minimum(a, dtnat), dtnat)\n assert_equal(np.minimum(dtnat, a), dtnat)\n assert_equal(np.maximum(a, dtnat), dtnat)\n assert_equal(np.maximum(dtnat, a), dtnat)\n assert_equal(np.fmin(dtnat, a), a)\n assert_equal(np.fmin(a, dtnat), a)\n assert_equal(np.fmax(dtnat, a), a)\n assert_equal(np.fmax(a, dtnat), a)\n\n # Also do timedelta\n a = np.array(3, dtype='m8[h]')\n b = np.array(3*3600 - 3, dtype='m8[s]')\n assert_equal(np.minimum(a, b), b)\n assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))\n assert_equal(np.fmin(a, b), b)\n assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))\n assert_equal(np.maximum(a, b), a)\n assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))\n assert_equal(np.fmax(a, b), a)\n assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))\n # Viewed as integers, the comparison is opposite because\n # of the units chosen\n assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))\n\n # should raise between datetime and timedelta\n #\n # TODO: Allowing unsafe casting by\n # default in ufuncs strikes again... 
:(\n a = np.array(3, dtype='m8[h]')\n b = np.array('1999-03-12T12', dtype='M8[s]')\n #assert_raises(TypeError, np.minimum, a, b)\n #assert_raises(TypeError, np.maximum, a, b)\n #assert_raises(TypeError, np.fmin, a, b)\n #assert_raises(TypeError, np.fmax, a, b)\n assert_raises(TypeError, np.minimum, a, b, casting='same_kind')\n assert_raises(TypeError, np.maximum, a, b, casting='same_kind')\n assert_raises(TypeError, np.fmin, a, b, casting='same_kind')\n assert_raises(TypeError, np.fmax, a, b, casting='same_kind')\n\n def test_hours(self):\n t = np.ones(3, dtype='M8[s]')\n t[0] = 60*60*24 + 60*60*10\n assert_(t[0].item().hour == 10)\n\n def test_divisor_conversion_year(self):\n assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))\n assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))\n assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))\n\n def test_divisor_conversion_month(self):\n assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))\n assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))\n assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))\n\n def test_divisor_conversion_week(self):\n assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))\n assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))\n assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))\n\n def test_divisor_conversion_day(self):\n assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))\n assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))\n assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))\n\n def test_divisor_conversion_hour(self):\n assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))\n assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))\n\n def test_divisor_conversion_minute(self):\n assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))\n assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))\n\n def test_divisor_conversion_second(self):\n assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))\n assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))\n\n def test_divisor_conversion_fs(self):\n assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))\n assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))\n\n def test_divisor_conversion_as(self):\n assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))\n\n def test_string_parser_variants(self):\n # Allow space instead of 'T' between date and time\n assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),\n np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))\n # Allow positive years\n assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),\n np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))\n # Allow negative years\n assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),\n np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))\n # UTC specifier\n with assert_warns(DeprecationWarning):\n assert_equal(\n np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),\n np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))\n with assert_warns(DeprecationWarning):\n assert_equal(\n np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),\n np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))\n # Time zone offset\n with assert_warns(DeprecationWarning):\n assert_equal(\n np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),\n np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))\n with assert_warns(DeprecationWarning):\n assert_equal(\n np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),\n np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))\n with assert_warns(DeprecationWarning):\n assert_equal(\n 
np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),\n np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))\n with assert_warns(DeprecationWarning):\n assert_equal(np.datetime64('1977-03-02T12:30-0230'),\n np.datetime64('1977-03-02T15:00'))\n\n def test_string_parser_error_check(self):\n # Arbitrary bad string\n assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))\n # Character after year must be '-'\n assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))\n # Cannot have trailing '-'\n assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))\n # Month must be in range [1,12]\n assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))\n # Month must have two digits\n assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))\n # 'Mor' is not a valid month\n assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))\n # Cannot have trailing '-'\n assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))\n # Day must be in range [1,len(month)]\n assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))\n # Cannot have trailing characters\n assert_raises(ValueError, np.array, ['1980-02-03%'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03 q'],\n np.dtype('M8[us]'))\n\n # Hours must be in range [0, 23]\n assert_raises(ValueError, np.array, ['1980-02-03 25'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03T25'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03 24:01'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03T24:01'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03 -1'],\n np.dtype('M8[us]'))\n # No trailing ':'\n assert_raises(ValueError, np.array, ['1980-02-03 01:'],\n np.dtype('M8[us]'))\n # Minutes must be in range [0, 59]\n assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03 01:60'],\n np.dtype('M8[us]'))\n # No trailing ':'\n assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],\n np.dtype('M8[us]'))\n # Seconds must be in range [0, 59]\n assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],\n np.dtype('M8[us]'))\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],\n np.dtype('M8[us]'))\n # 
Timezone offset must within a reasonable range\n with assert_warns(DeprecationWarning):\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],\n np.dtype('M8[us]'))\n with assert_warns(DeprecationWarning):\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],\n np.dtype('M8[us]'))\n with assert_warns(DeprecationWarning):\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],\n np.dtype('M8[us]'))\n with assert_warns(DeprecationWarning):\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],\n np.dtype('M8[us]'))\n with assert_warns(DeprecationWarning):\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],\n np.dtype('M8[us]'))\n\n def test_creation_overflow(self):\n date = '1980-03-23 20:00:00'\n timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)\n for unit in ['ms', 'us', 'ns']:\n timesteps *= 1000\n x = np.array([date], dtype='datetime64[%s]' % unit)\n\n assert_equal(timesteps, x[0].astype(np.int64),\n err_msg='Datetime conversion error for unit %s' % unit)\n\n assert_equal(x[0].astype(np.int64), 322689600000000000)\n\n # gh-13062\n with pytest.raises(OverflowError):\n np.datetime64(2**64, 'D')\n with pytest.raises(OverflowError):\n np.timedelta64(2**64, 'D')\n\n def test_datetime_as_string(self):\n # Check all the units with default string conversion\n date = '1959-10-13'\n datetime = '1959-10-13T12:34:56.789012345678901234'\n\n assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),\n '1959')\n assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),\n '1959-10')\n assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),\n '1959-10-13')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),\n '1959-10-13T12')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),\n '1959-10-13T12:34')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),\n '1959-10-13T12:34:56')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),\n '1959-10-13T12:34:56.789')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'us')),\n '1959-10-13T12:34:56.789012')\n\n datetime = '1969-12-31T23:34:56.789012345678901234'\n\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),\n '1969-12-31T23:34:56.789012345')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),\n '1969-12-31T23:34:56.789012345678')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),\n '1969-12-31T23:34:56.789012345678901')\n\n datetime = '1969-12-31T23:59:57.789012345678901234'\n\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),\n datetime)\n datetime = '1970-01-01T00:34:56.789012345678901234'\n\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),\n '1970-01-01T00:34:56.789012345')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),\n '1970-01-01T00:34:56.789012345678')\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),\n '1970-01-01T00:34:56.789012345678901')\n\n datetime = '1970-01-01T00:00:05.789012345678901234'\n\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),\n datetime)\n\n # String conversion with the unit= parameter\n a = np.datetime64('2032-07-18T12:23:34.123456', 'us')\n assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),\n '2032')\n assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),\n '2032-07')\n assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),\n '2032-07-18')\n 
assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),\n '2032-07-18')\n assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')\n assert_equal(np.datetime_as_string(a, unit='m'),\n '2032-07-18T12:23')\n assert_equal(np.datetime_as_string(a, unit='s'),\n '2032-07-18T12:23:34')\n assert_equal(np.datetime_as_string(a, unit='ms'),\n '2032-07-18T12:23:34.123')\n assert_equal(np.datetime_as_string(a, unit='us'),\n '2032-07-18T12:23:34.123456')\n assert_equal(np.datetime_as_string(a, unit='ns'),\n '2032-07-18T12:23:34.123456000')\n assert_equal(np.datetime_as_string(a, unit='ps'),\n '2032-07-18T12:23:34.123456000000')\n assert_equal(np.datetime_as_string(a, unit='fs'),\n '2032-07-18T12:23:34.123456000000000')\n assert_equal(np.datetime_as_string(a, unit='as'),\n '2032-07-18T12:23:34.123456000000000000')\n\n # unit='auto' parameter\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),\n '2032-07-18T12:23:34.123456')\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),\n '2032-07-18T12:23:34.120')\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),\n '2032-07-18T12:23:34')\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),\n '2032-07-18T12:23')\n # 'auto' doesn't split up hour and minute\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),\n '2032-07-18T12:00')\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),\n '2032-07-18')\n # 'auto' doesn't split up the date\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),\n '2032-07-01')\n assert_equal(np.datetime_as_string(\n np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),\n '2032-01-01')\n\n @pytest.mark.skipif(not _has_pytz, reason=\"The pytz module is not available.\")\n def test_datetime_as_string_timezone(self):\n # timezone='local' vs 'UTC'\n a = np.datetime64('2010-03-15T06:30', 'm')\n assert_equal(np.datetime_as_string(a),\n '2010-03-15T06:30')\n assert_equal(np.datetime_as_string(a, timezone='naive'),\n '2010-03-15T06:30')\n assert_equal(np.datetime_as_string(a, timezone='UTC'),\n '2010-03-15T06:30Z')\n assert_(np.datetime_as_string(a, timezone='local') !=\n '2010-03-15T06:30')\n\n b = np.datetime64('2010-02-15T06:30', 'm')\n\n assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),\n '2010-03-15T01:30-0500')\n assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),\n '2010-03-15T02:30-0400')\n assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),\n '2010-03-14T23:30-0700')\n\n assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),\n '2010-02-15T00:30-0600')\n assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),\n '2010-02-15T01:30-0500')\n assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),\n '2010-02-14T22:30-0800')\n\n # Dates to strings with a timezone attached is disabled by default\n assert_raises(TypeError, np.datetime_as_string, a, unit='D',\n timezone=tz('US/Pacific'))\n # Check that we can print out the date in the specified time zone\n assert_equal(np.datetime_as_string(a, unit='D',\n timezone=tz('US/Pacific'), casting='unsafe'),\n '2010-03-14')\n assert_equal(np.datetime_as_string(b, unit='D',\n timezone=tz('US/Central'), casting='unsafe'),\n '2010-02-15')\n\n def test_datetime_arange(self):\n # With two 
datetimes provided as strings\n a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')\n assert_equal(a.dtype, np.dtype('M8[D]'))\n assert_equal(a,\n np.array(['2010-01-05', '2010-01-06', '2010-01-07',\n '2010-01-08', '2010-01-09'], dtype='M8[D]'))\n\n a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')\n assert_equal(a.dtype, np.dtype('M8[D]'))\n assert_equal(a,\n np.array(['1950-02-10', '1950-02-09', '1950-02-08',\n '1950-02-07'], dtype='M8[D]'))\n\n # Unit should be detected as months here\n a = np.arange('1969-05', '1970-05', 2, dtype='M8')\n assert_equal(a.dtype, np.dtype('M8[M]'))\n assert_equal(a,\n np.datetime64('1969-05') + np.arange(12, step=2))\n\n # datetime, integer|timedelta works as well\n # produces arange (start, start + stop) in this case\n a = np.arange('1969', 18, 3, dtype='M8')\n assert_equal(a.dtype, np.dtype('M8[Y]'))\n assert_equal(a,\n np.datetime64('1969') + np.arange(18, step=3))\n a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')\n assert_equal(a.dtype, np.dtype('M8[D]'))\n assert_equal(a,\n np.datetime64('1969-12-19') + np.arange(22, step=2))\n\n # Step of 0 is disallowed\n assert_raises(ValueError, np.arange, np.datetime64('today'),\n np.datetime64('today') + 3, 0)\n # Promotion across nonlinear unit boundaries is disallowed\n assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),\n np.timedelta64(5, 'M'))\n assert_raises(TypeError, np.arange,\n np.datetime64('2012-02-03T14', 's'),\n np.timedelta64(5, 'Y'))\n\n def test_datetime_arange_no_dtype(self):\n d = np.array('2010-01-04', dtype=\"M8[D]\")\n assert_equal(np.arange(d, d + 1), d)\n assert_raises(ValueError, np.arange, d)\n\n def test_timedelta_arange(self):\n a = np.arange(3, 10, dtype='m8')\n assert_equal(a.dtype, np.dtype('m8'))\n assert_equal(a, np.timedelta64(0) + np.arange(3, 10))\n\n a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')\n assert_equal(a.dtype, np.dtype('m8[s]'))\n assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))\n\n # Step of 0 is disallowed\n assert_raises(ValueError, np.arange, np.timedelta64(0),\n np.timedelta64(5), 0)\n # Promotion across nonlinear unit boundaries is disallowed\n assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),\n np.timedelta64(5, 'M'))\n assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),\n np.timedelta64(5, 'D'))\n\n @pytest.mark.parametrize(\"val1, val2, expected\", [\n # case from gh-12092\n (np.timedelta64(7, 's'),\n np.timedelta64(3, 's'),\n np.timedelta64(1, 's')),\n # negative value cases\n (np.timedelta64(3, 's'),\n np.timedelta64(-2, 's'),\n np.timedelta64(-1, 's')),\n (np.timedelta64(-3, 's'),\n np.timedelta64(2, 's'),\n np.timedelta64(1, 's')),\n # larger value cases\n (np.timedelta64(17, 's'),\n np.timedelta64(22, 's'),\n np.timedelta64(17, 's')),\n (np.timedelta64(22, 's'),\n np.timedelta64(17, 's'),\n np.timedelta64(5, 's')),\n # different units\n (np.timedelta64(1, 'm'),\n np.timedelta64(57, 's'),\n np.timedelta64(3, 's')),\n (np.timedelta64(1, 'us'),\n np.timedelta64(727, 'ns'),\n np.timedelta64(273, 'ns')),\n # NaT is propagated\n (np.timedelta64('NaT'),\n np.timedelta64(50, 'ns'),\n np.timedelta64('NaT')),\n # Y % M works\n (np.timedelta64(2, 'Y'),\n np.timedelta64(22, 'M'),\n np.timedelta64(2, 'M')),\n ])\n def test_timedelta_modulus(self, val1, val2, expected):\n assert_equal(val1 % val2, expected)\n\n @pytest.mark.parametrize(\"val1, val2\", [\n # years and months sometimes can't be unambiguously\n # divided for modulus operation\n (np.timedelta64(7, 'Y'),\n 
np.timedelta64(3, 's')),\n (np.timedelta64(7, 'M'),\n np.timedelta64(1, 'D')),\n ])\n def test_timedelta_modulus_error(self, val1, val2):\n with assert_raises_regex(TypeError, \"common metadata divisor\"):\n val1 % val2\n\n def test_timedelta_modulus_div_by_zero(self):\n with assert_warns(RuntimeWarning):\n actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')\n assert_equal(actual, np.timedelta64('NaT'))\n\n @pytest.mark.parametrize(\"val1, val2\", [\n # cases where one operand is not\n # timedelta64\n (np.timedelta64(7, 'Y'),\n 15,),\n (7.5,\n np.timedelta64(1, 'D')),\n ])\n def test_timedelta_modulus_type_resolution(self, val1, val2):\n # NOTE: some of the operations may be supported\n # in the future\n with assert_raises_regex(TypeError,\n \"'remainder' cannot use operands with types\"):\n val1 % val2\n\n def test_timedelta_arange_no_dtype(self):\n d = np.array(5, dtype=\"m8[D]\")\n assert_equal(np.arange(d, d + 1), d)\n assert_equal(np.arange(d), np.arange(0, d))\n\n def test_datetime_maximum_reduce(self):\n a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')\n assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))\n assert_equal(np.maximum.reduce(a),\n np.datetime64('2010-01-02'))\n\n a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')\n assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))\n assert_equal(np.maximum.reduce(a),\n np.timedelta64(7, 's'))\n\n def test_datetime_busday_offset(self):\n # First Monday in June\n assert_equal(\n np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),\n np.datetime64('2011-06-06'))\n # Last Monday in June\n assert_equal(\n np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),\n np.datetime64('2011-06-27'))\n assert_equal(\n np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),\n np.datetime64('2011-06-27'))\n\n # Default M-F business days, different roll modes\n assert_equal(np.busday_offset('2010-08', 0, roll='backward'),\n np.datetime64('2010-07-30'))\n assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),\n np.datetime64('2010-07-30'))\n assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),\n np.datetime64('2010-08-02'))\n assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),\n np.datetime64('2010-08-02'))\n assert_equal(np.busday_offset('2010-08', 0, roll='forward'),\n np.datetime64('2010-08-02'))\n assert_equal(np.busday_offset('2010-08', 0, roll='following'),\n np.datetime64('2010-08-02'))\n assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),\n np.datetime64('2010-11-01'))\n assert_equal(\n np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),\n np.datetime64('2010-10-29'))\n assert_equal(\n np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),\n np.datetime64('2010-10-29'))\n assert_equal(\n np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),\n np.datetime64('2010-10-18'))\n assert_equal(\n np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),\n np.datetime64('2010-10-15'))\n # roll='raise' by default\n assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)\n\n # Bigger offset values\n assert_equal(np.busday_offset('2006-02-01', 25),\n np.datetime64('2006-03-08'))\n assert_equal(np.busday_offset('2006-03-08', -25),\n np.datetime64('2006-02-01'))\n assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),\n np.datetime64('2007-04-07'))\n assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),\n np.datetime64('2007-02-25'))\n\n # NaT values when roll is not raise\n 
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),\n np.datetime64('NaT'))\n assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),\n np.datetime64('NaT'))\n assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),\n np.datetime64('NaT'))\n\n def test_datetime_busdaycalendar(self):\n # Check that it removes NaT, duplicates, and weekends\n # and sorts the result.\n bdd = np.busdaycalendar(\n holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',\n '2011-12-26', '2011-05-30', '2011-01-17'])\n assert_equal(bdd.holidays,\n np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))\n # Default M-F weekmask\n assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))\n\n # Check string weekmask with varying whitespace.\n bdd = np.busdaycalendar(weekmask=\"Sun TueWed Thu\\tFri\")\n assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))\n\n # Check length 7 0/1 string\n bdd = np.busdaycalendar(weekmask=\"0011001\")\n assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))\n\n # Check length 7 string weekmask.\n bdd = np.busdaycalendar(weekmask=\"Mon Tue\")\n assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))\n\n # All-zeros weekmask should raise\n assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])\n # weekday names must be correct case\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"satsun\")\n # All-zeros weekmask should raise\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"\")\n # Invalid weekday name codes should raise\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"Mon Tue We\")\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"Max\")\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"Monday Tue\")\n\n def test_datetime_busday_holidays_offset(self):\n # With exactly one holiday\n assert_equal(\n np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),\n np.datetime64('2011-11-14'))\n assert_equal(\n np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),\n np.datetime64('2011-11-14'))\n assert_equal(\n np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),\n np.datetime64('2011-11-18'))\n assert_equal(\n np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),\n np.datetime64('2011-11-10'))\n assert_equal(\n np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),\n np.datetime64('2011-11-10'))\n assert_equal(\n np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),\n np.datetime64('2011-11-04'))\n # With the holiday appearing twice\n assert_equal(\n np.busday_offset('2011-11-10', 1,\n holidays=['2011-11-11', '2011-11-11']),\n np.datetime64('2011-11-14'))\n assert_equal(\n np.busday_offset('2011-11-14', -1,\n holidays=['2011-11-11', '2011-11-11']),\n np.datetime64('2011-11-10'))\n # With a NaT holiday\n assert_equal(\n np.busday_offset('2011-11-10', 1,\n holidays=['2011-11-11', 'NaT']),\n np.datetime64('2011-11-14'))\n assert_equal(\n np.busday_offset('2011-11-14', -1,\n holidays=['NaT', '2011-11-11']),\n np.datetime64('2011-11-10'))\n # With another holiday after\n assert_equal(\n np.busday_offset('2011-11-10', 1,\n holidays=['2011-11-11', '2011-11-24']),\n np.datetime64('2011-11-14'))\n assert_equal(\n np.busday_offset('2011-11-14', -1,\n holidays=['2011-11-11', '2011-11-24']),\n np.datetime64('2011-11-10'))\n # With another holiday before\n assert_equal(\n np.busday_offset('2011-11-10', 1,\n holidays=['2011-10-10', '2011-11-11']),\n np.datetime64('2011-11-14'))\n 
assert_equal(\n np.busday_offset('2011-11-14', -1,\n holidays=['2011-10-10', '2011-11-11']),\n np.datetime64('2011-11-10'))\n # With another holiday before and after\n assert_equal(\n np.busday_offset('2011-11-10', 1,\n holidays=['2011-10-10', '2011-11-11', '2011-11-24']),\n np.datetime64('2011-11-14'))\n assert_equal(\n np.busday_offset('2011-11-14', -1,\n holidays=['2011-10-10', '2011-11-11', '2011-11-24']),\n np.datetime64('2011-11-10'))\n\n # A bigger forward jump across more than one week/holiday\n holidays = ['2011-10-10', '2011-11-11', '2011-11-24',\n '2011-12-25', '2011-05-30', '2011-02-21',\n '2011-12-26', '2012-01-02']\n bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)\n assert_equal(\n np.busday_offset('2011-10-03', 4, holidays=holidays),\n np.busday_offset('2011-10-03', 4))\n assert_equal(\n np.busday_offset('2011-10-03', 5, holidays=holidays),\n np.busday_offset('2011-10-03', 5 + 1))\n assert_equal(\n np.busday_offset('2011-10-03', 27, holidays=holidays),\n np.busday_offset('2011-10-03', 27 + 1))\n assert_equal(\n np.busday_offset('2011-10-03', 28, holidays=holidays),\n np.busday_offset('2011-10-03', 28 + 2))\n assert_equal(\n np.busday_offset('2011-10-03', 35, holidays=holidays),\n np.busday_offset('2011-10-03', 35 + 2))\n assert_equal(\n np.busday_offset('2011-10-03', 36, holidays=holidays),\n np.busday_offset('2011-10-03', 36 + 3))\n assert_equal(\n np.busday_offset('2011-10-03', 56, holidays=holidays),\n np.busday_offset('2011-10-03', 56 + 3))\n assert_equal(\n np.busday_offset('2011-10-03', 57, holidays=holidays),\n np.busday_offset('2011-10-03', 57 + 4))\n assert_equal(\n np.busday_offset('2011-10-03', 60, holidays=holidays),\n np.busday_offset('2011-10-03', 60 + 4))\n assert_equal(\n np.busday_offset('2011-10-03', 61, holidays=holidays),\n np.busday_offset('2011-10-03', 61 + 5))\n assert_equal(\n np.busday_offset('2011-10-03', 61, busdaycal=bdd),\n np.busday_offset('2011-10-03', 61 + 5))\n # A bigger backward jump across more than one week/holiday\n assert_equal(\n np.busday_offset('2012-01-03', -1, holidays=holidays),\n np.busday_offset('2012-01-03', -1 - 1))\n assert_equal(\n np.busday_offset('2012-01-03', -4, holidays=holidays),\n np.busday_offset('2012-01-03', -4 - 1))\n assert_equal(\n np.busday_offset('2012-01-03', -5, holidays=holidays),\n np.busday_offset('2012-01-03', -5 - 2))\n assert_equal(\n np.busday_offset('2012-01-03', -25, holidays=holidays),\n np.busday_offset('2012-01-03', -25 - 2))\n assert_equal(\n np.busday_offset('2012-01-03', -26, holidays=holidays),\n np.busday_offset('2012-01-03', -26 - 3))\n assert_equal(\n np.busday_offset('2012-01-03', -33, holidays=holidays),\n np.busday_offset('2012-01-03', -33 - 3))\n assert_equal(\n np.busday_offset('2012-01-03', -34, holidays=holidays),\n np.busday_offset('2012-01-03', -34 - 4))\n assert_equal(\n np.busday_offset('2012-01-03', -56, holidays=holidays),\n np.busday_offset('2012-01-03', -56 - 4))\n assert_equal(\n np.busday_offset('2012-01-03', -57, holidays=holidays),\n np.busday_offset('2012-01-03', -57 - 5))\n assert_equal(\n np.busday_offset('2012-01-03', -57, busdaycal=bdd),\n np.busday_offset('2012-01-03', -57 - 5))\n\n # Can't supply both a weekmask/holidays and busdaycal\n assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,\n weekmask='1111100', busdaycal=bdd)\n assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,\n holidays=holidays, busdaycal=bdd)\n\n # Roll with the holidays\n assert_equal(\n np.busday_offset('2011-12-25', 0,\n roll='forward', 
holidays=holidays),\n np.datetime64('2011-12-27'))\n assert_equal(\n np.busday_offset('2011-12-26', 0,\n roll='forward', holidays=holidays),\n np.datetime64('2011-12-27'))\n assert_equal(\n np.busday_offset('2011-12-26', 0,\n roll='backward', holidays=holidays),\n np.datetime64('2011-12-23'))\n assert_equal(\n np.busday_offset('2012-02-27', 0,\n roll='modifiedfollowing',\n holidays=['2012-02-27', '2012-02-26', '2012-02-28',\n '2012-03-01', '2012-02-29']),\n np.datetime64('2012-02-24'))\n assert_equal(\n np.busday_offset('2012-03-06', 0,\n roll='modifiedpreceding',\n holidays=['2012-03-02', '2012-03-03', '2012-03-01',\n '2012-03-05', '2012-03-07', '2012-03-06']),\n np.datetime64('2012-03-08'))\n\n def test_datetime_busday_holidays_count(self):\n holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',\n '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',\n '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',\n '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']\n bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)\n\n # Validate against busday_offset broadcast against\n # a range of offsets\n dates = np.busday_offset('2011-01-01', np.arange(366),\n roll='forward', busdaycal=bdd)\n assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),\n np.arange(366))\n # Returns negative value when reversed\n assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),\n -np.arange(366))\n\n dates = np.busday_offset('2011-12-31', -np.arange(366),\n roll='forward', busdaycal=bdd)\n assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),\n np.arange(366))\n # Returns negative value when reversed\n assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),\n -np.arange(366))\n\n # Can't supply both a weekmask/holidays and busdaycal\n assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',\n weekmask='1111100', busdaycal=bdd)\n assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',\n holidays=holidays, busdaycal=bdd)\n\n # Number of Mondays in March 2011\n assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)\n # Returns negative value when reversed\n assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)\n\n def test_datetime_is_busday(self):\n holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',\n '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',\n '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',\n '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',\n 'NaT']\n bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)\n\n # Weekend/weekday tests\n assert_equal(np.is_busday('2011-01-01'), False)\n assert_equal(np.is_busday('2011-01-02'), False)\n assert_equal(np.is_busday('2011-01-03'), True)\n\n # All the holidays are not business days\n assert_equal(np.is_busday(holidays, busdaycal=bdd),\n np.zeros(len(holidays), dtype='?'))\n\n def test_datetime_y2038(self):\n # Test parsing on either side of the Y2038 boundary\n a = np.datetime64('2038-01-19T03:14:07')\n assert_equal(a.view(np.int64), 2**31 - 1)\n a = np.datetime64('2038-01-19T03:14:08')\n assert_equal(a.view(np.int64), 2**31)\n\n # Test parsing on either side of the Y2038 boundary with\n # a manually specified timezone offset\n with assert_warns(DeprecationWarning):\n a = np.datetime64('2038-01-19T04:14:07+0100')\n assert_equal(a.view(np.int64), 2**31 - 1)\n with assert_warns(DeprecationWarning):\n a = np.datetime64('2038-01-19T04:14:08+0100')\n assert_equal(a.view(np.int64), 2**31)\n\n 
# Test parsing a date after Y2038\n a = np.datetime64('2038-01-20T13:21:14')\n assert_equal(str(a), '2038-01-20T13:21:14')\n\n def test_isnat(self):\n assert_(np.isnat(np.datetime64('NaT', 'ms')))\n assert_(np.isnat(np.datetime64('NaT', 'ns')))\n assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))\n\n assert_(np.isnat(np.timedelta64('NaT', \"ms\")))\n assert_(not np.isnat(np.timedelta64(34, \"ms\")))\n\n res = np.array([False, False, True])\n for unit in ['Y', 'M', 'W', 'D',\n 'h', 'm', 's', 'ms', 'us',\n 'ns', 'ps', 'fs', 'as']:\n arr = np.array([123, -321, \"NaT\"], dtype='<datetime64[%s]' % unit)\n assert_equal(np.isnat(arr), res)\n arr = np.array([123, -321, \"NaT\"], dtype='>datetime64[%s]' % unit)\n assert_equal(np.isnat(arr), res)\n arr = np.array([123, -321, \"NaT\"], dtype='<timedelta64[%s]' % unit)\n assert_equal(np.isnat(arr), res)\n arr = np.array([123, -321, \"NaT\"], dtype='>timedelta64[%s]' % unit)\n assert_equal(np.isnat(arr), res)\n\n def test_isnat_error(self):\n # Test that only datetime dtype arrays are accepted\n for t in np.typecodes[\"All\"]:\n if t in np.typecodes[\"Datetime\"]:\n continue\n assert_raises(TypeError, np.isnat, np.zeros(10, t))\n\n def test_isfinite_scalar(self):\n assert_(not np.isfinite(np.datetime64('NaT', 'ms')))\n assert_(not np.isfinite(np.datetime64('NaT', 'ns')))\n assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))\n\n assert_(not np.isfinite(np.timedelta64('NaT', \"ms\")))\n assert_(np.isfinite(np.timedelta64(34, \"ms\")))\n\n @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'])\n @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',\n '<timedelta64[%s]', '>timedelta64[%s]'])\n def test_isfinite_isinf_isnan_units(self, unit, dstr):\n '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes\n '''\n arr_val = [123, -321, \"NaT\"]\n arr = np.array(arr_val, dtype= dstr % unit)\n pos = np.array([True, True, False])\n neg = np.array([False, False, True])\n false = np.array([False, False, False])\n assert_equal(np.isfinite(arr), pos)\n assert_equal(np.isinf(arr), false)\n assert_equal(np.isnan(arr), neg)\n\n def test_assert_equal(self):\n assert_raises(AssertionError, assert_equal,\n np.datetime64('nat'), np.timedelta64('nat'))\n\n def test_corecursive_input(self):\n # construct a co-recursive list\n a, b = [], []\n a.append(b)\n b.append(a)\n obj_arr = np.array([None])\n obj_arr[0] = a\n\n # At some point this caused a stack overflow (gh-11154). Now raises\n # ValueError since the nested list cannot be converted to a datetime.\n assert_raises(ValueError, obj_arr.astype, 'M8')\n assert_raises(ValueError, obj_arr.astype, 'm8')\n\n @pytest.mark.parametrize(\"shape\", [(), (1,)])\n def test_discovery_from_object_array(self, shape):\n arr = np.array(\"2020-10-10\", dtype=object).reshape(shape)\n res = np.array(\"2020-10-10\", dtype=\"M8\").reshape(shape)\n assert res.dtype == np.dtype(\"M8[D]\")\n assert_equal(arr.astype(\"M8\"), res)\n arr[...] 
= np.bytes_(\"2020-10-10\") # try a numpy string type\n assert_equal(arr.astype(\"M8\"), res)\n arr = arr.astype(\"S\")\n assert_equal(arr.astype(\"S\").astype(\"M8\"), res)\n\n @pytest.mark.parametrize(\"time_unit\", [\n \"Y\", \"M\", \"W\", \"D\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\", \"ps\", \"fs\", \"as\",\n # compound units\n \"10D\", \"2M\",\n ])\n def test_limit_symmetry(self, time_unit):\n \"\"\"\n Dates should have symmetric limits around the unix epoch at +/-np.int64\n \"\"\"\n epoch = np.datetime64(0, time_unit)\n latest = np.datetime64(np.iinfo(np.int64).max, time_unit)\n earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)\n\n # above should not have overflowed\n assert earliest < epoch < latest\n\n @pytest.mark.parametrize(\"time_unit\", [\n \"Y\", \"M\",\n pytest.param(\"W\", marks=pytest.mark.xfail(reason=\"gh-13197\")),\n \"D\", \"h\", \"m\",\n \"s\", \"ms\", \"us\", \"ns\", \"ps\", \"fs\", \"as\",\n pytest.param(\"10D\", marks=pytest.mark.xfail(reason=\"similar to gh-13197\")),\n ])\n @pytest.mark.parametrize(\"sign\", [-1, 1])\n def test_limit_str_roundtrip(self, time_unit, sign):\n \"\"\"\n Limits should roundtrip when converted to strings.\n\n This tests the conversion to and from npy_datetimestruct.\n \"\"\"\n # TODO: add absolute (gold standard) time span limit strings\n limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)\n\n # Convert to string and back. Explicit unit needed since the day and\n # week reprs are not distinguishable.\n limit_via_str = np.datetime64(str(limit), time_unit)\n assert limit_via_str == limit\n\n\nclass TestDateTimeData:\n\n def test_basic(self):\n a = np.array(['1980-03-23'], dtype=np.datetime64)\n assert_equal(np.datetime_data(a.dtype), ('D', 1))\n\n def test_bytes(self):\n # byte units are converted to unicode\n dt = np.datetime64('2000', (b'ms', 5))\n assert np.datetime_data(dt.dtype) == ('ms', 5)\n\n dt = np.datetime64('2000', b'5ms')\n assert np.datetime_data(dt.dtype) == ('ms', 5)\n\n def test_non_ascii(self):\n # μs is normalized to μ\n dt = np.datetime64('2000', ('μs', 5))\n assert np.datetime_data(dt.dtype) == ('us', 5)\n\n dt = np.datetime64('2000', '5μs')\n assert np.datetime_data(dt.dtype) == ('us', 5)\n"
] | [
[
"numpy.ones",
"numpy.busday_count",
"numpy.subtract",
"numpy.testing.assert_equal",
"numpy.dtype",
"numpy.less",
"numpy.argsort",
"numpy.compat.pickle.loads",
"numpy.ones_like",
"numpy.greater_equal",
"numpy.int64",
"numpy.testing.assert_warns",
"numpy.add",
"numpy.datetime64",
"numpy.isnat",
"numpy.isfinite",
"numpy.timedelta64",
"numpy.datetime_data",
"numpy.bool_",
"numpy.empty_like",
"numpy.bytes_",
"numpy.absolute",
"numpy.isnan",
"numpy.fmax",
"numpy.negative",
"numpy.can_cast",
"numpy.minimum",
"numpy.positive",
"numpy.fmin",
"numpy.datetime_as_string",
"numpy.zeros",
"numpy.compat.pickle.dumps",
"numpy.equal",
"numpy.greater",
"numpy.arange",
"numpy.busday_offset",
"numpy.maximum.reduce",
"numpy.busdaycalendar",
"numpy.maximum",
"numpy.testing.assert_raises",
"numpy.zeros_like",
"numpy.is_busday",
"numpy.empty",
"numpy.sign",
"numpy.divide",
"numpy.true_divide",
"numpy.not_equal",
"numpy.isinf",
"numpy.iinfo",
"numpy.less_equal",
"numpy.array",
"numpy.testing.suppress_warnings",
"numpy.testing.assert_raises_regex"
]
] |
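The row above records numpy's datetime64/timedelta64 test-suite, whose business-day tests exercise np.busday_offset, np.busday_count and np.busdaycalendar. A minimal standalone sketch of those calls follows; the expected values are taken directly from the assertions in the test string above, and nothing beyond stock NumPy is assumed.

import numpy as np

# First Monday in June 2011 (mirrors test_datetime_busday_offset above).
assert np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon') == \
    np.datetime64('2011-06-06')

# A busdaycalendar bundles a weekmask and holidays; Fri 2011-11-11 is skipped
# (mirrors test_datetime_busday_holidays_offset above).
bdd = np.busdaycalendar(weekmask='1111100', holidays=['2011-11-11'])
assert np.busday_offset('2011-11-10', 1, busdaycal=bdd) == np.datetime64('2011-11-14')

# Number of Mondays in March 2011 (mirrors test_datetime_busday_holidays_count above).
assert np.busday_count('2011-03', '2011-04', weekmask='Mon') == 4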
Andruxin52rus/openvino | [
"769bb7709597c14debdaa356dd60c5a78bdfa97e"
] | [
"model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py"
] | [
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\n\nfrom extensions.ops.normalize import NormalizeOp\nfrom mo.front.caffe.extractors.utils import embed_input\nfrom mo.front.extractor import FrontExtractorOp\nfrom mo.front.kaldi.loader.utils import collect_until_token, read_binary_bool_token, read_binary_integer32_token, \\\n read_binary_float_token\nfrom mo.utils.error import Error\n\n\nclass NormalizeComponentFrontExtractor(FrontExtractorOp):\n op = 'normalizecomponent'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n pb = node.parameters\n try:\n collect_until_token(pb, b'<Dim>')\n except Error:\n try:\n pb.seek(0)\n collect_until_token(pb, b'<InputDim>')\n except Error:\n raise Error(\"Neither <Dim> nor <InputDim> were found\")\n in_dim = read_binary_integer32_token(pb)\n\n try:\n collect_until_token(pb, b'<TargetRms>')\n target_rms = read_binary_float_token(pb)\n except Error:\n # model does not contain TargetRms\n target_rms = 1.0\n\n try:\n collect_until_token(pb, b'<AddLogStddev>')\n add_log = read_binary_bool_token(pb)\n except Error:\n # model does not contain AddLogStddev\n add_log = False\n\n if add_log is not False:\n raise Error(\"AddLogStddev True in Normalize component is not supported\")\n\n scale = target_rms * np.sqrt(in_dim)\n\n attrs = {\n 'eps': 0.00000001,\n 'across_spatial': 0,\n 'channel_shared': 1,\n 'in_dim': in_dim,\n }\n embed_input(attrs, 1, 'weights', [scale])\n\n NormalizeOp.update_node_stat(node, attrs)\n return cls.enabled\n"
] | [
[
"numpy.sqrt"
]
] |
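The extractor in the row above maps a Kaldi NormalizeComponent onto a Normalize op whose single embedded weight is target_rms * sqrt(input_dim). Below is a minimal sketch of that scale computation detached from the Model Optimizer classes; the dimension and RMS values are illustrative, not taken from any model.

import numpy as np

def normalize_scale(in_dim: int, target_rms: float = 1.0) -> float:
    # Mirrors `scale = target_rms * np.sqrt(in_dim)` in the extractor above.
    return float(target_rms * np.sqrt(in_dim))

print(normalize_scale(40))        # ~6.3246 for a hypothetical 40-dim input
print(normalize_scale(40, 0.5))   # halves the scale when <TargetRms> is 0.5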
cindychu/LargeScaleComputing_S20 | [
"913b0155f47914c258b503df677067a510dd23f5"
] | [
"Labs/Lab 1 Midway RCC and mpi4py/mpi_rand_walk.py"
] | [
"\nfrom mpi4py import MPI\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\ndef sim_rand_walks_parallel(n_runs):\n # Get rank of process and overall size of communicator:\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n # Start time:\n t0 = time.time()\n\n # Evenly distribute number of simulation runs across processes\n N = int(n_runs/size)\n\n # Simulate N random walks and specify as a NumPy Array\n r_walks = []\n for i in range(N):\n steps = np.random.normal(loc=0, scale=1, size=100)\n steps[0] = 0\n r_walks.append(100 + np.cumsum(steps))\n r_walks_array = np.array(r_walks)\n\n # Gather all simulation arrays to buffer of expected size/dtype on rank 0\n r_walks_all = None\n if rank == 0:\n r_walks_all = np.empty([N*size, 100], dtype='float')\n comm.Gather(sendbuf = r_walks_array, recvbuf = r_walks_all, root=0)\n\n # Print/plot simulation results on rank 0\n if rank == 0:\n # Calculate time elapsed after computing mean and std\n average_finish = np.mean(r_walks_all[:,-1])\n std_finish = np.std(r_walks_all[:,-1])\n time_elapsed = time.time() - t0\n\n # Print time elapsed + simulation results\n print(\"Simulated %d Random Walks in: %f seconds on %d MPI processes\"\n % (n_runs, time_elapsed, size))\n print(\"Average final position: %f, Standard Deviation: %f\"\n % (average_finish, std_finish))\n\n # Plot Simulations and save to file\n plt.plot(r_walks_all.transpose())\n plt.savefig(\"r_walk_nprocs%d_nruns%d.png\" % (size, n_runs))\n\n return\n\ndef main():\n sim_rand_walks_parallel(n_runs = 10000)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.cumsum",
"numpy.empty",
"matplotlib.pyplot.savefig",
"numpy.random.normal",
"numpy.array",
"numpy.std",
"numpy.mean"
]
] |
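The row above gathers equal-sized blocks of simulated walks onto rank 0 with the uppercase, buffer-based comm.Gather. A minimal sketch of that pattern, assuming only mpi4py and NumPy: every rank contributes a block of identical shape, and only the root allocates the (N * size)-row receive buffer. The script name in the launch comment is illustrative.

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

N = 3                                                # rows contributed per rank
local = np.full((N, 2), rank, dtype='float64')       # stand-in for the walk array

recv = None
if rank == 0:
    recv = np.empty((N * size, 2), dtype='float64')  # buffer exists only on root
comm.Gather(sendbuf=local, recvbuf=recv, root=0)

if rank == 0:
    print(recv[:, 0])        # 0,0,0,1,1,1,... i.e. blocks arrive in rank order

# run with e.g.: mpiexec -n 4 python gather_sketch.py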
ahernandez1801/donkey_rl_mqtt | [
"02bbfc3d036220a4061b95e50780984e657aff43"
] | [
"src/train.py"
] | [
"'''\nTrain\nTrain your nerual network\nAuthor: Tawn Kramer\n'''\nfrom __future__ import print_function\nimport os\nimport sys\nimport glob\nimport time\nimport fnmatch\nimport argparse\nimport numpy as np\nfrom PIL import Image\nimport keras\nimport conf\nimport random\nimport augment\nimport models\n\n\n'''\nmatplotlib can be a pain to setup. So handle the case where it is absent. When present,\nuse it to generate a plot of training results.\n'''\ntry:\n import matplotlib\n # Force matplotlib to not use any Xwindows backend.\n matplotlib.use('Agg')\n\n import matplotlib.pyplot as plt\n do_plot = True\nexcept:\n do_plot = False\n\n\ndef shuffle(samples):\n '''\n randomly mix a list and return a new list\n '''\n ret_arr = []\n len_samples = len(samples)\n while len_samples > 0:\n iSample = random.randrange(0, len_samples)\n ret_arr.append(samples[iSample])\n del samples[iSample]\n len_samples -= 1\n return ret_arr\n\ndef parse_img_filepath(filepath):\n basename = os.path.basename(filepath)\n\n #less .jpg\n f = basename[:-4]\n f = f.split('_')\n\n steering = float(f[3])\n throttle = float(f[5])\n \n data = {'steering':steering, 'throttle':throttle }\n\n return data\n\ndef generator(samples, batch_size=32, perc_to_augment=0.5):\n '''\n Rather than keep all data in memory, we will make a function that keeps\n it's state and returns just the latest batch required via the yield command.\n \n As we load images, we can optionally augment them in some manner that doesn't\n change their underlying meaning or features. This is a combination of\n brightness, contrast, sharpness, and color PIL image filters applied with random\n settings. Optionally a shadow image may be overlayed with some random rotation and\n opacity.\n We flip each image horizontally and supply it as a another sample with the steering\n negated.\n '''\n num_samples = len(samples)\n shadows = augment.load_shadow_images('./shadows/*.png') \n \n while 1: # Loop forever so the generator never terminates\n samples = shuffle(samples)\n #divide batch_size in half, because we double each output by flipping image.\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n \n images = []\n controls = []\n for fullpath in batch_samples:\n try:\n data = parse_img_filepath(fullpath)\n \n steering = data[\"steering\"]\n throttle = data[\"throttle\"]\n\n try:\n image = Image.open(fullpath)\n except:\n image = None\n\n if image is None:\n print('failed to open', fullpath)\n continue\n\n #PIL Image as a numpy array\n image = np.array(image)\n\n if len(shadows) > 0 and random.uniform(0.0, 1.0) < perc_to_augment:\n image = augment.augment_image(image, shadows)\n\n center_angle = steering\n images.append(image)\n \n if conf.num_outputs == 2:\n controls.append([center_angle, throttle])\n elif conf.num_outputs == 1:\n controls.append([center_angle])\n else:\n print(\"expected 1 or 2 ouputs\")\n\n except:\n print(\"we threw an exception on:\", fullpath)\n yield [], []\n\n\n # final np array to submit to training\n X_train = np.array(images)\n y_train = np.array(controls)\n yield X_train, y_train\n\n\ndef get_files(filemask):\n '''\n use a filemask and search a path recursively for matches\n '''\n path, mask = os.path.split(filemask)\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, mask):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef train_test_split(lines, test_perc):\n '''\n split a list into two parts, percentage of test 
used to seperate\n '''\n train = []\n test = []\n\n for line in lines:\n if random.uniform(0.0, 1.0) < test_perc:\n test.append(line)\n else:\n train.append(line)\n\n return train, test\n\ndef make_generators(inputs, limit=None, batch_size=32, aug_perc=0.0):\n '''\n load the job spec from the csv and create some generator for training\n '''\n \n #get the image/steering pairs from the csv files\n lines = get_files(inputs)\n print(\"found %d files\" % len(lines))\n\n if limit is not None:\n lines = lines[:limit]\n print(\"limiting to %d files\" % len(lines))\n \n train_samples, validation_samples = train_test_split(lines, test_perc=0.2)\n\n print(\"num train/val\", len(train_samples), len(validation_samples))\n \n # compile and train the model using the generator function\n train_generator = generator(train_samples, batch_size=batch_size, perc_to_augment=aug_perc)\n validation_generator = generator(validation_samples, batch_size=batch_size, perc_to_augment=0.0)\n \n n_train = len(train_samples)\n n_val = len(validation_samples)\n \n return train_generator, validation_generator, n_train, n_val\n\n\ndef go(model_name, epochs=50, inputs='./log/*.jpg', limit=None, aug_mult=1, aug_perc=0.0):\n\n print('working on model', model_name)\n\n '''\n modify config.json to select the model to train.\n '''\n model = models.get_nvidia_model(conf.num_outputs)\n\n '''\n display layer summary and weights info\n '''\n models.show_model_summary(model)\n\n callbacks = [\n keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),\n keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),\n ]\n \n batch_size = conf.training_batch_size\n\n\n #Train on session images\n train_generator, validation_generator, n_train, n_val = make_generators(inputs, limit=limit, batch_size=batch_size, aug_perc=aug_perc)\n\n if n_train == 0:\n print('no training data found')\n return\n\n steps_per_epoch = n_train // batch_size\n validation_steps = n_val // batch_size\n\n print(\"steps_per_epoch\", steps_per_epoch, \"validation_steps\", validation_steps)\n\n history = model.fit_generator(train_generator, \n steps_per_epoch = steps_per_epoch,\n validation_data = validation_generator,\n validation_steps = validation_steps,\n epochs=epochs,\n verbose=1,\n callbacks=callbacks)\n \n try:\n if do_plot:\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig('loss.png')\n except:\n print(\"problems with loss graph\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='train script')\n parser.add_argument('model', type=str, help='model name')\n parser.add_argument('--epochs', type=int, default=conf.training_default_epochs, help='number of epochs')\n parser.add_argument('--inputs', default='../dataset/log/*.jpg', help='input mask to gather images')\n parser.add_argument('--limit', type=int, default=None, help='max number of images to train with')\n parser.add_argument('--aug_mult', type=int, default=conf.training_default_aug_mult, help='how many more images to augment')\n parser.add_argument('--aug_perc', type=float, default=conf.training_default_aug_percent, help='what percentage of images to augment 0 - 1')\n args = parser.parse_args()\n \n go(args.model, epochs=args.epochs, limit=args.limit, inputs=args.inputs, aug_mult=args.aug_mult, 
aug_perc=args.aug_perc)\n\n#python train.py mymodel_aug_90_x4_e200 --epochs=200 --aug_mult=4 --aug_perc=0.9\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
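The training script in the row above recovers steering and throttle from the image filename itself: parse_img_filepath strips the extension, splits on underscores, and reads fields 3 and 5. A minimal standalone sketch of that convention; the sample filename is made up for illustration and real logs may name their fields differently.

import os

def parse_img_filepath(filepath):
    # Same field layout as the script above: ..._<steering>_..._<throttle>.jpg
    f = os.path.basename(filepath)[:-4].split('_')
    return {'steering': float(f[3]), 'throttle': float(f[5])}

print(parse_img_filepath('frame_000123_st_-0.25_th_0.40.jpg'))
# -> {'steering': -0.25, 'throttle': 0.4}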
HybridRobotics/cbf | [
"d8a1b376e7e910de71df60cdf3619f68c40ab3ed"
] | [
"planning/path_generator/search_path_generator.py"
] | [
"import sys\n\nimport numpy as np\n\nfrom planning.path_generator.astar import *\n\n\ndef plot_global_map(path, obstacles):\n fig, ax = plt.subplots()\n for o in obstacles:\n patch = o.get_plot_patch()\n ax.add_patch(patch)\n ax.plot(path[:, 0], path[:, 1])\n plt.xlim([-1 * 0.15, 11 * 0.15])\n plt.ylim([0 * 0.15, 8 * 0.15])\n plt.show()\n\n\nclass AstarPathGenerator:\n def __init__(self, grid, quad, margin):\n self._global_path = None\n self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=quad)\n self._margin = margin\n\n def generate_path(self, sys, obstacles, goal_pos):\n graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)\n path = graph.a_star(sys.get_state()[:2], goal_pos)\n self._global_path = np.array([p.pos for p in path])\n print(self._global_path)\n if self._global_path == []:\n print(\"Global Path not found.\")\n sys.exit(1)\n if True:\n plot_global_map(self._global_path, obstacles)\n return self._global_path\n\n def logging(self, logger):\n logger._paths.append(self._global_path)\n\n\nclass AstarLoSPathGenerator:\n def __init__(self, grid, quad, margin):\n self._global_path = None\n self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=quad)\n self._margin = margin\n\n def generate_path(self, sys, obstacles, goal_pos):\n graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)\n path = graph.a_star(sys.get_state()[:2], goal_pos)\n path = graph.reduce_path(path)\n self._global_path = np.array([p.pos for p in path])\n print(self._global_path)\n if self._global_path == []:\n print(\"Global Path not found.\")\n sys.exit(1)\n if False:\n plot_global_map(self._global_path, obstacles)\n return self._global_path\n\n def logging(self, logger):\n logger._paths.append(self._global_path)\n\n\nclass ThetaStarPathGenerator:\n def __init__(self, grid, quad, margin):\n self._global_path = None\n self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=False)\n self._margin = margin\n\n def generate_path(self, sys, obstacles, goal_pos):\n graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)\n path = graph.theta_star(sys.get_state()[:2], goal_pos)\n self._global_path = np.array([p.pos for p in path])\n print(self._global_path)\n if self._global_path == []:\n print(\"Global Path not found.\")\n sys.exit(1)\n if True:\n plot_global_map(self._global_path, obstacles)\n return self._global_path\n\n def logging(self, logger):\n logger._paths.append(self._global_path)\n"
] | [
[
"numpy.array"
]
] |
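The path generators in the row above depend on the project's own GridMap and GraphSearch classes, so they cannot run in isolation; the sketch below only shows, with a stand-in node type, how a search result is packed into a NumPy array and then tested for emptiness. The row's `if self._global_path == []` comparison is fragile on ndarrays; checking `arr.size == 0`, as below, avoids that ambiguity.

import sys
import numpy as np

class _Node:                                   # stand-in for the astar node type
    def __init__(self, pos):
        self.pos = pos

def pack_path(path):
    # Mirrors `np.array([p.pos for p in path])` in the generators above.
    return np.array([p.pos for p in path])

global_path = pack_path([_Node((0.0, 0.0)), _Node((0.15, 0.30))])
if global_path.size == 0:                      # robust emptiness test for an ndarray
    print("Global Path not found.")
    sys.exit(1)
print(global_path)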
HyperGAN/imgclsmob | [
"88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3"
] | [
"pytorch/pytorchcv/models/common.py"
] | [
"\"\"\"\n Common routines for models in PyTorch.\n\"\"\"\n\n__all__ = ['HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block',\n 'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'PreConvBlock', 'pre_conv1x1_block',\n 'pre_conv3x3_block', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN', 'Identity', 'DualPathSequential',\n 'Concurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',\n 'MultiOutputSequential', 'Flatten']\n\nimport math\nfrom inspect import isfunction\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Swish(nn.Module):\n \"\"\"\n Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.\n \"\"\"\n def forward(self, x):\n return x * torch.sigmoid(x)\n\n\nclass HSigmoid(nn.Module):\n \"\"\"\n Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'\n https://arxiv.org/abs/1905.02244.\n \"\"\"\n def forward(self, x):\n return F.relu6(x + 3.0, inplace=True) / 6.0\n\n\nclass HSwish(nn.Module):\n \"\"\"\n H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.\n\n Parameters:\n ----------\n inplace : bool\n Whether to use inplace version of the module.\n \"\"\"\n def __init__(self, inplace=False):\n super(HSwish, self).__init__()\n self.inplace = inplace\n\n def forward(self, x):\n return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0\n\n\ndef get_activation_layer(activation):\n \"\"\"\n Create activation layer from string/function.\n\n Parameters:\n ----------\n activation : function, or str, or nn.Module\n Activation function or name of activation function.\n\n Returns\n -------\n nn.Module\n Activation layer.\n \"\"\"\n assert (activation is not None)\n if isfunction(activation):\n return activation()\n elif isinstance(activation, str):\n if activation == \"relu\":\n return nn.ReLU(inplace=True)\n elif activation == \"relu6\":\n return nn.ReLU6(inplace=True)\n elif activation == \"swish\":\n return Swish()\n elif activation == \"hswish\":\n return HSwish(inplace=True)\n else:\n raise NotImplementedError()\n else:\n assert (isinstance(activation, nn.Module))\n return activation\n\n\ndef conv1x1(in_channels,\n out_channels,\n stride=1,\n groups=1,\n bias=False):\n \"\"\"\n Convolution 1x1 layer.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n \"\"\"\n return nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n stride=stride,\n groups=groups,\n bias=bias)\n\n\ndef conv3x3(in_channels,\n out_channels,\n stride=1,\n padding=1,\n dilation=1,\n groups=1,\n bias=False):\n \"\"\"\n Convolution 3x3 layer.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 1\n Padding value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n \"\"\"\n return nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n 
stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n\n\ndef depthwise_conv3x3(channels,\n stride):\n \"\"\"\n Depthwise convolution 3x3 layer.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n return nn.Conv2d(\n in_channels=channels,\n out_channels=channels,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=channels,\n bias=False)\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Standard convolution block with Batch normalization and activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layer.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation=1,\n groups=1,\n bias=False,\n use_bn=True,\n bn_eps=1e-5,\n activation=(lambda: nn.ReLU(inplace=True))):\n super(ConvBlock, self).__init__()\n self.activate = (activation is not None)\n self.use_bn = use_bn\n\n self.conv = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n if self.use_bn:\n self.bn = nn.BatchNorm2d(\n num_features=out_channels,\n eps=bn_eps)\n if self.activate:\n self.activ = get_activation_layer(activation)\n\n def forward(self, x):\n x = self.conv(x)\n if self.use_bn:\n x = self.bn(x)\n if self.activate:\n x = self.activ(x)\n return x\n\n\ndef conv1x1_block(in_channels,\n out_channels,\n stride=1,\n padding=0,\n groups=1,\n bias=False,\n bn_eps=1e-5,\n activation=(lambda: nn.ReLU(inplace=True))):\n \"\"\"\n 1x1 version of the standard convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 0\n Padding value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n \"\"\"\n return ConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=bias,\n bn_eps=bn_eps,\n activation=activation)\n\n\ndef conv3x3_block(in_channels,\n out_channels,\n stride=1,\n padding=1,\n dilation=1,\n groups=1,\n bias=False,\n use_bn=True,\n bn_eps=1e-5,\n activation=(lambda: nn.ReLU(inplace=True))):\n \"\"\"\n 3x3 version of 
the standard convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 1\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layer.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n \"\"\"\n return ConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias,\n use_bn=use_bn,\n bn_eps=bn_eps,\n activation=activation)\n\n\ndef conv5x5_block(in_channels,\n out_channels,\n stride=1,\n padding=2,\n dilation=1,\n groups=1,\n bias=False,\n bn_eps=1e-5,\n activation=(lambda: nn.ReLU(inplace=True))):\n \"\"\"\n 5x5 version of the standard convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 2\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n \"\"\"\n return ConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=5,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias,\n bn_eps=bn_eps,\n activation=activation)\n\n\ndef conv7x7_block(in_channels,\n out_channels,\n stride=1,\n padding=3,\n bias=False,\n activation=(lambda: nn.ReLU(inplace=True))):\n \"\"\"\n 7x7 version of the standard convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 3\n Padding value for convolution layer.\n bias : bool, default False\n Whether the layer uses a bias vector.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n \"\"\"\n return ConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=7,\n stride=stride,\n padding=padding,\n bias=bias,\n activation=activation)\n\n\ndef dwconv3x3_block(in_channels,\n out_channels,\n stride=1,\n padding=1,\n dilation=1,\n bias=False,\n bn_eps=1e-5,\n activation=(lambda: nn.ReLU(inplace=True))):\n \"\"\"\n 3x3 depthwise version of the standard convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 
2 int, default 1\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n bias : bool, default False\n Whether the layer uses a bias vector.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n \"\"\"\n return conv3x3_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=out_channels,\n bias=bias,\n bn_eps=bn_eps,\n activation=activation)\n\n\ndef dwconv5x5_block(in_channels,\n out_channels,\n stride=1,\n padding=2,\n dilation=1,\n bias=False,\n bn_eps=1e-5,\n activation=(lambda: nn.ReLU(inplace=True))):\n \"\"\"\n 5x5 depthwise version of the standard convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 2\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n bias : bool, default False\n Whether the layer uses a bias vector.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n \"\"\"\n return conv5x5_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=out_channels,\n bias=bias,\n bn_eps=bn_eps,\n activation=activation)\n\n\nclass PreConvBlock(nn.Module):\n \"\"\"\n Convolution block with Batch normalization and ReLU pre-activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n bias : bool, default False\n Whether the layer uses a bias vector.\n return_preact : bool, default False\n Whether return pre-activation. 
It's used by PreResNet.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation=1,\n bias=False,\n return_preact=False,\n activate=True):\n super(PreConvBlock, self).__init__()\n self.return_preact = return_preact\n self.activate = activate\n\n self.bn = nn.BatchNorm2d(num_features=in_channels)\n if self.activate:\n self.activ = nn.ReLU(inplace=True)\n self.conv = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=bias)\n\n def forward(self, x):\n x = self.bn(x)\n if self.activate:\n x = self.activ(x)\n if self.return_preact:\n x_pre_activ = x\n x = self.conv(x)\n if self.return_preact:\n return x, x_pre_activ\n else:\n return x\n\n\ndef pre_conv1x1_block(in_channels,\n out_channels,\n stride=1,\n bias=False,\n return_preact=False,\n activate=True):\n \"\"\"\n 1x1 version of the pre-activated convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n bias : bool, default False\n Whether the layer uses a bias vector.\n return_preact : bool, default False\n Whether return pre-activation.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n return PreConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n stride=stride,\n padding=0,\n bias=bias,\n return_preact=return_preact,\n activate=activate)\n\n\ndef pre_conv3x3_block(in_channels,\n out_channels,\n stride=1,\n padding=1,\n dilation=1,\n return_preact=False,\n activate=True):\n \"\"\"\n 3x3 version of the pre-activated convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 1\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n return_preact : bool, default False\n Whether return pre-activation.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n return PreConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n return_preact=return_preact,\n activate=activate)\n\n\ndef channel_shuffle(x,\n groups):\n \"\"\"\n Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'\n https://arxiv.org/abs/1707.01083.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n groups : int\n Number of groups.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n batch, channels, height, width = x.size()\n # assert (channels % groups == 0)\n channels_per_group = channels // groups\n x = x.view(batch, groups, channels_per_group, height, width)\n x = torch.transpose(x, 1, 2).contiguous()\n x = x.view(batch, channels, height, width)\n return x\n\n\nclass ChannelShuffle(nn.Module):\n \"\"\"\n Channel shuffle layer. This is a wrapper over the same operation. 
It is designed to save the number of groups.\n\n Parameters:\n ----------\n channels : int\n Number of channels.\n groups : int\n Number of groups.\n \"\"\"\n def __init__(self,\n channels,\n groups):\n super(ChannelShuffle, self).__init__()\n # assert (channels % groups == 0)\n if channels % groups != 0:\n raise ValueError('channels must be divisible by groups')\n self.groups = groups\n\n def forward(self, x):\n return channel_shuffle(x, self.groups)\n\n\ndef channel_shuffle2(x,\n groups):\n \"\"\"\n Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'\n https://arxiv.org/abs/1707.01083. The alternative version.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n groups : int\n Number of groups.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n batch, channels, height, width = x.size()\n # assert (channels % groups == 0)\n channels_per_group = channels // groups\n x = x.view(batch, channels_per_group, groups, height, width)\n x = torch.transpose(x, 1, 2).contiguous()\n x = x.view(batch, channels, height, width)\n return x\n\n\nclass ChannelShuffle2(nn.Module):\n \"\"\"\n Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups.\n The alternative version.\n\n Parameters:\n ----------\n channels : int\n Number of channels.\n groups : int\n Number of groups.\n \"\"\"\n def __init__(self,\n channels,\n groups):\n super(ChannelShuffle2, self).__init__()\n # assert (channels % groups == 0)\n if channels % groups != 0:\n raise ValueError('channels must be divisible by groups')\n self.groups = groups\n\n def forward(self, x):\n return channel_shuffle2(x, self.groups)\n\n\nclass SEBlock(nn.Module):\n \"\"\"\n Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.\n\n Parameters:\n ----------\n channels : int\n Number of channels.\n reduction : int, default 16\n Squeeze reduction value.\n approx_sigmoid : bool, default False\n Whether to use approximated sigmoid function.\n activation : function, or str, or nn.Module\n Activation function or name of activation function.\n \"\"\"\n def __init__(self,\n channels,\n reduction=16,\n approx_sigmoid=False,\n activation=(lambda: nn.ReLU(inplace=True))):\n super(SEBlock, self).__init__()\n mid_cannels = channels // reduction\n\n self.pool = nn.AdaptiveAvgPool2d(output_size=1)\n self.conv1 = conv1x1(\n in_channels=channels,\n out_channels=mid_cannels,\n bias=True)\n self.activ = get_activation_layer(activation)\n self.conv2 = conv1x1(\n in_channels=mid_cannels,\n out_channels=channels,\n bias=True)\n self.sigmoid = HSigmoid() if approx_sigmoid else nn.Sigmoid()\n\n def forward(self, x):\n w = self.pool(x)\n w = self.conv1(w)\n w = self.activ(w)\n w = self.conv2(w)\n w = self.sigmoid(w)\n x = x * w\n return x\n\n\nclass IBN(nn.Module):\n \"\"\"\n Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\n Parameters:\n ----------\n channels : int\n Number of channels.\n inst_fraction : float, default 0.5\n The first fraction of channels for normalization.\n inst_first : bool, default True\n Whether instance normalization be on the first part of channels.\n \"\"\"\n def __init__(self,\n channels,\n first_fraction=0.5,\n inst_first=True):\n super(IBN, self).__init__()\n self.inst_first = inst_first\n h1_channels = int(math.floor(channels * first_fraction))\n h2_channels = channels - 
h1_channels\n self.split_sections = [h1_channels, h2_channels]\n\n if self.inst_first:\n self.inst_norm = nn.InstanceNorm2d(\n num_features=h1_channels,\n affine=True)\n self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)\n else:\n self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)\n self.inst_norm = nn.InstanceNorm2d(\n num_features=h2_channels,\n affine=True)\n\n def forward(self, x):\n x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)\n if self.inst_first:\n x1 = self.inst_norm(x1.contiguous())\n x2 = self.batch_norm(x2.contiguous())\n else:\n x1 = self.batch_norm(x1.contiguous())\n x2 = self.inst_norm(x2.contiguous())\n x = torch.cat((x1, x2), dim=1)\n return x\n\n\nclass Identity(nn.Module):\n \"\"\"\n Identity block.\n \"\"\"\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass DualPathSequential(nn.Sequential):\n \"\"\"\n A sequential container for modules with dual inputs/outputs.\n Modules will be executed in the order they are added.\n\n Parameters:\n ----------\n return_two : bool, default True\n Whether to return two output after execution.\n first_ordinals : int, default 0\n Number of the first modules with single input/output.\n last_ordinals : int, default 0\n Number of the final modules with single input/output.\n dual_path_scheme : function\n Scheme of dual path response for a module.\n dual_path_scheme_ordinal : function\n Scheme of dual path response for an ordinal module.\n \"\"\"\n def __init__(self,\n return_two=True,\n first_ordinals=0,\n last_ordinals=0,\n dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),\n dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):\n super(DualPathSequential, self).__init__()\n self.return_two = return_two\n self.first_ordinals = first_ordinals\n self.last_ordinals = last_ordinals\n self.dual_path_scheme = dual_path_scheme\n self.dual_path_scheme_ordinal = dual_path_scheme_ordinal\n\n def forward(self, x1, x2=None):\n length = len(self._modules.values())\n for i, module in enumerate(self._modules.values()):\n if (i < self.first_ordinals) or (i >= length - self.last_ordinals):\n x1, x2 = self.dual_path_scheme_ordinal(module, x1, x2)\n else:\n x1, x2 = self.dual_path_scheme(module, x1, x2)\n if self.return_two:\n return x1, x2\n else:\n return x1\n\n\nclass Concurrent(nn.Sequential):\n \"\"\"\n A container for concatenation of modules on the base of the sequential container.\n\n Parameters:\n ----------\n axis : int, default 1\n The axis on which to concatenate the outputs.\n stack : bool, default False\n Whether to concatenate tensors along a new dimension.\n \"\"\"\n def __init__(self,\n axis=1,\n stack=False):\n super(Concurrent, self).__init__()\n self.axis = axis\n self.stack = stack\n\n def forward(self, x):\n out = []\n for module in self._modules.values():\n out.append(module(x))\n if self.stack:\n out = torch.stack(tuple(out), dim=self.axis)\n else:\n out = torch.cat(tuple(out), dim=self.axis)\n return out\n\n\nclass ParametricSequential(nn.Sequential):\n \"\"\"\n A sequential container for modules with parameters.\n Modules will be executed in the order they are added.\n \"\"\"\n def __init__(self, *args):\n super(ParametricSequential, self).__init__(*args)\n\n def forward(self, x, **kwargs):\n for module in self._modules.values():\n x = module(x, **kwargs)\n return x\n\n\nclass ParametricConcurrent(nn.Sequential):\n \"\"\"\n A container for concatenation of modules with parameters.\n\n Parameters:\n ----------\n 
axis : int, default 1\n The axis on which to concatenate the outputs.\n \"\"\"\n def __init__(self, axis=1):\n super(ParametricConcurrent, self).__init__()\n self.axis = axis\n\n def forward(self, x, **kwargs):\n out = []\n for module in self._modules.values():\n out.append(module(x, **kwargs))\n out = torch.cat(tuple(out), dim=self.axis)\n return out\n\n\nclass Hourglass(nn.Module):\n \"\"\"\n A hourglass block.\n\n Parameters:\n ----------\n down_seq : nn.Sequential\n Down modules as sequential.\n up_seq : nn.Sequential\n Up modules as sequential.\n skip_seq : nn.Sequential\n Skip connection modules as sequential.\n merge_type : str, default 'add'\n Type of concatenation of up and skip outputs.\n return_first_skip : bool, default False\n Whether return the first skip connection output. Used in ResAttNet.\n \"\"\"\n def __init__(self,\n down_seq,\n up_seq,\n skip_seq,\n merge_type=\"add\",\n return_first_skip=False):\n super(Hourglass, self).__init__()\n assert (len(up_seq) == len(down_seq))\n assert (len(skip_seq) == len(down_seq))\n assert (merge_type in [\"add\"])\n self.merge_type = merge_type\n self.return_first_skip = return_first_skip\n self.depth = len(down_seq)\n\n self.down_seq = down_seq\n self.up_seq = up_seq\n self.skip_seq = skip_seq\n\n def forward(self, x, **kwargs):\n y = None\n down_outs = [x]\n for down_module in self.down_seq._modules.values():\n x = down_module(x)\n down_outs.append(x)\n for i in range(len(down_outs)):\n if i != 0:\n y = down_outs[self.depth - i]\n skip_module = self.skip_seq[self.depth - i]\n y = skip_module(y)\n if (y is not None) and (self.merge_type == \"add\"):\n x = x + y\n if i != len(down_outs) - 1:\n up_module = self.up_seq[self.depth - 1 - i]\n x = up_module(x)\n if self.return_first_skip:\n return x, y\n else:\n return x\n\n\nclass SesquialteralHourglass(nn.Module):\n \"\"\"\n A sesquialteral hourglass block.\n\n Parameters:\n ----------\n down1_seq : nn.Sequential\n The first down modules as sequential.\n skip1_seq : nn.Sequential\n The first skip connection modules as sequential.\n up_seq : nn.Sequential\n Up modules as sequential.\n skip2_seq : nn.Sequential\n The second skip connection modules as sequential.\n down2_seq : nn.Sequential\n The second down modules as sequential.\n merge_type : str, default 'con'\n Type of concatenation of up and skip outputs.\n \"\"\"\n def __init__(self,\n down1_seq,\n skip1_seq,\n up_seq,\n skip2_seq,\n down2_seq,\n merge_type=\"cat\"):\n super(SesquialteralHourglass, self).__init__()\n assert (len(down1_seq) == len(up_seq))\n assert (len(down1_seq) == len(down2_seq))\n assert (len(skip1_seq) == len(skip2_seq))\n assert (len(down1_seq) == len(skip1_seq) - 1)\n assert (merge_type in [\"cat\", \"add\"])\n self.merge_type = merge_type\n self.depth = len(down1_seq)\n\n self.down1_seq = down1_seq\n self.skip1_seq = skip1_seq\n self.up_seq = up_seq\n self.skip2_seq = skip2_seq\n self.down2_seq = down2_seq\n\n def _merge(self, x, y):\n if y is not None:\n if self.merge_type == \"cat\":\n x = torch.cat((x, y), dim=1)\n elif self.merge_type == \"add\":\n x = x + y\n return x\n\n def forward(self, x, **kwargs):\n y = self.skip1_seq[0](x)\n skip1_outs = [y]\n for i in range(self.depth):\n x = self.down1_seq[i](x)\n y = self.skip1_seq[i + 1](x)\n skip1_outs.append(y)\n x = skip1_outs[self.depth]\n y = self.skip2_seq[0](x)\n skip2_outs = [y]\n for i in range(self.depth):\n x = self.up_seq[i](x)\n y = skip1_outs[self.depth - 1 - i]\n x = self._merge(x, y)\n y = self.skip2_seq[i + 1](x)\n skip2_outs.append(y)\n x = 
self.skip2_seq[self.depth](x)\n for i in range(self.depth):\n x = self.down2_seq[i](x)\n y = skip2_outs[self.depth - 1 - i]\n x = self._merge(x, y)\n return x\n\n\nclass MultiOutputSequential(nn.Sequential):\n \"\"\"\n A sequential container with multiple outputs.\n Modules will be executed in the order they are added.\n \"\"\"\n def __init__(self):\n super(MultiOutputSequential, self).__init__()\n\n def forward(self, x):\n outs = []\n for module in self._modules.values():\n x = module(x)\n if hasattr(module, \"do_output\") and module.do_output:\n outs.append(x)\n return [x] + outs\n\n\nclass Flatten(nn.Module):\n \"\"\"\n Simple flatten module.\n \"\"\"\n\n def forward(self, x):\n return x.view(x.size(0), -1)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.functional.relu6",
"torch.split",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.sigmoid",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.cat",
"torch.transpose"
]
] |
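The row above records a file of reusable PyTorch building blocks (pre-activated conv blocks, channel shuffle, SE block, IBN, hourglass containers) together with the torch calls it uses, including `torch.transpose` and `torch.cat`. As a hedged, standalone illustration of the listed channel-shuffle operation (the tensor shape and group count below are made-up examples, not taken from the row), the group-wise transpose interleaves channels across groups:

```python
# Minimal sketch of the ShuffleNet-style channel_shuffle recorded in the row above.
# Input shape and number of groups are illustrative assumptions.
import torch

def channel_shuffle(x, groups):
    batch, channels, height, width = x.size()
    channels_per_group = channels // groups
    x = x.view(batch, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()
    return x.view(batch, channels, height, width)

x = torch.arange(6).view(1, 6, 1, 1)   # channel indices 0..5
y = channel_shuffle(x, groups=2)
print(y.flatten().tolist())            # [0, 3, 1, 4, 2, 5]: channels interleaved across groups
```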
DoranLyong/dynamic-images-for-action-recognition | [
"06a68c2337b45c44a8c7ec50e94585a9b9615ad0"
] | [
"dynamic_image_networks/hmdb51/training_scripts/train_resnext50_hmdb51.py"
] | [
"# import apex - !!!! INCLUDE THIS IMPORT IF YOU WANT TO USE MIXED PRECISION TRAINING !!!!\nimport torch\nimport os\nimport sys\nimport torch.optim as optim\nimport torch.nn as nn\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom pathlib import Path\n\n# Make sure that the project root is in your PATH (i.e., the parent folder containing 'dynamic_image_networks').\nsys.path.append(str(Path('../../..').resolve()))\n\n# ---------------------------------------------------------------\n# Model / dataset choice\n# ---------------------------------------------------------------\nfrom dynamic_image_networks.hmdb51.models.resnext50_temppool import get_model\nfrom dynamic_image_networks.hmdb51.dataloaders.hmdb51_dataloader import get_train_loader\nfrom dynamic_image_networks.hmdb51.utilities.calculate_training_metrics import calculate_accuracy\nfrom dynamic_image_networks.hmdb51.utilities.logger import initialize_logger\nfrom dynamic_image_networks.hmdb51.utilities.meters import AverageMeter\n\n\ndef main():\n # ============================================================================================\n # Setup\n # ============================================================================================\n # ---------------------------------------------------------------\n # Random seeds\n # ---------------------------------------------------------------\n torch.manual_seed(590238490)\n torch.backends.cudnn.benchmark = True\n\n # ---------------------------------------------------------------\n # GPU\n # ---------------------------------------------------------------\n device = torch.device(\"cuda:0\")\n fp16 = False\n if fp16:\n print('!!! MIXED PRECISION TRAINING IS ENABLED -- ONLY USE FOR VOLTA AND TURING GPUs!!!')\n\n # ---------------------------------------------------------------\n # Training settings\n # ---------------------------------------------------------------\n batch_size = 32\n num_epochs = 60\n num_workers = 6\n max_segment_size = 10\n save_best_models = True\n image_augmentation = False\n\n # ----------------------------------------------------------------------------\n # Get the model\n # ----------------------------------------------------------------------------\n net = get_model(num_classes=51)\n net.to(device)\n\n # ----------------------------------------------------------------------------\n # Initialize optimizer and loss function\n # ----------------------------------------------------------------------------\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=3e-3)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)\n if fp16:\n net, optimizer = apex.amp.initialize(net, optimizer, opt_level=\"O1\")\n\n # ---------------------------------------------------------------\n # Logging set-up\n # ---------------------------------------------------------------\n # File-name\n file_name = ''.join(os.path.basename(__file__).split('.py')[:-1])\n logger = initialize_logger(file_name, log_dir='./logs/')\n\n # ============================================================================================\n # Train\n # ============================================================================================\n time_start = datetime.now()\n fold_i = 1\n\n # ---------------------------------------------------------------\n # Load dataloaders\n # ---------------------------------------------------------------\n train_loader, validation_loader = get_train_loader(fold_id=fold_i,\n batch_size=batch_size,\n 
num_workers=num_workers,\n image_augmenation=image_augmentation,\n segment_size=max_segment_size)\n\n logger.info('Starting Training on Fold: {}\\n'.format(fold_i))\n\n best_val_loss = float('inf')\n best_val_acc = 0\n for epoch_i in range(num_epochs):\n # ---------------------------------------------------------------\n # Training and validation loop\n # ---------------------------------------------------------------\n\n avg_loss, avg_acc = training_loop('train', net, device, train_loader,\n optimizer, criterion, fp16)\n\n avg_val_loss, avg_val_acc = training_loop('val', net, device, validation_loader,\n None, criterion, fp16)\n\n if scheduler:\n scheduler.step(avg_val_loss)\n\n # ---------------------------------------------------------------\n # Track the best model\n # ---------------------------------------------------------------\n if avg_val_loss < best_val_loss:\n best_val_loss = avg_val_loss\n\n if save_best_models:\n logger.info('Saving model because of best loss...')\n os.makedirs('./saved_models/', exist_ok=True)\n torch.save(net.state_dict(),\n './saved_models/{}_fold_{}_best_loss_state.pt'.format(file_name, fold_i))\n\n if avg_val_acc > best_val_acc:\n best_val_acc = avg_val_acc\n\n if save_best_models:\n logger.info('Saving model because of best acc...')\n os.makedirs('./saved_models/', exist_ok=True)\n torch.save(net.state_dict(),\n './saved_models/{}_fold_{}_best_acc_state.pt'.format(file_name, fold_i))\n\n # ---------------------------------------------------------------\n # Log the training status\n # ---------------------------------------------------------------\n time_elapsed = datetime.now() - time_start\n output_msg = 'Fold {}, Epoch: {}/{}\\n' \\\n '---------------------\\n' \\\n 'train loss: {:.6f}, val loss: {:.6f}\\n' \\\n 'train acc: {:.6f}, val acc: {:.6f}\\n' \\\n 'best val loss: {:.6f}, best val acc: {:.6f}\\n' \\\n 'time elapsed: {}\\n'. \\\n format(fold_i, epoch_i, num_epochs - 1,\n avg_loss, avg_val_loss,\n avg_acc, avg_val_acc,\n best_val_loss, best_val_acc,\n str(time_elapsed).split('.')[0])\n logger.info(output_msg)\n\n logger.info('Finished Training')\n\n\ndef training_loop(phase, net, device, dataloader, optimizer, criterion, fp16):\n loss_meter = AverageMeter()\n acc_meter = AverageMeter()\n\n # Set the model into the appropriate mode.\n if phase == 'train':\n net.train()\n elif phase == 'val':\n net.eval()\n else:\n raise ValueError\n\n # Enable gradient accumulation only for the training phase.\n with torch.set_grad_enabled(phase == 'train'):\n for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):\n x, y, = data\n x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)\n\n # Prediction.\n y_pred = net(x).float()\n\n # Loss and step.\n loss = criterion(y_pred, y)\n if phase == 'train':\n optimizer.zero_grad()\n if fp16 is True:\n with apex.amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n\n # Metrics\n batch_size = len(y)\n loss_meter.add(loss.item(), batch_size)\n acc_meter.add(calculate_accuracy(y_pred, y), batch_size)\n\n avg_loss = loss_meter.get_average()\n avg_acc = acc_meter.get_average()\n return avg_loss, avg_acc\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.set_grad_enabled",
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.device"
]
] |
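The row above is an HMDB51 training script whose listed APIs include `torch.set_grad_enabled`, `torch.nn.CrossEntropyLoss` and `torch.optim.lr_scheduler.ReduceLROnPlateau`. A hedged, minimal sketch of that phase-gated train/validate pattern follows (the tiny linear model and random tensors are illustrative assumptions, not the script's ResNeXt50 or dataloaders):

```python
# Sketch of the pattern recorded above: gradients enabled only in the training
# phase, and ReduceLROnPlateau stepped on the validation loss.
import torch
import torch.nn as nn
import torch.optim as optim

torch.manual_seed(0)
model = nn.Linear(8, 3)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=3e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)

def run_phase(phase):
    if phase == 'train':
        model.train()
    else:
        model.eval()
    with torch.set_grad_enabled(phase == 'train'):
        x, y = torch.randn(4, 8), torch.randint(0, 3, (4,))
        loss = criterion(model(x), y)
        if phase == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    return loss.item()

for epoch in range(2):
    run_phase('train')
    scheduler.step(run_phase('val'))   # scheduler watches the validation loss
```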
cwkeam/pytorch-metric-learning | [
"63e4ecb781c5735ad714f61a3eecc55f72496905"
] | [
"tests/losses/test_fastap_loss.py"
] | [
"######################################\n#######ORIGINAL IMPLEMENTATION########\n######################################\n# FROM https://github.com/kunhe/FastAP-metric-learning/blob/master/pytorch/FastAP_loss.py\n# This code is copied directly from the official implementation\n# so that we can make sure our implementation returns the same result.\n# It's copied under the MIT license.\nimport torch\nfrom torch.autograd import Variable\n\n\ndef softBinning(D, mid, Delta):\n y = 1 - torch.abs(D - mid) / Delta\n return torch.max(torch.tensor([0], dtype=D.dtype).to(D.device), y)\n\n\ndef dSoftBinning(D, mid, Delta):\n side1 = (D > (mid - Delta)).type(D.dtype)\n side2 = (D <= mid).type(D.dtype)\n ind1 = side1 * side2 # .type(torch.uint8)\n\n side1 = (D > mid).type(D.dtype)\n side2 = (D <= (mid + Delta)).type(D.dtype)\n ind2 = side1 * side2 # .type(torch.uint8)\n\n return (ind1 - ind2) / Delta\n\n\n######################################\n#######ORIGINAL IMPLEMENTATION########\n######################################\n# FROM https://github.com/kunhe/FastAP-metric-learning/blob/master/pytorch/FastAP_loss.py\n# This code is copied directly from the official implementation\n# so that we can make sure our implementation returns the same result.\n# It's copied under the MIT license.\nclass OriginalImplementationFastAP(torch.autograd.Function):\n \"\"\"\n FastAP - autograd function definition\n\n This class implements the FastAP loss from the following paper:\n \"Deep Metric Learning to Rank\",\n F. Cakir, K. He, X. Xia, B. Kulis, S. Sclaroff. CVPR 2019\n\n NOTE:\n Given a input batch, FastAP does not sample triplets from it as it's not\n a triplet-based method. Therefore, FastAP does not take a Sampler as input.\n Rather, we specify how the input batch is selected.\n \"\"\"\n\n @staticmethod\n def forward(ctx, input, target, num_bins):\n \"\"\"\n Args:\n input: torch.Tensor(N x embed_dim), embedding matrix\n target: torch.Tensor(N x 1), class labels\n num_bins: int, number of bins in distance histogram\n \"\"\"\n N = target.size()[0]\n assert input.size()[0] == N, \"Batch size donesn't match!\"\n\n # 1. get affinity matrix\n Y = target.unsqueeze(1)\n Aff = 2 * (Y == Y.t()).type(input.dtype) - 1\n Aff.masked_fill_(\n torch.eye(N, N).bool().to(input.device), 0\n ) # set diagonal to 0\n\n I_pos = (Aff > 0).type(input.dtype).to(input.device)\n I_neg = (Aff < 0).type(input.dtype).to(input.device)\n N_pos = torch.sum(I_pos, 1)\n\n # 2. compute distances from embeddings\n # squared Euclidean distance with range [0,4]\n dist2 = 2 - 2 * torch.mm(input, input.t())\n\n # 3. estimate discrete histograms\n Delta = torch.tensor(4.0 / num_bins).to(input.device)\n Z = torch.linspace(0.0, 4.0, steps=num_bins + 1).to(input.device)\n L = Z.size()[0]\n h_pos = torch.zeros((N, L), dtype=input.dtype).to(input.device)\n h_neg = torch.zeros((N, L), dtype=input.dtype).to(input.device)\n for l in range(L):\n pulse = softBinning(dist2, Z[l], Delta)\n h_pos[:, l] = torch.sum(pulse * I_pos, 1)\n h_neg[:, l] = torch.sum(pulse * I_neg, 1)\n\n H_pos = torch.cumsum(h_pos, 1)\n h = h_pos + h_neg\n H = torch.cumsum(h, 1)\n\n # 4. compate FastAP\n FastAP = h_pos * H_pos / H\n FastAP[torch.isnan(FastAP) | torch.isinf(FastAP)] = 0\n FastAP = torch.sum(FastAP, 1) / N_pos\n FastAP = FastAP[~torch.isnan(FastAP)]\n loss = 1 - torch.mean(FastAP)\n\n # 6. 
save for backward\n ctx.save_for_backward(input, target)\n ctx.Z = Z\n ctx.Delta = Delta\n ctx.dist2 = dist2\n ctx.I_pos = I_pos\n ctx.I_neg = I_neg\n ctx.h_pos = h_pos\n ctx.h_neg = h_neg\n ctx.H_pos = H_pos\n ctx.N_pos = N_pos\n ctx.h = h\n ctx.H = H\n ctx.L = torch.tensor(L)\n\n return loss\n\n @staticmethod\n def backward(ctx, grad_output):\n input, target = ctx.saved_tensors\n\n Z = Variable(ctx.Z, requires_grad=False)\n Delta = Variable(ctx.Delta, requires_grad=False)\n dist2 = Variable(ctx.dist2, requires_grad=False)\n I_pos = Variable(ctx.I_pos, requires_grad=False)\n I_neg = Variable(ctx.I_neg, requires_grad=False)\n h = Variable(ctx.h, requires_grad=False)\n H = Variable(ctx.H, requires_grad=False)\n h_pos = Variable(ctx.h_pos, requires_grad=False)\n h_neg = Variable(ctx.h_neg, requires_grad=False)\n H_pos = Variable(ctx.H_pos, requires_grad=False)\n N_pos = Variable(ctx.N_pos, requires_grad=False)\n\n L = Z.size()[0]\n H2 = torch.pow(H, 2)\n H_neg = H - H_pos\n\n # 1. d(FastAP)/d(h+)\n LTM1 = torch.tril(torch.ones(L, L), -1) # lower traingular matrix\n tmp1 = h_pos * H_neg / H2\n tmp1[torch.isnan(tmp1)] = 0\n\n d_AP_h_pos = (H_pos * H + h_pos * H_neg) / H2\n d_AP_h_pos = d_AP_h_pos + torch.mm(tmp1, LTM1.cuda())\n d_AP_h_pos = d_AP_h_pos / N_pos.repeat(L, 1).t()\n d_AP_h_pos[torch.isnan(d_AP_h_pos) | torch.isinf(d_AP_h_pos)] = 0\n\n # 2. d(FastAP)/d(h-)\n LTM0 = torch.tril(torch.ones(L, L), 0) # lower triangular matrix\n tmp2 = -h_pos * H_pos / H2\n tmp2[torch.isnan(tmp2)] = 0\n\n d_AP_h_neg = torch.mm(tmp2, LTM0.cuda())\n d_AP_h_neg = d_AP_h_neg / N_pos.repeat(L, 1).t()\n d_AP_h_neg[torch.isnan(d_AP_h_neg) | torch.isinf(d_AP_h_neg)] = 0\n\n # 3. d(FastAP)/d(embedding)\n d_AP_x = 0\n for l in range(L):\n dpulse = dSoftBinning(dist2, Z[l], Delta)\n dpulse[torch.isnan(dpulse) | torch.isinf(dpulse)] = 0\n ddp = dpulse * I_pos\n ddn = dpulse * I_neg\n\n alpha_p = torch.diag(d_AP_h_pos[:, l]) # N*N\n alpha_n = torch.diag(d_AP_h_neg[:, l])\n Ap = torch.mm(ddp, alpha_p) + torch.mm(alpha_p, ddp)\n An = torch.mm(ddn, alpha_n) + torch.mm(alpha_n, ddn)\n\n # accumulate gradient\n d_AP_x = d_AP_x - torch.mm(input.t(), (Ap + An))\n\n grad_input = -d_AP_x\n return grad_input.t(), None, None\n\n\n######################################\n#######ORIGINAL IMPLEMENTATION########\n######################################\n# FROM https://github.com/kunhe/FastAP-metric-learning/blob/master/pytorch/FastAP_loss.py\n# This code is copied directly from the official implementation\n# so that we can make sure our implementation returns the same result.\n# It's copied under the MIT license.\nclass OriginalImplementationFastAPLoss(torch.nn.Module):\n \"\"\"\n FastAP - loss layer definition\n\n This class implements the FastAP loss from the following paper:\n \"Deep Metric Learning to Rank\",\n F. Cakir, K. He, X. Xia, B. Kulis, S. Sclaroff. CVPR 2019\n \"\"\"\n\n def __init__(self, num_bins=10):\n super(OriginalImplementationFastAPLoss, self).__init__()\n self.num_bins = num_bins\n\n def forward(self, batch, labels):\n return OriginalImplementationFastAP.apply(batch, labels, self.num_bins)\n\n\n### Testing this library's implementation ###\nimport unittest\n\nfrom pytorch_metric_learning.losses import FastAPLoss\n\nfrom .. 
import TEST_DEVICE, TEST_DTYPES\nfrom ..zzz_testing_utils.testing_utils import angle_to_coord\n\n\nclass TestFastAPLoss(unittest.TestCase):\n def test_fast_ap_loss(self):\n num_bins = 5\n loss_func = FastAPLoss(num_bins)\n original_loss_func = OriginalImplementationFastAPLoss(num_bins)\n ref_emb = torch.randn(32, 32)\n ref_labels = torch.randint(0, 10, (32,))\n\n for dtype in TEST_DTYPES:\n embedding_angles = torch.arange(0, 180)\n embeddings = torch.tensor(\n [angle_to_coord(a) for a in embedding_angles],\n requires_grad=True,\n dtype=dtype,\n ).to(\n TEST_DEVICE\n ) # 2D embeddings\n labels = torch.randint(low=0, high=10, size=(180,)).to(TEST_DEVICE)\n\n loss = loss_func(embeddings, labels)\n loss.backward()\n original_loss = original_loss_func(\n torch.nn.functional.normalize(embeddings), labels\n )\n rtol = 1e-2 if dtype == torch.float16 else 1e-5\n self.assertTrue(torch.isclose(loss, original_loss, rtol=rtol))\n\n # fastap doesn't support ref_emb\n self.assertRaises(\n ValueError,\n lambda: loss_func(\n embeddings, labels, ref_emb=ref_emb, ref_labels=ref_labels\n ),\n )\n"
] | [
[
"torch.randint",
"torch.mm",
"torch.diag",
"torch.cumsum",
"torch.eye",
"torch.randn",
"torch.linspace",
"torch.autograd.Variable",
"torch.isinf",
"torch.arange",
"torch.mean",
"torch.ones",
"torch.nn.functional.normalize",
"torch.tensor",
"torch.isclose",
"torch.isnan",
"torch.pow",
"torch.sum",
"torch.abs",
"torch.zeros"
]
] |
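The row above copies the reference FastAP loss used in the test, whose listed calls include `torch.linspace`, `torch.cumsum` and `torch.sum`. A hedged sketch of the soft-binning histogram step those calls build is shown below; the distances are toy values, and `torch.clamp` stands in for the copied implementation's max-against-zero:

```python
# Triangular soft binning of squared distances into a running histogram, as in the
# FastAP code recorded above. Bin count and distances are illustrative assumptions.
import torch

def soft_binning(d, mid, delta):
    return torch.clamp(1 - torch.abs(d - mid) / delta, min=0)

num_bins = 10
delta = torch.tensor(4.0 / num_bins)
bin_centres = torch.linspace(0.0, 4.0, steps=num_bins + 1)

dist2 = torch.tensor([[0.1, 1.9], [3.7, 2.2]])   # toy squared distances in [0, 4]
hist = torch.stack([soft_binning(dist2, z, delta).sum(dim=1) for z in bin_centres], dim=1)
cdf = torch.cumsum(hist, dim=1)                   # cumulative histogram, as in FastAP
print(hist.shape, cdf[:, -1])                     # torch.Size([2, 11]) tensor([2., 2.])
```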
dennismalmgren/marl | [
"baa846dc4144cf6f53e51d8cf1e2fcf5800c9f95"
] | [
"src/runners/episode_runner.py"
] | [
"from envs import REGISTRY as env_REGISTRY\nfrom functools import partial\nfrom components.episode_buffer import EpisodeBatch\nimport numpy as np\n\n\nclass EpisodeRunner:\n\n def __init__(self, args, logger):\n self.args = args\n self.logger = logger\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n self.episode_limit = self.env.episode_limit\n self.t = 0\n\n self.t_env = 0\n\n self.train_returns = []\n self.test_returns = []\n self.train_stats = {}\n self.test_stats = {}\n\n # Log the first run\n self.log_train_stats_t = -1000000\n\n def setup(self, scheme, groups, preprocess, mac):\n self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,\n preprocess=preprocess, device=self.args.device) \n self.mac = mac\n\n def get_env_info(self):\n return self.env.get_env_info()\n\n def save_replay(self):\n self.env.save_replay()\n\n def close_env(self):\n self.env.close()\n\n def reset(self):\n self.batch = self.new_batch()\n self.env.reset()\n self.t = 0\n\n def run(self, test_mode=False):\n self.reset()\n\n terminated = False\n episode_return = 0\n self.mac.init_hidden(batch_size=self.batch_size)\n\n while not terminated:\n\n pre_transition_data = {\n \"state\": [self.env.get_state()],\n \"avail_actions\": [self.env.get_avail_actions()],\n \"obs\": [self.env.get_obs()]\n }\n\n self.batch.update(pre_transition_data, ts=self.t)\n\n # Pass the entire batch of experiences up till now to the agents\n # Receive the actions for each agent at this timestep in a batch of size 1\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)\n\n reward, terminated, env_info = self.env.step(actions[0])\n episode_return += reward\n\n post_transition_data = {\n \"actions\": actions,\n \"reward\": [(reward,)],\n \"terminated\": [(terminated != env_info.get(\"episode_limit\", False),)],\n }\n\n self.batch.update(post_transition_data, ts=self.t)\n\n self.t += 1\n\n last_data = {\n \"state\": [self.env.get_state()],\n \"avail_actions\": [self.env.get_avail_actions()],\n \"obs\": [self.env.get_obs()]\n }\n self.batch.update(last_data, ts=self.t)\n\n # Select actions in the last stored state\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)\n self.batch.update({\"actions\": actions}, ts=self.t)\n\n cur_stats = self.test_stats if test_mode else self.train_stats\n cur_returns = self.test_returns if test_mode else self.train_returns\n log_prefix = \"test_\" if test_mode else \"\"\n cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})\n cur_stats[\"n_episodes\"] = 1 + cur_stats.get(\"n_episodes\", 0)\n cur_stats[\"ep_length\"] = self.t + cur_stats.get(\"ep_length\", 0)\n\n if not test_mode:\n self.t_env += self.t\n\n cur_returns.append(episode_return)\n\n if test_mode and (len(self.test_returns) == self.args.test_nepisode):\n self._log(cur_returns, cur_stats, log_prefix)\n elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:\n self._log(cur_returns, cur_stats, log_prefix)\n if hasattr(self.mac.action_selector, \"epsilon\"):\n self.logger.log_stat(\"epsilon\", self.mac.action_selector.epsilon, self.t_env)\n self.log_train_stats_t = self.t_env\n\n return self.batch\n\n def _log(self, returns, stats, prefix):\n self.logger.log_stat(prefix + \"return_mean\", np.mean(returns), self.t_env)\n self.logger.log_stat(prefix + \"return_std\", 
np.std(returns), self.t_env)\n returns.clear()\n\n for k, v in stats.items():\n if k != \"n_episodes\":\n self.logger.log_stat(prefix + k + \"_mean\" , v/stats[\"n_episodes\"], self.t_env)\n stats.clear()\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] |
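The row above is an episode runner whose only listed APIs are `numpy.mean` and `numpy.std`, used in its `_log` method. A hedged, toy illustration of that aggregation (the return and stat values below are invented):

```python
# Mean/std of episode returns plus per-key stat means normalised by n_episodes,
# mirroring the _log method in the row above. All numbers are illustrative.
import numpy as np

returns = [1.0, 2.0, 4.0]
stats = {"n_episodes": 3, "ep_length": 90}

print("return_mean", np.mean(returns), "return_std", np.std(returns))
for k, v in stats.items():
    if k != "n_episodes":
        print(k + "_mean", v / stats["n_episodes"])
```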
chanhee0222/feed2resp | [
"16dc7071f17af56cbf019eeabcd12a5dbd0693e7"
] | [
"main.py"
] | [
"import argparse\nimport datetime\nimport glob\nimport logging\nimport os\nimport time\n\nimport torch\n\nfrom logging_helper import init_logger\nfrom models import Discriminator, BartSystem\nfrom train import train\nfrom transformer_base import add_generic_args, generic_train\n\n\nclass Config():\n # data_path = './data/chatbot/'\n # log_dir = 'runs/exp'\n save_path = './save'\n # pretrained_embed_path = './embedding/'\n device = torch.device('cuda' if True and torch.cuda.is_available() else 'cpu')\n # device = torch.device('cpu')\n discriminator_method = 'Multi' # 'Multi' or 'Cond'\n load_pretrained_embed = False\n min_freq = 3\n max_length = 1024 # max_source_length\n # embed_size = 256\n d_model = 256\n h = 4\n num_styles = 2\n num_classes = num_styles + 1 if discriminator_method == 'Multi' else 2\n num_layers = 4\n # batch_size = 64\n lr_F = 5e-6\n lr_D = 1e-4\n L2 = 0\n iter_D = 10\n iter_F = 5\n F_pretrain_iter = 1\n log_steps = 5\n eval_steps = 25\n learned_pos_embed = True\n dropout = 0\n drop_rate_config = [(1, 0)]\n temperature_config = [(1, 0)]\n\n slf_factor = 0.25\n cyc_factor = 0.5\n adv_factor = 1\n\n inp_shuffle_len = 0\n inp_unk_drop_fac = 0\n inp_rand_drop_fac = 0\n inp_drop_prob = 0\n\n ### Bart system\n output_dir='feedback_sum'\n do_predict=True\n max_source_length=1024\n max_target_length=56\n data_dir=\"feedback\"\n\n\ndef get_n_params(model):\n pp=0\n for p in list(model.parameters()):\n nn=1\n for s in list(p.size()):\n nn = nn*s\n pp += nn\n return pp\n\n\ndef main():\n config = Config()\n parser = argparse.ArgumentParser()\n add_generic_args(parser, os.getcwd())\n parser = BartSystem.add_model_specific_args(parser, os.getcwd())\n args = parser.parse_args()\n\n # Some values from Config class needs to be copied to args to work.\n setattr(config, \"num_train_epochs\", args.num_train_epochs)\n setattr(config, \"save_path\", args.output_dir)\n setattr(args, \"learning_rate\", config.lr_F)\n\n # Create output directory.\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n setattr(config, \"save_folder\", os.path.join(config.save_path, timestamp))\n os.makedirs(os.path.join(config.save_folder, 'ckpts'))\n init_logger(config.save_folder)\n logger = logging.getLogger(__name__)\n\n model_F = BartSystem(args).to(config.device)\n # Don't use the trainer to fit the model\n args.do_train = False\n # trainer = generic_train(model_F, args)\n if args.output_dir:\n try:\n checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, \"checkpointepoch=*.ckpt\"), recursive=True)))\n if checkpoints[-1]:\n BartSystem.load_from_checkpoint(checkpoints[-1])\n logger.info(\"Load checkpoint sucessfully!\")\n except:\n logger.info(\"Failed to load checkpoint!\")\n\n # train_iters, dev_iters, test_iters, vocab = load_dataset(config)\n train_iters, dev_iters, test_iters = model_F.train_dataloader(), model_F.val_dataloader(), model_F.test_dataloader()\n model_D = Discriminator(config, model_F.tokenizer).to(config.device)\n\n logger.info(config.discriminator_method)\n # import pdb\n # pdb.set_trace()\n logger.info(model_D)\n\n train(config, model_F, model_D, train_iters, dev_iters, test_iters)\n \n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.cuda.is_available"
]
] |
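The row above lists a single torch call, `torch.cuda.is_available`, used in its Config class to pick a device. A hedged one-liner sketch of that fallback pattern:

```python
# Device selection with CPU fallback, as in the Config class recorded above.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device, torch.zeros(2, 2, device=device).device)
```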
tsalo/PyMARE | [
"7eb950fb137b6221f2ea5d381ca91d16eb4b8a35"
] | [
"pymare/stats.py"
] | [
"\"\"\"Miscellaneous statistical functions.\"\"\"\n\nimport numpy as np\nimport scipy.stats as ss\nfrom scipy.optimize import Bounds, minimize\n\n\ndef weighted_least_squares(y, v, X, tau2=0.0, return_cov=False):\n \"\"\"2-D weighted least squares.\n\n Args:\n y (NDArray): 2-d array of estimates (studies x parallel datasets)\n v (NDArray): 2-d array of sampling variances\n X (NDArray): Fixed effect design matrix\n tau2 (float): tau^2 estimate to use for weights\n return_cov (bool): Whether or not to return the inverse cov matrix\n\n Returns:\n If return_cov is True, returns both fixed parameter estimates and the\n inverse covariance matrix; if False, only the parameter estimates.\n \"\"\"\n\n w = 1.0 / (v + tau2)\n\n # Einsum indices: k = studies, p = predictors, i = parallel iterates\n wX = np.einsum(\"kp,ki->ipk\", X, w)\n cov = wX.dot(X)\n\n # numpy >= 1.8 inverts stacked matrices along the first N - 2 dims, so we\n # can vectorize computation along the second dimension (parallel datasets)\n precision = np.linalg.pinv(cov).T\n\n pwX = np.einsum(\"ipk,qpi->iqk\", wX, precision)\n beta = np.einsum(\"ipk,ik->ip\", pwX, y.T).T\n\n return (beta, precision) if return_cov else beta\n\n\ndef ensure_2d(arr):\n \"\"\"Ensure the passed array has 2 dimensions.\"\"\"\n if arr is None:\n return arr\n try:\n arr = np.array(arr)\n except:\n return arr\n if arr.ndim == 1:\n arr = arr[:, None]\n return arr\n\n\ndef q_profile(y, v, X, alpha=0.05):\n \"\"\"Get the CI for tau^2 via the Q-Profile method (Viechtbauer, 2007).\n\n Args:\n y (ndarray): 1d array of study-level estimates\n v (ndarray): 1d array of study-level variances\n X (ndarray): 1d or 2d array containing study-level predictors\n (including intercept); has dimensions K x P, where K is the number\n of studies and P is the number of predictor variables.\n alpha (float, optional): alpha value defining the coverage of the CIs,\n where width(CI) = 1 - alpha. Defaults to 0.05.\n\n Returns:\n A dictionary with keys 'ci_l' and 'ci_u', corresponding to the lower\n and upper bounds of the tau^2 confidence interval, respectively.\n\n Notes:\n Following the Viechtbauer implementation, this method returns the\n interval that gives an equal probability mass at both tails (i.e.,\n P(tau^2 <= lower_bound) == P(tau^2 >= upper_bound) == alpha/2), and\n *not* the smallest possible range of tau^2 values that provides the\n desired coverage.\n\n References:\n Viechtbauer, W. (2007). Confidence intervals for the amount of\n heterogeneity in meta-analysis. 
Statistics in Medicine, 26(1), 37-52.\n \"\"\"\n k, p = X.shape\n df = k - p\n l_crit = ss.chi2.ppf(1 - alpha / 2, df)\n u_crit = ss.chi2.ppf(alpha / 2, df)\n args = (ensure_2d(y), ensure_2d(v), X)\n bds = Bounds([0], [np.inf], keep_feasible=True)\n\n # Use the D-L estimate of tau^2 as a starting point; when using a fixed\n # value, minimize() sometimes fails to stay in bounds.\n from .estimators import DerSimonianLaird\n\n ub_start = 2 * DerSimonianLaird().fit(y, v, X).params_[\"tau2\"]\n\n lb = minimize(lambda x: (q_gen(*args, x) - l_crit) ** 2, [0], bounds=bds).x[0]\n ub = minimize(lambda x: (q_gen(*args, x) - u_crit) ** 2, [ub_start], bounds=bds).x[0]\n return {\"ci_l\": lb, \"ci_u\": ub}\n\n\ndef q_gen(y, v, X, tau2):\n \"\"\"Generalized form of Cochran's Q-statistic.\n\n Args:\n y (ndarray): 1d array of study-level estimates\n v (ndarray): 1d array of study-level variances\n X (ndarray): 1d or 2d array containing study-level predictors\n (including intercept); has dimensions K x P, where K is the number\n of studies and P is the number of predictor variables.\n tau2 (float): Between-study variance. Must be >= 0.\n\n Returns:\n A float giving the value of Cochran's Q-statistic.\n\n References:\n Veroniki, A. A., Jackson, D., Viechtbauer, W., Bender, R., Bowden, J.,\n Knapp, G., Kuss, O., Higgins, J. P., Langan, D., & Salanti, G. (2016).\n Methods to estimate the between-study variance and its uncertainty in\n meta-analysis. Research synthesis methods, 7(1), 55–79.\n https://doi.org/10.1002/jrsm.1164\n \"\"\"\n if np.any(tau2 < 0):\n raise ValueError(\"Value of tau^2 must be >= 0.\")\n beta = weighted_least_squares(y, v, X, tau2)\n w = 1.0 / (v + tau2)\n return (w * (y - X.dot(beta)) ** 2).sum(0)\n"
] | [
[
"scipy.stats.chi2.ppf",
"numpy.any",
"scipy.optimize.Bounds",
"numpy.einsum",
"numpy.linalg.pinv",
"numpy.array"
]
] |
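The row above contains meta-analysis statistics helpers whose listed APIs include `numpy.einsum` and `numpy.linalg.pinv`, used to vectorise weighted least squares across parallel datasets. A hedged single-dataset sketch of the same inverse-variance weighting follows (toy numbers, plain matrix algebra instead of the row's einsum formulation):

```python
# beta = (X' W X)^-1 X' W y with W = diag(1 / (v + tau^2)), the quantity the
# recorded weighted_least_squares() computes in stacked form. Data are made up.
import numpy as np

y = np.array([0.3, 0.5, 0.2, 0.6])        # study-level estimates
v = np.array([0.05, 0.10, 0.02, 0.08])    # sampling variances
X = np.ones((4, 1))                       # intercept-only design
tau2 = 0.01

w = 1.0 / (v + tau2)
beta = np.linalg.pinv((X.T * w) @ X) @ (X.T * w) @ y
print(beta)                               # inverse-variance-weighted mean
```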
Yashgh7076/CU-Thesis | [
"59a7c6e8009395b5773b1ee47c38ca287ed6c189"
] | [
"tseries_crossval.py"
] | [
"import numpy as np \r\nimport sys\r\nimport os\r\nimport math\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TF info\r\nimport tensorflow as tf\r\n#import matplotlib.pyplot as plt\r\n\r\n# Define constants\r\nstride = 15 #1 second @ 15 Hz sampling\r\nwindow = 30*15 #30 seconds window considered\r\n\r\nfolder = sys.argv[1] \r\nif not os.path.exists(folder):\r\n print(\"Unable to open folder containing data, check that folder exists \\n\")\r\n exit(0)\r\ntotal_files = 488\r\n\r\ntotal_sum = 0\r\nfor i in range(1,total_files + 1):\r\n file_no = 'output' + str(i) + '.txt'\r\n full_path = os.path.join(folder, file_no)\r\n #print(full_path)\r\n \r\n f = open(full_path,'r')\r\n d=[[float(x) for x in line.split()] for line in f]\r\n f.close()\r\n\r\n N = len(d) \r\n total_sum = total_sum + N \r\n\r\n M = len(d[0])\r\n measurements = int((M-window)/6) \r\n \r\ndataset = np.zeros((total_sum,measurements,6))\r\nvectors = np.zeros((total_sum,window),dtype=np.uint8)\r\nwindows_in_recording = np.zeros((total_files), dtype=np.uint32)\r\n\r\ntotal_windows = 0\r\nfor i in range(1,total_files + 1):\r\n file_no = 'output' + str(i) + '.txt'\r\n full_path = os.path.join(folder, file_no)\r\n \r\n f = open(full_path,'r')\r\n d=[[float(x) for x in line.split()] for line in f]\r\n f.close() \r\n\r\n # Need to recalculate the number of windows each time\r\n N = len(d)\r\n \r\n labels = np.zeros(shape = (N,window), dtype=np.uint8) # np.uint8 -> each sample is labeled from 0 to 5\r\n data = np.zeros(shape = (N,measurements,6))\r\n data_max = np.zeros((6)) # Create placeholders\r\n data_min = np.zeros((6))\r\n temp_3 = np.zeros((6))\r\n temp_4 = np.zeros((6))\r\n\r\n for j in range(N):\r\n temp = d[j]\r\n temp_1 = temp[0:window]\r\n temp_2 = temp[window:M] \r\n\r\n labels[j,:] = temp_1\r\n\r\n for k in range(measurements): # Read data\r\n for l in range(6):\r\n data[j,k,l] = temp_2[(6*k) + l] \r\n \r\n for j in range(N):\r\n if(j == 1):\r\n data_max = np.amax(data[j,:,:], axis=0)\r\n data_min = np.amin(data[j,:,:], axis=0)\r\n else:\r\n temp_3 = np.amax(data[j,:,:], axis=0)\r\n temp_4 = np.amin(data[j,:,:], axis=0)\r\n for k in range(6):\r\n if(temp_3[k] >= data_max[k]):\r\n data_max[k] = temp_3[k]\r\n\r\n if(temp_4[k] <= data_min[k]):\r\n data_min[k] = temp_4[k]\r\n\r\n # Normalize each recording (meal)\r\n for j in range(N):\r\n for k in range(measurements):\r\n data[j,k,:] = data[j,k,:] - data_min # Vector subtraction\r\n data[j,k,:] = data[j,k,:]/(data_max - data_min) # Element-wise division\r\n\r\n dataset[total_windows:total_windows + N, :, :] = data\r\n vectors[total_windows:total_windows + N,:] = labels \r\n total_windows = total_windows + N\r\n windows_in_recording[i-1] = total_windows #Calculates all windows till this meal -> That is what we want!\r\n\r\n# Clear variables from memory\r\ndel data, labels, d, temp_1, temp_2, temp_3, temp_4 \r\n\r\n# Print out to verify\r\n#f = open('segments_data.txt','w') \r\n#for j in range(measurements):\r\n# for k in range(6):\r\n# f.write(\"%f \" % (dataset[0,j,k]))\r\n# f.write(\"\\n\") # --> correct way of newline in Python!\r\n#f.close()\r\n\r\n#f = open('segments_labels.txt','w')\r\n#for j in range(total_windows):\r\n# for k in range(window):\r\n# f.write(\"%u \" % (vectors[j,k]))\r\n# f.write(\"\\n\")\r\n#f.close()\r\n\r\n# Cross-validation starts here, split data into five parts, use validation_split (keras) for simplicity\r\npart_1 = windows_in_recording[math.floor((0.2*total_files)) -1]\r\npart_2 = windows_in_recording[math.floor((0.4*total_files)) 
-1]\r\npart_3 = windows_in_recording[math.floor((0.6*total_files)) -1]\r\npart_4 = windows_in_recording[math.floor((0.8*total_files)) -1]\r\nfor iter in range(5): \r\n \r\n if(iter == 0):\r\n tst_data = dataset[0:part_1,:,:] \r\n trn_data = dataset[part_1:total_windows,:,:]\r\n\r\n tst_vcts = vectors[0:part_1,:]\r\n trn_vcts = vectors[part_1:total_windows,:] \r\n elif(iter == 1):\r\n tst_data = dataset[part_1:part_2,:,:]\r\n temp_1 = dataset[0:part_1,:,:]\r\n temp_2 = dataset[part_2:total_windows,:,:]\r\n trn_data = np.concatenate((temp_1, temp_2), axis=0)\r\n\r\n tst_vcts = vectors[part_1:part_2,:]\r\n temp_3 = vectors[0:part_1,:]\r\n temp_4 = vectors[part_2:total_windows,:]\r\n trn_vcts = np.concatenate((temp_3, temp_4), axis=0) \r\n elif(iter == 2):\r\n tst_data = dataset[part_2:part_3,:,:]\r\n temp_1 = dataset[0:part_2,:,:]\r\n temp_2 = dataset[part_3:total_windows,:,:]\r\n trn_data = np.concatenate((temp_1, temp_2), axis=0)\r\n \r\n tst_vcts = vectors[part_2:part_3,:]\r\n temp_3 = vectors[0:part_2,:]\r\n temp_4 = vectors[part_3:total_windows,:]\r\n trn_vcts = np.concatenate((temp_3, temp_4), axis=0)\r\n elif(iter == 3):\r\n tst_data = dataset[part_3:part_4,:,:]\r\n temp_1 = dataset[0:part_3,:,:]\r\n temp_2 = dataset[part_4:total_windows,:,:]\r\n trn_data = np.concatenate((temp_1, temp_2), axis=0)\r\n \r\n tst_vcts = vectors[part_3:part_4,:]\r\n temp_3 = vectors[0:part_3,:]\r\n temp_4 = vectors[part_4:total_windows,:]\r\n trn_vcts = np.concatenate((temp_3, temp_4), axis=0)\r\n elif(iter == 4):\r\n tst_data = dataset[part_4:total_windows,:,:]\r\n trn_data = dataset[0:part_4,:,:] \r\n\r\n tst_vcts = vectors[part_4:total_windows,:]\r\n trn_vcts = vectors[0:part_4,:] \r\n\r\n # Reshape labels -> needed for keras compatibility \r\n trn_size = trn_data.shape[0] \r\n trn_vcts = np.reshape(trn_vcts, newshape=(trn_size, 1, window)) # Each vector is of size 1 x training_window => 1 x N image of labels\r\n\r\n # Neural network training starts here\r\n print(\"Creating model\", iter, \"here\")\r\n inputs = tf.keras.layers.Input(shape=(measurements, 6))\r\n reshape = tf.keras.layers.Reshape((1, measurements, 6))(inputs) # Data is a 1 x 450 'image' of 6 channels\r\n # Downstream --> Encoder\r\n conv_1 = tf.keras.layers.Conv2D(filters=8, kernel_size=(1,15), strides=1, padding='same', activation='linear')(reshape)\r\n bn_1 = tf.keras.layers.BatchNormalization(axis=3)(conv_1)\r\n act_1 = tf.keras.layers.ReLU()(bn_1)\r\n pool_1 = tf.keras.layers.MaxPool2D(pool_size=(1,2))(act_1)\r\n\r\n conv_2 = tf.keras.layers.Conv2D(filters=16, kernel_size=(1,7), strides=1, padding='same', activation='linear')(pool_1)\r\n bn_2 = tf.keras.layers.BatchNormalization(axis=3)(conv_2)\r\n act_2 = tf.keras.layers.ReLU()(bn_2)\r\n pool_2 = tf.keras.layers.MaxPool2D(pool_size=(1,2))(act_2)\r\n\r\n conv_3 = tf.keras.layers.Conv2D(filters=32, kernel_size=(1,5), strides=1, padding='same', activation='linear')(pool_2)\r\n bn_3 = tf.keras.layers.BatchNormalization(axis=3)(conv_3)\r\n act_3 = tf.keras.layers.ReLU()(bn_3)\r\n pool_3 = tf.keras.layers.MaxPool2D(pool_size=(1,2))(act_3)\r\n\r\n # Upstream --> Decoder\r\n up_conv1 = tf.keras.layers.Conv2DTranspose(filters=32, kernel_size=(1,5),padding='same',strides=(1,2),activation='linear')(pool_3)\r\n bn_4 = tf.keras.layers.BatchNormalization(axis=3)(up_conv1)\r\n act_4 = tf.keras.layers.ReLU()(bn_4)\r\n concat = tf.keras.layers.Concatenate()\r\n cc_1 = concat([act_4, pool_2])\r\n\r\n up_conv2 = tf.keras.layers.Conv2DTranspose(filters=16, 
kernel_size=(1,7),padding='same',strides=(1,2),activation='linear')(cc_1)\r\n bn_5 = tf.keras.layers.BatchNormalization(axis=3)(up_conv2)\r\n act_5 = tf.keras.layers.ReLU()(bn_5)\r\n pad_1 = tf.keras.layers.ZeroPadding2D(padding=((0,0),(0,1)))(act_5)\r\n cc_2 = concat([pad_1, pool_1])\r\n\r\n # Final Layer\r\n pen_ult = tf.keras.layers.Conv2DTranspose(filters=6,kernel_size=(1,3),strides=(1,2),activation='softmax')(cc_2)\r\n outputs = tf.keras.layers.Cropping2D(cropping=((0,0),(0,1)))(pen_ult)\r\n\r\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\r\n model.compile(optimizer = 'adam', loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits='True'), metrics=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits='True')])\r\n if(iter == 0):\r\n model.summary()\r\n\r\n # Store training sequence to .txt file\r\n training_log = 'crossval_fold_' + str(iter) + '.txt'\r\n csv_logger = tf.keras.callbacks.CSVLogger(training_log, append = True, separator=' ')\r\n print(\"Training for fold\", iter)\r\n metrics = model.fit(trn_data, trn_vcts, epochs=200, validation_split= 0.2, verbose=2, callbacks=[csv_logger])\r\n print(\"Saving model for fold\", iter)\r\n model_ID = 'crossval_modelID_' + str(iter) + '.h5'\r\n tf.keras.models.save_model(model,model_ID)\r\n #del model -> Most likely not needed....\r\n\r\n##print(\"Predict\")\r\n##op = model.predict(dataset[0:10,:,:])\r\n##print(op.shape)\r\n##temp = op[0,:,:,:]\r\n##temp = np.reshape(temp,(window, 6))\r\n##for i in range(window):\r\n##\tprint(temp[i,:], np.argmax(temp[i,:]))\r\n\r\n"
] | [
[
"tensorflow.keras.layers.Cropping2D",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.layers.ZeroPadding2D",
"numpy.amax",
"tensorflow.keras.layers.Conv2D",
"numpy.reshape",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.models.save_model",
"numpy.zeros",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.Model",
"tensorflow.keras.callbacks.CSVLogger",
"numpy.amin",
"numpy.concatenate",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.Input"
]
] |
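The row above builds a 1 x N encoder/decoder over tri-axial sensor windows; its listed APIs include `tensorflow.keras.layers.Conv2D`, `MaxPool2D` and `Conv2DTranspose`. A hedged shape-check sketch of that layer pattern (tiny made-up dimensions, not the row's 450-sample windows or its full skip-connected model):

```python
# Downsample a 1 x N "image" with Conv2D + MaxPool2D, then restore its length with
# a strided Conv2DTranspose, as in the model recorded above. Shapes are illustrative.
import numpy as np
import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(1, 8, 6))                   # 1 x 8 signal, 6 channels
x = tf.keras.layers.Conv2D(4, kernel_size=(1, 3), padding='same', activation='relu')(inputs)
x = tf.keras.layers.MaxPool2D(pool_size=(1, 2))(x)                # halve the time axis
x = tf.keras.layers.Conv2DTranspose(6, kernel_size=(1, 3), strides=(1, 2), padding='same',
                                    activation='softmax')(x)      # back to 1 x 8
model = tf.keras.Model(inputs=inputs, outputs=x)
print(model(np.zeros((2, 1, 8, 6), dtype=np.float32)).shape)      # (2, 1, 8, 6)
```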
gafaua/PolyTrack | [
"5a4b409732b9396be8271f5cba4ad426808d5af5"
] | [
"src/tools/vis_tracking_kittimots.py"
] | [
"import numpy as np\nimport cv2\nimport os\nimport glob\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\nimport pycocotools.mask as rletools\nfrom PIL import Image, ImageDraw\nimport matplotlib.pyplot as plt\n\nDATA_PATH = '../../data/KITTIMOTS/'\nIMG_PATH = DATA_PATH + 'train/'\nSAVE_VIDEO = False\nIS_GT = True\n\ncats = ['Car', 'Pedestrian']\ncat_ids = {cat: i for i, cat in enumerate(cats)}\nCOLORS = [(255, 0, 255), (122, 122, 255), (255, 0, 0)]\n\ndef draw_bbox(img, bboxes, c=(255, 0, 255)):\n for bbox in bboxes:\n color = COLORS[int(bbox[5])]\n cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), \n (int(bbox[2]), int(bbox[3])), \n color, 2, lineType=cv2.LINE_AA)\n ct = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]\n txt = '{}'.format(int(bbox[4]))\n cv2.putText(img, txt, (int(ct[0]), int(ct[1])), \n cv2.FONT_HERSHEY_SIMPLEX, 0.5, \n color, thickness=1, lineType=cv2.LINE_AA)\n\nif __name__ == '__main__':\n # seqs = os.listdir(IMG_PATH)\n seqs = ['0001']\n for seq in sorted(seqs):\n print('seq', seq)\n if '.DS_Store' in seq:\n continue\n\n gt_file = DATA_PATH + 'instances_txt/' + seq + '.txt'\n\n with open(gt_file, 'r') as f:\n lines = f.readlines()\n\n lines = [l.split() for l in lines]\n \n frame_count = -1\n im_to_inst = {}\n\n for l in lines:\n frame, oid, cid, h, w, rle = l\n\n if int(cid) - 1 not in cat_ids.values():\n continue\n\n frame = int(frame)\n if frame_count != frame:\n frame_count = frame\n im_to_inst[frame] = []\n \n im_to_inst[frame].append(rle)\n\n for i in im_to_inst:\n #img = cv2.imread(os.path.join(IMG_PATH, '{}/{:06d}.png'.format(seq, i)))\n img = Image.open(os.path.join(IMG_PATH, '{}/{:06d}.png'.format(seq, i))).convert('RGBA')\n #img.putalpha(128)\n\n size = [int(h), int(w)]\n merged = np.zeros(size, dtype=np.float)\n print(f'Frame {i}: {len(im_to_inst[i])} masks')\n for mask in im_to_inst[i]:\n m = {'size': size, 'counts': mask.encode(encoding='UTF-8')}\n binary_mask = rletools.decode(m)\n \n merged = np.logical_or(merged, binary_mask)\n \n merged_mask = Image.fromarray(np.uint8(merged * 128), mode='L')\n color = Image.new('RGBA', (size[1], size[0]), (228, 150, 150, 255))\n # plt.imshow(merged_mask)\n # plt.imshow(img)\n # plt.show()\n image = Image.composite(color, img, merged_mask)\n\n image.save('../../data/KITTIMOTS/examples/{:06d}.png'.format(i))\n\n\n # preds = {}\n # for K in range(1, len(sys.argv)):\n # pred_path = sys.argv[K] + '/{}.txt'.format(seq)\n # pred_file = open(pred_path, 'r')\n # preds[K] = defaultdict(list)\n # for line in pred_file:\n # tmp = line[:-1].split(' ')\n # frame_id = int(tmp[0])\n # track_id = int(tmp[1])\n # cat_id = cat_ids[tmp[2]]\n # bbox = [float(tmp[6]), float(tmp[7]), float(tmp[8]), float(tmp[9])]\n # score = float(tmp[17])\n # preds[K][frame_id].append(bbox + [track_id, cat_id, score])\n\n # images_path = '{}/{}/'.format(IMG_PATH, seq)\n # images = os.listdir(images_path)\n # num_images = len([image for image in images if 'png' in image])\n \n # for i in range(num_images):\n # frame_id = i\n # file_path = '{}/{:06d}.png'.format(images_path, i)\n # img = cv2.imread(file_path)\n # for K in range(1, len(sys.argv)):\n # img_pred = img.copy()\n # draw_bbox(img_pred, preds[K][frame_id])\n # cv2.imshow('pred{}'.format(K), img_pred)\n # cv2.waitKey()\n # if SAVE_VIDEO:\n # video.write(img_pred)\n # if SAVE_VIDEO:\n # video.release()\n"
] | [
[
"numpy.logical_or",
"numpy.zeros",
"numpy.uint8"
]
] |
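The row above visualises KITTI MOTS instance masks; its listed numpy calls (`numpy.zeros`, `numpy.logical_or`, `numpy.uint8`) merge per-instance masks into one alpha channel. A hedged sketch with hand-made masks in place of the row's RLE-decoded instances (and `bool` instead of the deprecated `np.float` accumulator used there):

```python
# OR several binary instance masks together and scale to a uint8 alpha mask,
# mirroring the merging loop recorded above. Mask contents are illustrative.
import numpy as np

size = (4, 6)
masks = [np.zeros(size, dtype=np.uint8) for _ in range(2)]
masks[0][1:3, 1:3] = 1
masks[1][2:4, 3:5] = 1

merged = np.zeros(size, dtype=bool)
for m in masks:
    merged = np.logical_or(merged, m)

alpha = np.uint8(merged * 128)                # 0 or 128, ready for an 'L'-mode PIL mask
print(alpha.max(), int(alpha.sum()) // 128)   # 128 8
```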
condereis/mean-variance-portfolio | [
"526b1e86d1e92f08ceca9a7c204b043089272744"
] | [
"tests/test_stock.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `mvport` package.\"\"\"\n\n\nimport unittest\nimport numpy as np\n\nfrom mvport.stock import Stock\n\n\nclass TestStock(unittest.TestCase):\n \"\"\"Tests for `mvport` package.\"\"\"\n\n def setUp(self):\n \"\"\"SetUp.\"\"\"\n self.ticker = 'AAPL'\n self.returns = [-2, -1, 0, 1, 2]\n self.stock = Stock(self.ticker, self.returns)\n\n def test_get_ticker(self):\n \"\"\"Test get_ticker.\"\"\"\n self.assertEqual(self.stock.get_ticker(), self.ticker)\n\n def test_set_ticker(self):\n \"\"\"Test set_ticker.\"\"\"\n self.stock.set_ticker('new_ticker')\n self.assertEqual(self.stock.get_ticker(), 'new_ticker')\n\n def test_get_returns(self):\n \"\"\"Test get_returns.\"\"\"\n np.testing.assert_array_equal(self.stock.get_returns(), np.array(self.returns))\n\n def test_set_returns(self):\n \"\"\"Test set_ticker.\"\"\"\n self.stock.set_returns([-1, 0, 1])\n np.testing.assert_array_equal(self.stock.get_returns(), np.array([-1, 0, 1]))\n\n def test_get_mean(self):\n \"\"\"Test get_mean.\"\"\"\n self.assertEqual(self.stock.get_mean(), 0)\n self.stock.set_returns([0, 1, 2])\n self.assertEqual(self.stock.get_mean(), 1)\n\n def test_get_variance(self):\n \"\"\"Test get_variance.\"\"\"\n self.assertEqual(self.stock.get_variance(), 2)\n self.stock.set_returns([-3,-1,0,1,3])\n self.assertEqual(self.stock.get_variance(), 4)\n\n\nif __name__ == '__main__':\n sys.exit(unittest.main())\n"
] | [
[
"numpy.array"
]
] |
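The row above is a small unit-test file whose only listed API is `numpy.array`, used when comparing stored returns. A hedged check of the same expectations the tests encode (mean 0, variance 2 for the example returns):

```python
# The array comparison and the expected statistics from the tests recorded above.
import numpy as np

returns = [-2, -1, 0, 1, 2]
np.testing.assert_array_equal(np.array(returns), np.array([-2, -1, 0, 1, 2]))
print(np.mean(returns), np.var(returns))   # 0.0 2.0
```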
LLcat1217/Cirq | [
"b88069f7b01457e592ad69d6b413642ef11a56b8"
] | [
"cirq-core/cirq/ops/two_qubit_diagonal_gate_test.py"
] | [
"# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nimport sympy\n\nimport cirq\n\n\[email protected](\n 'gate',\n (\n (\n cirq.TwoQubitDiagonalGate([2, 3, 5, 7]),\n cirq.TwoQubitDiagonalGate([0, 0, 0, 0]),\n cirq.TwoQubitDiagonalGate([2, 3, 5, sympy.Symbol('a')]),\n cirq.TwoQubitDiagonalGate([0.34, 0.12, 0, 0.96]),\n )\n ),\n)\ndef test_consistent_protocols(gate):\n cirq.testing.assert_implements_consistent_protocols(gate)\n\n\ndef test_parameterized_decompose():\n angles = sympy.symbols('x0, x1, x2, x3')\n parameterized_op = cirq.TwoQubitDiagonalGate(angles).on(*cirq.LineQubit.range(2))\n decomposed_circuit = cirq.Circuit(cirq.decompose(parameterized_op))\n for resolver in (\n cirq.Linspace('x0', -2, 2, 6)\n * cirq.Linspace('x1', -2, 2, 6)\n * cirq.Linspace('x2', -2, 2, 6)\n * cirq.Linspace('x3', -2, 2, 6)\n ):\n np.testing.assert_allclose(\n cirq.unitary(cirq.resolve_parameters(parameterized_op, resolver)),\n cirq.unitary(cirq.resolve_parameters(decomposed_circuit, resolver)),\n )\n\n\ndef test_unitary():\n diagonal_angles = [2, 3, 5, 7]\n assert cirq.has_unitary(cirq.TwoQubitDiagonalGate(diagonal_angles))\n np.testing.assert_allclose(\n cirq.unitary(cirq.TwoQubitDiagonalGate(diagonal_angles)),\n np.diag([np.exp(1j * angle) for angle in diagonal_angles]),\n atol=1e-8,\n )\n\n\ndef test_diagram():\n a, b = cirq.LineQubit.range(2)\n\n diagonal_circuit = cirq.Circuit(cirq.TwoQubitDiagonalGate([2, 3, 5, 7])(a, b))\n cirq.testing.assert_has_diagram(\n diagonal_circuit,\n \"\"\"\n0: ───diag(2, 3, 5, 7)───\n │\n1: ───#2─────────────────\n\"\"\",\n )\n cirq.testing.assert_has_diagram(\n diagonal_circuit,\n \"\"\"\n0: ---diag(2, 3, 5, 7)---\n |\n1: ---#2-----------------\n\"\"\",\n use_unicode_characters=False,\n )\n\n\ndef test_diagonal_exponent():\n diagonal_angles = [2, 3, 5, 7]\n diagonal_gate = cirq.TwoQubitDiagonalGate(diagonal_angles)\n\n sqrt_diagonal_gate = diagonal_gate**0.5\n\n expected_angles = [prime / 2 for prime in diagonal_angles]\n assert cirq.approx_eq(sqrt_diagonal_gate, cirq.TwoQubitDiagonalGate(expected_angles))\n\n assert cirq.pow(cirq.TwoQubitDiagonalGate(diagonal_angles), \"test\", None) is None\n\n\ndef test_protocols_mul_not_implemented():\n diagonal_angles = [2, 3, None, 7]\n diagonal_gate = cirq.TwoQubitDiagonalGate(diagonal_angles)\n with pytest.raises(TypeError):\n cirq.protocols.pow(diagonal_gate, 3)\n\n\[email protected]('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])\ndef test_resolve(resolve_fn):\n diagonal_angles = [2, 3, 5, 7]\n diagonal_gate = cirq.TwoQubitDiagonalGate(\n diagonal_angles[:2] + [sympy.Symbol('a'), sympy.Symbol('b')]\n )\n assert cirq.is_parameterized(diagonal_gate)\n\n diagonal_gate = resolve_fn(diagonal_gate, {'a': 5})\n assert diagonal_gate == cirq.TwoQubitDiagonalGate(diagonal_angles[:3] + [sympy.Symbol('b')])\n assert cirq.is_parameterized(diagonal_gate)\n\n diagonal_gate = resolve_fn(diagonal_gate, {'b': 7})\n assert diagonal_gate == 
cirq.TwoQubitDiagonalGate(diagonal_angles)\n assert not cirq.is_parameterized(diagonal_gate)\n"
] | [
[
"numpy.exp"
]
] |
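The row above tests Cirq's two-qubit diagonal gate; its only listed numpy call is `numpy.exp`, used to build the expected unitary. A hedged reproduction of that reference matrix plus a unitarity check (angles copied from the test):

```python
# Expected unitary of a two-qubit diagonal gate: np.diag of complex exponentials,
# as in the test recorded above.
import numpy as np

diagonal_angles = [2, 3, 5, 7]
u = np.diag([np.exp(1j * a) for a in diagonal_angles])
print(np.allclose(u @ u.conj().T, np.eye(4)))   # True: the matrix is unitary
```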
multirotorsociety/SAFMC-19-D2-Autonomous-Drone | [
"fd9f0fae5d7cbf618b327224e06a7f459612b4ca"
] | [
"Old/hoop_detection_angle.py"
] | [
"from __future__ import print_function\nimport time\nimport math\nimport thread\n\n# Dk imports\nfrom pymavlink import mavutil\nfrom dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative\n\n# Mux and TOF imports\nimport I2CMultiplexer\nimport VL53L1X\n\n# CV imports\nimport cv2\nimport numpy as np\n\n\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom fractions import Fraction\nfrom PIL import Image\n\nimport random \nfrom sympy import Point, Polygon, pi\n\n\n#cap = cv2.VideoCapture(0)\ncamera = PiCamera()\ncamera.resolution = (426, 240)\ncamera.framerate = 24\ncamera.exposure_mode = 'auto'\ncamera.exposure_compensation = -3\ncamera.drc_strength = 'off'\ncamera.still_stats = False\n\ncamera.awb_mode = 'off'\ncamera.awb_gains = (Fraction(167, 103), Fraction(27,16))\n\nrawCapture = PiRGBArray(camera, size=(426, 240))\n\nout = cv2.VideoWriter(str(time.time()) + \".avi\",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240))\n\n\n\n# allow the camera to warmup\ntime.sleep(0.1)\n# Connect to Vehicle\nconnection_string = '/dev/ttyUSB0'\nsitl = None\n\n# Start SITL if no connection string specified\nif not connection_string:\n import dronekit_sitl\n sitl = dronekit_sitl.start_default()\n connection_string = sitl.connection_string()\n\n# Connect to the Vehicle\nprint('Connecting to vehicle on: %s' % connection_string)\nvehicle = connect(connection_string, wait_ready=True, baud=57600)\n\n# Global variables for distance:\ndistance_in_mm_N = 0 # North Sensor\ndistance_in_mm_S = 0 # South Sensor\ndistance_in_mm_E = 0 # East Sensor\ndistance_in_mm_W = 0 # West Sensor\ndistance_in_mm_45 = 0 # 45 degree south east sensor\n\ndX = 0\ndY = 0\n\n#Create an I2C Multiplexer object, the address of I2C Multiplexer is 0X70\nI2CMulti = I2CMultiplexer.I2CMultiplexer(0x70) \n# Init TOF obj\ntof = VL53L1X.VL53L1X()\n # STarts the TOFs on their respective ports\ntry:\n # for i in [0,2,4,6]:\n for i in [0,1,2,7,3]:\n \tI2CMulti.selectPort(i)\n \ttof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=0x29)\n \ttof.open() # Initialise the i2c bus and configure the sensor\n \ttof.start_ranging(3) # Start ranging, 1 = Short Range, 2 = Medium Range, 3 = Long Range\nexcept:\n print(\"port init failed\")\n\ndef detect_circle():\n global dX\n global dY\n for img in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n\n for i in range(5): # Clears the 5 frame buffer \n frame = img.array\n height, width = frame.shape[:2]\n centre = (int(width/2), int(height/2))\n\n b_channel = np.array(frame[:,:,0]).astype('float')\n g_channel = np.array(frame[:,:,1]).astype('float')\n r_channel = np.array(frame[:,:,2]).astype('float')\n\n bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)\n img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2))\n #img_rec_red2 = np.divide(r_channel, 255)\n img_rec_red2 = np.divide(img_rec_red2,255) \n #img_rec_red2 = np.square(img_rec_red2)\n img_rec_red2[img_rec_red2 < 0.3] = 0\n #dX, dY = 0,0\n\n trials = 1\n try:\n # Get the array of indices of detected pixels\n thresholded_array = np.argwhere(img_rec_red2 >= 0.3)\n thresholded_list = thresholded_array.tolist()\n #print(thresholded_list)\n\n \n if len(thresholded_list) > trials*3:\n # sets the number of trials before averaging to get the centre\n \n total_centres_X = 0\n total_centres_Y = 0\n hoop_centre = (0,0)\n arr_len_3rd = int(len(thresholded_list) / 3)\n for i in range(trials):\n r1 = random.randrange(0, int(arr_len_3rd/2))\n\n #r2 = random.randrange(0, 
arr_len_3rd)\n # rerolls if the same number was rolled\n #while r2 == r1:\n r2 = random.randrange(arr_len_3rd, 2*arr_len_3rd)\n r3 = random.randrange(int(2.5*arr_len_3rd), len(thresholded_list))\n #while r3 == r1 or r3 == r2:\n #r3 = random.randrange(0, len(thresholded_list))\n print(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3])\n current_centre = Polygon(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3]).circumcenter\n #print(current_centre)\n total_centres_X += int(current_centre.y)\n total_centres_Y += int(current_centre.x)\n cv2.circle(frame, (thresholded_list[r1][1], thresholded_list[r1][0]), 5, (0, 0, 255), -1)\n cv2.circle(frame, (thresholded_list[r2][1], thresholded_list[r2][0]), 5, (0, 0, 255), -1)\n cv2.circle(frame, (thresholded_list[r3][1], thresholded_list[r3][0]), 5, (0, 0, 255), -1)\n \n cX = int(total_centres_X / trials)\n cY = int(total_centres_Y / trials)\n\n #print(cX,cY)\n except:\n print(\"no hoop detected\")\n \n # put text and highlight the center\n try:\n cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)\n cv2.line(frame, centre, (cX, cY), (255,0,0), 2)\n \n #cv2.putText(frame, \"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n \n dX = cX - centre[0] \n dY = centre[1] - cY\n cv2.putText(frame, (\"(\" + str(dX) + \", \" + str(dY) + \" )\"), (centre[0] - 20, centre[1] - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\n #print('Velocities: ' + str(dX) + \",\" + str(dY))\n except:\n #print(\"No centre detected\")\n #dX = 0\n #dY = 0\n dX = None\n dY = None\n \n\n out.write(frame)\n k = cv2.waitKey(1)\n rawCapture.truncate(0)\n# Arm and rakeoff to specific altitude\ndef arm_and_takeoff(aTargetAltitude):\n \"\"\"\n Arms vehicle and fly to aTargetAltitude.\n \"\"\"\n\n print(\"Basic pre-arm checks\")\n #Don't try to arm until autopilot is ready\n # while not vehicle.is_armable:\n # print(\" Waiting for vehicle to initialise...\")\n # time.sleep(1)\n\n print(\"Arming motors\")\n # Copter should arm in GUIDED mode\n vehicle.mode = VehicleMode(\"GUIDED\")\n vehicle.armed = True\n # while not vehicle.armed == True:\n # print(\"Not Armed\")\n # time.sleep(0.4)\n\n # while not vehicle.armed == True:\n # vehicle.armed = True\n # print(\"Not Armed 2\")\n # time.sleep(0.4)\n\n\n #Confirm vehicle armed before attempting to take off\n while not vehicle.armed:\n print(\" Waiting for arming...\")\n time.sleep(1)\n print(\"Taking off!\")\n vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude\n\n\n # Wait until the vehicle reaches a safe height before processing the goto \n # (otherwise the command after Vehicle.simple_takeoff will execute\n # immediately).\n while True:\n print(\" Altitude: \", vehicle.rangefinder.distance)\n current_alt = vehicle.rangefinder.distance\n if current_alt > 20:\n current_alt = 0\n print(\" Arm state: \", vehicle.armed)\n # Break and return from function just below target altitude.\n if current_alt >= aTargetAltitude * 0.95:\n print(\"Reached target altitude\")\n break\n time.sleep(1)\n\ndef goto_position_target_local_ned(north, east, down):\n \"\"\"\n Send SET_POSITION_TARGET_LOCAL_NED command to request the vehicle fly to a specified\n location in the North, East, Down frame.\n \"\"\"\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame\n 0b0000111111111000, # type_mask (only positions enabled)\n north, east, down,\n 0, 0, 0, 
# x, y, z velocity in m/s (not used)\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n # send command to vehicle\n vehicle.send_mavlink(msg)\n\ndef get_distance_metres(aLocation1, aLocation2):\n \"\"\"\n Returns the ground distance in metres between two LocationGlobal objects.\n\n This method is an approximation, and will not be accurate over large distances and close to the \n earth's poles. It comes from the ArduPilot test code: \n https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py\n \"\"\"\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5\n\ndef get_location_metres(original_location, dNorth, dEast):\n \"\"\"\n Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` metres from the \n specified `original_location`. The returned LocationGlobal has the same `alt` value\n as `original_location`.\n\n The function is useful when you want to move the vehicle around specifying locations relative to \n the current vehicle position.\n\n The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.\n\n For more information see:\n http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters\n \"\"\"\n earth_radius = 6378137.0 #Radius of \"spherical\" earth\n #Coordinate offsets in radians\n dLat = dNorth/earth_radius\n dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))\n\n #New position in decimal degrees\n newlat = original_location.lat + (dLat * 180/math.pi)\n newlon = original_location.lon + (dLon * 180/math.pi)\n if type(original_location) is LocationGlobal:\n targetlocation=LocationGlobal(newlat, newlon,original_location.alt)\n elif type(original_location) is LocationGlobalRelative:\n targetlocation=LocationGlobalRelative(newlat, newlon,original_location.alt)\n else:\n raise Exception(\"Invalid Location object passed\")\n \n return targetlocation\n\ndef goto(dNorth, dEast, gotoFunction=vehicle.simple_goto):\n \"\"\"\n Moves the vehicle to a position dNorth metres North and dEast metres East of the current position.\n\n The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for \n the target position. This allows it to be called with different position-setting commands. 
\n By default it uses the standard method: dronekit.lib.Vehicle.simple_goto().\n\n The method reports the distance to target every two seconds.\n \"\"\"\n \n currentLocation = vehicle.location.global_relative_frame\n targetLocation = get_location_metres(currentLocation, dNorth, dEast)\n targetDistance = get_distance_metres(currentLocation, targetLocation)\n gotoFunction(targetLocation)\n \n #print \"DEBUG: targetLocation: %s\" % targetLocation\n #print \"DEBUG: targetLocation: %s\" % targetDistance\n print(\"Initiating GOTO\")\n\n while vehicle.mode.name==\"GUIDED\": #Stop action if we are no longer in guided mode.\n #print \"DEBUG: mode: %s\" % vehicle.mode.name\n remainingDistance=get_distance_metres(vehicle.location.global_relative_frame, targetLocation)\n print(\"Distance to target: \" + str(remainingDistance))\n if remainingDistance < 0.11: #Just below target, in case of undershoot.\n print(\"Reached target\")\n break;\n time.sleep(2)\n\n# Sends a velocity to the drone at a rate of 2 Hx\ndef send_global_velocity(velocity_x, velocity_y, velocity_z, duration):\n \"\"\"\n Move vehicle in direction based on specified velocity vectors.\n \"\"\"\n msg = vehicle.message_factory.set_position_target_global_int_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame\n 0b0000111111000111, # type_mask (only speeds enabled)\n 0, # lat_int - X Position in WGS84 frame in 1e7 * meters\n 0, # lon_int - Y Position in WGS84 frame in 1e7 * meters\n 0, # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)\n # altitude above terrain if GLOBAL_TERRAIN_ALT_INT\n velocity_x, # X velocity in NED frame in m/s\n velocity_y, # Y velocity in NED frame in m/s\n velocity_z, # Z velocity in NED frame in m/s\n 0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n\n # send command to vehicle on 1 Hz cycle\n for x in range(0,duration):\n vehicle.send_mavlink(msg)\n time.sleep(0.5)\n\n\n\n# Sets the Yaw - vehicle will yaw according to the yaw slew rate set in params\n# give the vehicle more time (give a 0 velocity vector for x amount of seconds - enough for\n# the drone to complete the yaw)\ndef condition_yaw(heading, relative=False):\n \"\"\"\n Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).\n\n This method sets an absolute heading by default, but you can set the `relative` parameter\n to `True` to set yaw relative to the current yaw heading.\n\n By default the yaw of the vehicle will follow the direction of travel. 
After setting \n the yaw using this function there is no way to return to the default yaw \"follow direction \n of travel\" behaviour (https://github.com/diydrones/ardupilot/issues/2427)\n\n For more information see: \n http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw\n \"\"\"\n if relative:\n is_relative = 1 #yaw relative to direction of travel\n else:\n is_relative = 0 #yaw is an absolute angle\n # create the CONDITION_YAW command using command_long_encode()\n msg = vehicle.message_factory.command_long_encode(\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_CMD_CONDITION_YAW, #command\n 0, #confirmation\n heading, # param 1, yaw in degrees\n 0, # param 2, yaw speed deg/s\n 1, # param 3, direction -1 ccw, 1 cw\n is_relative, # param 4, relative offset 1, absolute angle 0\n 0, 0, 0) # param 5 ~ 7 not used\n # send command to vehicle\n vehicle.send_mavlink(msg)\n\n# The following 2 methods allow for the drone attitude to be directly controlled\n# the movement is not OF corrected - avoid usage where possible\ndef set_attitude(roll_angle = 0.0, pitch_angle = 0.0, yaw_rate = 0.0, thrust = 0.5, duration = 0):\n \"\"\"\n Note that from AC3.3 the message should be re-sent every second (after about 3 seconds\n with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified\n velocity persists until it is canceled. The code below should work on either version\n (sending the message multiple times does not cause problems).\n \"\"\"\n \n \"\"\"\n The roll and pitch rate cannot be controllbed with rate in radian in AC3.4.4 or earlier,\n so you must use quaternion to control the pitch and roll for those vehicles.\n \"\"\"\n \n # Thrust > 0.5: Ascend\n # Thrust == 0.5: Hold the altitude\n # Thrust < 0.5: Descend\n msg = vehicle.message_factory.set_attitude_target_encode(\n 0, # time_boot_ms\n 1, # Target system\n 1, # Target component\n 0b00000000, # Type mask: bit 1 is LSB\n to_quaternion(roll_angle, pitch_angle), # Quaternion\n 0, # Body roll rate in radian\n 0, # Body pitch rate in radian\n math.radians(yaw_rate), # Body yaw rate in radian\n thrust # Thrust\n )\n vehicle.send_mavlink(msg)\n\n start = time.time()\n while time.time() - start < duration:\n vehicle.send_mavlink(msg)\n #time.sleep(0.1)\n\ndef to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):\n \"\"\"\n Convert degrees to quaternions\n \"\"\"\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n# Gets the readings from the TOF sensors and updates the distance vars\ndef get_I2C_readings():\n global distance_in_mm_N\n global distance_in_mm_S\n global distance_in_mm_E\n global distance_in_mm_W\n global distance_in_mm_45\n while(True):\n\n I2CMulti.selectPort(0)\n distance_in_mm_N = tof.get_distance() # Grab the range in mm\n\n I2CMulti.selectPort(3)\n distance_in_mm_S = tof.get_distance() # Grab the range in mm\n\n I2CMulti.selectPort(7)\n distance_in_mm_E = tof.get_distance() # Grab the range in mm\n\n I2CMulti.selectPort(2)\n distance_in_mm_W = tof.get_distance() # Grab the range in mm\n I2CMulti.selectPort(1)\n distance_in_mm_45 = tof.get_distance() # Grab the range in mm\n\n\n\t\t#print(\"Sensor N distance: 
\" + str(distance_in_mm_N) + \" \\nSensor S distance: \" + str(distance_in_mm_S) + \"\\nSensor E distance: \" + str(distance_in_mm_E) + \"\\nSensor W distance: \" + str(distance_in_mm_W))\n time.sleep(0.05)\n\n\n\ndef calculate_velocity(ground_heading, angle):\n rads = math.radian(angle)\n rads += math.radians(ground_heading)\n if rads > math.radians(360):\n rads -= math.radians(360)\n elif rads < -math.radians(360):\n rads += math.radians(360)\n vel_x = (np.cos(heading_rad) / 5)\n vel_y = (np.sin(heading_rad) / 5)\n return vel_x, vel_y\n\n# Starts TOF readings before takeoff\n#thread.start_new_thread(get_I2C_readings, ())\n\n# Starts CV code\nthread.start_new_thread(detect_circle, ())\n\n# Gets vehcle heading on thr ground (this is assumed to be the forward heading)\nground_heading = vehicle.heading\n\n\n# Takeoff to 1.5m\narm_and_takeoff(1.5)\n\n# Corridor Variables\nINCREMENT_DISTANCE = 0.1\nCORRIDOR_WIDTH_HALVED = 1300 # in mm\nTHRESHOLD_DISTANCE = 100\nlower_bound = CORRIDOR_WIDTH_HALVED - THRESHOLD_DISTANCE\nupper_bound = CORRIDOR_WIDTH_HALVED + THRESHOLD_DISTANCE\n\n\n\n#print(str(right_X) + str(right_Y))\n\nVEL_SCALE_Y = 0.005 # velocity scaling factor from openCV\nVEL_SCALE_X = 0.001\npx_threshold = 10 # sets the threshold before any velocity is taken\n\nprint(dX, dY)\n# Hoop alignment code\nx_aligned = False\ny_aligned = False\n\n### SINGLE AXIS ALIGNMENT CODE\n# while True:\n# if dX < -px_threshold or dX > px_threshold:\n# # remember, negative means up\n# up_vel = -dX*VEL_SCALE\n# if up_vel > 0.05:\n# up_vel = 0.05\n# elif up_vel < 0.05:\n# up_vel = -0.05\n# send_global_velocity(0,0,(up_vel), 2)\n# send_global_velocity(0,0,0,1) # reset the global vels\n# else:\n# break\n\n# print(\"x aligned\")\n\n# while True:\n\n# if dY < -px_threshold or dY > px_threshold:\n# right_vel_X = -right_X*dY*VEL_SCALE\n# right_vel_Y = -right_Y*dY*VEL_SCALE\n# if right_vel_X > 0.05:\n# right_vel_X = 0.05\n# elif right_vel_X < -0.05:\n# right_vel_X = -0.05\n# if right_vel_Y > 0.05:\n# right_vel_Y = 0.05\n# elif right_vel_Y < -0.05:\n# right_vel_Y = -0.05\n# send_global_velocity(right_vel_X,right_vel_Y,0,2)\n# send_global_velocity(0,0,0,1) # reset the global vels\n# else :\n# break\n\n### DOUBLE AXIS ALIGNMENT\nup_vel, right_vel_X, right_vel_Y = 0,0,0\n\nforward_scale = 0.1\n\nstab_seconds_X = 0\nstab_seconds_Y = 0\nstab_threshold = 1\nwhile (not x_aligned) or (not y_aligned):\n if dX == None:\n print(\"hoop not detected\")\n break\n line_d = (dX**2 + dY**2)**0.5\n\n if line_d == 0:\n fwd_x, fwd_y = calculate_velocity(ground_heading, 0)\n send_global_velocity(fwd_X,fwd_Y,0,2)\n send_global_velocity(0,0,0,1)\n\n total_scale = forward_scale/line_d\n print(dX, dY)\n if dX < -px_threshold or dX > px_threshold:\n x_aligned = False\n up_vel = round((-dX*VEL_SCALE_X), 3)\n if up_vel > 0.1:\n up_vel = 0.1\n elif up_vel < -0.1:\n up_vel = -0.1\n stab_seconds_X = 0\n else:\n if stab_seconds_X == stab_threshold:\n x_aligned = True\n else:\n x_aligned = False\n stab_seconds_X += 1\n up_vel = 0\n if dY < -px_threshold or dY > px_threshold:\n y_aligned = False\n angle = math.degrees(np.arctan2(total_scale / line_d))\n right_vel_X, right_vel_Y = calculate_velocity(ground_heading, angle)\n\n stab_seconds_Y = 0\n else:\n if stab_seconds_Y == stab_threshold:\n y_aligned = True\n else:\n y_aligned = False\n stab_seconds_Y += 1\n right_vel_X = 0\n right_vel_Y = 0\n print(\"alignment x: \" + str(x_aligned))\n print(\"alignment y: \" + str(y_aligned))\n print(\"velocity: \" + str(right_vel_X) + \" : \" + str(right_vel_Y) + 
\" : \" + str(up_vel))\n send_global_velocity(right_vel_X,right_vel_Y,up_vel,2)\n send_global_velocity(0,0,0,1) # reset the global vels\n\n\n\nprint(\"Fully Aligned\")\nsend_global_velocity(0,0,0,10) # reset the global vels\n# condition_yaw(90, True)\n# condition_yaw(-90, True)\n \n\n\n\n\n\n\nprint(\"Landing\")\nvehicle.mode = VehicleMode(\"LAND\")\n\n# Close vehicle object before exiting script\nprint(\"Close vehicle object\")\nvehicle.close()\n\n# Shut down simulator if it was started.\nif sitl:\n sitl.stop()\n\n\n\n\n\n\nI2CMulti.i2c.write_byte(0x70,0) # how it closes?\ntof.stop_ranging() # Stop ranging\n\nout.release()\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.arctan2",
"numpy.divide",
"numpy.argwhere",
"numpy.subtract",
"numpy.cos",
"numpy.add",
"numpy.array",
"numpy.sin"
]
] |
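A note on the hoop_detection_angle.py code above: its calculate_velocity helper calls math.radian (the standard-library function is math.radians) and then takes the cosine and sine of an undefined heading_rad, and the alignment loop feeds np.arctan2 a single argument even though it needs two (y, x). A corrected sketch of the helper, assuming the intent is to rotate a fixed 1/5 m/s velocity vector by the ground heading plus an offset angle, both given in degrees:

import math
import numpy as np

def calculate_velocity(ground_heading, angle):
    # Heading of the desired velocity = take-off heading + offset angle (degrees).
    rads = math.radians(angle) + math.radians(ground_heading)
    # Keep the result within one full turn, mirroring the original checks.
    if rads > math.radians(360):
        rads -= math.radians(360)
    elif rads < -math.radians(360):
        rads += math.radians(360)
    vel_x = np.cos(rads) / 5  # passed by the caller as the NED x velocity
    vel_y = np.sin(rads) / 5  # passed by the caller as the NED y velocity
    return vel_x, vel_y

The same file's to_quaternion computes w, x, y and z but never returns them, so set_attitude would fail where it builds the attitude message; adding return [w, x, y, z] restores the ordering the MAVLink attitude target expects.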
ondrejba/hmm | [
"1e9fe47a6057d93e7c77614016a89d5d46959e97"
] | [
"hmm/scripts/easy_casino_learn.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom ..easy_casino import Casino\nfrom ..hmm_multinoulli import HMMMultinoulli\n\n\nhmm = HMMMultinoulli(Casino.A, Casino.PX, Casino.INIT)\n\n# generate sequence\nseq_length = 300\nbatch_size = 500\n\nxs_batch = []\nzs_batch = []\n\nfor j in range(batch_size):\n casino = Casino()\n\n xs = [casino.observe()]\n zs = [casino.z]\n\n for i in range(seq_length - 1):\n casino.transition()\n xs.append(casino.observe())\n zs.append(casino.z)\n\n xs_batch.append(xs)\n zs_batch.append(zs)\n\nxs_batch = np.array(xs_batch)\nzs_batch = np.array(zs_batch)\n\nnum_hidden_states = len(np.unique(zs_batch))\n\n# learn\nhmm.initialize_em(2, 6)\n\nfor i in range(200):\n # learn\n print(\"step\", i)\n print(hmm.A)\n print(hmm.init)\n print(hmm.PX)\n print()\n\n ll = hmm.learn_em(xs_batch)\n print(\"log likelihood:\", ll)\n print()\n\n# calculate probabilities\nalphas, log_evidence, betas, gammas, etas = hmm.forward_backward(xs_batch[0])\n\n# plot alphas and gammas\nplot_zs = np.array(zs_batch[0])\nplot_alphas = alphas[:, 1]\nplot_gammas = gammas[:, 1]\nplot_xs = np.linspace(1, len(plot_zs), num=len(plot_zs))\n\nplt.figure(figsize=(12, 9))\n\nplt.subplot(2, 1, 1)\nplt.title(\"filtering\")\nplt.plot(plot_xs, plot_zs, label=\"z\")\nplt.plot(plot_xs, plot_alphas, label=\"P(z) = 1\")\nplt.legend()\n\nplt.subplot(2, 1, 2)\nplt.title(\"smoothing\")\nplt.plot(plot_xs, plot_zs, label=\"z\")\nplt.plot(plot_xs, plot_gammas, label=\"P(z) = 1\")\nplt.legend()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.unique"
]
] |
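The easy_casino_learn.py script above plots the filtered posteriors (alphas) against the smoothed posteriors (gammas) returned by HMMMultinoulli.forward_backward, whose implementation is not included here. For orientation only, a generic normalised forward-filtering recursion for a discrete-observation HMM looks like the sketch below; it is not the repository's implementation, and it assumes a row-stochastic transition matrix (A[i, j] = P(z_t = j | z_{t-1} = i)) and an emission matrix indexed as B[state, symbol]:

import numpy as np

def forward_filter(A, B, init, xs):
    # Returns alphas with alphas[t, k] = P(z_t = k | x_1, ..., x_t).
    alphas = np.zeros((len(xs), len(init)))
    alpha = init * B[:, xs[0]]
    alphas[0] = alpha / alpha.sum()
    for t in range(1, len(xs)):
        alpha = (alphas[t - 1] @ A) * B[:, xs[t]]
        alphas[t] = alpha / alpha.sum()
    return alphas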
ustbjdl1021/improved_snl_unet | [
"7f7bf092153e1a535337b80bd1b673eff3ddec52"
] | [
"model/snl_block.py"
] | [
"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass ImprovedSNL(nn.Module):\r\n def __init__(self, in_channels, transfer_channels, stage_num=2):\r\n super(ImprovedSNL, self).__init__()\r\n self.in_channels = in_channels\r\n self.transfer_channels = transfer_channels\r\n self.stage_num = stage_num\r\n self.transform_t = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)\r\n self.transform_p = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)\r\n self.row_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)\r\n self.column_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)\r\n self.w1 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)\r\n self.w2 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)\r\n self.bn = nn.BatchNorm2d(in_channels)\r\n self._init_params()\r\n\r\n def _init_params(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.Linear):\r\n nn.init.normal_(m.weight, 0, 0.01)\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n \r\n def getAtt(self, x):\r\n t = self.transform_t(x)\r\n p = self.transform_p(x)\r\n b, c, h, w = t.size()\r\n t = t.view(b, c, -1).permute(0, 2, 1)\r\n p = p.view(b, c, -1)\r\n m = torch.bmm(torch.relu(t), torch.relu(p))\r\n m += m.permute(0, 2, 1)\r\n m_hat = m / 2\r\n degree = torch.sum(m_hat, dim=2)\r\n degree[degree != 0] = torch.sqrt(1.0 / degree[degree != 0])\r\n affinity_matrix = m_hat * degree.unsqueeze(1)\r\n affinity_matrix *= degree.unsqueeze(2)\r\n \r\n return affinity_matrix\r\n\r\n def stage(self, x):\r\n affinity_matrix = self.getAtt(x)\r\n \r\n column_features = self.column_transform(x)\r\n b, c, h, w = column_features.size()\r\n column_features = column_features.view(b, c, -1)\r\n column_features = torch.bmm(column_features, affinity_matrix).contiguous().view(b,c,h,w)\r\n column_features = self.w1(column_features)\r\n \r\n row_features = self.row_transform(x)\r\n b, c, h, w = row_features.size()\r\n row_features = row_features.view(b, c, -1).permute(0, 2, 1)\r\n row_features = torch.bmm(affinity_matrix, row_features).permute(0, 2, 1).contiguous().view(b,c,h,w)\r\n row_features = self.w2(row_features)\r\n \r\n output = column_features + row_features\r\n output = self.bn(output)\r\n output = output + x\r\n\r\n return output\r\n\r\n def forward(self, x):\r\n for stage in range(self.stage_num):\r\n x = self.stage(x)\r\n\r\n return x\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"torch.sum",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.sqrt",
"torch.nn.init.normal_",
"torch.relu",
"torch.nn.Conv2d",
"torch.bmm"
]
] |
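In snl_block.py above, getAtt symmetrises a non-negative similarity matrix and then rescales it by inverse square-root row degrees, i.e. it forms D^{-1/2} * M_hat * D^{-1/2}. The same normalisation in isolation, using only operations that already appear in the module (the toy batch of random matrices is made up):

import torch

def normalise_affinity(m_hat):
    # m_hat: (b, n, n) symmetric, non-negative affinity matrices.
    degree = torch.sum(m_hat, dim=2)                        # (b, n) row degrees
    degree[degree != 0] = torch.sqrt(1.0 / degree[degree != 0])
    return m_hat * degree.unsqueeze(1) * degree.unsqueeze(2)

m = torch.relu(torch.randn(2, 5, 5))
m_hat = (m + m.transpose(1, 2)) / 2                         # symmetrise, as getAtt does
print(normalise_affinity(m_hat).shape)                      # torch.Size([2, 5, 5])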
KnwSondess/Regym | [
"825c7dacf955a3e2f6c658c0ecb879a0ca036c1a"
] | [
"regym/rl_algorithms/algorithms/PPO/rnd_loss.py"
] | [
"from typing import Dict, List\nimport torch\nimport torch.nn.functional as F \n\n\ndef compute_loss(states: torch.Tensor, \n actions: torch.Tensor,\n next_states: torch.Tensor,\n log_probs_old: torch.Tensor, \n ext_returns: torch.Tensor,\n ext_advantages: torch.Tensor,\n std_ext_advantages: torch.Tensor,\n int_returns: torch.Tensor,\n int_advantages: torch.Tensor, \n std_int_advantages: torch.Tensor,\n target_random_features: torch.Tensor,\n states_mean: torch.Tensor, \n states_std: torch.Tensor,\n model: torch.nn.Module,\n pred_intr_model: torch.nn.Module,\n intrinsic_reward_ratio: float,\n ratio_clip: float, \n entropy_weight: float,\n value_weight: float,\n rnd_weight: float,\n rnd_obs_clip: float,\n summary_writer: object = None,\n iteration_count: int = 0,\n rnn_states: Dict[str, Dict[str, List[torch.Tensor]]] = None) -> torch.Tensor:\n '''\n Computes the loss of an actor critic model using the\n loss function from equation (9) in the paper:\n Proximal Policy Optimization Algorithms: https://arxiv.org/abs/1707.06347\n\n :param states: Dimension: batch_size x state_size: States visited by the agent.\n :param actions: Dimension: batch_size x action_size. Actions which the agent\n took at every state in :param states: with the same index.\n :param log_probs_old: Dimension: batch_size x 1. Log probability of taking\n the action with the same index in :param actions:.\n Used to compute the policy probability ratio.\n Refer to original paper equation (6)\n :param ext_returns: Dimension: batch_size x 1. Empirical returns obtained via\n calculating the discounted return from the environment's rewards\n :param ext_advantages: Dimension: batch_size x 1. Estimated advantage function\n for every state and action in :param states: and\n :param actions: (respectively) with the same index.\n :param std_ext_advantages: Dimension: batch_size x 1. Estimated standardized advantage function\n for every state and action in :param states: and\n :param actions: (respectively) with the same index.\n :param int_returns: Dimension: batch_size x 1. Empirical intrinsic returns obtained via\n calculating the discounted intrinsic return from the intrinsic rewards.\n :param int_advantages: Dimension: batch_size x 1. Estimated intrisinc advantage function\n for every state and action in :param states: and\n :param actions: (respectively) with the same index.\n :param std_int_advantages: Dimension: batch_size x 1. Estimated standardized intrinsic advantage function\n for every state and action in :param states: and\n :param actions: (respectively) with the same index.\n :param target_random_features: target random features used to compute the intrinsic rewards.\n :param states_mean: mean over the previous training step's states.\n :param states_std: standard deviation over the previous training step's states.\n :param model: torch.nn.Module used to compute the policy probability ratio\n as specified in equation (6) of original paper.\n :param predict_intr_model: intrinsic reward prediction model.\n :param intrinsic_reward_ratio: ratio of intrinsic reward to extrinsic reward.\n :param ratio_clip: Epsilon value used to clip the policy ratio's value.\n This parameter acts as the radius of the Trust Region.\n Refer to original paper equation (7).\n :param entropy_weight: Coefficient to be used for the entropy bonus\n for the loss function. Refer to original paper eq (9)\n :param value_weight: Coefficient to be used for the value loss\n for the loss function. 
Refer to original paper eq (9)\n :param rnd_weight: Coefficient to be used for the rnd loss\n for the loss function.\n :param rnn_states: The :param model: can be made up of different submodules.\n Some of these submodules will feature an LSTM architecture.\n This parameter is a dictionary which maps recurrent submodule names\n to a dictionary which contains 2 lists of tensors, each list\n corresponding to the 'hidden' and 'cell' states of\n the LSTM submodules. These tensors are used by the\n :param model: when calculating the policy probability ratio.\n '''\n advantages = ext_advantages + intrinsic_reward_ratio*int_advantages\n std_advantages = std_ext_advantages + intrinsic_reward_ratio*std_int_advantages\n \n prediction = model(states, actions, rnn_states=rnn_states)\n \n ratio = torch.exp((prediction['log_pi_a'] - log_probs_old))\n \n obj = ratio * std_advantages\n obj_clipped = torch.clamp(ratio,\n 1.0 - ratio_clip,\n 1.0 + ratio_clip) * std_advantages\n \n policy_val = -torch.min(obj, obj_clipped).mean()\n entropy_val = prediction['ent'].mean()\n policy_loss = policy_val - entropy_weight * entropy_val # L^{clip} and L^{S} from original paper\n #policy_loss = -torch.min(obj, obj_clipped).mean() - entropy_weight * prediction['ent'].mean() # L^{clip} and L^{S} from original paper\n \n # Random Network Distillation loss:\n norm_next_states = (next_states-states_mean) / (states_std+1e-8)\n if rnd_obs_clip > 1e-1:\n norm_next_states = torch.clamp( norm_next_states, -rnd_obs_clip, rnd_obs_clip)\n pred_random_features = pred_intr_model(norm_next_states)\n \n # Clamping:\n #pred_random_features = torch.clamp(pred_random_features, -1e20, 1e20)\n #target_random_features = torch.clamp(target_random_features, -1e20, 1e20)\n \n # Softmax:\n #pred_random_features = F.softmax(pred_random_features)\n \n # Losses:\n #int_reward_loss = torch.nn.functional.smooth_l1_loss(target_random_features.detach(), pred_random_features)\n int_reward_loss = torch.nn.functional.mse_loss( pred_random_features, target_random_features.detach())\n \n #ext_returns = torch.clamp(ext_returns, -1e10, 1e10)\n #int_returns = torch.clamp(int_returns, -1e10, 1e10)\n #prediction['v'] = torch.clamp(prediction['v'], -1e10, 1e10)\n #prediction['int_v'] = torch.clamp(prediction['int_v'], -1e10, 1e10)\n \n #ext_v_loss = torch.nn.functional.smooth_l1_loss(ext_returns, prediction['v']) \n #int_v_loss = torch.nn.functional.smooth_l1_loss(int_returns, prediction['int_v']) \n ext_v_loss = torch.nn.functional.mse_loss(input=prediction['v'], target=ext_returns) \n int_v_loss = torch.nn.functional.mse_loss(input=prediction['int_v'], target=int_returns) \n \n value_loss = (ext_v_loss + int_v_loss)\n #value_loss = ext_v_loss\n rnd_loss = int_reward_loss \n\n total_loss = policy_loss + rnd_weight * rnd_loss + value_weight * value_loss\n #total_loss = policy_loss + value_weight * value_loss\n\n if summary_writer is not None:\n summary_writer.add_scalar('Training/RatioMean', ratio.mean().cpu().item(), iteration_count)\n #summary_writer.add_histogram('Training/Ratio', ratio.cpu(), iteration_count)\n summary_writer.add_scalar('Training/ExtAdvantageMean', ext_advantages.mean().cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/IntAdvantageMean', int_advantages.mean().cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/AdvantageMean', advantages.mean().cpu().item(), iteration_count)\n #summary_writer.add_histogram('Training/ExtAdvantage', ext_advantages.cpu(), iteration_count)\n 
#summary_writer.add_histogram('Training/IntAdvantage', int_advantages.cpu(), iteration_count)\n #summary_writer.add_histogram('Training/Advantage', advantages.cpu(), iteration_count)\n summary_writer.add_scalar('Training/RNDLoss', int_reward_loss.cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/ExtVLoss', ext_v_loss.cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/IntVLoss', int_v_loss.cpu().item(), iteration_count)\n \n summary_writer.add_scalar('Training/MeanVValues', prediction['v'].cpu().mean().item(), iteration_count)\n summary_writer.add_scalar('Training/MeanReturns', ext_returns.cpu().mean().item(), iteration_count)\n summary_writer.add_scalar('Training/StdVValues', prediction['v'].cpu().std().item(), iteration_count)\n summary_writer.add_scalar('Training/StdReturns', ext_returns.cpu().std().item(), iteration_count)\n \n summary_writer.add_scalar('Training/MeanIntVValues', prediction['int_v'].cpu().mean().item(), iteration_count)\n summary_writer.add_scalar('Training/MeanIntReturns', int_returns.cpu().mean().item(), iteration_count)\n summary_writer.add_scalar('Training/StdIntVValues', prediction['int_v'].cpu().std().item(), iteration_count)\n summary_writer.add_scalar('Training/StdIntReturns', int_returns.cpu().std().item(), iteration_count)\n \n summary_writer.add_scalar('Training/ValueLoss', value_loss.cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/PolicyVal', policy_val.cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/EntropyVal', entropy_val.cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/PolicyLoss', policy_loss.cpu().item(), iteration_count)\n summary_writer.add_scalar('Training/TotalLoss', total_loss.cpu().item(), iteration_count)\n \n return total_loss\n"
] | [
[
"torch.nn.functional.mse_loss",
"torch.clamp",
"torch.exp",
"torch.min"
]
] |
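The policy term inside compute_loss above is the standard PPO clipped surrogate plus an entropy bonus (equation (9) of the paper cited in its docstring). Isolated from the RND and value-function parts it reduces to the sketch below; the default ratio_clip and entropy_weight values are illustrative, not the repository's settings:

import torch

def clipped_policy_loss(log_pi_new, log_pi_old, advantages, entropy,
                        ratio_clip=0.2, entropy_weight=0.01):
    # r_t = pi_new / pi_old; objective = min(r_t * A_t, clip(r_t, 1-eps, 1+eps) * A_t)
    ratio = torch.exp(log_pi_new - log_pi_old)
    obj = ratio * advantages
    obj_clipped = torch.clamp(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) * advantages
    return -torch.min(obj, obj_clipped).mean() - entropy_weight * entropy.mean()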
glasgowcompbio/vimms-gym | [
"95cb6fa84ee6e3a64618b7a2a54c3835ad0d7867"
] | [
"vimms_gym/viewer_helper.py"
] | [
"import os\nimport sys\n\nimport numpy as np\nimport streamlit as st\nfrom stable_baselines3 import PPO\nfrom vimms.ChemicalSamplers import UniformRTAndIntensitySampler, GaussianChromatogramSampler, \\\n UniformMZFormulaSampler\nfrom vimms.Common import POSITIVE\n\nfrom vimms_gym.common import METHOD_PPO, METHOD_TOPN\n\nsys.path.append('..')\nfrom vimms_gym.env import DDAEnv\nfrom vimms_gym.evaluation import Episode, pick_action\n\n\[email protected]_memo\ndef preset_1():\n n_chemicals = (2000, 5000)\n mz_range = (100, 600)\n rt_range = (200, 1000)\n intensity_range = (1E4, 1E10)\n\n min_mz = mz_range[0]\n max_mz = mz_range[1]\n min_rt = rt_range[0]\n max_rt = rt_range[1]\n\n min_log_intensity = np.log(intensity_range[0])\n max_log_intensity = np.log(intensity_range[1])\n\n isolation_window = 0.7\n rt_tol = 120\n mz_tol = 10\n ionisation_mode = POSITIVE\n enable_spike_noise = True\n noise_density = 0.1\n noise_max_val = 1E3\n\n mz_sampler = UniformMZFormulaSampler(min_mz=min_mz, max_mz=max_mz)\n ri_sampler = UniformRTAndIntensitySampler(min_rt=min_rt, max_rt=max_rt,\n min_log_intensity=min_log_intensity,\n max_log_intensity=max_log_intensity)\n cr_sampler = GaussianChromatogramSampler()\n params = {\n 'chemical_creator': {\n 'mz_range': mz_range,\n 'rt_range': rt_range,\n 'intensity_range': intensity_range,\n 'n_chemicals': n_chemicals,\n 'mz_sampler': mz_sampler,\n 'ri_sampler': ri_sampler,\n 'cr_sampler': cr_sampler,\n },\n 'noise': {\n 'enable_spike_noise': enable_spike_noise,\n 'noise_density': noise_density,\n 'noise_max_val': noise_max_val,\n 'mz_range': mz_range\n },\n 'env': {\n 'ionisation_mode': ionisation_mode,\n 'rt_range': rt_range,\n 'isolation_window': isolation_window,\n 'mz_tol': mz_tol,\n 'rt_tol': rt_tol,\n }\n }\n return params\n\n\[email protected]_memo\ndef preset_2():\n return None\n\n\ndef load_model_and_params(method, params):\n params = dict(params) # make a copy\n model = None\n N = None\n min_ms1_intensity = None\n\n if method == METHOD_PPO:\n # TODO: should be uploaded, rather than hardcoded?\n in_dir = os.path.abspath(os.path.join('..', 'notebooks', 'simulated_chems', 'results'))\n env_name = 'DDAEnv'\n model_name = 'PPO'\n fname = os.path.join(in_dir, '%s_%s.zip' % (env_name, model_name))\n # st.write('Loading model from: ', fname)\n model = load_ppo(fname)\n\n elif method == METHOD_TOPN:\n min_ms1_intensity = 5000\n N = 20 # from optimise_baselines.ipynb\n rt_tol = 30 # from optimise_baselines.ipynb\n params['env']['rt_tol'] = rt_tol\n\n return N, min_ms1_intensity, model, params\n\n\[email protected]_singleton\ndef load_ppo(fname):\n model = PPO.load(fname)\n return model\n\n\ndef run_simulation(N, chems, max_peaks, method, min_ms1_intensity, model, params):\n env = DDAEnv(max_peaks, params)\n obs = env.reset(chems=chems)\n done = False\n episode = Episode(obs)\n with st.spinner('Wait for it...'):\n while not done: # repeat until episode is done\n\n # select an action depending on the observation and method\n action, action_probs = pick_action(\n method, obs, model, env.features, N, min_ms1_intensity)\n\n # make one step through the simulation\n obs, reward, done, info = env.step(action)\n\n # FIXME: seems to slow the simulation a lot!\n # image = env.render(mode='rgb_array')\n\n # store new episodic information\n if obs is not None:\n episode.add_step_data(action, action_probs, obs, reward, info)\n\n if episode.num_steps % 500 == 0:\n st.write('Step\\t', episode.num_steps, '\\tTotal reward\\t',\n episode.get_total_rewards())\n\n # if episode is finished, 
break\n if done:\n msg = f'Episode stored into session: {episode.num_steps} timesteps ' \\\n f'with total reward {episode.get_total_rewards()}'\n st.success(msg)\n break\n return episode\n"
] | [
[
"numpy.log"
]
] |
BrunoBertti/Scikit_Learning | [
"4b9e10ff7909f3728ac1e8bba19f5fd779340bc4",
"4b9e10ff7909f3728ac1e8bba19f5fd779340bc4"
] | [
"06_Transformacoes_do_Conjunto_de_Dados/6.6_Projecao_Aleatoria/6.6.1._O_Lema_de_Johnson-Lindenstrauss.py",
"06_Transformacoes_do_Conjunto_de_Dados/6.1_Pipelines_e_Estimadores_Compostos/6.1.2_Transformando_Alvo_em_Regressao.py"
] | [
"########## 6.6.1. O lema de Johnson-Lindenstrauss ##########\n\n\n\n # O principal resultado teórico por trás da eficiência da projeção aleatória é o lema de Johnson-Lindenstrauss (citando a Wikipedia):\n\n # Em matemática, o lema de Johnson-Lindenstrauss é um resultado sobre embeddings de baixa distorção de pontos de alta dimensão em espaço euclidiano de baixa dimensão. O lema afirma que um pequeno conjunto de pontos em um espaço de alta dimensão pode ser incorporado em um espaço de dimensão muito menor de tal forma que as distâncias entre os pontos sejam praticamente preservadas. O mapa usado para a incorporação é pelo menos Lipschitz, e pode até ser considerado uma projeção ortogonal.\n\n # Conhecendo apenas o número de amostras, o johnson_lindenstrauss_min_dim estima conservadoramente o tamanho mínimo do subespaço aleatório para garantir uma distorção limitada introduzida pela projeção aleatória: \n\n\nfrom sklearn.random_projection import johnson_lindenstrauss_min_dim\njohnson_lindenstrauss_min_dim(n_samples=1e6, eps=0.5)\n\njohnson_lindenstrauss_min_dim(n_samples=1e6, eps=[0.5, 0.1, 0.01])\n\njohnson_lindenstrauss_min_dim(n_samples=[1e4, 1e5, 1e6], eps=0.1)\n\n\n\n # https://scikit-learn.org/stable/auto_examples/miscellaneous/plot_johnson_lindenstrauss_bound.html \n\n\n\n ## Exemplos:\n\n ## See The Johnson-Lindenstrauss bound for embedding with random projections for a theoretical explication on the Johnson-Lindenstrauss lemma and an empirical validation using sparse random matrices. (https://scikit-learn.org/stable/auto_examples/miscellaneous/plot_johnson_lindenstrauss_bound.html#sphx-glr-auto-examples-miscellaneous-plot-johnson-lindenstrauss-bound-py)\n\n\n\n\n\n\n ## Referências:\n\n ## Sanjoy Dasgupta and Anupam Gupta, 1999. An elementary proof of the Johnson-Lindenstrauss Lemma. ( http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.39.3334&rep=rep1&type=pdf)",
"########## 6.1.2. Transformando alvo em regressão ##########\n\n\n # TransformedTargetRegressor transforma os destinos y antes de ajustar um modelo de regressão. As previsões são mapeadas de volta ao espaço original por meio de uma transformação inversa. Toma como argumento o regressor que será usado para previsão e o transformador que será aplicado à variável alvo: \n\nimport numpy as np\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nX, y = fetch_california_housing(return_X_y=True)\nX, y = X[:2000, :], y[:2000] # selecione um subconjunto de dados \ntransformer = QuantileTransformer(output_distribution='normal')\nregressor = LinearRegression()\nregr = TransformedTargetRegressor(regressor=regressor,\n transformer=transformer)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\nregr.fit(X_train, y_train)\nprint('R2 score: {0:.2f}'.format(regr.score(X_test, y_test)))\nraw_target_regr = LinearRegression().fit(X_train, y_train)\nprint('R2 score: {0:.2f}'.format(raw_target_regr.score(X_test, y_test)))\n\n\n # Para transformações simples, ao invés de um objeto Transformer, pode-se passar um par de funções, definindo a transformação e seu mapeamento inverso: \n\ndef func(x):\n return np.log(x)\ndef inverse_func(x):\n return np.exp(x)\n\n # Posteriormente, o objeto é criado como: \n\nregr = TransformedTargetRegressor(regressor=regressor,\n func=func,\n inverse_func=inverse_func)\nregr.fit(X_train, y_train)\nTransformedTargetRegressor(...)\nprint('R2 score: {0:.2f}'.format(regr.score(X_test, y_test)))\n\n\n # Por padrão, as funções fornecidas são verificadas em cada ajuste para serem o inverso uma da outra. No entanto, é possível contornar essa verificação definindo check_inverse como False: \n\ndef inverse_func(x):\n return x\nregr = TransformedTargetRegressor(regressor=regressor,\n func=func,\n inverse_func=inverse_func,\n check_inverse=False)\nregr.fit(X_train, y_train)\nTransformedTargetRegressor(...)\nprint('R2 score: {0:.2f}'.format(regr.score(X_test, y_test)))\n\n\n # Nota: A transformação pode ser acionada configurando transformador ou o par de funções func e inverse_func. No entanto, definir ambas as opções gerará um erro. \n\n\n\n ## Exemplos:\n\n ## https://scikit-learn.org/stable/auto_examples/compose/plot_transformed_target.html#sphx-glr-auto-examples-compose-plot-transformed-target-py"
] | [
[
"sklearn.random_projection.johnson_lindenstrauss_min_dim"
],
[
"sklearn.compose.TransformedTargetRegressor",
"sklearn.preprocessing.QuantileTransformer",
"sklearn.linear_model.LinearRegression",
"numpy.exp",
"numpy.log",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.fetch_california_housing"
]
] |
0xTDF/Quant-Trading-Strategy-Backtesting-Framework | [
"d77089bab3513013d456819e9790e67e44adec8e"
] | [
"MA cross.py"
] | [
"import backtrader as bt\r\nimport backtrader.analyzers as bta\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nimport yfinance\r\n\r\n\r\nclass MaCrossStrategy(bt.Strategy):\r\n\r\n # signal generator\r\n def __init__(self):\r\n\r\n ma_fast = bt.ind.SMA(period = 10)\r\n ma_slow = bt.ind.SMA(period = 20)\r\n\r\n self.crossover = bt.ind.CrossOver(ma_fast, ma_slow)\r\n\r\n # executes order from the signals\r\n def next(self):\r\n if not self.position:\r\n if self.crossover > 0:\r\n self.buy()\r\n elif self.crossover < 0:\r\n self.close()\r\n\r\n\r\ncerebro = bt.Cerebro()\r\n\r\n# pulls price data from yahoo finance\r\ndata = bt.feeds.YahooFinanceCSVData(dataname='BTC-USD.csv')\r\n\r\n# converts to log chart\r\ndata.plotinfo.plotlog = True\r\n\r\n# adds data to engine\r\ncerebro.adddata(data)\r\n# adds strategy to engine\r\ncerebro.addstrategy(MaCrossStrategy)\r\n\r\n# sets starting capital\r\ncerebro.broker.setcash(1000.0)\r\n# sets size per trade\r\ncerebro.addsizer(bt.sizers.PercentSizer, percents = 10)\r\n\r\n# analysis\r\ncerebro.addanalyzer(bta.SharpeRatio, _name = \"sharpe\")\r\ncerebro.addanalyzer(bta.Transactions, _name = \"trans\")\r\ncerebro.addanalyzer(bta.TradeAnalyzer, _name = \"trades\")\r\n\r\n# runs back test\r\nback = cerebro.run()\r\nprint(cerebro.broker.getvalue())\r\n\r\n# useful output data\r\nsharpeRatio = back[0].analyzers.sharpe.get_analysis()\r\nprint(sharpeRatio)\r\ntransactions = back[0].analyzers.trans.get_analysis()\r\n#print(transactions)\r\ntradeAnalyzer = back[0].analyzers.trades.get_analysis()\r\n#print(tradeAnalyzer)\r\n\r\n\r\n# colour scheme of plot\r\nplt.style.use('fivethirtyeight')\r\n\r\nplt.rcParams[\"figure.figsize\"] = (10, 6)\r\nplt.rcParams['lines.linewidth'] = 1\r\n\r\nSIZE = 7\r\nplt.rcParams['axes.labelsize'] = SIZE\r\nplt.rcParams['ytick.labelsize'] = SIZE\r\nplt.rcParams['xtick.labelsize'] = SIZE\r\nplt.rcParams[\"font.size\"] = SIZE\r\n\r\nCOLOR = '1'\r\nplt.rcParams['text.color'] = COLOR\r\nplt.rcParams['axes.labelcolor'] = COLOR\r\nplt.rcParams['xtick.color'] = COLOR\r\nplt.rcParams['ytick.color'] = COLOR\r\n\r\nplt.rcParams['grid.linewidth']=0.1\r\nplt.rcParams['grid.color']=\"#101622\"\r\nplt.rcParams['lines.color']=\"0.5\"\r\nplt.rcParams['axes.edgecolor']=\"0.2\"\r\nplt.rcParams['axes.linewidth']=0.5\r\n\r\nplt.rcParams['figure.facecolor']=\"#101622\"\r\nplt.rcParams['axes.facecolor']=\"#101622\"\r\nplt.rcParams[\"savefig.dpi\"]=120\r\ndpi = plt.rcParams[\"savefig.dpi\"]\r\nwidth = 1080\r\nheight = 1920\r\nplt.rcParams['figure.figsize'] = height/dpi, width/dpi\r\nplt.rcParams[\"savefig.facecolor\"] =\"#101622\"\r\nplt.rcParams[\"savefig.edgecolor\"]=\"#101622\"\r\n\r\nplt.rcParams['legend.fontsize'] = SIZE\r\nplt.rcParams['legend.title_fontsize'] = SIZE + 1\r\nplt.rcParams['legend.labelspacing'] =0.25\r\nplt.rcParams['image.cmap']='tab10'\r\n\r\n\r\ncerebro.plot(style = 'candle',barup='white', bardown='#1973c2',volume = False)\r\nplt.show()\r\n\r\n"
] | [
[
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show"
]
] |
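For reference, the 10/20-period SMA crossover that MaCrossStrategy delegates to bt.ind.CrossOver in the script above can be reproduced without backtrader. A small pandas sketch on a synthetic price series (the series itself is made up; only the signal logic mirrors the strategy):

import numpy as np
import pandas as pd

prices = pd.Series(np.cumsum(np.random.randn(200)) + 100)  # synthetic prices
fast = prices.rolling(10).mean()
slow = prices.rolling(20).mean()
above = (fast > slow).astype(int)
signal = above.diff().fillna(0)   # +1 = fast crosses above slow, -1 = crosses below
print(signal[signal != 0].head())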
JensRL/PPaNM | [
"a28d9826d24c821cbc35a2e5fb5c478118f1e693"
] | [
"Lectures/9 - Matlib/test.py"
] | [
"import math\rimport scipy.integrate as integrate\r\rncalls = 0\rdef f(x):\r global ncalls\r ncalls +=1\r return math.log(x)/math.sqrt(x)\r\rresult = integrate.quad(f,0,1)\rprint(\"result=\", result, \"ncalls =\",ncalls)"
] | [
[
"scipy.integrate.quad"
]
] |
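A quick analytic check on the test.py entry above: the integrand ln(x)/sqrt(x) has an integrable singularity at 0, and integration by parts gives

\int_0^1 \frac{\ln x}{\sqrt{x}}\,dx = \Big[2\sqrt{x}\,\ln x\Big]_0^1 - \int_0^1 \frac{2}{\sqrt{x}}\,dx = 0 - 4 = -4

so the value printed by integrate.quad(f, 0, 1) should be very close to -4 (quad also reports its own error estimate, and ncalls records how many times the integrand was evaluated).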
d3m0n-r00t/BentoML | [
"e5c53b821369f5391de9ab3a20ecad5db9e77202"
] | [
"bentoml/adapters/dataframe_output.py"
] | [
"import json\nfrom typing import Sequence\n\nfrom bentoml.adapters.json_output import JsonOutput\nfrom bentoml.types import InferenceError, InferenceResult, InferenceTask\nfrom bentoml.utils.dataframe_util import PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS\n\n\ndef df_to_json(result, pandas_dataframe_orient=\"records\"):\n import pandas as pd\n\n assert (\n pandas_dataframe_orient in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS\n ), f\"unknown pandas dataframe orient '{pandas_dataframe_orient}'\"\n\n if isinstance(result, pd.DataFrame):\n return result.to_json(orient=pandas_dataframe_orient)\n\n if isinstance(result, pd.Series):\n return pd.DataFrame(result).to_json(orient=pandas_dataframe_orient)\n return json.dumps(result)\n\n\nclass DataframeOutput(JsonOutput):\n \"\"\"\n Converts result of user defined API function into specific output.\n\n Args:\n cors (str): The value of the Access-Control-Allow-Origin header set in the\n AWS Lambda response object. Default is \"*\". If set to None,\n the header will not be set.\n \"\"\"\n\n BATCH_MODE_SUPPORTED = True\n\n def __init__(self, output_orient='records', **kwargs):\n super().__init__(**kwargs)\n self.output_orient = output_orient\n\n assert self.output_orient in PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS, (\n f\"Invalid 'output_orient'='{self.orient}', valid options are \"\n f\"{PANDAS_DATAFRAME_TO_JSON_ORIENT_OPTIONS}\"\n )\n\n @property\n def config(self):\n base_config = super(DataframeOutput, self).config\n return dict(base_config, output_orient=self.output_orient)\n\n @property\n def pip_dependencies(self):\n \"\"\"\n :return: List of PyPI package names required by this OutputAdapter\n \"\"\"\n return ['pandas']\n\n def pack_user_func_return_value(\n self, return_result, tasks: Sequence[InferenceTask]\n ) -> Sequence[InferenceResult[str]]:\n rv = []\n i = 0\n for task in tasks:\n if task.batch is None:\n result = return_result[i : i + 1]\n i += 1\n else:\n result = return_result[i : i + task.batch]\n i += task.batch\n try:\n result = df_to_json(result, self.output_orient)\n rv.append(InferenceResult(http_status=200, data=result))\n except Exception as e: # pylint: disable=broad-except\n rv.append(InferenceError(err_msg=str(e), http_status=500))\n return rv\n"
] | [
[
"pandas.DataFrame"
]
] |
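df_to_json in the dataframe_output.py entry above serialises three kinds of return values differently: a DataFrame directly, a Series after wrapping it in a DataFrame, and anything else through json.dumps. A quick illustration of the three branches with made-up data, using the helper's default "records" orient:

import json
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
print(df.to_json(orient="records"))                     # DataFrame branch
print(pd.DataFrame(df["b"]).to_json(orient="records"))  # Series branch, wrapped first
print(json.dumps({"status": "ok"}))                     # fallback for plain objects

Separately, the assertion message in DataframeOutput.__init__ interpolates self.orient, which the class never defines; self.output_orient appears to be what was meant, although that message only renders once the assertion has already failed.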
linusg/Pyto | [
"901ac307b68486d8289105c159ca702318bea5b0"
] | [
"site-packages/sklearn/linear_model/_stochastic_gradient.py"
] | [
"# Authors: Peter Prettenhofer <[email protected]> (main author)\n# Mathieu Blondel (partial_fit support)\n#\n# License: BSD 3 clause\n\"\"\"Classification and regression using Stochastic Gradient Descent (SGD).\"\"\"\n\nimport numpy as np\nimport warnings\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom joblib import Parallel, delayed\n\nfrom ..base import clone, is_classifier\nfrom ._base import LinearClassifierMixin, SparseCoefMixin\nfrom ._base import make_dataset\nfrom ..base import BaseEstimator, RegressorMixin\nfrom ..utils import check_array, check_random_state, check_X_y\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.multiclass import _check_partial_fit_first_call\nfrom ..utils.validation import check_is_fitted, _check_sample_weight\nfrom ..exceptions import ConvergenceWarning\nfrom ..model_selection import StratifiedShuffleSplit, ShuffleSplit\n\nfrom ._sgd_fast import plain_sgd, average_sgd\nfrom ..utils import compute_class_weight\nfrom ._sgd_fast import Hinge\nfrom ._sgd_fast import SquaredHinge\nfrom ._sgd_fast import Log\nfrom ._sgd_fast import ModifiedHuber\nfrom ._sgd_fast import SquaredLoss\nfrom ._sgd_fast import Huber\nfrom ._sgd_fast import EpsilonInsensitive\nfrom ._sgd_fast import SquaredEpsilonInsensitive\nfrom ..utils.fixes import _joblib_parallel_args\n\nLEARNING_RATE_TYPES = {\"constant\": 1, \"optimal\": 2, \"invscaling\": 3,\n \"adaptive\": 4, \"pa1\": 5, \"pa2\": 6}\n\nPENALTY_TYPES = {\"none\": 0, \"l2\": 2, \"l1\": 1, \"elasticnet\": 3}\n\nDEFAULT_EPSILON = 0.1\n# Default value of ``epsilon`` parameter.\n\nMAX_INT = np.iinfo(np.int32).max\n\n\nclass _ValidationScoreCallback:\n \"\"\"Callback for early stopping based on validation score\"\"\"\n\n def __init__(self, estimator, X_val, y_val, sample_weight_val,\n classes=None):\n self.estimator = clone(estimator)\n self.estimator.t_ = 1 # to pass check_is_fitted\n if classes is not None:\n self.estimator.classes_ = classes\n self.X_val = X_val\n self.y_val = y_val\n self.sample_weight_val = sample_weight_val\n\n def __call__(self, coef, intercept):\n est = self.estimator\n est.coef_ = coef.reshape(1, -1)\n est.intercept_ = np.atleast_1d(intercept)\n return est.score(self.X_val, self.y_val, self.sample_weight_val)\n\n\nclass BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for SGD classification and regression.\"\"\"\n\n def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,\n l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\n shuffle=True, verbose=0, epsilon=0.1, random_state=None,\n learning_rate=\"optimal\", eta0=0.0, power_t=0.5,\n early_stopping=False, validation_fraction=0.1,\n n_iter_no_change=5, warm_start=False, average=False):\n self.loss = loss\n self.penalty = penalty\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.alpha = alpha\n self.C = C\n self.l1_ratio = l1_ratio\n self.fit_intercept = fit_intercept\n self.shuffle = shuffle\n self.random_state = random_state\n self.verbose = verbose\n self.eta0 = eta0\n self.power_t = power_t\n self.early_stopping = early_stopping\n self.validation_fraction = validation_fraction\n self.n_iter_no_change = n_iter_no_change\n self.warm_start = warm_start\n self.average = average\n self.max_iter = max_iter\n self.tol = tol\n # current tests expect init to do parameter validation\n # but we are not allowed to set attributes\n self._validate_params()\n\n def set_params(self, **kwargs):\n super().set_params(**kwargs)\n self._validate_params()\n return self\n\n @abstractmethod\n def fit(self, 
X, y):\n \"\"\"Fit model.\"\"\"\n\n def _validate_params(self, for_partial_fit=False):\n \"\"\"Validate input params. \"\"\"\n if not isinstance(self.shuffle, bool):\n raise ValueError(\"shuffle must be either True or False\")\n if not isinstance(self.early_stopping, bool):\n raise ValueError(\"early_stopping must be either True or False\")\n if self.early_stopping and for_partial_fit:\n raise ValueError(\"early_stopping should be False with partial_fit\")\n if self.max_iter is not None and self.max_iter <= 0:\n raise ValueError(\"max_iter must be > zero. Got %f\" % self.max_iter)\n if not (0.0 <= self.l1_ratio <= 1.0):\n raise ValueError(\"l1_ratio must be in [0, 1]\")\n if self.alpha < 0.0:\n raise ValueError(\"alpha must be >= 0\")\n if self.n_iter_no_change < 1:\n raise ValueError(\"n_iter_no_change must be >= 1\")\n if not (0.0 < self.validation_fraction < 1.0):\n raise ValueError(\"validation_fraction must be in range (0, 1)\")\n if self.learning_rate in (\"constant\", \"invscaling\", \"adaptive\"):\n if self.eta0 <= 0.0:\n raise ValueError(\"eta0 must be > 0\")\n if self.learning_rate == \"optimal\" and self.alpha == 0:\n raise ValueError(\"alpha must be > 0 since \"\n \"learning_rate is 'optimal'. alpha is used \"\n \"to compute the optimal learning rate.\")\n\n # raises ValueError if not registered\n self._get_penalty_type(self.penalty)\n self._get_learning_rate_type(self.learning_rate)\n\n if self.loss not in self.loss_functions:\n raise ValueError(\"The loss %s is not supported. \" % self.loss)\n\n def _get_loss_function(self, loss):\n \"\"\"Get concrete ``LossFunction`` object for str ``loss``. \"\"\"\n try:\n loss_ = self.loss_functions[loss]\n loss_class, args = loss_[0], loss_[1:]\n if loss in ('huber', 'epsilon_insensitive',\n 'squared_epsilon_insensitive'):\n args = (self.epsilon, )\n return loss_class(*args)\n except KeyError:\n raise ValueError(\"The loss %s is not supported. \" % loss)\n\n def _get_learning_rate_type(self, learning_rate):\n try:\n return LEARNING_RATE_TYPES[learning_rate]\n except KeyError:\n raise ValueError(\"learning rate %s \"\n \"is not supported. \" % learning_rate)\n\n def _get_penalty_type(self, penalty):\n penalty = str(penalty).lower()\n try:\n return PENALTY_TYPES[penalty]\n except KeyError:\n raise ValueError(\"Penalty %s is not supported. \" % penalty)\n\n def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,\n intercept_init=None):\n \"\"\"Allocate mem for parameters; initialize if provided.\"\"\"\n if n_classes > 2:\n # allocate coef_ for multi-class\n if coef_init is not None:\n coef_init = np.asarray(coef_init, order=\"C\")\n if coef_init.shape != (n_classes, n_features):\n raise ValueError(\"Provided ``coef_`` does not match \"\n \"dataset. 
\")\n self.coef_ = coef_init\n else:\n self.coef_ = np.zeros((n_classes, n_features),\n dtype=np.float64, order=\"C\")\n\n # allocate intercept_ for multi-class\n if intercept_init is not None:\n intercept_init = np.asarray(intercept_init, order=\"C\")\n if intercept_init.shape != (n_classes, ):\n raise ValueError(\"Provided intercept_init \"\n \"does not match dataset.\")\n self.intercept_ = intercept_init\n else:\n self.intercept_ = np.zeros(n_classes, dtype=np.float64,\n order=\"C\")\n else:\n # allocate coef_ for binary problem\n if coef_init is not None:\n coef_init = np.asarray(coef_init, dtype=np.float64,\n order=\"C\")\n coef_init = coef_init.ravel()\n if coef_init.shape != (n_features,):\n raise ValueError(\"Provided coef_init does not \"\n \"match dataset.\")\n self.coef_ = coef_init\n else:\n self.coef_ = np.zeros(n_features,\n dtype=np.float64,\n order=\"C\")\n\n # allocate intercept_ for binary problem\n if intercept_init is not None:\n intercept_init = np.asarray(intercept_init, dtype=np.float64)\n if intercept_init.shape != (1,) and intercept_init.shape != ():\n raise ValueError(\"Provided intercept_init \"\n \"does not match dataset.\")\n self.intercept_ = intercept_init.reshape(1,)\n else:\n self.intercept_ = np.zeros(1, dtype=np.float64, order=\"C\")\n\n # initialize average parameters\n if self.average > 0:\n self.standard_coef_ = self.coef_\n self.standard_intercept_ = self.intercept_\n self.average_coef_ = np.zeros(self.coef_.shape,\n dtype=np.float64,\n order=\"C\")\n self.average_intercept_ = np.zeros(self.standard_intercept_.shape,\n dtype=np.float64,\n order=\"C\")\n\n def _make_validation_split(self, y):\n \"\"\"Split the dataset between training set and validation set.\n\n Parameters\n ----------\n y : array, shape (n_samples, )\n Target values.\n\n Returns\n -------\n validation_mask : array, shape (n_samples, )\n Equal to 1 on the validation set, 0 on the training set.\n \"\"\"\n n_samples = y.shape[0]\n validation_mask = np.zeros(n_samples, dtype=np.uint8)\n if not self.early_stopping:\n # use the full set for training, with an empty validation set\n return validation_mask\n\n if is_classifier(self):\n splitter_type = StratifiedShuffleSplit\n else:\n splitter_type = ShuffleSplit\n cv = splitter_type(test_size=self.validation_fraction,\n random_state=self.random_state)\n idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))\n if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:\n raise ValueError(\n \"Splitting %d samples into a train set and a validation set \"\n \"with validation_fraction=%r led to an empty set (%d and %d \"\n \"samples). 
Please either change validation_fraction, increase \"\n \"number of samples, or disable early_stopping.\"\n % (n_samples, self.validation_fraction, idx_train.shape[0],\n idx_val.shape[0]))\n\n validation_mask[idx_val] = 1\n return validation_mask\n\n def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,\n classes=None):\n if not self.early_stopping:\n return None\n\n return _ValidationScoreCallback(\n self, X[validation_mask], y[validation_mask],\n sample_weight[validation_mask], classes=classes)\n\n\ndef _prepare_fit_binary(est, y, i):\n \"\"\"Initialization for fit_binary.\n\n Returns y, coef, intercept, average_coef, average_intercept.\n \"\"\"\n y_i = np.ones(y.shape, dtype=np.float64, order=\"C\")\n y_i[y != est.classes_[i]] = -1.0\n average_intercept = 0\n average_coef = None\n\n if len(est.classes_) == 2:\n if not est.average:\n coef = est.coef_.ravel()\n intercept = est.intercept_[0]\n else:\n coef = est.standard_coef_.ravel()\n intercept = est.standard_intercept_[0]\n average_coef = est.average_coef_.ravel()\n average_intercept = est.average_intercept_[0]\n else:\n if not est.average:\n coef = est.coef_[i]\n intercept = est.intercept_[i]\n else:\n coef = est.standard_coef_[i]\n intercept = est.standard_intercept_[i]\n average_coef = est.average_coef_[i]\n average_intercept = est.average_intercept_[i]\n\n return y_i, coef, intercept, average_coef, average_intercept\n\n\ndef fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,\n pos_weight, neg_weight, sample_weight, validation_mask=None,\n random_state=None):\n \"\"\"Fit a single binary classifier.\n\n The i'th class is considered the \"positive\" class.\n\n Parameters\n ----------\n est : Estimator object\n The estimator to fit\n\n i : int\n Index of the positive class\n\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n\n y : numpy array of shape [n_samples, ]\n Target values\n\n alpha : float\n The regularization parameter\n\n C : float\n Maximum step size for passive aggressive\n\n learning_rate : string\n The learning rate. 
Accepted values are 'constant', 'optimal',\n 'invscaling', 'pa1' and 'pa2'.\n\n max_iter : int\n The maximum number of iterations (epochs)\n\n pos_weight : float\n The weight of the positive class\n\n neg_weight : float\n The weight of the negative class\n\n sample_weight : numpy array of shape [n_samples, ]\n The weight of each sample\n\n validation_mask : numpy array of shape [n_samples, ] or None\n Precomputed validation mask in case _fit_binary is called in the\n context of a one-vs-rest reduction.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n \"\"\"\n # if average is not true, average_coef, and average_intercept will be\n # unused\n y_i, coef, intercept, average_coef, average_intercept = \\\n _prepare_fit_binary(est, y, i)\n assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]\n\n random_state = check_random_state(random_state)\n dataset, intercept_decay = make_dataset(\n X, y_i, sample_weight, random_state=random_state)\n\n penalty_type = est._get_penalty_type(est.penalty)\n learning_rate_type = est._get_learning_rate_type(learning_rate)\n\n if validation_mask is None:\n validation_mask = est._make_validation_split(y_i)\n classes = np.array([-1, 1], dtype=y_i.dtype)\n validation_score_cb = est._make_validation_score_cb(\n validation_mask, X, y_i, sample_weight, classes=classes)\n\n # numpy mtrand expects a C long which is a signed 32 bit integer under\n # Windows\n seed = random_state.randint(MAX_INT)\n\n tol = est.tol if est.tol is not None else -np.inf\n\n if not est.average:\n result = plain_sgd(coef, intercept, est.loss_function_,\n penalty_type, alpha, C, est.l1_ratio,\n dataset, validation_mask, est.early_stopping,\n validation_score_cb, int(est.n_iter_no_change),\n max_iter, tol, int(est.fit_intercept),\n int(est.verbose), int(est.shuffle), seed,\n pos_weight, neg_weight,\n learning_rate_type, est.eta0,\n est.power_t, est.t_, intercept_decay)\n\n else:\n standard_coef, standard_intercept, average_coef, average_intercept, \\\n n_iter_ = average_sgd(coef, intercept, average_coef,\n average_intercept, est.loss_function_,\n penalty_type, alpha, C, est.l1_ratio,\n dataset, validation_mask, est.early_stopping,\n validation_score_cb,\n int(est.n_iter_no_change), max_iter, tol,\n int(est.fit_intercept), int(est.verbose),\n int(est.shuffle), seed, pos_weight,\n neg_weight, learning_rate_type, est.eta0,\n est.power_t, est.t_, intercept_decay,\n est.average)\n\n if len(est.classes_) == 2:\n est.average_intercept_[0] = average_intercept\n else:\n est.average_intercept_[i] = average_intercept\n\n result = standard_coef, standard_intercept, n_iter_\n\n return result\n\n\nclass BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):\n\n loss_functions = {\n \"hinge\": (Hinge, 1.0),\n \"squared_hinge\": (SquaredHinge, 1.0),\n \"perceptron\": (Hinge, 0.0),\n \"log\": (Log, ),\n \"modified_huber\": (ModifiedHuber, ),\n \"squared_loss\": (SquaredLoss, ),\n \"huber\": (Huber, DEFAULT_EPSILON),\n \"epsilon_insensitive\": (EpsilonInsensitive, DEFAULT_EPSILON),\n \"squared_epsilon_insensitive\": (SquaredEpsilonInsensitive,\n DEFAULT_EPSILON),\n }\n\n @abstractmethod\n def __init__(self, loss=\"hinge\", penalty='l2', alpha=0.0001,\n l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\n shuffle=True, 
verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,\n random_state=None, learning_rate=\"optimal\", eta0=0.0,\n power_t=0.5, early_stopping=False,\n validation_fraction=0.1, n_iter_no_change=5,\n class_weight=None, warm_start=False, average=False):\n\n super().__init__(\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\n shuffle=shuffle, verbose=verbose, epsilon=epsilon,\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\n power_t=power_t, early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change, warm_start=warm_start,\n average=average)\n self.class_weight = class_weight\n self.n_jobs = n_jobs\n\n def _partial_fit(self, X, y, alpha, C,\n loss, learning_rate, max_iter,\n classes, sample_weight,\n coef_init, intercept_init):\n X, y = check_X_y(X, y, 'csr', dtype=np.float64, order=\"C\",\n accept_large_sparse=False)\n\n n_samples, n_features = X.shape\n\n _check_partial_fit_first_call(self, classes)\n\n n_classes = self.classes_.shape[0]\n\n # Allocate datastructures from input arguments\n self._expanded_class_weight = compute_class_weight(self.class_weight,\n self.classes_, y)\n sample_weight = _check_sample_weight(sample_weight, X)\n\n if getattr(self, \"coef_\", None) is None or coef_init is not None:\n self._allocate_parameter_mem(n_classes, n_features,\n coef_init, intercept_init)\n elif n_features != self.coef_.shape[-1]:\n raise ValueError(\"Number of features %d does not match previous \"\n \"data %d.\" % (n_features, self.coef_.shape[-1]))\n\n self.loss_function_ = self._get_loss_function(loss)\n if not hasattr(self, \"t_\"):\n self.t_ = 1.0\n\n # delegate to concrete training procedure\n if n_classes > 2:\n self._fit_multiclass(X, y, alpha=alpha, C=C,\n learning_rate=learning_rate,\n sample_weight=sample_weight,\n max_iter=max_iter)\n elif n_classes == 2:\n self._fit_binary(X, y, alpha=alpha, C=C,\n learning_rate=learning_rate,\n sample_weight=sample_weight,\n max_iter=max_iter)\n else:\n raise ValueError(\n \"The number of classes has to be greater than one;\"\n \" got %d class\" % n_classes)\n\n return self\n\n def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,\n intercept_init=None, sample_weight=None):\n self._validate_params()\n if hasattr(self, \"classes_\"):\n self.classes_ = None\n\n X, y = check_X_y(X, y, 'csr', dtype=np.float64, order=\"C\",\n accept_large_sparse=False)\n\n # labels can be encoded as float, int, or string literals\n # np.unique sorts in asc order; largest class id is positive class\n classes = np.unique(y)\n\n if self.warm_start and hasattr(self, \"coef_\"):\n if coef_init is None:\n coef_init = self.coef_\n if intercept_init is None:\n intercept_init = self.intercept_\n else:\n self.coef_ = None\n self.intercept_ = None\n\n if self.average > 0:\n self.standard_coef_ = self.coef_\n self.standard_intercept_ = self.intercept_\n self.average_coef_ = None\n self.average_intercept_ = None\n\n # Clear iteration count for multiple call to fit.\n self.t_ = 1.0\n\n self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,\n classes, sample_weight, coef_init, intercept_init)\n\n if (self.tol is not None and self.tol > -np.inf\n and self.n_iter_ == self.max_iter):\n warnings.warn(\"Maximum number of iteration reached before \"\n \"convergence. 
Consider increasing max_iter to \"\n \"improve the fit.\",\n ConvergenceWarning)\n return self\n\n def _fit_binary(self, X, y, alpha, C, sample_weight,\n learning_rate, max_iter):\n \"\"\"Fit a binary classifier on X and y. \"\"\"\n coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,\n learning_rate, max_iter,\n self._expanded_class_weight[1],\n self._expanded_class_weight[0],\n sample_weight,\n random_state=self.random_state)\n\n self.t_ += n_iter_ * X.shape[0]\n self.n_iter_ = n_iter_\n\n # need to be 2d\n if self.average > 0:\n if self.average <= self.t_ - 1:\n self.coef_ = self.average_coef_.reshape(1, -1)\n self.intercept_ = self.average_intercept_\n else:\n self.coef_ = self.standard_coef_.reshape(1, -1)\n self.standard_intercept_ = np.atleast_1d(intercept)\n self.intercept_ = self.standard_intercept_\n else:\n self.coef_ = coef.reshape(1, -1)\n # intercept is a float, need to convert it to an array of length 1\n self.intercept_ = np.atleast_1d(intercept)\n\n def _fit_multiclass(self, X, y, alpha, C, learning_rate,\n sample_weight, max_iter):\n \"\"\"Fit a multi-class classifier by combining binary classifiers\n\n Each binary classifier predicts one class versus all others. This\n strategy is called OvA (One versus All) or OvR (One versus Rest).\n \"\"\"\n # Precompute the validation split using the multiclass labels\n # to ensure proper balancing of the classes.\n validation_mask = self._make_validation_split(y)\n\n # Use joblib to fit OvA in parallel.\n # Pick the random seed for each job outside of fit_binary to avoid\n # sharing the estimator random state between threads which could lead\n # to non-deterministic behavior\n random_state = check_random_state(self.random_state)\n seeds = random_state.randint(MAX_INT, size=len(self.classes_))\n result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(require=\"sharedmem\"))(\n delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,\n max_iter, self._expanded_class_weight[i],\n 1., sample_weight,\n validation_mask=validation_mask,\n random_state=seed)\n for i, seed in enumerate(seeds))\n\n # take the maximum of n_iter_ over every binary fit\n n_iter_ = 0.\n for i, (_, intercept, n_iter_i) in enumerate(result):\n self.intercept_[i] = intercept\n n_iter_ = max(n_iter_, n_iter_i)\n\n self.t_ += n_iter_ * X.shape[0]\n self.n_iter_ = n_iter_\n\n if self.average > 0:\n if self.average <= self.t_ - 1.0:\n self.coef_ = self.average_coef_\n self.intercept_ = self.average_intercept_\n else:\n self.coef_ = self.standard_coef_\n self.standard_intercept_ = np.atleast_1d(self.intercept_)\n self.intercept_ = self.standard_intercept_\n\n def partial_fit(self, X, y, classes=None, sample_weight=None):\n \"\"\"Perform one epoch of stochastic gradient descent on given samples.\n\n Internally, this method uses ``max_iter = 1``. Therefore, it is not\n guaranteed that a minimum of the cost function is reached after calling\n it once. 
Matters such as objective convergence and early stopping\n should be handled by the user.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Subset of the training data\n\n y : numpy array, shape (n_samples,)\n Subset of the target values\n\n classes : array, shape (n_classes,)\n Classes across all calls to partial_fit.\n Can be obtained by via `np.unique(y_all)`, where y_all is the\n target vector of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that y doesn't need to contain all labels in `classes`.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._validate_params(for_partial_fit=True)\n if self.class_weight in ['balanced']:\n raise ValueError(\"class_weight '{0}' is not supported for \"\n \"partial_fit. In order to use 'balanced' weights,\"\n \" use compute_class_weight('{0}', classes, y). \"\n \"In place of y you can us a large enough sample \"\n \"of the full training set target to properly \"\n \"estimate the class frequency distributions. \"\n \"Pass the resulting weights as the class_weight \"\n \"parameter.\".format(self.class_weight))\n return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,\n learning_rate=self.learning_rate, max_iter=1,\n classes=classes, sample_weight=sample_weight,\n coef_init=None, intercept_init=None)\n\n def fit(self, X, y, coef_init=None, intercept_init=None,\n sample_weight=None):\n \"\"\"Fit linear model with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data\n\n y : numpy array, shape (n_samples,)\n Target values\n\n coef_init : array, shape (n_classes, n_features)\n The initial coefficients to warm-start the optimization.\n\n intercept_init : array, shape (n_classes,)\n The initial intercept to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed. These weights will\n be multiplied with class_weight (passed through the\n constructor) if class_weight is specified\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return self._fit(X, y, alpha=self.alpha, C=1.0,\n loss=self.loss, learning_rate=self.learning_rate,\n coef_init=coef_init, intercept_init=intercept_init,\n sample_weight=sample_weight)\n\n\nclass SGDClassifier(BaseSGDClassifier):\n \"\"\"Linear classifiers (SVM, logistic regression, a.o.) with SGD training.\n\n This estimator implements regularized linear models with stochastic\n gradient descent (SGD) learning: the gradient of the loss is estimated\n each sample at a time and the model is updated along the way with a\n decreasing strength schedule (aka learning rate). SGD allows minibatch\n (online/out-of-core) learning, see the partial_fit method.\n For best results using the default learning rate schedule, the data should\n have zero mean and unit variance.\n\n This implementation works with data represented as dense or sparse arrays\n of floating point values for the features. 
The model it fits can be\n controlled with the loss parameter; by default, it fits a linear support\n vector machine (SVM).\n\n The regularizer is a penalty added to the loss function that shrinks model\n parameters towards the zero vector using either the squared euclidean norm\n L2 or the absolute norm L1 or a combination of both (Elastic Net). If the\n parameter update crosses the 0.0 value because of the regularizer, the\n update is truncated to 0.0 to allow for learning sparse models and achieve\n online feature selection.\n\n Read more in the :ref:`User Guide <sgd>`.\n\n Parameters\n ----------\n loss : str, default: 'hinge'\n The loss function to be used. Defaults to 'hinge', which gives a\n linear SVM.\n\n The possible options are 'hinge', 'log', 'modified_huber',\n 'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n\n The 'log' loss gives logistic regression, a probabilistic classifier.\n 'modified_huber' is another smooth loss that brings tolerance to\n outliers as well as probability estimates.\n 'squared_hinge' is like hinge but is quadratically penalized.\n 'perceptron' is the linear loss used by the perceptron algorithm.\n The other losses are designed for regression but can be useful in\n classification as well; see SGDRegressor for a description.\n\n penalty : str, 'none', 'l2', 'l1', or 'elasticnet'\n The penalty (aka regularization term) to be used. Defaults to 'l2'\n which is the standard regularizer for linear SVM models. 'l1' and\n 'elasticnet' might bring sparsity to the model (feature selection)\n not achievable with 'l2'.\n\n alpha : float\n Constant that multiplies the regularization term. Defaults to 0.0001.\n Also used to compute learning_rate when set to 'optimal'.\n\n l1_ratio : float\n The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\n l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\n Defaults to 0.15.\n\n fit_intercept : bool\n Whether the intercept should be estimated or not. If False, the\n data is assumed to be already centered. Defaults to True.\n\n max_iter : int, optional (default=1000)\n The maximum number of passes over the training data (aka epochs).\n It only impacts the behavior in the ``fit`` method, and not the\n :meth:`partial_fit` method.\n\n .. versionadded:: 0.19\n\n tol : float or None, optional (default=1e-3)\n The stopping criterion. If it is not None, the iterations will stop\n when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\n epochs.\n\n .. versionadded:: 0.19\n\n shuffle : bool, optional\n Whether or not the training data should be shuffled after each epoch.\n Defaults to True.\n\n verbose : integer, default=0\n The verbosity level\n\n epsilon : float, default=0.1\n Epsilon in the epsilon-insensitive loss functions; only if `loss` is\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n For 'huber', determines the threshold at which it becomes less\n important to get the prediction exactly right.\n For epsilon-insensitive, any differences between the current prediction\n and the correct label are ignored if they are less than this threshold.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the OVA (One Versus All, for\n multi-class problems) computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n random_state : int, RandomState instance or None, optional (default=None)\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n\n learning_rate : string, optional\n The learning rate schedule:\n\n 'constant':\n eta = eta0\n 'optimal': [default]\n eta = 1.0 / (alpha * (t + t0))\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n 'invscaling':\n eta = eta0 / pow(t, power_t)\n 'adaptive':\n eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n eta0 : double\n The initial learning rate for the 'constant', 'invscaling' or\n 'adaptive' schedules. The default value is 0.0 as eta0 is not used by\n the default schedule 'optimal'.\n\n power_t : double\n The exponent for inverse scaling learning rate [default 0.5].\n\n early_stopping : bool, default=False\n Whether to use early stopping to terminate training when validation\n score is not improving. If set to True, it will automatically set aside\n a stratified fraction of training data as validation and terminate\n training when validation score is not improving by at least tol for\n n_iter_no_change consecutive epochs.\n\n .. versionadded:: 0.20\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Must be between 0 and 1.\n Only used if early_stopping is True.\n\n .. versionadded:: 0.20\n\n n_iter_no_change : int, default=5\n Number of iterations with no improvement to wait before early stopping.\n\n .. versionadded:: 0.20\n\n class_weight : dict, {class_label: weight} or \"balanced\" or None, optional\n Preset for the class_weight fit parameter.\n\n Weights associated with classes. If not given, all classes\n are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n warm_start : bool, default=False\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n Repeatedly calling fit or partial_fit when warm_start is True can\n result in a different solution than when calling fit a single time\n because of the way the data is shuffled.\n If a dynamic learning rate is used, the learning rate is adapted\n depending on the number of samples already seen. Calling ``fit`` resets\n this counter, while ``partial_fit`` will result in increasing the\n existing counter.\n\n average : bool or int, default=False\n When set to True, computes the averaged SGD weights and stores the\n result in the ``coef_`` attribute. If set to an int greater than 1,\n averaging will begin once the total number of samples seen reaches\n average. 
So ``average=10`` will begin averaging after seeing 10\n samples.\n\n Attributes\n ----------\n coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\\\n n_features)\n Weights assigned to the features.\n\n intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)\n Constants in decision function.\n\n n_iter_ : int\n The actual number of iterations to reach the stopping criterion.\n For multiclass fits, it is the maximum over every binary fit.\n\n loss_function_ : concrete ``LossFunction``\n\n classes_ : array of shape (n_classes,)\n\n t_ : int\n Number of weight updates performed during training.\n Same as ``(n_iter_ * n_samples)``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import linear_model\n >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\n >>> Y = np.array([1, 1, 2, 2])\n >>> clf = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n >>> clf.fit(X, Y)\n SGDClassifier()\n\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n\n See also\n --------\n sklearn.svm.LinearSVC, LogisticRegression, Perceptron\n\n \"\"\"\n\n def __init__(self, loss=\"hinge\", penalty='l2', alpha=0.0001, l1_ratio=0.15,\n fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,\n verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,\n random_state=None, learning_rate=\"optimal\", eta0=0.0,\n power_t=0.5, early_stopping=False, validation_fraction=0.1,\n n_iter_no_change=5, class_weight=None, warm_start=False,\n average=False):\n super().__init__(\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\n shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\n power_t=power_t, early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change, class_weight=class_weight,\n warm_start=warm_start, average=average)\n\n def _check_proba(self):\n if self.loss not in (\"log\", \"modified_huber\"):\n raise AttributeError(\"probability estimates are not available for\"\n \" loss=%r\" % self.loss)\n\n @property\n def predict_proba(self):\n \"\"\"Probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n Multiclass probability estimates are derived from binary (one-vs.-rest)\n estimates by simple normalization, as recommended by Zadrozny and\n Elkan.\n\n Binary probability estimates for loss=\"modified_huber\" are given by\n (clip(decision_function(X), -1, 1) + 1) / 2. 
For other loss functions\n it is necessary to perform proper probability calibration by wrapping\n the classifier with\n :class:`sklearn.calibration.CalibratedClassifierCV` instead.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n\n Returns\n -------\n array, shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in `self.classes_`.\n\n References\n ----------\n Zadrozny and Elkan, \"Transforming classifier scores into multiclass\n probability estimates\", SIGKDD'02,\n http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf\n\n The justification for the formula in the loss=\"modified_huber\"\n case is in the appendix B in:\n http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf\n \"\"\"\n self._check_proba()\n return self._predict_proba\n\n def _predict_proba(self, X):\n check_is_fitted(self)\n\n if self.loss == \"log\":\n return self._predict_proba_lr(X)\n\n elif self.loss == \"modified_huber\":\n binary = (len(self.classes_) == 2)\n scores = self.decision_function(X)\n\n if binary:\n prob2 = np.ones((scores.shape[0], 2))\n prob = prob2[:, 1]\n else:\n prob = scores\n\n np.clip(scores, -1, 1, prob)\n prob += 1.\n prob /= 2.\n\n if binary:\n prob2[:, 0] -= prob\n prob = prob2\n else:\n # the above might assign zero to all classes, which doesn't\n # normalize neatly; work around this to produce uniform\n # probabilities\n prob_sum = prob.sum(axis=1)\n all_zero = (prob_sum == 0)\n if np.any(all_zero):\n prob[all_zero, :] = 1\n prob_sum[all_zero] = len(self.classes_)\n\n # normalize\n prob /= prob_sum.reshape((prob.shape[0], -1))\n\n return prob\n\n else:\n raise NotImplementedError(\"predict_(log_)proba only supported when\"\n \" loss='log' or loss='modified_huber' \"\n \"(%r given)\" % self.loss)\n\n @property\n def predict_log_proba(self):\n \"\"\"Log of probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n When loss=\"modified_huber\", probability estimates may be hard zeros\n and ones, so taking the logarithm is not possible.\n\n See ``predict_proba`` for details.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n Returns\n -------\n T : array-like, shape (n_samples, n_classes)\n Returns the log-probability of the sample for each class in the\n model, where classes are ordered as they are in\n `self.classes_`.\n \"\"\"\n self._check_proba()\n return self._predict_log_proba\n\n def _predict_log_proba(self, X):\n return np.log(self.predict_proba(X))\n\n\nclass BaseSGDRegressor(RegressorMixin, BaseSGD):\n\n loss_functions = {\n \"squared_loss\": (SquaredLoss, ),\n \"huber\": (Huber, DEFAULT_EPSILON),\n \"epsilon_insensitive\": (EpsilonInsensitive, DEFAULT_EPSILON),\n \"squared_epsilon_insensitive\": (SquaredEpsilonInsensitive,\n DEFAULT_EPSILON),\n }\n\n @abstractmethod\n def __init__(self, loss=\"squared_loss\", penalty=\"l2\", alpha=0.0001,\n l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\n shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,\n random_state=None, learning_rate=\"invscaling\", eta0=0.01,\n power_t=0.25, early_stopping=False, validation_fraction=0.1,\n n_iter_no_change=5, warm_start=False, average=False):\n super().__init__(\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\n shuffle=shuffle, verbose=verbose, epsilon=epsilon,\n random_state=random_state, 
learning_rate=learning_rate, eta0=eta0,\n power_t=power_t, early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change, warm_start=warm_start,\n average=average)\n\n def _partial_fit(self, X, y, alpha, C, loss, learning_rate,\n max_iter, sample_weight, coef_init, intercept_init):\n X, y = check_X_y(X, y, \"csr\", copy=False, order='C', dtype=np.float64,\n accept_large_sparse=False)\n y = y.astype(np.float64, copy=False)\n\n n_samples, n_features = X.shape\n\n sample_weight = _check_sample_weight(sample_weight, X)\n\n # Allocate datastructures from input arguments\n if getattr(self, \"coef_\", None) is None:\n self._allocate_parameter_mem(1, n_features, coef_init,\n intercept_init)\n elif n_features != self.coef_.shape[-1]:\n raise ValueError(\"Number of features %d does not match previous \"\n \"data %d.\" % (n_features, self.coef_.shape[-1]))\n if self.average > 0 and getattr(self, \"average_coef_\", None) is None:\n self.average_coef_ = np.zeros(n_features,\n dtype=np.float64,\n order=\"C\")\n self.average_intercept_ = np.zeros(1, dtype=np.float64, order=\"C\")\n\n self._fit_regressor(X, y, alpha, C, loss, learning_rate,\n sample_weight, max_iter)\n\n return self\n\n def partial_fit(self, X, y, sample_weight=None):\n \"\"\"Perform one epoch of stochastic gradient descent on given samples.\n\n Internally, this method uses ``max_iter = 1``. Therefore, it is not\n guaranteed that a minimum of the cost function is reached after calling\n it once. Matters such as objective convergence and early stopping\n should be handled by the user.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Subset of training data\n\n y : numpy array of shape (n_samples,)\n Subset of target values\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._validate_params(for_partial_fit=True)\n return self._partial_fit(X, y, self.alpha, C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate, max_iter=1,\n sample_weight=sample_weight, coef_init=None,\n intercept_init=None)\n\n def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,\n intercept_init=None, sample_weight=None):\n self._validate_params()\n if self.warm_start and getattr(self, \"coef_\", None) is not None:\n if coef_init is None:\n coef_init = self.coef_\n if intercept_init is None:\n intercept_init = self.intercept_\n else:\n self.coef_ = None\n self.intercept_ = None\n\n if self.average > 0:\n self.standard_intercept_ = self.intercept_\n self.standard_coef_ = self.coef_\n self.average_coef_ = None\n self.average_intercept_ = None\n\n # Clear iteration count for multiple call to fit.\n self.t_ = 1.0\n\n self._partial_fit(X, y, alpha, C, loss, learning_rate,\n self.max_iter, sample_weight, coef_init,\n intercept_init)\n\n if (self.tol is not None and self.tol > -np.inf\n and self.n_iter_ == self.max_iter):\n warnings.warn(\"Maximum number of iteration reached before \"\n \"convergence. 
Consider increasing max_iter to \"\n \"improve the fit.\",\n ConvergenceWarning)\n\n return self\n\n def fit(self, X, y, coef_init=None, intercept_init=None,\n sample_weight=None):\n \"\"\"Fit linear model with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data\n\n y : numpy array, shape (n_samples,)\n Target values\n\n coef_init : array, shape (n_features,)\n The initial coefficients to warm-start the optimization.\n\n intercept_init : array, shape (1,)\n The initial intercept to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples (1. for unweighted).\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return self._fit(X, y, alpha=self.alpha, C=1.0,\n loss=self.loss, learning_rate=self.learning_rate,\n coef_init=coef_init,\n intercept_init=intercept_init,\n sample_weight=sample_weight)\n\n def _decision_function(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n\n Returns\n -------\n array, shape (n_samples,)\n Predicted target values per element in X.\n \"\"\"\n check_is_fitted(self)\n\n X = check_array(X, accept_sparse='csr')\n\n scores = safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n return scores.ravel()\n\n def predict(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n\n Returns\n -------\n array, shape (n_samples,)\n Predicted target values per element in X.\n \"\"\"\n return self._decision_function(X)\n\n def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,\n sample_weight, max_iter):\n dataset, intercept_decay = make_dataset(X, y, sample_weight)\n\n loss_function = self._get_loss_function(loss)\n penalty_type = self._get_penalty_type(self.penalty)\n learning_rate_type = self._get_learning_rate_type(learning_rate)\n\n if not hasattr(self, \"t_\"):\n self.t_ = 1.0\n\n validation_mask = self._make_validation_split(y)\n validation_score_cb = self._make_validation_score_cb(\n validation_mask, X, y, sample_weight)\n\n random_state = check_random_state(self.random_state)\n # numpy mtrand expects a C long which is a signed 32 bit integer under\n # Windows\n seed = random_state.randint(0, np.iinfo(np.int32).max)\n\n tol = self.tol if self.tol is not None else -np.inf\n\n if self.average > 0:\n self.standard_coef_, self.standard_intercept_, \\\n self.average_coef_, self.average_intercept_, self.n_iter_ =\\\n average_sgd(self.standard_coef_,\n self.standard_intercept_[0],\n self.average_coef_,\n self.average_intercept_[0],\n loss_function,\n penalty_type,\n alpha, C,\n self.l1_ratio,\n dataset,\n validation_mask, self.early_stopping,\n validation_score_cb,\n int(self.n_iter_no_change),\n max_iter, tol,\n int(self.fit_intercept),\n int(self.verbose),\n int(self.shuffle),\n seed,\n 1.0, 1.0,\n learning_rate_type,\n self.eta0, self.power_t, self.t_,\n intercept_decay, self.average)\n\n self.average_intercept_ = np.atleast_1d(self.average_intercept_)\n self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)\n self.t_ += self.n_iter_ * X.shape[0]\n\n if self.average <= self.t_ - 1.0:\n self.coef_ = self.average_coef_\n self.intercept_ = self.average_intercept_\n else:\n self.coef_ = self.standard_coef_\n self.intercept_ = self.standard_intercept_\n\n else:\n self.coef_, self.intercept_, 
self.n_iter_ = \\\n plain_sgd(self.coef_,\n self.intercept_[0],\n loss_function,\n penalty_type,\n alpha, C,\n self.l1_ratio,\n dataset,\n validation_mask, self.early_stopping,\n validation_score_cb,\n int(self.n_iter_no_change),\n max_iter, tol,\n int(self.fit_intercept),\n int(self.verbose),\n int(self.shuffle),\n seed,\n 1.0, 1.0,\n learning_rate_type,\n self.eta0, self.power_t, self.t_,\n intercept_decay)\n\n self.t_ += self.n_iter_ * X.shape[0]\n self.intercept_ = np.atleast_1d(self.intercept_)\n\n\nclass SGDRegressor(BaseSGDRegressor):\n \"\"\"Linear model fitted by minimizing a regularized empirical loss with SGD\n\n SGD stands for Stochastic Gradient Descent: the gradient of the loss is\n estimated each sample at a time and the model is updated along the way with\n a decreasing strength schedule (aka learning rate).\n\n The regularizer is a penalty added to the loss function that shrinks model\n parameters towards the zero vector using either the squared euclidean norm\n L2 or the absolute norm L1 or a combination of both (Elastic Net). If the\n parameter update crosses the 0.0 value because of the regularizer, the\n update is truncated to 0.0 to allow for learning sparse models and achieve\n online feature selection.\n\n This implementation works with data represented as dense numpy arrays of\n floating point values for the features.\n\n Read more in the :ref:`User Guide <sgd>`.\n\n Parameters\n ----------\n loss : str, default: 'squared_loss'\n The loss function to be used. The possible values are 'squared_loss',\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'\n\n The 'squared_loss' refers to the ordinary least squares fit.\n 'huber' modifies 'squared_loss' to focus less on getting outliers\n correct by switching from squared to linear loss past a distance of\n epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is\n linear past that; this is the loss function used in SVR.\n 'squared_epsilon_insensitive' is the same but becomes squared loss past\n a tolerance of epsilon.\n\n penalty : str, 'none', 'l2', 'l1', or 'elasticnet'\n The penalty (aka regularization term) to be used. Defaults to 'l2'\n which is the standard regularizer for linear SVM models. 'l1' and\n 'elasticnet' might bring sparsity to the model (feature selection)\n not achievable with 'l2'.\n\n alpha : float\n Constant that multiplies the regularization term. Defaults to 0.0001\n Also used to compute learning_rate when set to 'optimal'.\n\n l1_ratio : float\n The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\n l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\n Defaults to 0.15.\n\n fit_intercept : bool\n Whether the intercept should be estimated or not. If False, the\n data is assumed to be already centered. Defaults to True.\n\n max_iter : int, optional (default=1000)\n The maximum number of passes over the training data (aka epochs).\n It only impacts the behavior in the ``fit`` method, and not the\n :meth:`partial_fit` method.\n\n .. versionadded:: 0.19\n\n tol : float or None, optional (default=1e-3)\n The stopping criterion. If it is not None, the iterations will stop\n when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\n epochs.\n\n .. 
versionadded:: 0.19\n\n shuffle : bool, optional\n Whether or not the training data should be shuffled after each epoch.\n Defaults to True.\n\n verbose : integer, default=0\n The verbosity level.\n\n epsilon : float, default=0.1\n Epsilon in the epsilon-insensitive loss functions; only if `loss` is\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n For 'huber', determines the threshold at which it becomes less\n important to get the prediction exactly right.\n For epsilon-insensitive, any differences between the current prediction\n and the correct label are ignored if they are less than this threshold.\n\n random_state : int, RandomState instance or None, optional (default=None)\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n\n learning_rate : string, optional\n The learning rate schedule:\n\n 'constant':\n eta = eta0\n 'optimal':\n eta = 1.0 / (alpha * (t + t0))\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n 'invscaling': [default]\n eta = eta0 / pow(t, power_t)\n 'adaptive':\n eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n eta0 : double\n The initial learning rate for the 'constant', 'invscaling' or\n 'adaptive' schedules. The default value is 0.01.\n\n power_t : double\n The exponent for inverse scaling learning rate [default 0.25].\n\n early_stopping : bool, default=False\n Whether to use early stopping to terminate training when validation\n score is not improving. If set to True, it will automatically set aside\n a fraction of training data as validation and terminate\n training when validation score is not improving by at least tol for\n n_iter_no_change consecutive epochs.\n\n .. versionadded:: 0.20\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Must be between 0 and 1.\n Only used if early_stopping is True.\n\n .. versionadded:: 0.20\n\n n_iter_no_change : int, default=5\n Number of iterations with no improvement to wait before early stopping.\n\n .. versionadded:: 0.20\n\n warm_start : bool, default=False\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n Repeatedly calling fit or partial_fit when warm_start is True can\n result in a different solution than when calling fit a single time\n because of the way the data is shuffled.\n If a dynamic learning rate is used, the learning rate is adapted\n depending on the number of samples already seen. Calling ``fit`` resets\n this counter, while ``partial_fit`` will result in increasing the\n existing counter.\n\n average : bool or int, default=False\n When set to True, computes the averaged SGD weights and stores the\n result in the ``coef_`` attribute. If set to an int greater than 1,\n averaging will begin once the total number of samples seen reaches\n average. 
So ``average=10`` will begin averaging after seeing 10\n samples.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,)\n Weights assigned to the features.\n\n intercept_ : array, shape (1,)\n The intercept term.\n\n average_coef_ : array, shape (n_features,)\n Averaged weights assigned to the features.\n\n average_intercept_ : array, shape (1,)\n The averaged intercept term.\n\n n_iter_ : int\n The actual number of iterations to reach the stopping criterion.\n\n t_ : int\n Number of weight updates performed during training.\n Same as ``(n_iter_ * n_samples)``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import linear_model\n >>> n_samples, n_features = 10, 5\n >>> rng = np.random.RandomState(0)\n >>> y = rng.randn(n_samples)\n >>> X = rng.randn(n_samples, n_features)\n >>> clf = linear_model.SGDRegressor(max_iter=1000, tol=1e-3)\n >>> clf.fit(X, y)\n SGDRegressor()\n\n See also\n --------\n Ridge, ElasticNet, Lasso, sklearn.svm.SVR\n\n \"\"\"\n def __init__(self, loss=\"squared_loss\", penalty=\"l2\", alpha=0.0001,\n l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\n shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,\n random_state=None, learning_rate=\"invscaling\", eta0=0.01,\n power_t=0.25, early_stopping=False, validation_fraction=0.1,\n n_iter_no_change=5, warm_start=False, average=False):\n super().__init__(\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\n shuffle=shuffle, verbose=verbose, epsilon=epsilon,\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\n power_t=power_t, early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change, warm_start=warm_start,\n average=average)\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.any",
"numpy.asarray",
"numpy.atleast_1d",
"numpy.iinfo",
"numpy.clip",
"numpy.array",
"numpy.unique"
]
] |
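The record above embeds an SGD classifier/regressor source file whose `_predict_proba` maps `modified_huber` decision scores to probabilities via `(clip(decision_function(X), -1, 1) + 1) / 2` before normalizing. Below is a minimal NumPy-only sketch of that binary mapping, assuming precomputed decision scores rather than the fitted estimator; the function name `modified_huber_binary_proba` is chosen here for illustration and does not appear in the file.

```python
import numpy as np

def modified_huber_binary_proba(scores):
    """Map raw decision scores to two-class probabilities using the
    (clip(s, -1, 1) + 1) / 2 rule described in the docstring above."""
    scores = np.asarray(scores, dtype=float)
    p_pos = (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0   # positive-class probability
    return np.column_stack([1.0 - p_pos, p_pos])        # columns: [negative, positive]

# Scores beyond +/-1 saturate to hard 0/1 probabilities, matching the clipping.
print(modified_huber_binary_proba([-2.0, -0.5, 0.0, 0.5, 2.0]))
```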
choudhury722k/English-to-French-translator | [
"e792ce92adbdd3100d73d9d8aebc109cc7c560d7"
] | [
"Model prediction/app.py"
] | [
"from re import X\nfrom flask import Flask,render_template,url_for,request\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras import models \nimport numpy as np\nimport pickle\n\nfrench_tokenizer = pickle.load(open('french_tokenizer.pickle', 'rb'))\nenglish_tokenizer = pickle.load(open('english_tokenizer.pickle', 'rb'))\nmodel = models.load_model(\"translator_model.h5\")\n\ny_id_to_word = {value: key for key, value in french_tokenizer.word_index.items()}\ny_id_to_word[0] = '<PAD>'\n#y_id_to_word\n\napp = Flask(__name__)\n\[email protected]('/')\ndef hello_World():\n\treturn \"Hello Soumya\"\n\[email protected]('/translator', methods = ['GET', 'POST'])\ndef eng_to_french():\n message = request.args.get(\"message\")\n sentence = [english_tokenizer.word_index[word] for word in message.split()]\n #sentence\n sentence = pad_sequences([sentence], maxlen=15, padding='post')\n sentences = np.array([sentence[0]])\n predictions = model.predict(sentences, len(sentences))\n x = ' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]])\n if '<PAD>' in x:\n x=x.replace('<PAD>','')\n print(x) \n return x\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n\t"
] | [
[
"numpy.array",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"numpy.argmax",
"tensorflow.keras.models.load_model"
]
] |
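The Flask route in the record above decodes the model output by taking `argmax` over the vocabulary axis at each time step, mapping ids back to words, and stripping `<PAD>`. A NumPy-only sketch of just that decoding step follows; the id-to-word map and prediction array are made up here, standing in for the repo's pickled `french_tokenizer` and `translator_model.h5`.

```python
import numpy as np

# Hypothetical id-to-word map standing in for the pickled tokenizer's index.
y_id_to_word = {0: '<PAD>', 1: 'bonjour', 2: 'le', 3: 'monde'}

def decode(prediction):
    """prediction: (timesteps, vocab_size) array of per-step scores."""
    ids = np.argmax(prediction, axis=1)               # best word id per time step
    words = [y_id_to_word[int(i)] for i in ids]
    return ' '.join(w for w in words if w != '<PAD>') # drop padding tokens

# Fake 4-step output over a 4-word vocabulary.
pred = np.array([[0.1, 0.8, 0.05, 0.05],
                 [0.1, 0.1, 0.1, 0.7],
                 [0.9, 0.05, 0.03, 0.02],
                 [0.9, 0.05, 0.03, 0.02]])
print(decode(pred))  # -> "bonjour monde"
```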
alxmamaev/catalyst | [
"d05120c68fbc5174ff74297d29c0fc00d7e94924"
] | [
"catalyst/engines/tests/test_parallel.py"
] | [
"# flake8: noqa\n\nfrom typing import Any, Dict, List\nimport logging\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom catalyst.callbacks import CheckpointCallback, CriterionCallback, OptimizerCallback\nfrom catalyst.core.runner import IRunner\nfrom catalyst.engines import DataParallelEngine\nfrom catalyst.engines.torch import DeviceEngine\nfrom catalyst.loggers import ConsoleLogger, CSVLogger\nfrom catalyst.runners.config import SupervisedConfigRunner\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES\n\nfrom .misc import DataParallelTypeChecker, DummyDataset, DummyModel, LossMinimizationCallback\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomRunner(IRunner):\n def __init__(self, logdir):\n super().__init__()\n self._logdir = logdir\n\n def get_engine(self):\n return DataParallelEngine()\n\n def get_callbacks(self, stage: str):\n return {\n \"criterion\": CriterionCallback(\n metric_key=\"loss\", input_key=\"logits\", target_key=\"targets\"\n ),\n \"optimizer\": OptimizerCallback(metric_key=\"loss\"),\n # \"scheduler\": dl.SchedulerCallback(loader_key=\"valid\", metric_key=\"loss\"),\n \"checkpoint\": CheckpointCallback(\n self._logdir, loader_key=\"valid\", metric_key=\"loss\", minimize=True, save_n_best=3\n ),\n \"test_nn_parallel_data_parallel\": DataParallelTypeChecker(),\n \"test_loss_minimization\": LossMinimizationCallback(\"loss\", logger=logger),\n }\n\n @property\n def stages(self) -> \"Iterable[str]\":\n return [\"train\"]\n\n def get_stage_len(self, stage: str) -> int:\n return 10\n\n def get_loaders(self, stage: str) -> \"OrderedDict[str, DataLoader]\":\n dataset = DummyDataset(6)\n loader = DataLoader(dataset, batch_size=4)\n return {\"train\": loader, \"valid\": loader}\n\n def get_model(self, stage: str):\n return DummyModel(4, 2)\n\n def get_criterion(self, stage: str):\n return torch.nn.MSELoss()\n\n def get_optimizer(self, model, stage: str):\n return torch.optim.Adam(model.parameters())\n\n def get_scheduler(self, optimizer, stage: str):\n return None\n\n def get_trial(self):\n return None\n\n def get_loggers(self):\n return {\"console\": ConsoleLogger(), \"csv\": CSVLogger(logdir=self._logdir)}\n\n def handle_batch(self, batch):\n x, y = batch\n logits = self.model(x)\n\n self.batch = {\"features\": x, \"targets\": y, \"logits\": logits}\n\n\ndef train_from_runner():\n with TemporaryDirectory() as logdir:\n runner = CustomRunner(logdir)\n runner.run()\n\n\ndef train_from_config():\n with TemporaryDirectory() as logdir:\n dataset = DummyDataset(6)\n runner = SupervisedConfigRunner(\n config={\n \"args\": {\"logdir\": logdir},\n \"model\": {\"_target_\": \"DummyModel\", \"in_features\": 4, \"out_features\": 2},\n \"engine\": {\"_target_\": \"DataParallelEngine\"},\n \"args\": {\"logdir\": logdir},\n \"stages\": {\n \"stage1\": {\n \"num_epochs\": 10,\n \"loaders\": {\"batch_size\": 4, \"num_workers\": 0},\n \"criterion\": {\"_target_\": \"MSELoss\"},\n \"optimizer\": {\"_target_\": \"Adam\", \"lr\": 1e-3},\n \"callbacks\": {\n \"criterion\": {\n \"_target_\": \"CriterionCallback\",\n \"metric_key\": \"loss\",\n \"input_key\": \"logits\",\n \"target_key\": \"targets\",\n },\n \"optimizer\": {\"_target_\": \"OptimizerCallback\", \"metric_key\": \"loss\"},\n \"test_nn_parallel_data_parallel\": {\n \"_target_\": \"DataParallelTypeChecker\"\n },\n \"test_loss_minimization\": {\n \"_target_\": \"LossMinimizationCallback\",\n \"key\": \"loss\",\n },\n },\n },\n },\n }\n 
)\n runner.get_datasets = lambda *args, **kwargs: {\n \"train\": dataset,\n \"valid\": dataset,\n }\n runner.run()\n\n\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_experiment_parallel_engine_with_cuda():\n train_from_runner()\n\n\n# @mark.skip(\"Config experiment is in development phase!\")\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_config_experiment_engine_with_cuda():\n train_from_config()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.MSELoss"
]
] |
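The Catalyst test in the record above wires a `DataLoader`, `MSELoss`, and `Adam` around a small model through the engine/runner abstraction. For orientation only, here is a framework-free sketch of the same training-loop shape on dummy 4-feature/2-target data; it omits Catalyst's engines, callbacks, and loggers, and all names are chosen here rather than taken from the repo.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Dummy data mirroring DummyModel(4, 2) and batch_size=4 from the test above.
x = torch.randn(6, 4)
y = torch.randn(6, 2)
loader = DataLoader(TensorDataset(x, y), batch_size=4)

model = torch.nn.Linear(4, 2)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())

for epoch in range(10):
    for features, targets in loader:
        logits = model(features)          # same keys the runner stores in self.batch
        loss = criterion(logits, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
```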
LvHang/aps | [
"3e9c8b247e0526481970c28e8af1a6a93cc7f2cc"
] | [
"aps/loader/simu.py"
] | [
"#!/usr/bin/env python\n\n# Copyright 2020 Jian Wu\n# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\"\"\"\nAdopt from my another project: https://github.com/funcwj/setk\nSee https://github.com/funcwj/setk/tree/master/doc/data_simu for command line usage\n\"\"\"\nimport argparse\nimport numpy as np\n\nfrom aps.loader.audio import read_audio, add_room_response\nfrom aps.opts import StrToBoolAction\nfrom aps.const import EPSILON\n\n\ndef coeff_snr(sig_pow, ref_pow, snr):\n \"\"\"\n For\n mix = Sa + alpha*Sb\n Given\n SNR = 10*log10[Pa/(Pb * alpha^2)]\n we got\n alpha = Pa/[Pb*10^(SNR/10)]^0.5\n \"\"\"\n return (ref_pow / (sig_pow * 10**(snr / 10) + EPSILON))**0.5\n\n\ndef add_speaker(mix_nsamps,\n src_spk,\n src_begin,\n sdr,\n src_rir=None,\n channel=-1,\n sr=16000):\n \"\"\"\n Mix source speakers\n \"\"\"\n spk_image, spk_power = [], []\n for i, spk in enumerate(src_spk):\n if src_rir is None:\n src = spk[None, ...] if spk.ndim == 1 else spk\n spk_image.append(src)\n spk_power.append(np.mean(src[0]**2))\n else:\n rir = src_rir[i]\n if rir.ndim == 1:\n rir = rir[None, ...]\n if channel >= 0:\n if rir.ndim == 2:\n rir = rir[channel:channel + 1]\n revb, p = add_room_response(spk, rir, sr=sr)\n spk_image.append(revb)\n spk_power.append(p)\n # make mix\n N, _ = spk_image[0].shape\n mix = [np.zeros([N, mix_nsamps], dtype=np.float32) for _ in src_spk]\n # start mixing\n ref_power = spk_power[0]\n for i, image in enumerate(spk_image):\n dur = image.shape[-1]\n beg = src_begin[i]\n coeff = 1 if i == 0 else coeff_snr(spk_power[i], ref_power, sdr[i])\n mix[i][..., beg:beg + dur] += coeff * image\n return mix\n\n\ndef add_point_noise(mix_nsamps,\n ref_power,\n noise,\n noise_begin,\n snr,\n noise_rir=None,\n channel=-1,\n repeat=False,\n sr=16000):\n \"\"\"\n Add pointsource noises\n \"\"\"\n image = []\n image_power = []\n for i, noise in enumerate(noise):\n beg = noise_begin[i]\n if not repeat:\n dur = min(noise.shape[-1], mix_nsamps - beg)\n else:\n dur = mix_nsamps - beg\n # if short, then padding\n if noise.shape[-1] < dur:\n noise = np.pad(noise, (0, dur - noise.shape[-1]), mode=\"wrap\")\n\n if noise_rir is None:\n src = noise[None, ...] 
if noise.ndim == 1 else noise\n\n image.append(src)\n image_power.append(np.mean(src[0, :dur]**2) if dur > 0 else 0)\n else:\n rir = noise_rir[i]\n if rir.ndim == 1:\n rir = rir[None, ...]\n if channel >= 0:\n if rir.ndim == 2:\n rir = rir[channel:channel + 1]\n revb, revb_power = add_room_response(noise[:dur], rir, sr=sr)\n image.append(revb)\n image_power.append(revb_power)\n # make noise mix\n N, _ = image[0].shape\n mix = np.zeros([N, mix_nsamps], dtype=np.float32)\n # start mixing\n for i, img in enumerate(image):\n beg = noise_begin[i]\n coeff = coeff_snr(image_power[i], ref_power, snr[i])\n mix[..., beg:beg + dur] += coeff * img[..., :dur]\n return mix\n\n\ndef load_audio(src_args, beg=None, end=None, sr=16000):\n \"\"\"\n Load audio from args.xxx\n \"\"\"\n if src_args:\n src_path = src_args.split(\",\")\n beg_int = [None for _ in src_path]\n end_int = [None for _ in src_path]\n if beg:\n beg_int = [int(v) for v in beg.split(\",\")]\n if end:\n end_int = [int(v) for v in end.split(\",\")]\n return [\n read_audio(s, sr=sr, beg=b, end=e)\n for s, b, e in zip(src_path, beg_int, end_int)\n ]\n else:\n return None\n\n\ndef run_simu(args):\n\n def arg_float(src_args):\n return [float(s) for s in src_args.split(\",\")] if src_args else None\n\n src_spk = load_audio(args.src_spk, sr=args.sr)\n src_rir = load_audio(args.src_rir, sr=args.sr)\n if src_rir:\n if len(src_rir) != len(src_spk):\n raise RuntimeError(\n f\"Number of --src-rir={args.src_rir} do not match with \" +\n f\"--src-spk={args.src_spk} option\")\n sdr = arg_float(args.src_sdr)\n if len(src_spk) > 1 and not sdr:\n raise RuntimeError(\"--src-sdr need to be assigned for \" +\n f\"--src-spk={args.src_spk}\")\n if sdr:\n if len(src_spk) - 1 != len(sdr):\n raise RuntimeError(\"Number of --src-snr - 1 do not match with \" +\n \"--src-snr option\")\n sdr = [0] + sdr\n\n src_begin = arg_float(args.src_begin)\n if src_begin:\n src_begin = [int(v) for v in src_begin]\n else:\n src_begin = [0 for _ in src_spk]\n\n # number samples of the mixture\n mix_nsamps = max([b + s.size for b, s in zip(src_begin, src_spk)])\n\n point_noise_rir = load_audio(args.point_noise_rir, sr=args.sr)\n\n point_noise_end = [\n str(int(v) + mix_nsamps) for v in args.point_noise_offset.split()\n ]\n point_noise = load_audio(args.point_noise,\n beg=args.point_noise_offset,\n end=\",\".join(point_noise_end),\n sr=args.sr)\n\n if args.point_noise:\n if point_noise_rir:\n if len(point_noise) != len(point_noise_rir):\n raise RuntimeError(\n f\"Number of --point-noise-rir={args.point_noise_rir} do not match with \"\n + f\"--point-noise={args.point_noise} option\")\n point_snr = arg_float(args.point_noise_snr)\n if not point_snr:\n raise RuntimeError(\"--point-noise-snr need to be assigned for \" +\n f\"--point-noise={args.point_noise}\")\n if len(point_noise) != len(point_snr):\n raise RuntimeError(\n f\"Number of --point-noise-snr={args.point_noise_snr} do not match with \"\n + f\"--point-noise={args.point_noise} option\")\n\n point_begin = arg_float(args.point_noise_begin)\n if point_begin:\n point_begin = [int(v) for v in point_begin]\n else:\n point_begin = [0 for _ in point_noise]\n\n isotropic_noise = load_audio(args.isotropic_noise,\n beg=str(args.isotropic_noise_offset),\n end=str(args.isotropic_noise_offset +\n mix_nsamps),\n sr=args.sr)\n if isotropic_noise:\n isotropic_noise = isotropic_noise[0]\n isotropic_snr = arg_float(args.isotropic_noise_snr)\n if not isotropic_snr:\n raise RuntimeError(\n \"--isotropic-snr need to be assigned for \" +\n 
f\"--isotropic-noise={args.isotropic_noise} option\")\n isotropic_snr = isotropic_snr[0]\n else:\n isotropic_snr = None\n\n # add speakers\n spk = add_speaker(mix_nsamps,\n src_spk,\n src_begin,\n sdr,\n src_rir=src_rir,\n channel=args.dump_channel,\n sr=args.sr)\n spk_utt = sum(spk)\n mix = spk_utt.copy()\n\n spk_power = np.mean(spk_utt[0]**2)\n if point_noise:\n noise = add_point_noise(mix_nsamps,\n spk_power,\n point_noise,\n point_begin,\n point_snr,\n noise_rir=point_noise_rir,\n channel=args.dump_channel,\n repeat=args.point_noise_repeat,\n sr=args.sr)\n num_channels = spk_utt.shape[0]\n if num_channels != noise.shape[0]:\n if num_channels == 1:\n noise = noise[0:1]\n else:\n raise RuntimeError(\"Channel mismatch between source speaker \" +\n \"configuration and pointsource noise's, \" +\n f\"{num_channels} vs {noise.shape[0]}\")\n mix = spk_utt + noise\n else:\n noise = None\n\n ch = args.dump_channel\n if isotropic_noise is not None:\n N, _ = spk_utt.shape\n if N == 1:\n if isotropic_noise.ndim == 1:\n isotropic_noise = isotropic_noise[None, ...]\n else:\n if ch >= 0:\n isotropic_noise = isotropic_noise[ch:ch + 1]\n else:\n raise RuntimeError(\n \"Single channel mixture vs multi-channel \"\n \"isotropic noise\")\n else:\n if isotropic_noise.shape[0] != N:\n raise RuntimeError(\n \"Channel number mismatch between mixture and isotropic noise, \"\n + f\"{N} vs {isotropic_noise.shape[0]}\")\n\n dur = min(mix_nsamps, isotropic_noise.shape[-1])\n isotropic_chunk = isotropic_noise[0, :dur]\n power = np.mean(isotropic_chunk**2)\n coeff = coeff_snr(power, spk_power, isotropic_snr)\n mix[..., :dur] += coeff * isotropic_chunk\n\n if noise is None:\n noise = coeff * isotropic_chunk\n else:\n noise[..., :dur] += coeff * isotropic_chunk\n\n factor = args.norm_factor / (np.max(np.abs(mix)) + EPSILON)\n\n mix = mix.squeeze() * factor\n spk = [s[0] * factor for s in spk]\n\n if noise is None:\n return mix, spk, None\n else:\n return mix, spk, noise[0] * factor\n\n\ndef make_argparse():\n parser = argparse.ArgumentParser(\n description=\"Command to do audio data simulation\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--src-spk\",\n type=str,\n required=True,\n help=\"Source speakers, e.g., spk1.wav,spk2.wav\")\n parser.add_argument(\"--src-rir\",\n type=str,\n default=\"\",\n help=\"RIRs for each source speakers\")\n parser.add_argument(\"--src-sdr\",\n type=str,\n default=\"\",\n help=\"SDR for each speakers (if needed)\")\n parser.add_argument(\"--src-begin\",\n type=str,\n default=\"\",\n help=\"Begining samples on the mixture utterances\")\n parser.add_argument(\"--point-noise\",\n type=str,\n default=\"\",\n help=\"Add pointsource noises\")\n parser.add_argument(\"--point-noise-rir\",\n type=str,\n default=\"\",\n help=\"RIRs of the pointsource noises (if needed)\")\n parser.add_argument(\"--point-noise-snr\",\n type=str,\n default=\"\",\n help=\"SNR of the pointsource noises\")\n parser.add_argument(\"--point-noise-begin\",\n type=str,\n default=\"\",\n help=\"Begining samples of the \"\n \"pointsource noises on the mixture \"\n \"utterances (if needed)\")\n parser.add_argument(\"--point-noise-offset\",\n type=str,\n default=\"\",\n help=\"Add from the offset position \"\n \"of the pointsource noise\")\n parser.add_argument(\"--point-noise-repeat\",\n action=StrToBoolAction,\n default=False,\n help=\"Repeat the pointsource noise or not\")\n parser.add_argument(\"--isotropic-noise\",\n type=str,\n default=\"\",\n help=\"Add isotropic noises\")\n 
parser.add_argument(\"--isotropic-noise-snr\",\n type=str,\n default=\"\",\n help=\"SNR of the isotropic noises\")\n parser.add_argument(\"--isotropic-noise-offset\",\n type=int,\n default=0,\n help=\"Add noise from the offset position \"\n \"of the isotropic noise\")\n parser.add_argument(\"--dump-channel\",\n type=int,\n default=-1,\n help=\"Index of the channel to dump out (-1 means all)\")\n parser.add_argument('--norm-factor',\n type=float,\n default=0.9,\n help=\"Normalization factor of the final output\")\n parser.add_argument(\"--sr\",\n type=int,\n default=16000,\n help=\"Value of the sample rate\")\n return parser\n"
] | [
[
"numpy.mean",
"numpy.pad",
"numpy.abs",
"numpy.zeros"
]
] |
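`coeff_snr` in the record above derives the mixing gain alpha = sqrt(Pa / (Pb * 10^(SNR/10))) so that adding `alpha * noise` to a reference signal yields the requested SNR. A short NumPy check of that identity on synthetic signals follows; the `EPSILON` value is a stand-in assumed here, since the repo imports its own constant from `aps.const`.

```python
import numpy as np

EPSILON = 1e-8  # stand-in for aps.const.EPSILON

def coeff_snr(sig_pow, ref_pow, snr):
    # alpha = sqrt(Pa / (Pb * 10^(SNR/10))), as in the docstring above
    return (ref_pow / (sig_pow * 10 ** (snr / 10) + EPSILON)) ** 0.5

rng = np.random.default_rng(0)
ref = rng.standard_normal(16000)           # "speech"
noise = 3.0 * rng.standard_normal(16000)   # louder "noise"

target_snr = 5.0
alpha = coeff_snr(np.mean(noise ** 2), np.mean(ref ** 2), target_snr)
scaled = alpha * noise

# Achieved SNR of ref against the scaled noise should land on the target.
achieved = 10 * np.log10(np.mean(ref ** 2) / np.mean(scaled ** 2))
print(round(achieved, 2))  # ~5.0 dB
```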
JingweiZuo/SE2TeC | [
"f2aab845aa648e366d0f6917a5d8abfd4d556d13"
] | [
"SE4TeC_demo/GUI_function.py"
] | [
"import time\nimport tkinter as tk\nfrom tkinter import *\nimport tkinter.filedialog as filedialog\nfrom tkinter.filedialog import askopenfilename\nimport utils.utils as util\nimport utils.similarity_measures as sm\nimport SMAP.MatrixProfile as mp\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nLARGE_FONT= (\"Verdana\", 12)\n\nclass gui_function:\n def __init__(self, master):\n self.filename = 'file name'\n self.training_filename = 'choose training set'\n self.testing_filename = 'choose testing set'\n #transfer the main test part to the class\n self.master = master\n self.dataset = util.Dataset()\n self.testdataset = util.Dataset()\n self.dataset_name = None\n self.shapeletList1 = []\n self.shapeletList2 = []\n\n def add_dataset(self):\n self.dataset_name = askopenfilename(parent=self.master, title=\"Choose a file\")\n array_tsdict = util.load_dataset(self.dataset_name)\n dir = self.dataset_name.split(\"/\")\n datasetname = dir[-1]\n self.dataset.update(array_tsdict, datasetname)\n self.master.v_dsname.set(self.dataset.name)\n self.master.v_tslength.set(self.dataset.tslength)\n self.master.v_tsnbr.set(self.dataset.size)\n self.master.v_classnbr.set(len(self.dataset.ClassList))\n self.master.show_frame(self.master.frame2, \"SMAPPage\")\n\n def add_testing_file(self):\n self.testfile_name = askopenfilename(parent=self.master, title=\"Choose a file\")\n array_tsdict = util.load_dataset(self.testfile_name)\n dir = self.testfile_name.split(\"/\")\n datasetname = dir[-1]\n self.testdataset.update(array_tsdict, datasetname)\n self.master.v_testdsname.set(self.testdataset.name)\n self.master.v_testtslength.set(self.testdataset.tslength)\n self.master.v_testtsnbr.set(self.testdataset.size)\n self.master.v_testclassnbr.set(len(self.testdataset.ClassList))\n self.master.testdataset= self.testdataset\n\n def ShowAlgoFrame(self, algorithm):\n self.master.frame2_1[algorithm].tkraise()\n self.master.frame2_1[algorithm].grid(row=0, column=0, sticky=W)\n\n def extractDP(self, master):\n self.nbr_source = master.v_source.get()\n self.nbr_target = master.v_target.get()\n dataset = master.dataset\n hash_source = dataset.tsNameDir[self.nbr_source]\n hash_target = dataset.tsNameDir[self.nbr_target]\n self.source = dataset.tsObjectDir[hash_source]\n self.target = dataset.tsObjectDir[hash_target]\n self.m = master.v_queryL.get()\n index_start = master.v_queryI.get()\n\n data = self.target.timeseries\n index_end = index_start + self.m\n query = self.source.timeseries[index_start:index_end]\n #DP = sm.mass_v2(data, query)\n #DP = sm.mass_v1(query, data)\n DP = sm.euclidean_distance_unequal_lengths(data, query)\n # display the figures on the CANVAS of the GUI\n\n # CANVAS\n # remove the axis_x of \"self.axe2\"\n plt.setp(self.master.ax2.get_xaxis(), visible=False)\n self.master.ax2.spines['bottom'].set_visible(False)\n self.master.ax3.clear() # clear the previous plot at the same position\n x = range(len(DP))\n self.master.ax3.spines['top'].set_visible(False)\n self.master.ax3.spines['right'].set_visible(False)\n self.master.ax3.set_ylabel(\"Distance Profile\")\n self.master.ax3.plot(x, DP, linewidth=0.5, label=\"D. P. 
of Query in \" +self.nbr_target)\n self.master.ax3.legend()\n self.master.canvas.draw()\n\n # show the Nearest Neighbor in target TS\n DP_list = DP.tolist()\n index_inValue = DP_list.index(min(DP_list))\n index_end = index_inValue + master.m\n NearestN = self.target.timeseries[index_inValue:index_end]\n x_target = range(len(self.target.timeseries))\n x_NearestN = range(index_inValue, index_end)\n self.ax2 = self.master.ax2\n self.ax2.clear()\n self.ax2.plot(x_target, self.target.timeseries, linewidth=0.5, label=self.nbr_target)\n self.ax2.plot(x_NearestN, NearestN, linewidth=2, label=\"Nearest Neighbor of Query\")\n self.ax2.spines['top'].set_visible(False)\n self.ax2.spines['right'].set_visible(False)\n self.ax2.set_ylabel(\"Target TS\")\n self.ax2.legend(loc=\"upper right\")\n self.master.canvas.draw()\n\n\n def extractMP(self, master):\n self.nbr_source = master.v_source.get()\n self.nbr_target = master.v_target.get()\n dataset = master.dataset\n hash_source = dataset.tsNameDir[self.nbr_source]\n hash_target = dataset.tsNameDir[self.nbr_target]\n self.source = dataset.tsObjectDir[hash_source]\n self.target = dataset.tsObjectDir[hash_target]\n self.m = master.v_queryL.get()\n\n dp_all, MP= mp.computeMP(self.source, self.target, self.m, \"mass_v2\")\n\n # CANVAS\n # remove the axis_x of \"self.axe2\"\n plt.setp(self.master.ax2.get_xaxis(), visible=False)\n self.master.ax2.spines['bottom'].set_visible(False)\n self.master.ax3.clear() # clear the previous plot at the same position\n x = range(len(MP))\n self.master.ax3.spines['top'].set_visible(False)\n self.master.ax3.spines['right'].set_visible(False)\n self.master.ax3.set_ylabel(\"Matrix Profile\")\n self.master.ax3.plot(x, MP, linewidth=0.5, label=\"M. P. of \"+self.nbr_source+\" towards \" +self.nbr_target)\n self.master.ax3.legend()\n self.master.canvas.draw()\n\n # show the matching pair in Source and Target TS\n index_source = MP.index(min(MP))\n index_source_end = index_source + self.m\n x_pair_source = range(index_source, index_source_end)\n pair_source = self.source.timeseries[index_source:index_source_end]\n DP = sm.euclidean_distance_unequal_lengths(self.target.timeseries, pair_source)\n index_target = DP.tolist().index(min(DP.tolist()))\n index_target_end = index_target + self.m\n x_pair_target = range(index_target, index_target_end)\n pair_target = self.target.timeseries[index_target:index_target_end]\n\n # remove the Query in Source TS\n self.master.ax1.clear()\n x = range(len(self.source.timeseries))\n self.master.ax1.spines['top'].set_visible(False)\n self.master.ax1.spines['right'].set_visible(False)\n self.master.ax1.set_ylabel(\"Source TS\")\n self.master.ax1.plot(x_pair_source, pair_source, linewidth=2, color=\"red\", label=\"Nearest Pair in source\")\n self.master.ax1.plot(x, self.source.timeseries, linewidth=0.5, label=self.nbr_source)\n self.master.ax1.legend()\n self.master.canvas.draw()\n # remove the Nearest Neighbor in Target TS\n self.master.ax2.clear()\n x = range(len(self.target.timeseries))\n self.master.ax2.spines['top'].set_visible(False)\n self.master.ax2.spines['right'].set_visible(False)\n self.master.ax2.set_ylabel(\"Target TS\")\n self.master.ax2.plot(x_pair_target, pair_target, linewidth=2, color=\"red\", label=\"Nearest Pair in target\")\n self.master.ax2.plot(x, self.target.timeseries, linewidth=0.5, label=self.nbr_target)\n self.master.ax2.legend()\n self.master.canvas.draw()\n\n\n def extractLB(self):\n return 0\n\n def extractRP(self, master):\n source = master.source\n input_class = 
str(master.v_class.get())\n start = time.clock()\n DP_all, mp_all, self.dist_differ, dist_threshold, self.dist_side_C, self.dist_side_nonC = mp.computeDistDiffer(source, master.dataset.tsObjectDir, self.m)\n end = time.clock()\n self.SMAP_time = round(end - start, 2)\n if str(source.class_timeseries) == input_class:\n RP = self.dist_side_C\n else:\n RP = self.dist_side_nonC\n # CANVAS\n # Configire the axis looking (start)\n self.master.ax3.clear()\n if (self.master.ax2.get_ylabel()!=\"Rep. Profile\"):\n self.master.ax2.clear()\n plt.setp(self.master.ax2.get_xaxis(), visible=True)\n self.master.ax2.spines['bottom'].set_visible(True)\n self.master.ax3.axis(\"off\")\n # Configire the axis looking (end)\n x = range(len(RP))\n self.master.ax2.set_ylabel(\"Rep. Profile\")\n label = \"Rep. P. of \" + self.nbr_source + \" in class \" + input_class\n self.master.ax2.plot(x, RP, linewidth=0.5, label=label)\n self.master.ax2.legend()\n self.master.canvas.draw()\n # remove the Query in Source TS\n self.master.ax1.clear()\n x = range(len(self.source.timeseries))\n self.master.ax1.spines['top'].set_visible(False)\n self.master.ax1.spines['right'].set_visible(False)\n self.master.ax1.set_ylabel(\"Source TS\")\n self.master.ax1.plot(x, self.source.timeseries, linewidth=0.5, label=self.nbr_source)\n self.master.ax1.legend()\n self.master.canvas.draw()\n\n\n def extractDiscP(self, master):\n '''source = master.source\n dp_all, mp_all, dist_differ, dist_threshold, dist_side_C, dist_side_nonC = mp.computeDistDiffer(source, master.dataset.tsObjectDir, self.m)'''\n DiscP = self.dist_differ\n # CANVAS\n # Configire the axis looking (start)\n plt.setp(self.master.ax2.get_xaxis(), visible=False)\n self.master.ax2.spines['bottom'].set_visible(False)\n self.master.ax3.axis(\"on\")\n # Configire the axis looking (end)\n x = range(len(DiscP))\n self.master.ax3.set_ylabel(\"Discm. Profile\")\n label = \"Discm. P. 
of \" + self.nbr_source\n self.master.ax3.plot(x, DiscP, linewidth=0.5, label=label)\n self.master.ax3.legend()\n self.master.canvas.draw()\n\n # show the pattern found in source TS\n discP_list = DiscP.tolist()\n index_maxValue = discP_list.index(max(discP_list))\n index_end = index_maxValue + master.m\n source = master.source.timeseries\n pattern = source[index_maxValue:index_end]\n x_source = range(len(source))\n x_pattern = range(index_maxValue, index_end)\n\n # CANVAS\n self.ax1 = self.master.ax1\n self.ax1.clear()\n self.ax1.plot(x_source, source, linewidth=0.5, label=\"Source TS\")\n self.ax1.plot(x_pattern, pattern, linewidth=2, color=\"red\", label=\"Candidate Shaplet in \"+ master.v_source.get())\n self.ax1.spines['top'].set_visible(False)\n self.ax1.spines['right'].set_visible(False)\n self.ax1.set_ylabel(\"Source TS\")\n self.ax1.legend(loc=\"upper right\")\n self.master.canvas.draw()\n self.master.v_timeSMAP.set(self.SMAP_time)\n\n def extractDiscP_LB(self, master):\n self.master.v_timeSMAPLB.set(0)\n\n def drawShapelet(self, path, filename):\n testFile = pd.read_csv(path + filename, header=None)\n Class = testFile[0][0]\n shapData = testFile[1][0]\n shapData = shapData.strip('()').replace('[', '').replace(']', '')\n shapeletList = []\n # shapObjectList: DD, Thresh\n shapObjectList = shapData.split(\"),(\")\n for shapObject in shapObjectList:\n shap = Shapelet()\n shapObject = shapObject.split(',')\n shap.DD = float(shapObject[0])\n shap.thresh = float(shapObject[1])\n shap.Class = Class\n shap.subseq = [float(s) for s in shapObject[2:]]\n shapeletList.append(shap)\n return shapeletList\n\n def drawTS(self, path, filename):\n tsObjectList1 = []\n tsObjectList2 = []\n testFile = pd.read_csv(path + filename, header=None)\n tsClass1 = testFile[testFile[1] == 1]\n tsClass2 = testFile[testFile[1] == -1]\n for i in tsClass1.index:\n ts = timeseries()\n row = tsClass1.loc[i]\n ts.id = row[0]\n ts.Class = row[1]\n ts.seq = row[2].split(',')\n ts.seq = [float(val) for val in ts.seq]\n tsObjectList1.append(ts)\n for i in tsClass2.index:\n ts = timeseries()\n row = tsClass2.loc[i]\n ts.id = row[0]\n ts.Class = row[1]\n ts.seq = row[2].split(',')\n ts.seq = [float(val) for val in ts.seq]\n tsObjectList2.append(ts)\n return tsObjectList1, tsObjectList2\n\n def showTSset(self):\n path_ECG = \"/Users/Jingwei/Desktop/PhD_study/Done/EDBTdemo2018/SMAP_results/ECG200/TS_raw/\"\n file_ECG = \"TS.csv\"\n path = path_ECG\n filename = file_ECG\n tsObjectC1, tsObjectC2 = self.drawTS(path, filename)\n self.fig = self.master.figure\n if self.master.v_class.get()==\"1.0\":\n self.fig.clf()\n self.ax1 = self.fig.add_subplot(211)\n self.ax2 = self.fig.add_subplot(212)\n input_class = self.master.v_class.get()\n if input_class == \"1.0\":\n self.ax1.clear()\n for ts in tsObjectC1[11:21]:\n seq = ts.seq\n self.ax1.set_ylabel(\"TS data class 1\")\n X = range(0, len(seq))\n self.ax1.plot(X, seq, color='green', linewidth=0.5)\n elif input_class == \"-1.0\":\n self.ax2.clear()\n for ts in tsObjectC2[0:10]:\n seq = ts.seq\n self.ax2.set_xlabel(\"index/offset\")\n self.ax2.set_ylabel(\"TS data class -1.0\")\n X = range(0, len(seq))\n self.ax2.plot(X, seq, color='green', linewidth=0.5)\n self.master.canvas.draw()\n\n def extractShapeletCandidate(self):\n path_ECG = \"/Users/Jingwei/Desktop/PhD_study/Done/EDBTdemo2018/SMAP_results/ECG200/Shapelets/\"\n f1_ECG = \"part-00043-956f02be-ab81-45db-9679-0bfd9150f5e8.csv\" # 1\n f2_ECG = \"part-00013-956f02be-ab81-45db-9679-0bfd9150f5e8.csv\" # -1\n path = path_ECG\n 
filename1 = f1_ECG\n filename2 = f2_ECG\n self.shapeletList1 = self.drawShapelet(path, filename1)\n self.shapeletList2 = self.drawShapelet(path, filename2)\n input_k = self.master.v_k.get()\n input_c = self.master.v_class.get()\n self.fig = self.master.figure\n if input_c == \"1.0\":\n i = 0\n for shap in self.shapeletList1[:input_k]:\n self.subaxe = self.fig.add_subplot(211)\n shapdata = shap.subseq\n # add a shift of 10 for shapelets\n X = range(10, len(shapdata)+10)\n self.subaxe.plot(X, shapdata, color='red', linewidth=2)\n i = i + 0.1\n elif input_c == \"-1.0\":\n i = 0\n for shap in self.shapeletList2[:input_k]:\n self.subaxe = self.fig.add_subplot(212)\n shapdata = shap.subseq\n X = range(0, len(shapdata))\n self.subaxe.plot(X, shapdata, color='blue', linewidth=2)\n self.master.canvas.draw()\n # canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=True)\n\n def extractED(self):\n return 0\n\n def extractEDMatrix(self, master):\n self.master.v_timeUSE.set(0)\n\n def predict(self, master):\n #list of Shapelet from different class\n testdataset = master.guiFunc.testdataset\n nbr_testTS = master.v_testInstance.get()\n print(\"---callback predict---\")\n print(nbr_testTS)\n if nbr_testTS!=\"select\":\n hash_testTS = testdataset.tsNameDir[nbr_testTS]\n self.testTS = testdataset.tsObjectDir[hash_testTS]\n testTS = self.testTS.timeseries\n min_dist = float('inf')\n index_target = None\n predict_class = '0'\n match_shapelet = None\n print(\"length os shapeletList1 is \" + str(len(self.shapeletList1)))\n for shap in self.shapeletList1 + self.shapeletList2:\n DP = sm.euclidean_distance_unequal_lengths(testTS, shap.subseq)\n DP = DP.tolist()\n DP_min = min(DP)\n if min_dist > DP_min:\n min_dist = DP_min\n index_target = DP.index(DP_min)\n match_shapelet = shap\n self.testTS = testdataset.tsObjectDir[hash_testTS]\n # CANVAS\n x = range(len(testTS))\n shap_data = match_shapelet.subseq\n x_shap = range(index_target, index_target + len(shap_data))\n self.master.figure.clf()\n self.ax = self.master.figure.add_subplot(111)\n self.ax.spines['top'].set_visible(False)\n self.ax.spines['right'].set_visible(False)\n self.ax.plot(x, testTS, linewidth=0.5, label=\"testing TS: \" + nbr_testTS)\n self.ax.plot(x_shap, shap_data, linewidth=2, label=\"Matching Shapelet\")\n self.ax.set_ylabel(\"Testing TS\")\n self.ax.set_title(\"Real class: \" + str(self.testTS.class_timeseries) + \"; Prediction: \" + str(match_shapelet.Class))\n self.ax.legend(loc=\"upper right\")\n self.master.canvas.draw()\n\nclass Shapelet(object):\n def __init__(self):\n self.id = 0.0\n self.Class = ''\n self.subseq = None\n self.DD = 0.0\n self.thresh = 0.0\n\nclass timeseries(object):\n def __init__(self):\n self.id = None\n self.Class = ''\n self.seq = None\n\n"
] | [
[
"matplotlib.use",
"pandas.read_csv"
]
] |
NNHieu/loss-landscape | [
"dfe517f23993ffbafea99272026d09e074e50b4f"
] | [
"cifar10/models/repnet.py"
] | [
"\nimport torch \nimport torch.nn as nn \nimport torch.nn.functional as F\n\nimport matplotlib.pyplot as plt \n\nimport torch.autograd as autograd \n\nfrom torchvision import datasets, transforms \nfrom torch.utils.data import DataLoader \n\nimport torch.optim as optim\n\nimport os\nimport argparse\n\n\nclass ResNetLayer(nn.Module): \n def __init__(self, n_channels, n_inner_channels, kernel_size=3, num_groups=8): \n super().__init__()\n self.conv1 = nn.Conv2d(n_channels, n_inner_channels, kernel_size, padding=kernel_size//2, bias=False)\n self.conv2 = nn.Conv2d(n_inner_channels, n_channels, kernel_size, padding=kernel_size//2, bias=False)\n self.norm1 = nn.GroupNorm(num_groups, n_inner_channels)\n self.norm2 = nn.GroupNorm(num_groups, n_channels)\n self.norm3 = nn.GroupNorm(num_groups, n_channels) \n self.conv1.weight.data.normal_(0, 0.01)\n self.conv2.weight.data.normal_(0, 0.01)\n \n def forward(self, z, x):\n if z is None:\n y = self.norm1(F.relu(self.conv1(x)))\n return self.norm3(F.relu(x + self.norm2(self.conv2(y))))\n else:\n y = self.norm1(F.relu(self.conv1(z)))\n return self.norm3(F.relu(z + self.norm2(x + self.conv2(y))))\n\n\nclass RepeatConvLayer(nn.Module):\n def __init__(self, f, num_repeat): \n super().__init__() \n self.f = f\n self.num_repeat = num_repeat\n\n def forward(self, x): \n z = self.f(None, x)\n for i in range(self.num_repeat):\n z = self.f(z, x)\n return z\n\ndef repeatNet(num_repeat):\n chan = 48\n f = ResNetLayer(chan, 64, kernel_size=3) \n model = nn.Sequential(nn.Conv2d(3,chan, kernel_size=3, bias=True, padding=1), \n nn.BatchNorm2d(chan), \n RepeatConvLayer(f, num_repeat), \n nn.BatchNorm2d(chan), \n nn.AvgPool2d(8,8), \n nn.Flatten(), \n nn.Linear(chan*4*4,10))\n return model\n\ndef repeatNet5():\n return repeatNet(5)\n\ndef repeatNet10():\n return repeatNet(10)\n\ndef repeatNet17():\n return repeatNet(17)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.GroupNorm",
"torch.nn.Linear",
"torch.nn.Flatten",
"torch.nn.Conv2d",
"torch.nn.AvgPool2d"
]
] |
ayulockin/mmdetection | [
"70f6d9cfade4a2f0b198e4f64776521d181b28be"
] | [
"mmdet/models/detectors/maskformer.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\n\nfrom mmdet.core import INSTANCE_OFFSET\nfrom mmdet.core.visualization import imshow_det_bboxes\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass MaskFormer(SingleStageDetector):\n r\"\"\"Implementation of `Per-Pixel Classification is\n NOT All You Need for Semantic Segmentation\n <https://arxiv.org/pdf/2107.06278>`_.\"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n panoptic_head=None,\n train_cfg=None,\n test_cfg=None,\n init_cfg=None):\n super(SingleStageDetector, self).__init__(init_cfg=init_cfg)\n self.backbone = build_backbone(backbone)\n if neck is not None:\n self.neck = build_neck(neck)\n panoptic_head.update(train_cfg=train_cfg)\n panoptic_head.update(test_cfg=test_cfg)\n self.panoptic_head = build_head(panoptic_head)\n\n self.num_things_classes = self.panoptic_head.num_things_classes\n self.num_stuff_classes = self.panoptic_head.num_stuff_classes\n self.num_classes = self.panoptic_head.num_classes\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n def forward_dummy(self, img, img_metas):\n \"\"\"Used for computing network flops. See\n `mmdetection/tools/analysis_tools/get_flops.py`\n\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[Dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n \"\"\"\n super(SingleStageDetector, self).forward_train(img, img_metas)\n x = self.extract_feat(img)\n outs = self.panoptic_head(x, img_metas)\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_masks,\n gt_semantic_seg,\n gt_bboxes_ignore=None,\n **kargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[Dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box.\n gt_masks (list[BitmapMasks]): true segmentation masks for each box\n used if the architecture supports a segmentation task.\n gt_semantic_seg (list[tensor]): semantic segmentation mask for\n images.\n gt_bboxes_ignore (list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # add batch_input_shape in img_metas\n super(SingleStageDetector, self).forward_train(img, img_metas)\n x = self.extract_feat(img)\n losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,\n gt_labels, gt_masks,\n gt_semantic_seg,\n gt_bboxes_ignore)\n\n return losses\n\n def simple_test(self, img, img_metas, **kwargs):\n \"\"\"Test without augmentation.\"\"\"\n feat = self.extract_feat(img)\n mask_results = self.panoptic_head.simple_test(feat, img_metas,\n **kwargs)\n\n 
results = []\n for mask in mask_results:\n result = {'pan_results': mask.detach().cpu().numpy()}\n results.append(result)\n\n return results\n\n def aug_test(self, imgs, img_metas, **kwargs):\n raise NotImplementedError\n\n def onnx_export(self, img, img_metas):\n raise NotImplementedError\n\n def show_result(self,\n img,\n result,\n score_thr=0.3,\n bbox_color=(72, 101, 241),\n text_color=(72, 101, 241),\n mask_color=None,\n thickness=2,\n font_size=13,\n win_name='',\n show=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (dict): The results.\n\n score_thr (float, optional): Minimum score of bboxes to be shown.\n Default: 0.3.\n bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n The tuple of color should be in BGR order. Default: 'green'.\n text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n The tuple of color should be in BGR order. Default: 'green'.\n mask_color (None or str or tuple(int) or :obj:`Color`):\n Color of masks. The tuple of color should be in BGR order.\n Default: None.\n thickness (int): Thickness of lines. Default: 2.\n font_size (int): Font size of texts. Default: 13.\n win_name (str): The window name. Default: ''.\n wait_time (float): Value of waitKey param.\n Default: 0.\n show (bool): Whether to show the image.\n Default: False.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n img (Tensor): Only if not `show` or `out_file`.\n \"\"\"\n img = mmcv.imread(img)\n img = img.copy()\n pan_results = result['pan_results']\n # keep objects ahead\n ids = np.unique(pan_results)[::-1]\n legal_indices = ids != self.num_classes # for VOID label\n ids = ids[legal_indices]\n labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n segms = (pan_results[None] == ids[:, None, None])\n\n # if out_file specified, do not show image in window\n if out_file is not None:\n show = False\n # draw bounding boxes\n img = imshow_det_bboxes(\n img,\n segms=segms,\n labels=labels,\n class_names=self.CLASSES,\n bbox_color=bbox_color,\n text_color=text_color,\n mask_color=mask_color,\n thickness=thickness,\n font_size=font_size,\n win_name=win_name,\n show=show,\n wait_time=wait_time,\n out_file=out_file)\n\n if not (show or out_file):\n return img\n"
] | [
[
"numpy.array",
"numpy.unique"
]
] |
Code37/zipline | [
"8beba055aa4211dc2debc5c3083077cbd19d0bbc"
] | [
"zipline/data/resample.py"
] | [
"# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import OrderedDict\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nimport pandas as pd\nfrom six import with_metaclass\n\nfrom zipline.data._resample import (\n _minute_to_session_open,\n _minute_to_session_high,\n _minute_to_session_low,\n _minute_to_session_close,\n _minute_to_session_volume,\n)\nfrom zipline.data.bar_reader import NoDataOnDate\nfrom zipline.data.minute_bars import MinuteBarReader\nfrom zipline.data.session_bars import SessionBarReader\nfrom zipline.utils.memoize import lazyval\n\n_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((\n ('open', 'first'),\n ('high', 'max'),\n ('low', 'min'),\n ('close', 'last'),\n ('volume', 'sum'),\n))\n\n\ndef minute_frame_to_session_frame(minute_frame, calendar):\n\n \"\"\"\n Resample a DataFrame with minute data into the frame expected by a\n BcolzDailyBarWriter.\n\n Parameters\n ----------\n minute_frame : pd.DataFrame\n A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,\n and `dt` (minute dts)\n calendar : zipline.utils.calendars.trading_calendar.TradingCalendar\n A TradingCalendar on which session labels to resample from minute\n to session.\n\n Return\n ------\n session_frame : pd.DataFrame\n A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,\n and `day` (datetime-like).\n \"\"\"\n how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])\n for c in minute_frame.columns)\n return minute_frame.groupby(calendar.minute_to_session_label).agg(how)\n\n\ndef minute_to_session(column, close_locs, data, out):\n \"\"\"\n Resample an array with minute data into an array with session data.\n\n This function assumes that the minute data is the exact length of all\n minutes in the sessions in the output.\n\n Parameters\n ----------\n column : str\n The `open`, `high`, `low`, `close`, or `volume` column.\n close_locs : array[intp]\n The locations in `data` which are the market close minutes.\n data : array[float64|uint32]\n The minute data to be sampled into session data.\n The first value should align with the market open of the first session,\n containing values for all minutes for all sessions. 
With the last value\n being the market close of the last session.\n out : array[float64|uint32]\n The output array into which to write the sampled sessions.\n \"\"\"\n if column == 'open':\n _minute_to_session_open(close_locs, data, out)\n elif column == 'high':\n _minute_to_session_high(close_locs, data, out)\n elif column == 'low':\n _minute_to_session_low(close_locs, data, out)\n elif column == 'close':\n _minute_to_session_close(close_locs, data, out)\n elif column == 'volume':\n _minute_to_session_volume(close_locs, data, out)\n return out\n\n\nclass DailyHistoryAggregator(object):\n \"\"\"\n Converts minute pricing data into a daily summary, to be used for the\n last slot in a call to history with a frequency of `1d`.\n\n This summary is the same as a daily bar rollup of minute data, with the\n distinction that the summary is truncated to the `dt` requested.\n i.e. the aggregation slides forward during a the course of simulation day.\n\n Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.\n The aggregation rules for each price type is documented in their respective\n\n \"\"\"\n\n def __init__(self, market_opens, minute_reader, trading_calendar):\n self._market_opens = market_opens\n self._minute_reader = minute_reader\n self._trading_calendar = trading_calendar\n\n # The caches are structured as (date, market_open, entries), where\n # entries is a dict of asset -> (last_visited_dt, value)\n #\n # Whenever an aggregation method determines the current value,\n # the entry for the respective asset should be overwritten with a new\n # entry for the current dt.value (int) and aggregation value.\n #\n # When the requested dt's date is different from date the cache is\n # flushed, so that the cache entries do not grow unbounded.\n #\n # Example cache:\n # cache = (date(2016, 3, 17),\n # pd.Timestamp('2016-03-17 13:31', tz='UTC'),\n # {\n # 1: (1458221460000000000, np.nan),\n # 2: (1458221460000000000, 42.0),\n # })\n self._caches = {\n 'open': None,\n 'high': None,\n 'low': None,\n 'close': None,\n 'volume': None\n }\n\n # The int value is used for deltas to avoid extra computation from\n # creating new Timestamps.\n self._one_min = pd.Timedelta('1 min').value\n\n def _prelude(self, dt, field):\n session = self._trading_calendar.minute_to_session_label(dt)\n dt_value = dt.value\n cache = self._caches[field]\n if cache is None or cache[0] != session:\n market_open = self._market_opens.loc[session]\n cache = self._caches[field] = (session, market_open, {})\n\n _, market_open, entries = cache\n market_open = market_open.tz_localize('UTC')\n if dt != market_open:\n prev_dt = dt_value - self._one_min\n else:\n prev_dt = None\n return market_open, prev_dt, dt_value, entries\n\n def opens(self, assets, dt):\n \"\"\"\n The open field's aggregation returns the first value that occurs\n for the day, if there has been no data on or before the `dt` the open\n is `nan`.\n\n Once the first non-nan open is seen, that value remains constant per\n asset for the remainder of the day.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')\n\n opens = []\n session_label = self._trading_calendar.minute_to_session_label(dt)\n\n for asset in assets:\n if not asset.is_alive_for_session(session_label):\n opens.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'open')\n entries[asset] = (dt_value, val)\n opens.append(val)\n continue\n else:\n 
try:\n last_visited_dt, first_open = entries[asset]\n if last_visited_dt == dt_value:\n opens.append(first_open)\n continue\n elif not pd.isnull(first_open):\n opens.append(first_open)\n entries[asset] = (dt_value, first_open)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['open'],\n after_last,\n dt,\n [asset],\n )[0]\n nonnan = window[~pd.isnull(window)]\n if len(nonnan):\n val = nonnan[0]\n else:\n val = np.nan\n entries[asset] = (dt_value, val)\n opens.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['open'],\n market_open,\n dt,\n [asset],\n )[0]\n nonnan = window[~pd.isnull(window)]\n if len(nonnan):\n val = nonnan[0]\n else:\n val = np.nan\n entries[asset] = (dt_value, val)\n opens.append(val)\n continue\n return np.array(opens)\n\n def highs(self, assets, dt):\n \"\"\"\n The high field's aggregation returns the largest high seen between\n the market open and the current dt.\n If there has been no data on or before the `dt` the high is `nan`.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')\n\n highs = []\n session_label = self._trading_calendar.minute_to_session_label(dt)\n\n for asset in assets:\n if not asset.is_alive_for_session(session_label):\n highs.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'high')\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n else:\n try:\n last_visited_dt, last_max = entries[asset]\n if last_visited_dt == dt_value:\n highs.append(last_max)\n continue\n elif last_visited_dt == prev_dt:\n curr_val = self._minute_reader.get_value(\n asset, dt, 'high')\n if pd.isnull(curr_val):\n val = last_max\n elif pd.isnull(last_max):\n val = curr_val\n else:\n val = max(last_max, curr_val)\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['high'],\n after_last,\n dt,\n [asset],\n )[0].T\n val = np.nanmax(np.append(window, last_max))\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['high'],\n market_open,\n dt,\n [asset],\n )[0].T\n val = np.nanmax(window)\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n return np.array(highs)\n\n def lows(self, assets, dt):\n \"\"\"\n The low field's aggregation returns the smallest low seen between\n the market open and the current dt.\n If there has been no data on or before the `dt` the low is `nan`.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')\n\n lows = []\n session_label = self._trading_calendar.minute_to_session_label(dt)\n\n for asset in assets:\n if not asset.is_alive_for_session(session_label):\n lows.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'low')\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n else:\n try:\n last_visited_dt, last_min = entries[asset]\n if last_visited_dt == dt_value:\n lows.append(last_min)\n continue\n elif last_visited_dt == prev_dt:\n curr_val = self._minute_reader.get_value(\n asset, dt, 'low')\n val = np.nanmin([last_min, curr_val])\n 
entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['low'],\n after_last,\n dt,\n [asset],\n )[0].T\n val = np.nanmin(np.append(window, last_min))\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['low'],\n market_open,\n dt,\n [asset],\n )[0].T\n val = np.nanmin(window)\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n return np.array(lows)\n\n def closes(self, assets, dt):\n \"\"\"\n The close field's aggregation returns the latest close at the given\n dt.\n If the close for the given dt is `nan`, the most recent non-nan\n `close` is used.\n If there has been no data on or before the `dt` the close is `nan`.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')\n\n closes = []\n session_label = self._trading_calendar.minute_to_session_label(dt)\n\n def _get_filled_close(asset):\n \"\"\"\n Returns the most recent non-nan close for the asset in this\n session. If there has been no data in this session on or before the\n `dt`, returns `nan`\n \"\"\"\n window = self._minute_reader.load_raw_arrays(\n ['close'],\n market_open,\n dt,\n [asset],\n )[0]\n try:\n return window[~np.isnan(window)][-1]\n except IndexError:\n return np.NaN\n\n for asset in assets:\n if not asset.is_alive_for_session(session_label):\n closes.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'close')\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n else:\n try:\n last_visited_dt, last_close = entries[asset]\n if last_visited_dt == dt_value:\n closes.append(last_close)\n continue\n elif last_visited_dt == prev_dt:\n val = self._minute_reader.get_value(\n asset, dt, 'close')\n if pd.isnull(val):\n val = last_close\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n else:\n val = self._minute_reader.get_value(\n asset, dt, 'close')\n if pd.isnull(val):\n val = _get_filled_close(asset)\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n except KeyError:\n val = self._minute_reader.get_value(\n asset, dt, 'close')\n if pd.isnull(val):\n val = _get_filled_close(asset)\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n return np.array(closes)\n\n def volumes(self, assets, dt):\n \"\"\"\n The volume field's aggregation returns the sum of all volumes\n between the market open and the `dt`\n If there has been no data on or before the `dt` the volume is 0.\n\n Returns\n -------\n np.array with dtype=int64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')\n\n volumes = []\n session_label = self._trading_calendar.minute_to_session_label(dt)\n\n for asset in assets:\n if not asset.is_alive_for_session(session_label):\n volumes.append(0)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'volume')\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n else:\n try:\n last_visited_dt, last_total = entries[asset]\n if last_visited_dt == dt_value:\n volumes.append(last_total)\n continue\n elif last_visited_dt == prev_dt:\n val = self._minute_reader.get_value(\n asset, dt, 'volume')\n val += last_total\n entries[asset] = (dt_value, val)\n volumes.append(val)\n 
continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['volume'],\n after_last,\n dt,\n [asset],\n )[0]\n val = np.nansum(window) + last_total\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['volume'],\n market_open,\n dt,\n [asset],\n )[0]\n val = np.nansum(window)\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n return np.array(volumes)\n\n\nclass MinuteResampleSessionBarReader(SessionBarReader):\n\n def __init__(self, calendar, minute_bar_reader):\n self._calendar = calendar\n self._minute_bar_reader = minute_bar_reader\n\n def _get_resampled(self, columns, start_session, end_session, assets):\n range_open = self._calendar.session_open(start_session)\n range_close = self._calendar.session_close(end_session)\n\n minute_data = self._minute_bar_reader.load_raw_arrays(\n columns,\n range_open,\n range_close,\n assets,\n )\n\n # Get the index of the close minute for each session in the range.\n # If the range contains only one session, the only close in the range\n # is the last minute in the data. Otherwise, we need to get all the\n # session closes and find their indices in the range of minutes.\n if start_session == end_session:\n close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)\n else:\n minutes = self._calendar.minutes_in_range(\n range_open,\n range_close,\n )\n session_closes = self._calendar.session_closes_in_range(\n start_session,\n end_session,\n )\n close_ilocs = minutes.searchsorted(session_closes.values)\n\n results = []\n shape = (len(close_ilocs), len(assets))\n\n for col in columns:\n if col != 'volume':\n out = np.full(shape, np.nan)\n else:\n out = np.zeros(shape, dtype=np.uint32)\n results.append(out)\n\n for i in range(len(assets)):\n for j, column in enumerate(columns):\n data = minute_data[j][:, i]\n minute_to_session(column, close_ilocs, data, results[j][:, i])\n\n return results\n\n @property\n def trading_calendar(self):\n return self._calendar\n\n def load_raw_arrays(self, columns, start_dt, end_dt, sids):\n return self._get_resampled(columns, start_dt, end_dt, sids)\n\n def get_value(self, sid, session, colname):\n # WARNING: This will need caching or other optimization if used in a\n # tight loop.\n # This was developed to complete interface, but has not been tuned\n # for real world use.\n return self._get_resampled([colname], session, session, [sid])[0][0][0]\n\n @lazyval\n def sessions(self):\n cal = self._calendar\n first = self._minute_bar_reader.first_trading_day\n last = cal.minute_to_session_label(\n self._minute_bar_reader.last_available_dt)\n return cal.sessions_in_range(first, last)\n\n @lazyval\n def last_available_dt(self):\n return self.trading_calendar.minute_to_session_label(\n self._minute_bar_reader.last_available_dt\n )\n\n @property\n def first_trading_day(self):\n return self._minute_bar_reader.first_trading_day\n\n def get_last_traded_dt(self, asset, dt):\n return self.trading_calendar.minute_to_session_label(\n self._minute_bar_reader.get_last_traded_dt(asset, dt))\n\n\nclass ReindexBarReader(with_metaclass(ABCMeta)):\n \"\"\"\n A base class for readers which reindexes results, filling in the additional\n indices with empty data.\n\n Used to align the reading assets which trade on different calendars.\n\n Currently only supports a ``trading_calendar`` which is a superset of the\n ``reader``'s calendar.\n\n Parameters\n 
----------\n\n - trading_calendar : zipline.utils.trading_calendar.TradingCalendar\n The calendar to use when indexing results from the reader.\n - reader : MinuteBarReader|SessionBarReader\n The reader which has a calendar that is a subset of the desired\n ``trading_calendar``.\n - first_trading_session : pd.Timestamp\n The first trading session the reader should provide. Must be specified,\n since the ``reader``'s first session may not exactly align with the\n desired calendar. Specifically, in the case where the first session\n on the target calendar is a holiday on the ``reader``'s calendar.\n - last_trading_session : pd.Timestamp\n The last trading session the reader should provide. Must be specified,\n since the ``reader``'s last session may not exactly align with the\n desired calendar. Specifically, in the case where the last session\n on the target calendar is a holiday on the ``reader``'s calendar.\n \"\"\"\n\n def __init__(self,\n trading_calendar,\n reader,\n first_trading_session,\n last_trading_session):\n self._trading_calendar = trading_calendar\n self._reader = reader\n self._first_trading_session = first_trading_session\n self._last_trading_session = last_trading_session\n\n @property\n def last_available_dt(self):\n return self._reader.last_available_dt\n\n def get_last_traded_dt(self, sid, dt):\n return self._reader.get_last_traded_dt(sid, dt)\n\n @property\n def first_trading_day(self):\n return self._reader.first_trading_day\n\n def get_value(self, sid, dt, field):\n # Give an empty result if no data is present.\n try:\n return self._reader.get_value(sid, dt, field)\n except NoDataOnDate:\n if field == 'volume':\n return 0\n else:\n return np.nan\n\n @abstractmethod\n def _outer_dts(self, start_dt, end_dt):\n raise NotImplementedError\n\n @abstractmethod\n def _inner_dts(self, start_dt, end_dt):\n raise NotImplementedError\n\n @property\n def trading_calendar(self):\n return self._trading_calendar\n\n @lazyval\n def sessions(self):\n return self.trading_calendar.sessions_in_range(\n self._first_trading_session,\n self._last_trading_session\n )\n\n def load_raw_arrays(self, fields, start_dt, end_dt, sids):\n outer_dts = self._outer_dts(start_dt, end_dt)\n inner_dts = self._inner_dts(start_dt, end_dt)\n\n indices = outer_dts.searchsorted(inner_dts)\n\n shape = len(outer_dts), len(sids)\n\n outer_results = []\n\n if len(inner_dts) > 0:\n inner_results = self._reader.load_raw_arrays(\n fields, inner_dts[0], inner_dts[-1], sids)\n else:\n inner_results = None\n\n for i, field in enumerate(fields):\n if field != 'volume':\n out = np.full(shape, np.nan)\n else:\n out = np.zeros(shape, dtype=np.uint32)\n\n if inner_results is not None:\n out[indices] = inner_results[i]\n\n outer_results.append(out)\n\n return outer_results\n\n\nclass ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):\n \"\"\"\n See: ``ReindexBarReader``\n \"\"\"\n\n def _outer_dts(self, start_dt, end_dt):\n return self._trading_calendar.minutes_in_range(start_dt, end_dt)\n\n def _inner_dts(self, start_dt, end_dt):\n return self._reader.calendar.minutes_in_range(start_dt, end_dt)\n\n\nclass ReindexSessionBarReader(ReindexBarReader, SessionBarReader):\n \"\"\"\n See: ``ReindexBarReader``\n \"\"\"\n\n def _outer_dts(self, start_dt, end_dt):\n return self.trading_calendar.sessions_in_range(start_dt, end_dt)\n\n def _inner_dts(self, start_dt, end_dt):\n return self._reader.trading_calendar.sessions_in_range(\n start_dt, end_dt)\n"
] | [
[
"numpy.nanmax",
"numpy.zeros",
"numpy.append",
"pandas.Timedelta",
"numpy.nanmin",
"numpy.nansum",
"numpy.isnan",
"numpy.array",
"pandas.isnull",
"numpy.full",
"pandas.Timestamp"
]
] |
niopeng/elpips | [
"385012a2ee614c75a1631546c391039af85744f4"
] | [
"elpips/util.py"
] | [
"import tensorflow as tf\nimport numpy as np\n\n\t\ndef switch_case_cond(cases, default_case):\n\tif cases:\n\t\tcondition, effect = cases[0]\n\t\treturn tf.cond(condition, effect, lambda: switch_case_cond(cases[1:], default_case))\n\treturn default_case()\n\ndef switch_case_where(cases, default_case):\n\tif cases:\n\t\tcondition, effect = cases[0]\n\t\treturn tf.where(condition, effect, switch_case_where(cases[1:], default_case))\n\treturn default_case\n\n\ndef np_dtype(tf_dtype):\n\tif tf_dtype == tf.float32:\n\t\treturn np.float32\n\tif tf_dtype == tf.float64:\n\t\treturn np.float64\n\traise Exception('Unsupported dtype')\n\ndef f32_to_dtype(x, dtype):\n\tif dtype == tf.float32:\n\t\treturn x\n\treturn tf.cast(x, dtype)\n\t\n\ndef as_tuple(x):\n\t'''Formats x as a tuple. If x is already a tuple returns it as is, otherwise returns a 1-tuple containing x.'''\n\tif isinstance(x, tuple):\n\t\treturn x\n\telse:\n\t\treturn (x,)\n\ndef for_each(x, func):\n\t'''Runs 'func' for x, or each item of x if x is a tuple. Returns the results in the same format.'''\n\tif isinstance(x, tuple):\n\t\treturn tuple((func(s) for s in x))\n\telse:\n\t\treturn func(x)\n\ndef for_tuple(x, func):\n\t'''Runs 'func' for as_tuple(x). Returns the results in the original format. Assumes 'func' returns tuple when given tuple.'''\n\tif isinstance(x, tuple):\n\t\treturn func(x)\n\telse:\n\t\treturn func(as_tuple(x))[0]\n"
] | [
[
"tensorflow.cast"
]
] |
PaulBehler/estimagic | [
"c14f743986262d508e55738c90737cb504fe987b"
] | [
"src/estimagic/benchmarking/run_benchmark.py"
] | [
"\"\"\"Functions to create, run and visualize optimization benchmarks.\n\nTO-DO:\n- Add other benchmark sets:\n - finish medium scale problems from https://arxiv.org/pdf/1710.11005.pdf, Page 34.\n - add scalar problems from https://github.com/AxelThevenot\n- Add option for deterministic noise or wiggle.\n\n\"\"\"\nfrom pathlib import Path\n\nimport numpy as np\nfrom estimagic import batch_evaluators\nfrom estimagic.logging.read_log import read_optimization_histories\nfrom estimagic.optimization.optimize import minimize\n\n\ndef run_benchmark(\n problems,\n optimize_options,\n logging_directory,\n *,\n batch_evaluator=\"joblib\",\n n_cores=1,\n error_handling=\"continue\",\n fast_logging=True,\n seed=None,\n):\n \"\"\"Run problems with different optimize options.\n\n Args:\n problems (dict): Nested dictionary with benchmark problems of the structure:\n {\"name\": {\"inputs\": {...}, \"solution\": {...}, \"info\": {...}}}\n where \"inputs\" are keyword arguments for ``minimize`` such as the criterion\n function and start parameters. \"solution\" contains the entries \"params\" and\n \"value\" and \"info\" might contain information about the test problem.\n optimize_options (list or dict): Either a list of algorithms or a Nested\n dictionary that maps a name for optimizer settings\n (e.g. ``\"lbfgsb_strict_criterion\"``) to a dictionary of keyword arguments\n for arguments for ``minimize`` (e.g. ``{\"algorithm\": \"scipy_lbfgsb\",\n \"algo_options\": {\"convergence.relative_criterion_tolerance\": 1e-12}}``).\n Alternatively, the values can just be an algorithm which is then benchmarked\n at default settings.\n batch_evaluator (str or callable): See :ref:`batch_evaluators`.\n logging_directory (pathlib.Path): Directory in which the log databases are\n saved.\n n_cores (int): Number of optimizations that is run in parallel. Note that in\n addition to that an optimizer might parallelize.\n error_handling (str): One of \"raise\", \"continue\".\n fast_logging (bool): Whether the slightly unsafe but much faster database\n configuration is chosen.\n\n Returns:\n dict: Nested Dictionary with information on the benchmark run. The outer keys\n are tuples where the first entry is the name of the problem and the second\n the name of the optimize options. 
The values are dicts with the entries:\n \"runtime\", \"params_history\", \"criterion_history\", \"solution\"\n\n \"\"\"\n np.random.seed(seed)\n logging_directory = Path(logging_directory)\n logging_directory.mkdir(parents=True, exist_ok=True)\n\n if isinstance(batch_evaluator, str):\n batch_evaluator = getattr(\n batch_evaluators, f\"{batch_evaluator}_batch_evaluator\"\n )\n\n opt_options = _process_optimize_options(optimize_options)\n\n log_options = {\"fast_logging\": fast_logging, \"if_table_exists\": \"replace\"}\n\n kwargs_list = []\n names = []\n for prob_name, problem in problems.items():\n for option_name, options in opt_options.items():\n kwargs = {\n **options,\n **problem[\"inputs\"],\n \"logging\": logging_directory / f\"{prob_name}_{option_name}.db\",\n \"log_options\": log_options,\n }\n kwargs_list.append(kwargs)\n names.append((prob_name, option_name))\n\n log_paths = [kwargs[\"logging\"] for kwargs in kwargs_list]\n\n raw_results = batch_evaluator(\n func=minimize,\n arguments=kwargs_list,\n n_cores=n_cores,\n error_handling=error_handling,\n unpack_symbol=\"**\",\n )\n\n results = {}\n for name, result, log_path in zip(names, raw_results, log_paths):\n histories = read_optimization_histories(log_path)\n stop = histories[\"metadata\"][\"timestamps\"].max()\n start = histories[\"metadata\"][\"timestamps\"].min()\n runtime = (stop - start).total_seconds()\n\n results[name] = {\n \"params_history\": histories[\"params\"],\n \"criterion_history\": histories[\"values\"],\n \"time_history\": histories[\"metadata\"][\"timestamps\"] - start,\n \"solution\": result,\n \"runtime\": runtime,\n }\n\n return results\n\n\ndef _process_optimize_options(raw_options):\n if not isinstance(raw_options, dict):\n dict_options = {}\n for option in raw_options:\n if isinstance(option, str):\n dict_options[option] = option\n else:\n dict_options[option.__name__] = option\n else:\n dict_options = raw_options\n\n out_options = {}\n for name, option in dict_options.items():\n if not isinstance(option, dict):\n option = {\"algorithm\": option}\n\n if \"log_options\" in option:\n raise ValueError(\n \"Log options cannot be specified as part of optimize_options. Logging \"\n \"behavior is configured by the run_benchmark function.\"\n )\n out_options[name] = option\n\n return out_options\n"
] | [
[
"numpy.random.seed"
]
] |
KristofferLM96/TsetlinMachine-GO | [
"926091fc70042abe5a67230932398bdab2c46328"
] | [
"Data_Handling/Data_analyze.py"
] | [
"# -----------------------------------------------\n# ................. LIBRARIES ...................\n# -----------------------------------------------\nimport glob\nimport os\nimport time\nimport numpy as np\n\n\n# -----------------------------------------------\n# ............. GLOBAL VARIABLES ................\n# -----------------------------------------------\n\nname = \"100_9x9Aya\" # 9x9Natsukaze || 9x9Aya || x_9x9Aya .. x = amount moves\nfile_name = name + \"_binary.txt\"\nbinary_path = \"Data/Binary/\" + file_name\noriginal_path = \"/home/kristoffer/Documents/Data/Original/9x9_10k_r104_144x20k/*\"\nencoding = \"UTF-8\" # ISO-8859-1 / UTF-8\nmultiple_files = True\nunique_list = []\noriginal_list = []\n# [check_handicap(), check_duplication(), get_result_ratio(), check_moves(), remove_empty_lines()]\nrun_programs = [0, 0, 1, 0, 0]\n\n\n# -----------------------------------------------\n# ................. FUNCTIONS ...................\n# -----------------------------------------------\n\n\ndef remove_empty_lines():\n output_file = open(\"Data/Binary/\" + name + \"_binary_1.txt\", \"w+\")\n with open(binary_path, \"r\") as file:\n for line in file:\n if not line.isspace():\n output_file.write(line)\n\n\ndef check_handicap():\n file = open(original_path, 'r', encoding=encoding)\n file_lines = file.readlines()\n _handicap = file_lines[0].split(\"HA[\")\n print(_handicap)\n handicap = _handicap[1][0]\n print(handicap)\n\n file.close()\n\n\ndef check_duplication():\n file = open(binary_path, 'r', encoding=encoding)\n global original_list\n global unique_list\n original_list = [line.strip() for line in file]\n print(\"Original List Length:\", len(original_list))\n original_length = len(original_list)\n\n unique_list = np.unique(original_list)\n unique_length = len(unique_list)\n print(\"Unique List Length:\", unique_length)\n print(\"Original - Unique:\", original_length - unique_length, \"\\n\")\n\n file.close()\n\n\ndef get_result_ratio():\n win = open(\"Data/Results-Split/\" + name + \"_win.txt\", 'r')\n loss = open(\"Data/Results-Split/\" + name + \"_loss.txt\", 'r')\n draw = open(\"Data/Results-Split/\" + name + \"_draw.txt\", 'r')\n win_amount = len(win.readlines())\n loss_amount = len(loss.readlines())\n draw_amount = len(draw.readlines())\n total_amount = win_amount + loss_amount + draw_amount\n print(\"Total Amount:\", total_amount)\n print(\"Amount of wins:\", win_amount, \",\", round(((win_amount * 100) / total_amount), 2), \"%\")\n print(\"Amount of loss:\", loss_amount, \",\", round(((loss_amount * 100) / total_amount), 2), \"%\")\n print(\"Amount of draw:\", draw_amount, \",\", round(((draw_amount * 100) / total_amount), 2), \"%\")\n\n win.close()\n loss.close()\n draw.close()\n\n\ndef check_moves():\n total_pos = 19\n moves_list = []\n\n def get_moves(_game_lines):\n if \"HA[\" in _game_lines[0]:\n handicap = int(_game_lines[0].split(\"HA[\")[1][0])\n else:\n handicap = 0\n _move_list = []\n const = 4\n for row in _game_lines[1:-1]:\n x = translate(row[3])\n y = translate(row[4])\n if row[1] + row[2] == \"AB\":\n for i in range(handicap):\n x = translate(row[4 + (i * const)])\n y = translate(row[5 + (i * const)])\n _move = [\"b\", x, y]\n if x != total_pos and y != total_pos:\n _move_list.append(_move)\n else:\n if row[1] == \"B\":\n _move = [\"b\", x, y]\n if row[1] == \"W\":\n _move = [\"w\", x, y]\n if x != total_pos and y != total_pos:\n _move_list.append(_move)\n return _move_list\n\n def translate(i):\n if i == \"a\":\n return 0\n if i == \"b\":\n return 1\n if i == 
\"c\":\n return 2\n if i == \"d\":\n return 3\n if i == \"e\":\n return 4\n if i == \"f\":\n return 5\n if i == \"g\":\n return 6\n if i == \"h\":\n return 7\n if i == \"i\":\n return 8\n if i == \"j\":\n return 9\n if i == \"k\":\n return 10\n if i == \"l\":\n return 11\n if i == \"m\":\n return 12\n if i == \"n\":\n return 13\n if i == \"o\":\n return 14\n if i == \"p\":\n return 15\n if i == \"q\":\n return 16\n if i == \"r\":\n return 17\n if i == \"s\":\n return 18\n if i == \"t\":\n return 19\n\n counter = 1\n total_files = len(glob.glob(os.path.join(original_path, '*.sgf')))\n for infile in glob.glob(os.path.join(original_path, '*.sgf')):\n start_time = time.time()\n file = open(infile, 'r', encoding=\"ISO-8859-1\")\n file_lines = file.readlines()\n moves_list.append(len(get_moves(file_lines)))\n print(infile)\n print(\"Getting moves from file \", counter, \"out of\", total_files,\n \"files. ............................................... \",\n round((counter / total_files * 100), 2), \"% ............................................... \",\n round((time.time() - start_time) * 1000, 2), \"ms\", \"\\n\")\n counter = counter + 1\n file.close()\n\n unique_moves_list, unique_moves_list_count = np.unique(moves_list, return_counts=True)\n print(unique_moves_list, \"\\n\")\n print(unique_moves_list_count, \"\\n\")\n total_data = sum(unique_moves_list_count)\n for x, y in np.nditer([unique_moves_list, unique_moves_list_count]):\n print(\"Moves: %d : Amount: %d, %d %%\" % (int(x), int(y), ((int(y)*100)/total_data)))\n print(\"\\n\")\n print(\"Unique Move lengths:\", len(unique_moves_list))\n\n\n# -----------------------------------------------\n# .................. MAIN .......................\n# -----------------------------------------------\nif run_programs[0]:\n check_handicap()\nif run_programs[1]:\n check_duplication()\nif run_programs[2]:\n get_result_ratio()\nif run_programs[3]:\n check_moves()\nif run_programs[4]:\n remove_empty_lines()\n"
] | [
[
"numpy.nditer",
"numpy.unique"
]
] |
Khizar-Ali/Lip2Wav | [
"07f056b3468ca660823830680bf25bdd42034f9e"
] | [
"synthesizer/models/custom_decoder.py"
] | [
"from __future__ import absolute_import, division, print_function\nimport collections\nimport tensorflow as tf\nfrom synthesizer.models.helpers import TacoTestHelper, TacoTrainingHelper\nfrom tensorflow.contrib.seq2seq.python.ops import decoder\nfrom tensorflow.contrib.seq2seq.python.ops import helper as helper_py\nfrom tensorflow.python.framework import ops, tensor_shape\nfrom tensorflow.python.layers import base as layers_base\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.util import nest\n\n\nclass CustomDecoderOutput(\n\t\t#collections.namedtuple(\"CustomDecoderOutput\", (\"rnn_output\", \"token_output\", \"sample_id\"))):\n\t\tcollections.namedtuple(\"CustomDecoderOutput\", (\"rnn_output\", \"sample_id\"))):\n\n\tpass\n\n\nclass CustomDecoder(decoder.Decoder):\n\t\"\"\"Custom sampling decoder.\n\n\tAllows for stop token prediction at inference time\n\tand returns equivalent loss in training time.\n\n\tNote:\n\tOnly use this decoder with Tacotron 2 as it only accepts tacotron custom helpers\n\t\"\"\"\n\n\tdef __init__(self, cell, helper, initial_state, output_layer=None):\n\t\t\"\"\"Initialize CustomDecoder.\n\t\tArgs:\n\t\t\tcell: An `RNNCell` instance.\n\t\t\thelper: A `Helper` instance.\n\t\t\tinitial_state: A (possibly nested tuple of...) tensors and TensorArrays.\n\t\t\t\tThe initial state of the RNNCell.\n\t\t\toutput_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,\n\t\t\t\t`tf.layers.Dense`. Optional layer to apply to the RNN output prior\n\t\t\t\tto storing the result or sampling.\n\t\tRaises:\n\t\t\tTypeError: if `cell`, `helper` or `output_layer` have an incorrect type.\n\t\t\"\"\"\n\t\trnn_cell_impl.assert_like_rnncell(type(cell), cell)\n\t\tif not isinstance(helper, helper_py.Helper):\n\t\t\traise TypeError(\"helper must be a Helper, received: %s\" % type(helper))\n\t\tif (output_layer is not None\n\t\t\t\tand not isinstance(output_layer, layers_base.Layer)):\n\t\t\traise TypeError(\n\t\t\t\t\t\"output_layer must be a Layer, received: %s\" % type(output_layer))\n\t\tself._cell = cell\n\t\tself._helper = helper\n\t\tself._initial_state = initial_state\n\t\tself._output_layer = output_layer\n\n\t@property\n\tdef batch_size(self):\n\t\treturn self._helper.batch_size\n\n\tdef _rnn_output_size(self):\n\t\tsize = self._cell.output_size\n\t\tif self._output_layer is None:\n\t\t\treturn size\n\t\telse:\n\t\t\t# To use layer\"s compute_output_shape, we need to convert the\n\t\t\t# RNNCell\"s output_size entries into shapes with an unknown\n\t\t\t# batch size. 
We then pass this through the layer\"s\n\t\t\t# compute_output_shape and read off all but the first (batch)\n\t\t\t# dimensions to get the output size of the rnn with the layer\n\t\t\t# applied to the top.\n\t\t\toutput_shape_with_unknown_batch = nest.map_structure(\n\t\t\t\t\tlambda s: tensor_shape.TensorShape([None]).concatenate(s),\n\t\t\t\t\tsize)\n\t\t\tlayer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access\n\t\t\t\t\toutput_shape_with_unknown_batch)\n\t\t\treturn nest.map_structure(lambda s: s[1:], layer_output_shape)\n\n\t@property\n\tdef output_size(self):\n\t\t# Return the cell output and the id\n\t\t#return CustomDecoderOutput(\n\t\t#\t\trnn_output=self._rnn_output_size(),\n\t\t#\t\ttoken_output=self._helper.token_output_size,\n\t\t#\t\tsample_id=self._helper.sample_ids_shape)\n\t\treturn CustomDecoderOutput(\n\t\t\t\trnn_output=self._rnn_output_size(),\n\t\t\t\tsample_id=self._helper.sample_ids_shape)\n\n\t@property\n\tdef output_dtype(self):\n\t\t# Assume the dtype of the cell is the output_size structure\n\t\t# containing the input_state\"s first component's dtype.\n\t\t# Return that structure and the sample_ids_dtype from the helper.\n\t\tdtype = nest.flatten(self._initial_state)[0].dtype\n\t\t#return CustomDecoderOutput(\n\t\t#\t\tnest.map_structure(lambda _: dtype, self._rnn_output_size()),\n\t\t#\t\ttf.float32,\n\t\t#\t\tself._helper.sample_ids_dtype)\n\t\treturn CustomDecoderOutput(\n\t\t\t\tnest.map_structure(lambda _: dtype, self._rnn_output_size()),\n\t\t\t\tself._helper.sample_ids_dtype)\n\n\tdef initialize(self, name=None):\n\t\t\"\"\"Initialize the decoder.\n\t\tArgs:\n\t\t\tname: Name scope for any created operations.\n\t\tReturns:\n\t\t\t`(finished, first_inputs, initial_state)`.\n\t\t\"\"\"\n\t\treturn self._helper.initialize() + (self._initial_state,)\n\n\tdef step(self, time, inputs, state, name=None):\n\t\t\"\"\"Perform a custom decoding step.\n\t\tEnables for dyanmic <stop_token> prediction\n\t\tArgs:\n\t\t\ttime: scalar `int32` tensor.\n\t\t\tinputs: A (structure of) input tensors.\n\t\t\tstate: A (structure of) state tensors and TensorArrays.\n\t\t\tname: Name scope for any created operations.\n\t\tReturns:\n\t\t\t`(outputs, next_state, next_inputs, finished)`.\n\t\t\"\"\"\n\t\twith ops.name_scope(name, \"CustomDecoderStep\", (time, inputs, state)):\n\t\t\t#Call outputprojection wrapper cell\n\t\t\t#(cell_outputs, stop_token), cell_state = self._cell(inputs, state)\n\t\t\t(cell_outputs), cell_state = self._cell(inputs, state)\n\n\t\t\t#apply output_layer (if existant)\n\t\t\tif self._output_layer is not None:\n\t\t\t\tcell_outputs = self._output_layer(cell_outputs)\n\t\t\tsample_ids = self._helper.sample(\n\t\t\t\t\ttime=time, outputs=cell_outputs, state=cell_state)\n\n\t\t\t#(finished, next_inputs, next_state) = self._helper.next_inputs(\n\t\t\t#\t\ttime=time,\n\t\t\t#\t\toutputs=cell_outputs,\n\t\t\t#\t\tstate=cell_state,\n\t\t\t#\t\tsample_ids=sample_ids,\n\t\t\t#\t\tstop_token_prediction=stop_token)\n\t\t\t(finished, next_inputs, next_state) = self._helper.next_inputs(\n\t\t\t\t\ttime=time,\n\t\t\t\t\toutputs=cell_outputs,\n\t\t\t\t\tstate=cell_state,\n\t\t\t\t\tsample_ids=sample_ids)\n\n\t\t#outputs = CustomDecoderOutput(cell_outputs, stop_token, sample_ids)\n\t\toutputs = CustomDecoderOutput(cell_outputs, sample_ids)\n\t\treturn (outputs, next_state, next_inputs, finished)\n"
] | [
[
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.util.nest.flatten"
]
] |
wgurecky/StarVine | [
"b952a88eeaff476484ba6a26420cfe4ef575d162"
] | [
"starvine/bvcopula/tests/test_freeze_params.py"
] | [
"##\n# \\brief Test ability to determine best fit copula via AIC\nfrom __future__ import print_function, division\nfrom starvine.bvcopula.pc_base import PairCopula\nimport unittest\nimport numpy as np\nimport os\npwd_ = os.getcwd()\ndataDir = pwd_ + \"/tests/data/\"\nnp.random.seed(123)\n\n\nclass TestGaussFrozen(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n np.random.seed(123)\n\n def testGaussFrozen(self):\n # Load matlab data set\n stocks = np.loadtxt(dataDir + 'stocks.csv', delimiter=',')\n x = stocks[:, 0]\n y = stocks[:, 1]\n stockModel = PairCopula(x, y)\n\n # Try to fit all copula\n stockModel.copulaTournament(verbosity=0)\n\n # Ensure that the gaussian copula was chosen as the best fit\n self.assertTrue(stockModel.copulaModel.name == \"gauss\")\n self.assertTrue(stockModel.copulaParams[0] == \"gauss\")\n\n # Check gaussian copula parameters for correctness\n self.assertAlmostEqual(stockModel.copulaParams[1][0], 0.73874003, 4)\n\n # Eval the frozen model\n frzU, frzV = stockModel.copulaModel.sample(40000)\n\n # Eval a model with specified params\n setU, setV = stockModel.copulaModel.sample(40000, (0.73874003,))\n\n # Ensure both frozen model and specified param model produce same result\n frzModel = PairCopula(frzU, frzV)\n setModel = PairCopula(setU, setV)\n frzKtau, fp = frzModel.empKTau()\n setKtau, sp = setModel.empKTau()\n self.assertAlmostEqual(frzKtau, setKtau, delta=0.02)\n self.assertAlmostEqual(fp, sp, delta=0.02)\n\n # Eval a model with different specified params\n setU2, setV2 = stockModel.copulaModel.sample(20000, (0.3,))\n setModel2 = PairCopula(setU2, setV2)\n setKtau2, sp2 = setModel2.empKTau()\n self.assertTrue(setKtau2 != setKtau)\n self.assertTrue(abs(setKtau2 - setKtau) > 0.2)\n\n\n def testFrankFrozen(self):\n np.random.seed(123)\n # Load matlab data set\n stocks = np.loadtxt(dataDir + 'stocks.csv', delimiter=',')\n x = stocks[:, 0]\n y = stocks[:, 1]\n stockModel = PairCopula(x, y, family={'frank': 0, })\n\n # Try to fit all copula\n stockModel.copulaTournament(verbosity=0)\n\n # Eval the frozen model\n frzU, frzV = stockModel.copulaModel.sample(40000)\n\n # Eval a model with specified params\n setU, setV = stockModel.copulaModel.sample(40000, *stockModel.copulaParams[1])\n\n # Ensure both frozen model and specified param model produce same result\n frzModel = PairCopula(frzU, frzV)\n setModel = PairCopula(setU, setV)\n frzKtau, fp = frzModel.empKTau()\n setKtau, sp = setModel.empKTau()\n self.assertAlmostEqual(frzKtau, setKtau, delta=0.02)\n self.assertAlmostEqual(fp, sp, delta=0.02)\n"
] | [
[
"numpy.random.seed",
"numpy.loadtxt"
]
] |
gavinlive/scd-data-manager | [
"570b67bacb4bf17f0b49c5875933f233ddd76e6c"
] | [
"scd_manager.py"
] | [
"import os, sys\n\nimport pydicom as pyd\nimport matplotlib.pyplot as plt\nimport collections\nimport pandas as pd\n\nPatientRecord = collections.namedtuple('PatientRecord', ['patient_id', 'image_folder', 'original_id', 'gender', 'age', 'pathology', 'all_scans', 'scans', 'scans_list', 'scans_total'])\nPatientScans = collections.namedtuple('PatientScans', ['files', 'prefixes', 'suffixes', 'total_number'])\nDataRecord = collections.namedtuple('DataRecord', ['dicom_path', 'contour_path'])\n\nclass DataManager(object):\n def __config__(self):\n self.path = '.'\n self.patient_info_path = self.path + '/scd_patientdata.csv'\n self.meta_data_path = self.path + '/scd_patientdata.csv'\n self.images_path = self.path + '/SCD_DeidentifiedImages/'\n self.segmentations_path = self.path + '/SCD_ManualContours/'\n def __init__(self):\n self.__config__()\n self.__load_patient_info()\n self._patients = next(os.walk(self.images_path))[1]\n print(self._patients)\n self.__get_patient_images()\n if(self.__verify_scan_sets()==True): print(\"verified all scan sets\")\n if(self.__verify_scan_numbers()==True): print(\"verified all scan numbers\")\n\n\n def __import_contours(self, patient, suffixes, files):\n path = self.segmentations_path + patient + '/contours-manual/IRCCI-expert/'\n records_list=[]\n numbers_list = []\n for file in os.listdir(path):\n if file.endswith(\".txt\"):\n f=file.split(\"-\")\n scan_number = int(f[2])\n f=f[3]\n if((f==\"icontour\") and (scan_number in suffixes)):\n contour_filepath = path +'/' + file\n indx = suffixes.index(scan_number)\n dicom_filepath = files[indx]\n records_list.append(DataRecord(dicom_filepath, contour_filepath))\n numbers_list.append(scan_number)\n return records_list,numbers_list,len(records_list)\n\n def __load_patient_info(self):\n self.patient_info = pd.read_csv(self.patient_info_path)\n def __get_patient_info(self, patient):\n this = self.patient_info[self.patient_info['OriginalID']==patient].values.tolist()[0]\n print(this)\n cols = self.patient_info.columns.values.tolist()\n toget = ['Gender', 'Age', 'PathologyID', 'Pathology']\n toreturn=[]\n for t in toget:\n indx = cols.index(t)\n toreturn.append(this[indx])\n return toreturn\n\n def depre__import_contours(self):\n for patient in self._patients:\n path = self.segmentations_path + patient + '/contours-manual/IRCCI-expert/'\n records_list=[]\n numbers_list = []\n for file in os.listdir(path):\n if file.endswith(\".txt\"):\n f=file.split(\"-\")\n scan_number = f[2]\n f=f[3]\n if((f==\"icontour\") and (scan_number in self.patient[patient].all_scans.suffixes)):\n contour_filepath = path +'/' + file\n indx = self.patient[patient].all_scans.suffixes.indx(scan_number)\n dicom_filepath = self.patient[patient].all_scans.files[indx]\n records_list.append(DataRecord(dicom_filepath, contour_filepath))\n numbers_list.append(scan_number)\n self.patient[patient].scans=records_list\n self.patient[patient].scans_list=numbers_list\n self.patient[patient].scans_total = len(records_list)\n\n\n def __verify_scan_sets(self):\n for patient in self._patients:\n prefix_list = self.patient[patient].all_scans.prefixes\n b = [(x==1) for x in prefix_list]\n if False in b:\n print('Fault for patient: %s' % patient)\n return False\n return True\n def __verify_scan_numbers(self):\n for patient in self._patients:\n prefix_list = self.patient[patient].all_scans.suffixes\n b = [(prefix_list.count(x)==1) for x in prefix_list]\n if False in b:\n print('Fault for patient: %s' % patient)\n return False\n return True\n\n\n def 
__get_patient_images(self):\n self.patient = {}\n for patient in self._patients:\n #list_of_image_folders_for_patient = next(os.walk(self.images_path + patient))[1]\n #list_of_image_folders_for_patient_full_path = [self.images_path + patient + '/' + x for x in list_of_image_folders_for_patient]\n\n #self.patient[patient] = {\"patient_id\": patient, \"images\": list_of_image_folders_for_patient, \"original_id\": \"\",\n #\"gender\": \"\", \"age\": 0, \"pathology\": \"\" }\n\n\n def get_files(list_of_folders):\n file_list = []\n for folder in list_of_folders:\n for file in os.listdir(folder):\n if file.endswith(\".dcm\"):\n file_list.append(folder +'/' + file)\n return file_list\n\n def get_prefix_suffix(files):\n prefixes = []\n suffixes = []\n for file in files:\n f=file.split(\"-\")\n f=f[-2::]\n f[1] = f[1].split(\".\")[0]\n prefixes.append(int(f[0]))\n suffixes.append(int(f[1]))\n return prefixes, suffixes\n\n files = get_files([self.images_path+patient+'/DICOM'])\n prefixes, suffixes = get_prefix_suffix(files)\n this_patient_scan_set = PatientScans(files, prefixes, suffixes, len(files))\n scans, scans_list, scans_total = self.__import_contours(patient, suffixes, files)\n gender, age, pathologyID, _ = self.__get_patient_info(patient)\n this_patient_record = PatientRecord(patient_id=patient, image_folder=self.images_path + patient, original_id=\"\",\n gender=gender, age=age, pathology=pathologyID, all_scans=this_patient_scan_set, scans=scans, scans_list=scans_list, scans_total=scans_total)\n self.patient[patient] = this_patient_record\n\n def total_examples(self):\n count=0\n for patient in self._patients:\n count += self.patient[patient].scans_total\n return count\n\n def __call__(self, patient,scan_number):\n return self.patient[patient].all_scans.files[scan_number]\n\n"
] | [
[
"pandas.read_csv"
]
] |
harinipsamy/Tensorflow-2-Reinforcement-Learning-Cookbook | [
"b8858554e4c819c96de10c100f8213ab41561c69"
] | [
"Chapter05/stock_trading_visual_continuous_env.py"
] | [
"#!/usr/bin/env python\n# Visual stock/share trading RL environment with continuous trade actions\n# Chapter 5, TensorFlow 2 Reinforcement Learning Cookbook | Praveen Palanisamy\n\nimport os\nimport random\nfrom typing import Dict\n\nimport cv2\nimport gym\nimport numpy as np\nimport pandas as pd\nfrom gym import spaces\n\nfrom trading_utils import TradeVisualizer\n\nenv_config = {\n \"ticker\": \"MSFT\",\n \"opening_account_balance\": 1000,\n # Number of steps (days) of data provided to the agent in one observation\n \"observation_horizon_sequence_length\": 30,\n}\n\n\nclass StockTradingVisualContinuousEnv(gym.Env):\n def __init__(self, env_config: Dict = env_config):\n \"\"\"Stock trading environment for RL agents with continuous action space\n\n Args:\n ticker (str, optional): Ticker symbol for the stock. Defaults to \"MSFT\".\n env_config (Dict): Env configuration values\n \"\"\"\n super(StockTradingVisualContinuousEnv, self).__init__()\n self.ticker = env_config.get(\"ticker\", \"MSFT\")\n data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n self.ticker_file_stream = os.path.join(f\"{data_dir}\", f\"{self.ticker}.csv\")\n assert os.path.isfile(\n self.ticker_file_stream\n ), f\"Historical stock data file stream not found at: data/{self.ticker}.csv\"\n # Stock market data stream. An offline file stream is used. Alternatively, a web\n # API can be used to pull live data.\n # Data-Frame: Date Open High Low Close Adj-Close Volume\n self.ohlcv_df = pd.read_csv(self.ticker_file_stream)\n\n self.opening_account_balance = env_config[\"opening_account_balance\"]\n # Action: 1-dim value indicating a fraction amount of shares to Buy (0 to 1) or\n # sell (-1 to 0). The fraction is taken on the allowable number of\n # shares that can be bought or sold based on the account balance (no margin).\n self.action_space = spaces.Box(\n low=np.array([-1]), high=np.array([1]), dtype=np.float\n )\n\n self.observation_features = [\n \"Open\",\n \"High\",\n \"Low\",\n \"Close\",\n \"Adj Close\",\n \"Volume\",\n ]\n self.obs_width, self.obs_height = 128, 128\n self.horizon = env_config.get(\"observation_horizon_sequence_length\")\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(128, 128, 3),\n dtype=np.uint8,\n )\n self.viz = None # Visualizer\n\n def step(self, action):\n # Execute one step within the environment\n self.execute_trade_action(action)\n\n self.current_step += 1\n\n reward = self.account_value - self.opening_account_balance # Profit (loss)\n done = self.account_value <= 0 or self.current_step >= len(\n self.ohlcv_df.loc[:, \"Open\"].values\n )\n\n obs = self.get_observation()\n\n return obs, reward, done, {}\n\n def reset(self):\n # Reset the state of the environment to an initial state\n self.cash_balance = self.opening_account_balance\n self.account_value = self.opening_account_balance\n self.num_shares_held = 0\n self.cost_basis = 0\n self.current_step = 0\n self.trades = []\n if self.viz is None:\n self.viz = TradeVisualizer(\n self.ticker,\n self.ticker_file_stream,\n \"TFRL-Cookbook Ch4-StockTradingVisualContinuousEnv\",\n )\n\n return self.get_observation()\n\n def render(self, **kwargs):\n # Render the environment to the screen\n\n if self.current_step > self.horizon:\n self.viz.render(\n self.current_step,\n self.account_value,\n self.trades,\n window_size=self.horizon,\n )\n\n def close(self):\n if self.viz is not None:\n self.viz.close()\n self.viz = None\n\n def get_observation(self):\n \"\"\"Return a view of the Ticker price chart as 
image observation\n\n Returns:\n img_observation (np.ndarray): Image of ticker candle stick plot\n with volume bars as observation\n \"\"\"\n img_observation = self.viz.render_image_observation(\n self.current_step, self.horizon\n )\n img_observation = cv2.resize(\n img_observation, dsize=(128, 128), interpolation=cv2.INTER_CUBIC\n )\n\n return img_observation\n\n def execute_trade_action(self, action):\n\n if action == 0: # Indicates \"Hold\" action\n # Hold position; No trade to be executed\n return\n\n order_type = \"buy\" if action > 0 else \"sell\"\n\n order_fraction_of_allowable_shares = abs(action)\n # Stochastically determine the current stock price based on Market Open & Close\n current_price = random.uniform(\n self.ohlcv_df.loc[self.current_step, \"Open\"],\n self.ohlcv_df.loc[self.current_step, \"Close\"],\n )\n if order_type == \"buy\":\n allowable_shares = int(self.cash_balance / current_price)\n # Simulate a BUY order and execute it at current_price\n num_shares_bought = int(\n allowable_shares * order_fraction_of_allowable_shares\n )\n current_cost = self.cost_basis * self.num_shares_held\n additional_cost = num_shares_bought * current_price\n\n self.cash_balance -= additional_cost\n self.cost_basis = (current_cost + additional_cost) / (\n self.num_shares_held + num_shares_bought\n )\n self.num_shares_held += num_shares_bought\n\n if num_shares_bought > 0:\n self.trades.append(\n {\n \"type\": \"buy\",\n \"step\": self.current_step,\n \"shares\": num_shares_bought,\n \"proceeds\": additional_cost,\n }\n )\n\n elif order_type == \"sell\":\n # Simulate a SELL order and execute it at current_price\n num_shares_sold = int(\n self.num_shares_held * order_fraction_of_allowable_shares\n )\n self.cash_balance += num_shares_sold * current_price\n self.num_shares_held -= num_shares_sold\n sale_proceeds = num_shares_sold * current_price\n\n if num_shares_sold > 0:\n self.trades.append(\n {\n \"type\": \"sell\",\n \"step\": self.current_step,\n \"shares\": num_shares_sold,\n \"proceeds\": sale_proceeds,\n }\n )\n if self.num_shares_held == 0:\n self.cost_basis = 0\n # Update account value\n self.account_value = self.cash_balance + self.num_shares_held * current_price\n\n\nif __name__ == \"__main__\":\n env = StockTradingVisualContinuousEnv()\n obs = env.reset()\n for _ in range(600):\n action = env.action_space.sample()\n next_obs, reward, done, _ = env.step(action)\n env.render()\n"
] | [
[
"pandas.read_csv",
"numpy.array"
]
] |
Marcel-Rodekamp/MLP | [
"349ac8e10679e2ec53980908c580902996a493e7"
] | [
"src/LossFunctions/ActionCrossEntropy.py"
] | [
"import torch\n\nclass ActionCrossEntropyFunction(torch.autograd.Function):\n @staticmethod\n def forward(self,input,target,action,force = None):\n self.mb_size,self.dim = input.size()\n\n # save the force for backward\n self.force = force\n\n\n # get action difference\n action_input = torch.tensor( [action(input[i_mb,:].numpy()) for i_mb in range(self.mb_size)], dtype = input.dtype )\n action_target = torch.tensor( [action(target[i_mb,:].numpy()) for i_mb in range(self.mb_size)], dtype = input.dtype )\n\n output = action_target * action_input.log() + (1-action_target) * (1-action_input).log()\n\n self.save_for_backward(input,action_input,action_target,output)\n\n output = torch.sqrt(output.real**2+output.imag**2)\n\n # average over batch and return\n output = torch.mean(output)\n\n return output\n\n @staticmethod\n def backward(self,grad_output):\n input,action_input,action_target,cross_entropy = self.saved_tensors\n\n action_grad_input = -torch.tensor( [self.force(input[i_mb,:].numpy()) for i_mb in range(self.mb_size)], dtype = input.dtype )\n\n grad = torch.unsqueeze( cross_entropy.conj()*((action_target)/(action_input) + (1-action_target)/(1-action_input)), -1 ) * action_grad_input\n return grad.conj(),None,None,None\n\nActionCrossEntropyFunc = ActionCrossEntropyFunction.apply\n\nclass ActionCrossEntropy(torch.nn.Module):\n def __init__(self,action):\n super(ActionCrossEntropy, self).__init__()\n self.action = action.eval\n self.force = action.force\n\n def forward(self, input, target):\n return ActionCrossEntropyFunc(input,target,self.action,self.force)\n"
] | [
[
"torch.sqrt",
"torch.mean"
]
] |
ArzelaAscoIi/haystack | [
"be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08"
] | [
"haystack/utils/squad_data.py"
] | [
"from typing import List\n\nimport logging\nimport json\nimport random\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom haystack.schema import Document, Label\nfrom haystack.modeling.data_handler.processor import _read_squad_file\n\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\ntqdm.pandas()\n\n\nCOLUMN_NAMES = [\"title\", \"context\", \"question\", \"id\", \"answer_text\", \"answer_start\", \"is_impossible\"]\n\n\nclass SquadData:\n \"\"\"\n This class is designed to manipulate data that is in SQuAD format\n \"\"\"\n\n def __init__(self, squad_data):\n \"\"\"\n :param squad_data: SQuAD format data, either as a dict with a `data` key, or just a list of SQuAD documents\n \"\"\"\n if type(squad_data) == dict:\n self.version = squad_data.get(\"version\")\n self.data = squad_data[\"data\"]\n elif type(squad_data) == list:\n self.version = None\n self.data = squad_data\n self.df = self.to_df(self.data)\n\n def merge_from_file(self, filename: str):\n \"\"\"Merge the contents of a SQuAD format json file with the data stored in this object\"\"\"\n new_data = json.load(open(filename))[\"data\"]\n self.merge(new_data)\n\n def merge(self, new_data: List):\n \"\"\"\n Merge data in SQuAD format with the data stored in this object\n :param new_data: A list of SQuAD document data\n \"\"\"\n df_new = self.to_df(new_data)\n self.df = pd.concat([df_new, self.df])\n self.data = self.df_to_data(self.df)\n\n @classmethod\n def from_file(cls, filename: str):\n \"\"\"\n Create a SquadData object by providing the name of a SQuAD format json file\n \"\"\"\n data = json.load(open(filename))\n return cls(data)\n\n def save(self, filename: str):\n \"\"\"\n Write the data stored in this object to a json file.\n \"\"\"\n with open(filename, \"w\") as f:\n squad_data = {\"version\": self.version, \"data\": self.data}\n json.dump(squad_data, f, indent=2)\n\n def to_dpr_dataset(self):\n raise NotImplementedError(\n \"SquadData.to_dpr_dataset() not yet implemented. 
\"\n \"For now, have a look at the script at haystack/retriever/squad_to_dpr.py\"\n )\n\n def to_document_objs(self):\n \"\"\"\n Export all paragraphs stored in this object to haystack.Document objects.\n \"\"\"\n df_docs = self.df[[\"title\", \"context\"]]\n df_docs = df_docs.drop_duplicates()\n record_dicts = df_docs.to_dict(\"records\")\n documents = [Document(content=rd[\"context\"], id=rd[\"title\"]) for rd in record_dicts]\n return documents\n\n # TODO refactor to new Label objects\n def to_label_objs(self):\n \"\"\"\n Export all labels stored in this object to haystack.Label objects.\n \"\"\"\n df_labels = self.df[[\"id\", \"question\", \"answer_text\", \"answer_start\"]]\n record_dicts = df_labels.to_dict(\"records\")\n labels = [\n Label(\n query=rd[\"question\"],\n answer=rd[\"answer_text\"],\n is_correct_answer=True,\n is_correct_document=True,\n id=rd[\"id\"],\n origin=rd.get(\"origin\", \"SquadData tool\"),\n document_id=rd.get(\"document_id\", None),\n )\n for rd in record_dicts\n ]\n return labels\n\n @staticmethod\n def to_df(data):\n \"\"\"Convert a list of SQuAD document dictionaries into a pandas dataframe (each row is one annotation)\"\"\"\n flat = []\n for document in data:\n title = document[\"title\"]\n for paragraph in document[\"paragraphs\"]:\n context = paragraph[\"context\"]\n for question in paragraph[\"qas\"]:\n q = question[\"question\"]\n id = question[\"id\"]\n is_impossible = question[\"is_impossible\"]\n # For no_answer samples\n if len(question[\"answers\"]) == 0:\n flat.append(\n {\n \"title\": title,\n \"context\": context,\n \"question\": q,\n \"id\": id,\n \"answer_text\": \"\",\n \"answer_start\": None,\n \"is_impossible\": is_impossible,\n }\n )\n # For span answer samples\n else:\n for answer in question[\"answers\"]:\n answer_text = answer[\"text\"]\n answer_start = answer[\"answer_start\"]\n flat.append(\n {\n \"title\": title,\n \"context\": context,\n \"question\": q,\n \"id\": id,\n \"answer_text\": answer_text,\n \"answer_start\": answer_start,\n \"is_impossible\": is_impossible,\n }\n )\n df = pd.DataFrame.from_records(flat)\n return df\n\n def count(self, unit=\"questions\"):\n \"\"\"\n Count the samples in the data. 
Choose from unit = \"paragraphs\", \"questions\", \"answers\", \"no_answers\", \"span_answers\"\n \"\"\"\n c = 0\n for document in self.data:\n for paragraph in document[\"paragraphs\"]:\n if unit == \"paragraphs\":\n c += 1\n for question in paragraph[\"qas\"]:\n if unit == \"questions\":\n c += 1\n # Count no_answers\n if len(question[\"answers\"]) == 0:\n if unit in [\"answers\", \"no_answers\"]:\n c += 1\n # Count span answers\n else:\n for answer in question[\"answers\"]:\n if unit in [\"answers\", \"span_answers\"]:\n c += 1\n return c\n\n @classmethod\n def df_to_data(cls, df):\n \"\"\"\n Convert a dataframe into SQuAD format data (list of SQuAD document dictionaries).\n \"\"\"\n logger.info(\"Converting data frame to squad format data\")\n\n # Aggregate the answers of each question\n logger.info(\"Aggregating the answers of each question\")\n df_grouped_answers = df.groupby([\"title\", \"context\", \"question\", \"id\", \"is_impossible\"])\n df_aggregated_answers = (\n df[[\"title\", \"context\", \"question\", \"id\", \"is_impossible\"]].drop_duplicates().reset_index()\n )\n answers = df_grouped_answers.progress_apply(cls._aggregate_answers).rename(\"answers\")\n answers = pd.DataFrame(answers).reset_index()\n df_aggregated_answers = pd.merge(df_aggregated_answers, answers)\n\n # Aggregate the questions of each passage\n logger.info(\"Aggregating the questions of each paragraphs of each document\")\n df_grouped_questions = df_aggregated_answers.groupby([\"title\", \"context\"])\n df_aggregated_questions = df[[\"title\", \"context\"]].drop_duplicates().reset_index()\n questions = df_grouped_questions.progress_apply(cls._aggregate_questions).rename(\"qas\")\n questions = pd.DataFrame(questions).reset_index()\n df_aggregated_questions = pd.merge(df_aggregated_questions, questions)\n\n logger.info(\"Aggregating the paragraphs of each document\")\n df_grouped_paragraphs = df_aggregated_questions.groupby([\"title\"])\n df_aggregated_paragraphs = df[[\"title\"]].drop_duplicates().reset_index()\n paragraphs = df_grouped_paragraphs.progress_apply(cls._aggregate_passages).rename(\"paragraphs\")\n paragraphs = pd.DataFrame(paragraphs).reset_index()\n df_aggregated_paragraphs = pd.merge(df_aggregated_paragraphs, paragraphs)\n\n df_aggregated_paragraphs = df_aggregated_paragraphs[[\"title\", \"paragraphs\"]]\n ret = df_aggregated_paragraphs.to_dict(\"records\")\n\n return ret\n\n @staticmethod\n def _aggregate_passages(x):\n x = x[[\"context\", \"qas\"]]\n ret = x.to_dict(\"records\")\n return ret\n\n @staticmethod\n def _aggregate_questions(x):\n x = x[[\"question\", \"id\", \"answers\", \"is_impossible\"]]\n ret = x.to_dict(\"records\")\n return ret\n\n @staticmethod\n def _aggregate_answers(x):\n x = x[[\"answer_text\", \"answer_start\"]]\n x = x.rename(columns={\"answer_text\": \"text\"})\n # Span anwser\n try:\n x[\"answer_start\"] = x[\"answer_start\"].astype(int)\n ret = x.to_dict(\"records\")\n # No answer\n except ValueError:\n ret = []\n return ret\n\n def set_data(self, data):\n self.data = data\n self.df = self.to_df(data)\n\n def sample_questions(self, n):\n \"\"\"\n Return a sample of n questions in SQuAD format (list of SQuAD document dictionaries)\n Note, that if the same question is asked on multiple different passages, this fn treats that\n as a single question\n \"\"\"\n all_questions = self.get_all_questions()\n sampled_questions = random.sample(all_questions, n)\n df_sampled = self.df[self.df[\"question\"].isin(sampled_questions)]\n return self.df_to_data(df_sampled)\n\n 
def get_all_paragraphs(self):\n \"\"\"\n Return all paragraph strings.\n \"\"\"\n return self.df[\"context\"].unique().tolist()\n\n def get_all_questions(self):\n \"\"\"\n Return all question strings. Note that if the same question appears for different paragraphs, it will be\n returned multiple times by this fn\n \"\"\"\n df_questions = self.df[[\"title\", \"context\", \"question\"]]\n df_questions = df_questions.drop_duplicates()\n questions = df_questions[\"question\"].tolist()\n return questions\n\n def get_all_document_titles(self):\n \"\"\"Return all document title strings\"\"\"\n return self.df[\"title\"].unique().tolist()\n\n\nif __name__ == \"__main__\":\n # Download the SQuAD dataset if it isn't at target directory\n _read_squad_file(\"../data/squad20/train-v2.0.json\")\n\n filename1 = \"../data/squad20/train-v2.0.json\"\n filename2 = \"../data/squad20/dev-v2.0.json\"\n\n # Load file1 and take a sample of 10000 questions\n sd = SquadData.from_file(filename1)\n sample1 = sd.sample_questions(n=10000)\n\n # Set sd to now contain the sample of 10000 questions\n sd.set_data(sample1)\n\n # Merge sd with file2 and take a sample of 100 questions\n sd.merge_from_file(filename2)\n sample2 = sd.sample_questions(n=100)\n sd.set_data(sample2)\n\n # Save this sample of 100\n sd.save(\"../data/squad20/sample.json\")\n\n paragraphs = sd.get_all_paragraphs()\n questions = sd.get_all_questions()\n titles = sd.get_all_document_titles()\n\n documents = sd.to_document_objs()\n labels = sd.to_label_objs()\n\n n_qs = sd.count(unit=\"questions\")\n n_as = sd.count(unit=\"no_answers\")\n n_ps = sd.count(unit=\"paragraphs\")\n\n print(n_qs)\n print(n_as)\n print(n_ps)\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.DataFrame",
"pandas.merge",
"pandas.concat"
]
] |
LimJiaJing/Cam2BEV | [
"f788a9f58b464bc7b114e5a0dd1afcd6683f10e3"
] | [
"preprocessing/homography_converter/uNetXST_homographies/2_F.py"
] | [
"# ==============================================================================\n# MIT License\n#\n# Copyright 2020 Institute for Automotive Engineering of RWTH Aachen University.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\nimport numpy as np\n\n# for dataset 2_F\nH = [\n np.array([[0.03506686613905922, 27.971438297785962, -0.17694724954191404], [0.3821882391578238, 9.481642330993019e-17, 5.46222110929461], [25.000001047737943, 6.202207287472715e-15, 27.000001047737943]]) # front\n]\n"
] | [
[
"numpy.array"
]
] |
phoenix1712/deep-person-reid | [
"70365320f5319e180d7fce4993003382b06906b0"
] | [
"torchreid/data_manager/cuhk03.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport glob\nimport re\nimport sys\nimport urllib\nimport tarfile\nimport zipfile\nimport os.path as osp\nfrom scipy.io import loadmat\nimport numpy as np\nimport h5py\nfrom scipy.misc import imsave\n\nfrom torchreid.utils.iotools import mkdir_if_missing, write_json, read_json\n\n\nclass CUHK03(object):\n \"\"\"\n CUHK03\n\n Reference:\n Li et al. DeepReID: Deep Filter Pairing Neural Network for Person Re-identification. CVPR 2014.\n\n URL: http://www.ee.cuhk.edu.hk/~xgwang/CUHK_identification.html#!\n \n Dataset statistics:\n # identities: 1360\n # images: 13164\n # cameras: 6\n # splits: 20 (classic)\n\n Args:\n split_id (int): split index (default: 0)\n cuhk03_labeled (bool): whether to load labeled images; if false, detected images are loaded (default: False)\n \"\"\"\n dataset_dir = 'cuhk03'\n\n def __init__(self, root='data', split_id=0, cuhk03_labeled=False, cuhk03_classic_split=False, verbose=True, **kwargs):\n super(CUHK03, self).__init__()\n self.dataset_dir = osp.join(root, self.dataset_dir)\n self.data_dir = osp.join(self.dataset_dir, 'cuhk03_release')\n self.raw_mat_path = osp.join(self.data_dir, 'cuhk-03.mat')\n \n self.imgs_detected_dir = osp.join(self.dataset_dir, 'images_detected')\n self.imgs_labeled_dir = osp.join(self.dataset_dir, 'images_labeled')\n \n self.split_classic_det_json_path = osp.join(self.dataset_dir, 'splits_classic_detected.json')\n self.split_classic_lab_json_path = osp.join(self.dataset_dir, 'splits_classic_labeled.json')\n \n self.split_new_det_json_path = osp.join(self.dataset_dir, 'splits_new_detected.json')\n self.split_new_lab_json_path = osp.join(self.dataset_dir, 'splits_new_labeled.json')\n \n self.split_new_det_mat_path = osp.join(self.dataset_dir, 'cuhk03_new_protocol_config_detected.mat')\n self.split_new_lab_mat_path = osp.join(self.dataset_dir, 'cuhk03_new_protocol_config_labeled.mat')\n\n self._check_before_run()\n self._preprocess()\n\n if cuhk03_labeled:\n image_type = 'labeled'\n split_path = self.split_classic_lab_json_path if cuhk03_classic_split else self.split_new_lab_json_path\n else:\n image_type = 'detected'\n split_path = self.split_classic_det_json_path if cuhk03_classic_split else self.split_new_det_json_path\n\n splits = read_json(split_path)\n assert split_id < len(splits), \"Condition split_id ({}) < len(splits) ({}) is false\".format(split_id, len(splits))\n split = splits[split_id]\n print(\"Split index = {}\".format(split_id))\n\n train = split['train']\n query = split['query']\n gallery = split['gallery']\n\n num_train_pids = split['num_train_pids']\n num_query_pids = split['num_query_pids']\n num_gallery_pids = split['num_gallery_pids']\n num_total_pids = num_train_pids + num_query_pids\n\n num_train_imgs = split['num_train_imgs']\n num_query_imgs = split['num_query_imgs']\n num_gallery_imgs = split['num_gallery_imgs']\n num_total_imgs = num_train_imgs + num_query_imgs\n\n if verbose:\n print(\"=> CUHK03 ({}) loaded\".format(image_type))\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # images\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_imgs))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_imgs))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_imgs))\n print(\" ------------------------------\")\n print(\" total | {:5d} | 
{:8d}\".format(num_total_pids, num_total_imgs))\n print(\" ------------------------------\")\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.raw_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.raw_mat_path))\n if not osp.exists(self.split_new_det_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_det_mat_path))\n if not osp.exists(self.split_new_lab_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_lab_mat_path))\n\n def _preprocess(self):\n \"\"\"\n This function is a bit complex and ugly, what it does is\n 1. Extract data from cuhk-03.mat and save as png images.\n 2. Create 20 classic splits. (Li et al. CVPR'14)\n 3. Create new split. (Zhong et al. CVPR'17)\n \"\"\"\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imsave(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, 
img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) 
...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)\n"
] | [
[
"scipy.io.loadmat",
"scipy.misc.imsave"
]
] |
selasley/pandas | [
"5b5574520dba1e79ac95e5079724a41151c20b9a"
] | [
"pandas/core/groupby/generic.py"
] | [
"\"\"\"\nDefine the SeriesGroupBy and DataFrameGroupBy\nclasses that hold the groupby interfaces (and some implementations).\n\nThese are user facing as the result of the ``df.groupby(...)`` operations,\nwhich here returns a DataFrameGroupBy object.\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom functools import partial\nfrom textwrap import dedent\nfrom typing import (\n Any,\n Callable,\n Hashable,\n Iterable,\n Mapping,\n NamedTuple,\n Sequence,\n TypeVar,\n Union,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n Interval,\n reduction as libreduction,\n)\nfrom pandas._typing import (\n ArrayLike,\n Manager,\n Manager2D,\n SingleManager,\n)\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n is_bool,\n is_categorical_dtype,\n is_dict_like,\n is_integer_dtype,\n is_interval_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n notna,\n)\n\nfrom pandas.core import (\n algorithms,\n nanops,\n)\nfrom pandas.core.apply import (\n GroupByApply,\n maybe_mangle_lambdas,\n reconstruct_func,\n validate_func_kwargs,\n)\nfrom pandas.core.base import SpecificationError\nimport pandas.core.common as com\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.groupby import base\nfrom pandas.core.groupby.groupby import (\n GroupBy,\n _agg_template,\n _apply_docs,\n _transform_template,\n warn_dropping_nuisance_columns_deprecated,\n)\nfrom pandas.core.groupby.grouper import get_grouper\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n all_indexes_same,\n)\nfrom pandas.core.series import Series\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.core.util.numba_ import maybe_use_numba\n\nfrom pandas.plotting import boxplot_frame_groupby\n\n# TODO(typing) the return value on this callable should be any *scalar*.\nAggScalar = Union[str, Callable[..., Any]]\n# TODO: validate types on ScalarResult and move to _typing\n# Blocked from using by https://github.com/python/mypy/issues/1484\n# See note at _mangle_lambda_list\nScalarResult = TypeVar(\"ScalarResult\")\n\n\nclass NamedAgg(NamedTuple):\n column: Hashable\n aggfunc: AggScalar\n\n\ndef generate_property(name: str, klass: type[DataFrame | Series]):\n \"\"\"\n Create a property for a GroupBy subclass to dispatch to DataFrame/Series.\n\n Parameters\n ----------\n name : str\n klass : {DataFrame, Series}\n\n Returns\n -------\n property\n \"\"\"\n\n def prop(self):\n return self._make_wrapper(name)\n\n parent_method = getattr(klass, name)\n prop.__doc__ = parent_method.__doc__ or \"\"\n prop.__name__ = name\n return property(prop)\n\n\ndef pin_allowlisted_properties(\n klass: type[DataFrame | Series], allowlist: frozenset[str]\n):\n \"\"\"\n Create GroupBy member defs for DataFrame/Series names in a allowlist.\n\n Parameters\n ----------\n klass : DataFrame or Series class\n class where members are defined.\n allowlist : frozenset[str]\n Set of names of klass methods to be constructed\n\n Returns\n -------\n class decorator\n\n Notes\n -----\n Since we don't want to override methods explicitly defined in the\n base class, any such name is skipped.\n \"\"\"\n\n def pinner(cls):\n for name in allowlist:\n if hasattr(cls, name):\n # don't override anything that was explicitly defined\n # in the base 
class\n continue\n\n prop = generate_property(name, klass)\n setattr(cls, name, prop)\n\n return cls\n\n return pinner\n\n\n@pin_allowlisted_properties(Series, base.series_apply_allowlist)\nclass SeriesGroupBy(GroupBy[Series]):\n _apply_allowlist = base.series_apply_allowlist\n\n def _wrap_agged_manager(self, mgr: Manager) -> Series:\n if mgr.ndim == 1:\n mgr = cast(SingleManager, mgr)\n single = mgr\n else:\n mgr = cast(Manager2D, mgr)\n single = mgr.iget(0)\n ser = self.obj._constructor(single, name=self.obj.name)\n # NB: caller is responsible for setting ser.index\n return ser\n\n def _get_data_to_aggregate(self) -> SingleManager:\n ser = self._obj_with_exclusions\n single = ser._mgr\n return single\n\n def _iterate_slices(self) -> Iterable[Series]:\n yield self._selected_obj\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).min()\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg('min')\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])\n min max\n 1 1 2\n 2 3 4\n\n The output column names can be controlled by passing\n the desired column names and aggregations as keyword arguments.\n\n >>> s.groupby([1, 1, 2, 2]).agg(\n ... minimum='min',\n ... maximum='max',\n ... )\n minimum maximum\n 1 1 2\n 2 3 4\n\n .. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the aggregating function.\n\n >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())\n 1 1.0\n 2 3.0\n dtype: float64\n \"\"\"\n )\n\n @Appender(\n _apply_docs[\"template\"].format(\n input=\"series\", examples=_apply_docs[\"series_examples\"]\n )\n )\n def apply(self, func, *args, **kwargs) -> Series:\n return super().apply(func, *args, **kwargs)\n\n @doc(_agg_template, examples=_agg_examples_doc, klass=\"Series\")\n def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):\n\n if maybe_use_numba(engine):\n with self._group_selection_context():\n data = self._selected_obj\n result = self._aggregate_with_numba(\n data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs\n )\n index = self.grouper.result_index\n return self.obj._constructor(result.ravel(), index=index, name=data.name)\n\n relabeling = func is None\n columns = None\n if relabeling:\n columns, func = validate_func_kwargs(kwargs)\n kwargs = {}\n\n if isinstance(func, str):\n return getattr(self, func)(*args, **kwargs)\n\n elif isinstance(func, abc.Iterable):\n # Catch instances of lists / tuples\n # but not the class list / tuple itself.\n func = maybe_mangle_lambdas(func)\n ret = self._aggregate_multiple_funcs(func)\n if relabeling:\n # error: Incompatible types in assignment (expression has type\n # \"Optional[List[str]]\", variable has type \"Index\")\n ret.columns = columns # type: ignore[assignment]\n return ret\n\n else:\n cyfunc = com.get_cython_func(func)\n if cyfunc and not args and not kwargs:\n return getattr(self, cyfunc)()\n\n if self.grouper.nkeys > 1:\n return self._python_agg_general(func, *args, **kwargs)\n\n try:\n return self._python_agg_general(func, *args, **kwargs)\n except KeyError:\n # TODO: KeyError is raised in _python_agg_general,\n # see test_groupby.test_basic\n result = self._aggregate_named(func, *args, **kwargs)\n\n # result is a dict whose keys are the elements of result_index\n index = self.grouper.result_index\n return create_series_with_explicit_dtype(\n result, index=index, 
dtype_if_empty=object\n )\n\n agg = aggregate\n\n def _aggregate_multiple_funcs(self, arg) -> DataFrame:\n if isinstance(arg, dict):\n\n # show the deprecation, but only if we\n # have not shown a higher level one\n # GH 15931\n raise SpecificationError(\"nested renamer is not supported\")\n\n elif any(isinstance(x, (tuple, list)) for x in arg):\n arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]\n\n # indicated column order\n columns = next(zip(*arg))\n else:\n # list of functions / function names\n columns = []\n for f in arg:\n columns.append(com.get_callable_name(f) or f)\n\n arg = zip(columns, arg)\n\n results: dict[base.OutputKey, DataFrame | Series] = {}\n for idx, (name, func) in enumerate(arg):\n\n key = base.OutputKey(label=name, position=idx)\n results[key] = self.aggregate(func)\n\n if any(isinstance(x, DataFrame) for x in results.values()):\n from pandas import concat\n\n res_df = concat(\n results.values(), axis=1, keys=[key.label for key in results.keys()]\n )\n return res_df\n\n indexed_output = {key.position: val for key, val in results.items()}\n output = self.obj._constructor_expanddim(indexed_output, index=None)\n output.columns = Index(key.label for key in results)\n\n output = self._reindex_output(output)\n return output\n\n def _indexed_output_to_ndframe(\n self, output: Mapping[base.OutputKey, ArrayLike]\n ) -> Series:\n \"\"\"\n Wrap the dict result of a GroupBy aggregation into a Series.\n \"\"\"\n assert len(output) == 1\n values = next(iter(output.values()))\n result = self.obj._constructor(values)\n result.name = self.obj.name\n return result\n\n def _wrap_applied_output(\n self,\n data: Series,\n values: list[Any],\n not_indexed_same: bool = False,\n override_group_keys: bool = False,\n ) -> DataFrame | Series:\n \"\"\"\n Wrap the output of SeriesGroupBy.apply into the expected result.\n\n Parameters\n ----------\n data : Series\n Input data for groupby operation.\n values : List[Any]\n Applied output for each group.\n not_indexed_same : bool, default False\n Whether the applied outputs are not indexed the same as the group axes.\n\n Returns\n -------\n DataFrame or Series\n \"\"\"\n if len(values) == 0:\n # GH #6265\n return self.obj._constructor(\n [],\n name=self.obj.name,\n index=self.grouper.result_index,\n dtype=data.dtype,\n )\n assert values is not None\n\n if isinstance(values[0], dict):\n # GH #823 #24880\n index = self.grouper.result_index\n res_df = self.obj._constructor_expanddim(values, index=index)\n res_df = self._reindex_output(res_df)\n # if self.observed is False,\n # keep all-NaN rows created while re-indexing\n res_ser = res_df.stack(dropna=self.observed)\n res_ser.name = self.obj.name\n return res_ser\n elif isinstance(values[0], (Series, DataFrame)):\n result = self._concat_objects(\n values,\n not_indexed_same=not_indexed_same,\n override_group_keys=override_group_keys,\n )\n result.name = self.obj.name\n return result\n else:\n # GH #6265 #24880\n result = self.obj._constructor(\n data=values, index=self.grouper.result_index, name=self.obj.name\n )\n return self._reindex_output(result)\n\n def _aggregate_named(self, func, *args, **kwargs):\n # Note: this is very similar to _aggregate_series_pure_python,\n # but that does not pin group.name\n result = {}\n initialized = False\n\n for name, group in self:\n object.__setattr__(group, \"name\", name)\n\n output = func(group, *args, **kwargs)\n output = libreduction.extract_result(output)\n if not initialized:\n # We only do this validation on the first iteration\n 
libreduction.check_result_array(output, group.dtype)\n initialized = True\n result[name] = output\n\n return result\n\n @Substitution(klass=\"Series\")\n @Appender(_transform_template)\n def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):\n return self._transform(\n func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs\n )\n\n def _cython_transform(\n self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs\n ):\n assert axis == 0 # handled by caller\n\n obj = self._selected_obj\n\n try:\n result = self.grouper._cython_operation(\n \"transform\", obj._values, how, axis, **kwargs\n )\n except NotImplementedError as err:\n raise TypeError(f\"{how} is not supported for {obj.dtype} dtype\") from err\n\n return obj._constructor(result, index=self.obj.index, name=obj.name)\n\n def _transform_general(self, func: Callable, *args, **kwargs) -> Series:\n \"\"\"\n Transform with a callable func`.\n \"\"\"\n assert callable(func)\n klass = type(self.obj)\n\n results = []\n for name, group in self:\n # this setattr is needed for test_transform_lambda_with_datetimetz\n object.__setattr__(group, \"name\", name)\n res = func(group, *args, **kwargs)\n\n results.append(klass(res, index=group.index))\n\n # check for empty \"results\" to avoid concat ValueError\n if results:\n from pandas.core.reshape.concat import concat\n\n concatenated = concat(results)\n result = self._set_result_index_ordered(concatenated)\n else:\n result = self.obj._constructor(dtype=np.float64)\n\n result.name = self.obj.name\n return result\n\n def filter(self, func, dropna: bool = True, *args, **kwargs):\n \"\"\"\n Return a copy of a Series excluding elements from groups that\n do not satisfy the boolean criterion specified by func.\n\n Parameters\n ----------\n func : function\n To apply to each group. Should return True or False.\n dropna : Drop groups that do not pass the filter. True by default;\n if False, groups that evaluate False are filled with NaNs.\n\n Notes\n -----\n Functions that mutate the passed object can produce unexpected\n behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\n for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 
'C' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')\n >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)\n 1 2\n 3 4\n 5 6\n Name: B, dtype: int64\n\n Returns\n -------\n filtered : Series\n \"\"\"\n if isinstance(func, str):\n wrapper = lambda x: getattr(x, func)(*args, **kwargs)\n else:\n wrapper = lambda x: func(x, *args, **kwargs)\n\n # Interpret np.nan as False.\n def true_and_notna(x) -> bool:\n b = wrapper(x)\n return b and notna(b)\n\n try:\n indices = [\n self._get_index(name) for name, group in self if true_and_notna(group)\n ]\n except (ValueError, TypeError) as err:\n raise TypeError(\"the filter must return a boolean result\") from err\n\n filtered = self._apply_filter(indices, dropna)\n return filtered\n\n def nunique(self, dropna: bool = True) -> Series:\n \"\"\"\n Return number of unique elements in the group.\n\n Returns\n -------\n Series\n Number of unique values within each group.\n \"\"\"\n ids, _, _ = self.grouper.group_info\n\n val = self.obj._values\n\n codes, _ = algorithms.factorize(val, sort=False)\n sorter = np.lexsort((codes, ids))\n codes = codes[sorter]\n ids = ids[sorter]\n\n # group boundaries are where group ids change\n # unique observations are where sorted values change\n idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]\n inc = np.r_[1, codes[1:] != codes[:-1]]\n\n # 1st item of each group is a new unique observation\n mask = codes == -1\n if dropna:\n inc[idx] = 1\n inc[mask] = 0\n else:\n inc[mask & np.r_[False, mask[:-1]]] = 0\n inc[idx] = 1\n\n out = np.add.reduceat(inc, idx).astype(\"int64\", copy=False)\n if len(ids):\n # NaN/NaT group exists if the head of ids is -1,\n # so remove it from res and exclude its index from idx\n if ids[0] == -1:\n res = out[1:]\n idx = idx[np.flatnonzero(idx)]\n else:\n res = out\n else:\n res = out[1:]\n ri = self.grouper.result_index\n\n # we might have duplications among the bins\n if len(res) != len(ri):\n res, out = np.zeros(len(ri), dtype=out.dtype), res\n res[ids[idx]] = out\n\n result = self.obj._constructor(res, index=ri, name=self.obj.name)\n return self._reindex_output(result, fill_value=0)\n\n @doc(Series.describe)\n def describe(self, **kwargs):\n return super().describe(**kwargs)\n\n def value_counts(\n self,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n bins=None,\n dropna: bool = True,\n ):\n\n from pandas.core.reshape.merge import get_join_indexers\n from pandas.core.reshape.tile import cut\n\n ids, _, _ = self.grouper.group_info\n val = self.obj._values\n\n names = self.grouper.names + [self.obj.name]\n\n if is_categorical_dtype(val.dtype) or (\n bins is not None and not np.iterable(bins)\n ):\n # scalar bins cannot be done at top level\n # in a backward compatible way\n # GH38672 relates to categorical dtype\n ser = self.apply(\n Series.value_counts,\n normalize=normalize,\n sort=sort,\n ascending=ascending,\n bins=bins,\n )\n ser.index.names = names\n return ser\n\n # groupby removes null keys from groupings\n mask = ids != -1\n ids, val = ids[mask], val[mask]\n\n if bins is None:\n lab, lev = algorithms.factorize(val, sort=True)\n llab = lambda lab, inc: lab[inc]\n else:\n\n # lab is a Categorical with categories an IntervalIndex\n lab = cut(Series(val), bins, include_lowest=True)\n # error: \"ndarray\" has no attribute \"cat\"\n lev = lab.cat.categories # type: ignore[attr-defined]\n # error: No overload variant of \"take\" of \"_ArrayOrScalarCommon\" matches\n # argument types \"Any\", \"bool\", \"Union[Any, float]\"\n lab = lev.take( # 
type: ignore[call-overload]\n # error: \"ndarray\" has no attribute \"cat\"\n lab.cat.codes, # type: ignore[attr-defined]\n allow_fill=True,\n # error: Item \"ndarray\" of \"Union[ndarray, Index]\" has no attribute\n # \"_na_value\"\n fill_value=lev._na_value, # type: ignore[union-attr]\n )\n llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]\n\n if is_interval_dtype(lab.dtype):\n # TODO: should we do this inside II?\n lab_interval = cast(Interval, lab)\n\n sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))\n else:\n sorter = np.lexsort((lab, ids))\n\n ids, lab = ids[sorter], lab[sorter]\n\n # group boundaries are where group ids change\n idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]\n idx = np.r_[0, idchanges]\n if not len(ids):\n idx = idchanges\n\n # new values are where sorted labels change\n lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))\n inc = np.r_[True, lchanges]\n if not len(val):\n inc = lchanges\n inc[idx] = True # group boundaries are also new values\n out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts\n\n # num. of times each group should be repeated\n rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))\n\n # multi-index components\n codes = self.grouper.reconstructed_codes\n codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]\n # error: List item 0 has incompatible type \"Union[ndarray[Any, Any], Index]\";\n # expected \"Index\"\n levels = [ping.group_index for ping in self.grouper.groupings] + [\n lev # type: ignore[list-item]\n ]\n\n if dropna:\n mask = codes[-1] != -1\n if mask.all():\n dropna = False\n else:\n out, codes = out[mask], [level_codes[mask] for level_codes in codes]\n\n if normalize:\n out = out.astype(\"float\")\n d = np.diff(np.r_[idx, len(ids)])\n if dropna:\n m = ids[lab == -1]\n np.add.at(d, m, -1)\n acc = rep(d)[mask]\n else:\n acc = rep(d)\n out /= acc\n\n if sort and bins is None:\n cat = ids[inc][mask] if dropna else ids[inc]\n sorter = np.lexsort((out if ascending else -out, cat))\n out, codes[-1] = out[sorter], codes[-1][sorter]\n\n if bins is not None:\n # for compat. 
with libgroupby.value_counts need to ensure every\n # bin is present at every index level, null filled with zeros\n diff = np.zeros(len(out), dtype=\"bool\")\n for level_codes in codes[:-1]:\n diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]\n\n ncat, nbin = diff.sum(), len(levels[-1])\n\n left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]\n\n right = [diff.cumsum() - 1, codes[-1]]\n\n _, idx = get_join_indexers(left, right, sort=False, how=\"left\")\n out = np.where(idx != -1, out[idx], 0)\n\n if sort:\n sorter = np.lexsort((out if ascending else -out, left[0]))\n out, left[-1] = out[sorter], left[-1][sorter]\n\n # build the multi-index w/ full levels\n def build_codes(lev_codes: np.ndarray) -> np.ndarray:\n return np.repeat(lev_codes[diff], nbin)\n\n codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]\n codes.append(left[-1])\n\n mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)\n\n if is_integer_dtype(out.dtype):\n out = ensure_int64(out)\n return self.obj._constructor(out, index=mi, name=self.obj.name)\n\n @doc(Series.nlargest)\n def nlargest(self, n: int = 5, keep: str = \"first\"):\n f = partial(Series.nlargest, n=n, keep=keep)\n data = self._obj_with_exclusions\n # Don't change behavior if result index happens to be the same, i.e.\n # already ordered and n >= all group sizes.\n result = self._python_apply_general(f, data, not_indexed_same=True)\n return result\n\n @doc(Series.nsmallest)\n def nsmallest(self, n: int = 5, keep: str = \"first\"):\n f = partial(Series.nsmallest, n=n, keep=keep)\n data = self._obj_with_exclusions\n # Don't change behavior if result index happens to be the same, i.e.\n # already ordered and n >= all group sizes.\n result = self._python_apply_general(f, data, not_indexed_same=True)\n return result\n\n\n@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)\nclass DataFrameGroupBy(GroupBy[DataFrame]):\n\n _apply_allowlist = base.dataframe_apply_allowlist\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {\n ... \"A\": [1, 1, 2, 2],\n ... \"B\": [1, 2, 3, 4],\n ... \"C\": [0.362838, 0.227877, 1.267767, -0.562860],\n ... }\n ... )\n\n >>> df\n A B C\n 0 1 1 0.362838\n 1 1 2 0.227877\n 2 2 3 1.267767\n 3 2 4 -0.562860\n\n The aggregation is for each column.\n\n >>> df.groupby('A').agg('min')\n B C\n A\n 1 1 0.227877\n 2 3 -0.562860\n\n Multiple aggregations\n\n >>> df.groupby('A').agg(['min', 'max'])\n B C\n min max min max\n A\n 1 1 2 0.227877 0.362838\n 2 3 4 -0.562860 1.267767\n\n Select a column for aggregation\n\n >>> df.groupby('A').B.agg(['min', 'max'])\n min max\n A\n 1 1 2\n 2 3 4\n\n Different aggregations per column\n\n >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})\n B C\n min max sum\n A\n 1 1 2 0.590715\n 2 3 4 0.704907\n\n To control the output names with different aggregations per column,\n pandas supports \"named aggregation\"\n\n >>> df.groupby(\"A\").agg(\n ... b_min=pd.NamedAgg(column=\"B\", aggfunc=\"min\"),\n ... 
c_sum=pd.NamedAgg(column=\"C\", aggfunc=\"sum\"))\n b_min c_sum\n A\n 1 1 0.590715\n 2 3 0.704907\n\n - The keywords are the *output* column names\n - The values are tuples whose first element is the column to select\n and the second element is the aggregation to apply to that column.\n Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields\n ``['column', 'aggfunc']`` to make it clearer what the arguments are.\n As usual, the aggregation can be a callable or a string alias.\n\n See :ref:`groupby.aggregate.named` for more.\n\n .. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the aggregating function.\n\n >>> df.groupby(\"A\")[[\"B\"]].agg(lambda x: x.astype(float).min())\n B\n A\n 1 1.0\n 2 3.0\n \"\"\"\n )\n\n @doc(_agg_template, examples=_agg_examples_doc, klass=\"DataFrame\")\n def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):\n\n if maybe_use_numba(engine):\n with self._group_selection_context():\n data = self._selected_obj\n result = self._aggregate_with_numba(\n data, func, *args, engine_kwargs=engine_kwargs, **kwargs\n )\n index = self.grouper.result_index\n return self.obj._constructor(result, index=index, columns=data.columns)\n\n relabeling, func, columns, order = reconstruct_func(func, **kwargs)\n func = maybe_mangle_lambdas(func)\n\n op = GroupByApply(self, func, args, kwargs)\n result = op.agg()\n if not is_dict_like(func) and result is not None:\n return result\n elif relabeling and result is not None:\n # this should be the only (non-raising) case with relabeling\n # used reordered index of columns\n result = result.iloc[:, order]\n result.columns = columns\n\n if result is None:\n\n # grouper specific aggregations\n if self.grouper.nkeys > 1:\n # test_groupby_as_index_series_scalar gets here with 'not self.as_index'\n return self._python_agg_general(func, *args, **kwargs)\n elif args or kwargs:\n # test_pass_args_kwargs gets here (with and without as_index)\n # can't return early\n result = self._aggregate_frame(func, *args, **kwargs)\n\n elif self.axis == 1:\n # _aggregate_multiple_funcs does not allow self.axis == 1\n # Note: axis == 1 precludes 'not self.as_index', see __init__\n result = self._aggregate_frame(func)\n return result\n\n else:\n\n # try to treat as if we are passing a list\n gba = GroupByApply(self, [func], args=(), kwargs={})\n try:\n result = gba.agg()\n\n except ValueError as err:\n if \"no results\" not in str(err):\n # raised directly by _aggregate_multiple_funcs\n raise\n result = self._aggregate_frame(func)\n\n else:\n sobj = self._selected_obj\n\n if isinstance(sobj, Series):\n # GH#35246 test_groupby_as_index_select_column_sum_empty_df\n result.columns = self._obj_with_exclusions.columns.copy()\n else:\n # Retain our column names\n result.columns._set_names(\n sobj.columns.names, level=list(range(sobj.columns.nlevels))\n )\n # select everything except for the last level, which is the one\n # containing the name of the function(s), see GH#32040\n result.columns = result.columns.droplevel(-1)\n\n if not self.as_index:\n self._insert_inaxis_grouper_inplace(result)\n result.index = Index(range(len(result)))\n\n return result\n\n agg = aggregate\n\n def _iterate_slices(self) -> Iterable[Series]:\n obj = self._selected_obj\n if self.axis == 1:\n obj = obj.T\n\n if isinstance(obj, Series) and obj.name not in self.exclusions:\n # Occurs when doing DataFrameGroupBy(...)[\"X\"]\n yield obj\n else:\n for label, values in obj.items():\n if label in self.exclusions:\n continue\n\n 
yield values\n\n def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:\n if self.grouper.nkeys != 1:\n raise AssertionError(\"Number of keys must be 1\")\n\n obj = self._obj_with_exclusions\n\n result: dict[Hashable, NDFrame | np.ndarray] = {}\n if self.axis == 0:\n # test_pass_args_kwargs_duplicate_columns gets here with non-unique columns\n for name, data in self:\n fres = func(data, *args, **kwargs)\n result[name] = fres\n else:\n # we get here in a number of test_multilevel tests\n for name in self.indices:\n grp_df = self.get_group(name, obj=obj)\n fres = func(grp_df, *args, **kwargs)\n result[name] = fres\n\n result_index = self.grouper.result_index\n other_ax = obj.axes[1 - self.axis]\n out = self.obj._constructor(result, index=other_ax, columns=result_index)\n if self.axis == 0:\n out = out.T\n\n return out\n\n def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:\n # only for axis==0\n # tests that get here with non-unique cols:\n # test_resample_with_timedelta_yields_no_empty_groups,\n # test_resample_apply_product\n\n obj = self._obj_with_exclusions\n result: dict[int, NDFrame] = {}\n\n for i, (item, sgb) in enumerate(self._iterate_column_groupbys(obj)):\n result[i] = sgb.aggregate(func, *args, **kwargs)\n\n res_df = self.obj._constructor(result)\n res_df.columns = obj.columns\n return res_df\n\n def _wrap_applied_output(\n self,\n data: DataFrame,\n values: list,\n not_indexed_same: bool = False,\n override_group_keys: bool = False,\n ):\n\n if len(values) == 0:\n result = self.obj._constructor(\n index=self.grouper.result_index, columns=data.columns\n )\n result = result.astype(data.dtypes, copy=False)\n return result\n\n # GH12824\n first_not_none = next(com.not_none(*values), None)\n\n if first_not_none is None:\n # GH9684 - All values are None, return an empty frame.\n return self.obj._constructor()\n elif isinstance(first_not_none, DataFrame):\n return self._concat_objects(\n values,\n not_indexed_same=not_indexed_same,\n override_group_keys=override_group_keys,\n )\n\n key_index = self.grouper.result_index if self.as_index else None\n\n if isinstance(first_not_none, (np.ndarray, Index)):\n # GH#1738: values is list of arrays of unequal lengths\n # fall through to the outer else clause\n # TODO: sure this is right? 
we used to do this\n # after raising AttributeError above\n return self.obj._constructor_sliced(\n values, index=key_index, name=self._selection\n )\n elif not isinstance(first_not_none, Series):\n # values are not series or array-like but scalars\n # self._selection not passed through to Series as the\n # result should not take the name of original selection\n # of columns\n if self.as_index:\n return self.obj._constructor_sliced(values, index=key_index)\n else:\n result = self.obj._constructor(values, columns=[self._selection])\n self._insert_inaxis_grouper_inplace(result)\n return result\n else:\n # values are Series\n return self._wrap_applied_output_series(\n values,\n not_indexed_same,\n first_not_none,\n key_index,\n override_group_keys,\n )\n\n def _wrap_applied_output_series(\n self,\n values: list[Series],\n not_indexed_same: bool,\n first_not_none,\n key_index,\n override_group_keys: bool,\n ) -> DataFrame | Series:\n # this is to silence a DeprecationWarning\n # TODO(2.0): Remove when default dtype of empty Series is object\n kwargs = first_not_none._construct_axes_dict()\n backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)\n values = [x if (x is not None) else backup for x in values]\n\n all_indexed_same = all_indexes_same(x.index for x in values)\n\n # GH3596\n # provide a reduction (Frame -> Series) if groups are\n # unique\n if self.squeeze:\n applied_index = self._selected_obj._get_axis(self.axis)\n singular_series = len(values) == 1 and applied_index.nlevels == 1\n\n if singular_series:\n # GH2893\n # we have series in the values array, we want to\n # produce a series:\n # if any of the sub-series are not indexed the same\n # OR we don't have a multi-index and we have only a\n # single values\n return self._concat_objects(\n values,\n not_indexed_same=not_indexed_same,\n override_group_keys=override_group_keys,\n )\n\n # still a series\n # path added as of GH 5545\n elif all_indexed_same:\n from pandas.core.reshape.concat import concat\n\n return concat(values)\n\n if not all_indexed_same:\n # GH 8467\n return self._concat_objects(\n values,\n not_indexed_same=True,\n override_group_keys=override_group_keys,\n )\n\n # Combine values\n # vstack+constructor is faster than concat and handles MI-columns\n stacked_values = np.vstack([np.asarray(v) for v in values])\n\n if self.axis == 0:\n index = key_index\n columns = first_not_none.index.copy()\n if columns.name is None:\n # GH6124 - propagate name of Series when it's consistent\n names = {v.name for v in values}\n if len(names) == 1:\n columns.name = list(names)[0]\n else:\n index = first_not_none.index\n columns = key_index\n stacked_values = stacked_values.T\n\n if stacked_values.dtype == object:\n # We'll have the DataFrame constructor do inference\n stacked_values = stacked_values.tolist()\n result = self.obj._constructor(stacked_values, index=index, columns=columns)\n\n if not self.as_index:\n self._insert_inaxis_grouper_inplace(result)\n\n return self._reindex_output(result)\n\n def _cython_transform(\n self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs\n ) -> DataFrame:\n assert axis == 0 # handled by caller\n # TODO: no tests with self.ndim == 1 for DataFrameGroupBy\n\n # With self.axis == 0, we have multi-block tests\n # e.g. 
test_rank_min_int, test_cython_transform_frame\n # test_transform_numeric_ret\n # With self.axis == 1, _get_data_to_aggregate does a transpose\n # so we always have a single block.\n mgr: Manager2D = self._get_data_to_aggregate()\n if numeric_only:\n mgr = mgr.get_numeric_data(copy=False)\n\n def arr_func(bvalues: ArrayLike) -> ArrayLike:\n return self.grouper._cython_operation(\n \"transform\", bvalues, how, 1, **kwargs\n )\n\n # We could use `mgr.apply` here and not have to set_axis, but\n # we would have to do shape gymnastics for ArrayManager compat\n res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True)\n res_mgr.set_axis(1, mgr.axes[1])\n\n if len(res_mgr) < len(mgr):\n warn_dropping_nuisance_columns_deprecated(type(self), how)\n\n res_df = self.obj._constructor(res_mgr)\n if self.axis == 1:\n res_df = res_df.T\n return res_df\n\n def _transform_general(self, func, *args, **kwargs):\n from pandas.core.reshape.concat import concat\n\n applied = []\n obj = self._obj_with_exclusions\n gen = self.grouper.get_iterator(obj, axis=self.axis)\n fast_path, slow_path = self._define_paths(func, *args, **kwargs)\n\n # Determine whether to use slow or fast path by evaluating on the first group.\n # Need to handle the case of an empty generator and process the result so that\n # it does not need to be computed again.\n try:\n name, group = next(gen)\n except StopIteration:\n pass\n else:\n object.__setattr__(group, \"name\", name)\n try:\n path, res = self._choose_path(fast_path, slow_path, group)\n except TypeError:\n return self._transform_item_by_item(obj, fast_path)\n except ValueError as err:\n msg = \"transform must return a scalar value for each group\"\n raise ValueError(msg) from err\n if group.size > 0:\n res = _wrap_transform_general_frame(self.obj, group, res)\n applied.append(res)\n\n # Compute and process with the remaining groups\n for name, group in gen:\n if group.size == 0:\n continue\n object.__setattr__(group, \"name\", name)\n res = path(group)\n res = _wrap_transform_general_frame(self.obj, group, res)\n applied.append(res)\n\n concat_index = obj.columns if self.axis == 0 else obj.index\n other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1\n concatenated = concat(applied, axis=self.axis, verify_integrity=False)\n concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)\n return self._set_result_index_ordered(concatenated)\n\n @Substitution(klass=\"DataFrame\")\n @Appender(_transform_template)\n def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):\n return self._transform(\n func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs\n )\n\n def _define_paths(self, func, *args, **kwargs):\n if isinstance(func, str):\n fast_path = lambda group: getattr(group, func)(*args, **kwargs)\n slow_path = lambda group: group.apply(\n lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis\n )\n else:\n fast_path = lambda group: func(group, *args, **kwargs)\n slow_path = lambda group: group.apply(\n lambda x: func(x, *args, **kwargs), axis=self.axis\n )\n return fast_path, slow_path\n\n def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):\n path = slow_path\n res = slow_path(group)\n\n if self.ngroups == 1:\n # no need to evaluate multiple paths when only\n # a single group exists\n return path, res\n\n # if we make it here, test if we can use the fast path\n try:\n res_fast = fast_path(group)\n except AssertionError:\n raise # pragma: no cover\n except Exception:\n # GH#29631 For 
user-defined function, we can't predict what may be\n # raised; see test_transform.test_transform_fastpath_raises\n return path, res\n\n # verify fast path returns either:\n # a DataFrame with columns equal to group.columns\n # OR a Series with index equal to group.columns\n if isinstance(res_fast, DataFrame):\n if not res_fast.columns.equals(group.columns):\n return path, res\n elif isinstance(res_fast, Series):\n if not res_fast.index.equals(group.columns):\n return path, res\n else:\n return path, res\n\n if res_fast.equals(res):\n path = fast_path\n\n return path, res\n\n def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:\n # iterate through columns, see test_transform_exclude_nuisance\n # gets here with non-unique columns\n output = {}\n inds = []\n for i, (colname, sgb) in enumerate(self._iterate_column_groupbys(obj)):\n try:\n output[i] = sgb.transform(wrapper)\n except TypeError:\n # e.g. trying to call nanmean with string values\n warn_dropping_nuisance_columns_deprecated(type(self), \"transform\")\n else:\n inds.append(i)\n\n if not output:\n raise TypeError(\"Transform function invalid for data types\")\n\n columns = obj.columns.take(inds)\n\n result = self.obj._constructor(output, index=obj.index)\n result.columns = columns\n return result\n\n def filter(self, func, dropna=True, *args, **kwargs):\n \"\"\"\n Return a copy of a DataFrame excluding filtered elements.\n\n Elements from groups are filtered if they do not satisfy the\n boolean criterion specified by func.\n\n Parameters\n ----------\n func : function\n Function to apply to each subframe. Should return True or False.\n dropna : Drop groups that do not pass the filter. True by default;\n If False, groups that evaluate False are filled with NaNs.\n\n Returns\n -------\n filtered : DataFrame\n\n Notes\n -----\n Each subframe is endowed the attribute 'name' in case you need to know\n which group you are working on.\n\n Functions that mutate the passed object can produce unexpected\n behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\n for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 
'C' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')\n >>> grouped.filter(lambda x: x['B'].mean() > 3.)\n A B C\n 1 bar 2 5.0\n 3 bar 4 1.0\n 5 bar 6 9.0\n \"\"\"\n indices = []\n\n obj = self._selected_obj\n gen = self.grouper.get_iterator(obj, axis=self.axis)\n\n for name, group in gen:\n object.__setattr__(group, \"name\", name)\n\n res = func(group, *args, **kwargs)\n\n try:\n res = res.squeeze()\n except AttributeError: # allow e.g., scalars and frames to pass\n pass\n\n # interpret the result of the filter\n if is_bool(res) or (is_scalar(res) and isna(res)):\n if res and notna(res):\n indices.append(self._get_index(name))\n else:\n # non scalars aren't allowed\n raise TypeError(\n f\"filter function returned a {type(res).__name__}, \"\n \"but expected a scalar bool\"\n )\n\n return self._apply_filter(indices, dropna)\n\n def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:\n if self.axis == 1:\n # GH 37725\n raise ValueError(\"Cannot subset columns when using axis=1\")\n # per GH 23566\n if isinstance(key, tuple) and len(key) > 1:\n # if len == 1, then it becomes a SeriesGroupBy and this is actually\n # valid syntax, so don't raise warning\n warnings.warn(\n \"Indexing with multiple keys (implicitly converted to a tuple \"\n \"of keys) will be deprecated, use a list instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return super().__getitem__(key)\n\n def _gotitem(self, key, ndim: int, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : {1, 2}\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n if ndim == 2:\n if subset is None:\n subset = self.obj\n return DataFrameGroupBy(\n subset,\n self.grouper,\n axis=self.axis,\n level=self.level,\n grouper=self.grouper,\n exclusions=self.exclusions,\n selection=key,\n as_index=self.as_index,\n sort=self.sort,\n group_keys=self.group_keys,\n squeeze=self.squeeze,\n observed=self.observed,\n mutated=self.mutated,\n dropna=self.dropna,\n )\n elif ndim == 1:\n if subset is None:\n subset = self.obj[key]\n return SeriesGroupBy(\n subset,\n level=self.level,\n grouper=self.grouper,\n selection=key,\n sort=self.sort,\n group_keys=self.group_keys,\n squeeze=self.squeeze,\n observed=self.observed,\n dropna=self.dropna,\n )\n\n raise AssertionError(\"invalid ndim for _gotitem\")\n\n def _get_data_to_aggregate(self) -> Manager2D:\n obj = self._obj_with_exclusions\n if self.axis == 1:\n return obj.T._mgr\n else:\n return obj._mgr\n\n def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:\n # zip in reverse so we can always insert at loc 0\n columns = result.columns\n for name, lev, in_axis in zip(\n reversed(self.grouper.names),\n reversed(self.grouper.get_group_levels()),\n reversed([grp.in_axis for grp in self.grouper.groupings]),\n ):\n # GH #28549\n # When using .apply(-), name will be in columns already\n if in_axis and name not in columns:\n result.insert(0, name, lev)\n\n def _indexed_output_to_ndframe(\n self, output: Mapping[base.OutputKey, ArrayLike]\n ) -> DataFrame:\n \"\"\"\n Wrap the dict result of a GroupBy aggregation into a DataFrame.\n \"\"\"\n indexed_output = {key.position: val for key, val in output.items()}\n columns = Index([key.label for key in output])\n columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)\n\n result = self.obj._constructor(indexed_output)\n result.columns = columns\n return result\n\n def 
_wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:\n if not self.as_index:\n # GH 41998 - empty mgr always gets index of length 0\n rows = mgr.shape[1] if mgr.shape[0] > 0 else 0\n index = Index(range(rows))\n mgr.set_axis(1, index)\n result = self.obj._constructor(mgr)\n\n self._insert_inaxis_grouper_inplace(result)\n result = result._consolidate()\n else:\n index = self.grouper.result_index\n mgr.set_axis(1, index)\n result = self.obj._constructor(mgr)\n\n if self.axis == 1:\n result = result.T\n\n # Note: we only need to pass datetime=True in order to get numeric\n # values converted\n return self._reindex_output(result)._convert(datetime=True)\n\n def _iterate_column_groupbys(self, obj: DataFrame | Series):\n for i, colname in enumerate(obj.columns):\n yield colname, SeriesGroupBy(\n obj.iloc[:, i],\n selection=colname,\n grouper=self.grouper,\n exclusions=self.exclusions,\n observed=self.observed,\n )\n\n def _apply_to_column_groupbys(self, func, obj: DataFrame | Series) -> DataFrame:\n from pandas.core.reshape.concat import concat\n\n columns = obj.columns\n results = [\n func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj)\n ]\n\n if not len(results):\n # concat would raise\n return DataFrame([], columns=columns, index=self.grouper.result_index)\n else:\n return concat(results, keys=columns, axis=1)\n\n def nunique(self, dropna: bool = True) -> DataFrame:\n \"\"\"\n Return DataFrame with counts of unique elements in each position.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n nunique: DataFrame\n\n Examples\n --------\n >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',\n ... 'ham', 'ham'],\n ... 'value1': [1, 5, 5, 2, 5, 5],\n ... 'value2': list('abbaxy')})\n >>> df\n id value1 value2\n 0 spam 1 a\n 1 egg 5 b\n 2 egg 5 b\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n\n >>> df.groupby('id').nunique()\n value1 value2\n id\n egg 1 1\n ham 1 2\n spam 2 1\n\n Check for rows with the same id but conflicting values:\n\n >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())\n id value1 value2\n 0 spam 1 a\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n \"\"\"\n\n if self.axis != 0:\n # see test_groupby_crash_on_nunique\n return self._python_agg_general(lambda sgb: sgb.nunique(dropna))\n\n obj = self._obj_with_exclusions\n results = self._apply_to_column_groupbys(\n lambda sgb: sgb.nunique(dropna), obj=obj\n )\n\n if not self.as_index:\n results.index = Index(range(len(results)))\n self._insert_inaxis_grouper_inplace(results)\n\n return results\n\n @doc(\n _shared_docs[\"idxmax\"],\n numeric_only_default=\"True for axis=0, False for axis=1\",\n )\n def idxmax(self, axis=0, skipna: bool = True, numeric_only: bool | None = None):\n axis = DataFrame._get_axis_number(axis)\n if numeric_only is None:\n numeric_only = None if axis == 0 else False\n\n def func(df):\n # NB: here we use numeric_only=None, in DataFrame it is False GH#38217\n res = df._reduce(\n nanops.nanargmax,\n \"argmax\",\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n )\n indices = res._values\n index = df._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return df._constructor_sliced(result, index=res.index)\n\n func.__name__ = \"idxmax\"\n return self._python_apply_general(func, self._obj_with_exclusions)\n\n @doc(\n _shared_docs[\"idxmin\"],\n numeric_only_default=\"True for axis=0, False for axis=1\",\n )\n def idxmin(self, axis=0, skipna: bool = True, numeric_only: bool | None = 
None):\n axis = DataFrame._get_axis_number(axis)\n if numeric_only is None:\n numeric_only = None if axis == 0 else False\n\n def func(df):\n # NB: here we use numeric_only=None, in DataFrame it is False GH#46560\n res = df._reduce(\n nanops.nanargmin,\n \"argmin\",\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n )\n indices = res._values\n index = df._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return df._constructor_sliced(result, index=res.index)\n\n func.__name__ = \"idxmin\"\n return self._python_apply_general(func, self._obj_with_exclusions)\n\n boxplot = boxplot_frame_groupby\n\n def value_counts(\n self,\n subset: Sequence[Hashable] | None = None,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n dropna: bool = True,\n ) -> DataFrame | Series:\n \"\"\"\n Return a Series or DataFrame containing counts of unique rows.\n\n .. versionadded:: 1.4.0\n\n Parameters\n ----------\n subset : list-like, optional\n Columns to use when counting unique combinations.\n normalize : bool, default False\n Return proportions rather than frequencies.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n dropna : bool, default True\n Don’t include counts of rows that contain NA values.\n\n Returns\n -------\n Series or DataFrame\n Series if the groupby as_index is True, otherwise DataFrame.\n\n See Also\n --------\n Series.value_counts: Equivalent method on Series.\n DataFrame.value_counts: Equivalent method on DataFrame.\n SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.\n\n Notes\n -----\n - If the groupby as_index is True then the returned Series will have a\n MultiIndex with one level per input column.\n - If the groupby as_index is False then the returned DataFrame will have an\n additional column with the value_counts. The column is labelled 'count' or\n 'proportion', depending on the ``normalize`` parameter.\n\n By default, rows that contain any NA values are omitted from\n the result.\n\n By default, the result will be in descending order so that the\n first element of each group is the most frequently-occurring row.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],\n ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],\n ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']\n ... 
})\n\n >>> df\n gender education country\n 0 male low US\n 1 male medium FR\n 2 female high US\n 3 male low FR\n 4 female high FR\n 5 male low FR\n\n >>> df.groupby('gender').value_counts()\n gender education country\n female high FR 1\n US 1\n male low FR 2\n US 1\n medium FR 1\n dtype: int64\n\n >>> df.groupby('gender').value_counts(ascending=True)\n gender education country\n female high FR 1\n US 1\n male low US 1\n medium FR 1\n low FR 2\n dtype: int64\n\n >>> df.groupby('gender').value_counts(normalize=True)\n gender education country\n female high FR 0.50\n US 0.50\n male low FR 0.50\n US 0.25\n medium FR 0.25\n dtype: float64\n\n >>> df.groupby('gender', as_index=False).value_counts()\n gender education country count\n 0 female high FR 1\n 1 female high US 1\n 2 male low FR 2\n 3 male low US 1\n 4 male medium FR 1\n\n >>> df.groupby('gender', as_index=False).value_counts(normalize=True)\n gender education country proportion\n 0 female high FR 0.50\n 1 female high US 0.50\n 2 male low FR 0.50\n 3 male low US 0.25\n 4 male medium FR 0.25\n \"\"\"\n if self.axis == 1:\n raise NotImplementedError(\n \"DataFrameGroupBy.value_counts only handles axis=0\"\n )\n\n with self._group_selection_context():\n df = self.obj\n\n in_axis_names = {\n grouping.name for grouping in self.grouper.groupings if grouping.in_axis\n }\n if isinstance(self._selected_obj, Series):\n name = self._selected_obj.name\n keys = [] if name in in_axis_names else [self._selected_obj]\n else:\n keys = [\n # Can't use .values because the column label needs to be preserved\n self._selected_obj.iloc[:, idx]\n for idx, name in enumerate(self._selected_obj.columns)\n if name not in in_axis_names\n ]\n\n if subset is not None:\n clashing = set(subset) & set(in_axis_names)\n if clashing:\n raise ValueError(\n f\"Keys {clashing} in subset cannot be in \"\n \"the groupby column keys\"\n )\n\n groupings = list(self.grouper.groupings)\n for key in keys:\n grouper, _, _ = get_grouper(\n df,\n key=key,\n axis=self.axis,\n sort=self.sort,\n dropna=dropna,\n )\n groupings += list(grouper.groupings)\n\n # Take the size of the overall columns\n gb = df.groupby(\n groupings,\n sort=self.sort,\n observed=self.observed,\n dropna=self.dropna,\n )\n result_series = cast(Series, gb.size())\n\n if normalize:\n # Normalize the results by dividing by the original group sizes.\n # We are guaranteed to have the first N levels be the\n # user-requested grouping.\n levels = list(\n range(len(self.grouper.groupings), result_series.index.nlevels)\n )\n indexed_group_size = result_series.groupby(\n result_series.index.droplevel(levels),\n sort=self.sort,\n observed=self.observed,\n dropna=self.dropna,\n ).transform(\"sum\")\n\n result_series /= indexed_group_size\n\n if sort:\n # Sort the values and then resort by the main grouping\n index_level = range(len(self.grouper.groupings))\n result_series = result_series.sort_values(\n ascending=ascending\n ).sort_index(level=index_level, sort_remaining=False)\n\n result: Series | DataFrame\n if self.as_index:\n result = result_series\n else:\n # Convert to frame\n name = \"proportion\" if normalize else \"count\"\n index = result_series.index\n columns = com.fill_missing_names(index.names)\n if name in columns:\n raise ValueError(\n f\"Column label '{name}' is duplicate of result column\"\n )\n result_series.name = name\n result_series.index = index.set_names(range(len(columns)))\n result_frame = result_series.reset_index()\n result_frame.columns = columns + [name]\n result = result_frame\n return 
result.__finalize__(self.obj, method=\"value_counts\")\n\n\ndef _wrap_transform_general_frame(\n obj: DataFrame, group: DataFrame, res: DataFrame | Series\n) -> DataFrame:\n from pandas import concat\n\n if isinstance(res, Series):\n # we need to broadcast across the\n # other dimension; this will preserve dtypes\n # GH14457\n if res.index.is_(obj.index):\n res_frame = concat([res] * len(group.columns), axis=1)\n res_frame.columns = group.columns\n res_frame.index = group.index\n else:\n res_frame = obj._constructor(\n np.tile(res.values, (len(group.index), 1)),\n columns=group.columns,\n index=group.index,\n )\n assert isinstance(res_frame, DataFrame)\n return res_frame\n else:\n return res\n"
] | [
[
"pandas.core.frame.DataFrame",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.apply.validate_func_kwargs",
"pandas.core.reshape.concat.concat",
"numpy.asarray",
"pandas.core.reshape.merge.get_join_indexers",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.common.fill_missing_names",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.groupby.grouper.get_grouper",
"numpy.iterable",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.core.indexes.api.Index",
"pandas.core.dtypes.missing.notna",
"pandas.core.apply.GroupByApply",
"numpy.where",
"numpy.nonzero",
"numpy.flatnonzero",
"pandas.util._exceptions.find_stack_level",
"pandas.core.apply.maybe_mangle_lambdas",
"pandas.core.dtypes.common.is_interval_dtype",
"numpy.add.at",
"pandas.core.indexes.api.MultiIndex",
"numpy.repeat",
"numpy.lexsort",
"pandas.core.common.get_cython_func",
"pandas.core.groupby.base.OutputKey",
"numpy.arange",
"pandas.core.apply.reconstruct_func",
"pandas.core.algorithms.factorize",
"numpy.add.reduceat",
"pandas.core.common.get_callable_name",
"pandas.core.base.SpecificationError",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_bool",
"pandas.core.dtypes.missing.isna",
"pandas.util._decorators.Substitution",
"pandas.util._decorators.Appender",
"pandas._libs.reduction.check_result_array",
"pandas.core.common.not_none",
"pandas.core.series.Series",
"pandas.core.indexes.api.all_indexes_same",
"pandas.core.util.numba_.maybe_use_numba",
"pandas.core.frame.DataFrame._get_axis_number",
"pandas._libs.reduction.extract_result",
"pandas.core.dtypes.common.is_integer_dtype"
]
] |
sandeepsinghsengar/MPUNet2Plus | [
"fd97800cd349ee47d2c9cce1851a332dcbcb047c"
] | [
"mpunet/callbacks/callbacks.py"
] | [
"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport psutil\nimport numpy as np\nimport os\nfrom tensorflow.keras.callbacks import Callback\nfrom datetime import datetime\nfrom mpunet.logging import ScreenLogger\nfrom mpunet.utils.plotting import (imshow_with_label_overlay, imshow,\n plot_all_training_curves)\n\n\nclass DividerLine(Callback):\n \"\"\"\n Simply prints a line to screen after each epoch\n \"\"\"\n def __init__(self, logger=None):\n \"\"\"\n Args:\n logger: An instance of a MultiPlanar Logger that prints to screen\n and/or file\n \"\"\"\n super().__init__()\n self.logger = logger or ScreenLogger()\n\n def on_epoch_end(self, epoch, logs=None):\n self.logger(\"-\"*45 + \"\\n\")\n\n\nclass LearningCurve(Callback):\n \"\"\"\n On epoch end this callback looks for all csv files matching the 'csv_regex'\n regex within the dir 'out_dir' and attempts to create a learning curve for\n each file that will be saved to 'out_dir'.\n\n Note: Failure to plot a learning curve based on a given csv file will\n is handled in the plot_all_training_curves function and will not\n cause the LearningCurve callback to raise an exception.\n \"\"\"\n def __init__(self, log_dir=\"logs\", out_dir=\"logs\", fname=\"curve.png\",\n csv_regex=\"*training.csv\", logger=None, **plot_kwargs):\n \"\"\"\n Args:\n log_dir: Relative path from the\n out_dir:\n fname:\n csv_regex:\n logger:\n \"\"\"\n super().__init__()\n out_dir = os.path.abspath(out_dir)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n self.csv_regex = os.path.join(os.path.abspath(log_dir), csv_regex)\n self.save_path = os.path.join(out_dir, fname)\n self.logger = logger or ScreenLogger()\n self.plot_kwargs = plot_kwargs\n\n def on_epoch_end(self, epoch, logs={}):\n plot_all_training_curves(self.csv_regex,\n self.save_path,\n logy=True,\n raise_error=False,\n logger=self.logger,\n **self.plot_kwargs)\n\n\nclass MemoryConsumption(Callback):\n def __init__(self, max_gib=None, round_=2, logger=None):\n self.max_gib = max_gib\n self.logger = logger\n self.round_ = round_\n\n def on_epoch_end(self, epoch, logs={}):\n process = psutil.Process(os.getpid())\n mem_bytes = process.memory_info().rss\n mem_gib = round(mem_bytes / (1024**3), self.round_)\n logs['memory_usage_gib'] = mem_gib\n if self.max_gib and mem_gib >= self.max_gib:\n self.warn(\"Stopping training from callback 'MemoryConsumption'! 
\"\n \"Total memory consumption of {} GiB exceeds limitation\"\n \" (self.max_gib = {}) \".format(mem_gib, self.max_gib))\n self.model.stop_training = True\n\n\nclass DelayedCallback(object):\n \"\"\"\n Callback wrapper that delays the functionality of another callback by N\n number of epochs.\n \"\"\"\n def __init__(self, callback, start_from=0, logger=None):\n \"\"\"\n Args:\n callback: A tf.keras callback\n start_from: Delay the activity of 'callback' until this epoch\n 'start_from'\n logger: An instance of a MultiPlanar Logger that prints to screen\n and/or file\n \"\"\"\n self.logger = logger or ScreenLogger()\n self.callback = callback\n self.start_from = start_from\n\n def __getattr__(self, item):\n return getattr(self.callback, item)\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch >= self.start_from-1:\n self.callback.on_epoch_end(epoch, logs=logs)\n else:\n self.logger(\"[%s] Not active at epoch %i - will be at %i\" %\n (self.callback.__class__.__name__,\n epoch+1, self.start_from))\n\n\nclass TrainTimer(Callback):\n \"\"\"\n Appends train timing information to the log.\n If called prior to tf.keras.callbacks.CSVLogger this information will\n be written to disk.\n \"\"\"\n def __init__(self, logger=None, max_minutes=None, verbose=1):\n super().__init__()\n self.logger = logger or ScreenLogger()\n self.max_minutes = int(max_minutes) if max_minutes else None\n self.verbose = bool(verbose)\n\n # Timing attributes\n self.train_begin_time = None\n self.prev_epoch_time = None\n\n def on_train_begin(self, logs=None):\n self.train_begin_time = datetime.now()\n\n def on_epoch_begin(self, epoch, logs=None):\n self.prev_epoch_time = datetime.now()\n\n def on_epoch_end(self, epoch, logs=None):\n # Compute epoch execution time\n end_time = datetime.now()\n epoch_time = end_time - self.prev_epoch_time\n train_time = end_time - self.train_begin_time\n\n # Update attributes\n self.prev_epoch_time = end_time\n\n # Add to logs\n train_hours = round(train_time.total_seconds() / 3600, 4)\n epoch_minutes = round(epoch_time.total_seconds() / 60, 4)\n logs[\"epoch_minutes\"] = epoch_minutes\n logs[\"train_hours\"] = train_hours\n\n if self.verbose:\n self.logger(\"[TrainTimer] Epoch time: %.2f minutes \"\n \"- Total train time: %.2f hours\"\n % (epoch_minutes, train_hours))\n if self.max_minutes and train_hours*60 > self.max_minutes:\n self.logger(\"Stopping training. Training ran for {} minutes, \"\n \"max_minutes of {} was specified on the TrainTimer \"\n \"callback.\".format(train_hours*60, self.max_minutes))\n self.model.stop_training = True\n\n\nclass FGBatchBalancer(Callback):\n \"\"\"\n mpunet callback.\n\n Sets the forced FG fraction in a batch at each epoch to 1-recall over the\n validation data at the previous epoch\n \"\"\"\n def __init__(self, train_data, val_data=None, logger=None):\n \"\"\"\n Args:\n train_data: A mpunet.sequence object representing the\n training data\n val_data: A mpunet.sequence object representing the\n validation data\n logger: An instance of a MultiPlanar Logger that prints to screen\n and/or file\n \"\"\"\n super().__init__()\n self.data = ((\"train\", train_data), (\"val\", val_data))\n self.logger = logger or ScreenLogger()\n self.active = True\n\n def on_epoch_end(self, epoch, logs=None):\n if not self.active:\n return None\n\n recall = logs.get(\"val_recall\")\n if recall is None:\n self.logger(\"[FGBatchBalancer] No val_recall in logs. \"\n \"Disabling callback. 
\"\n \"Did you put this callback before the validation \"\n \"callback?\")\n self.active = False\n else:\n # Always at least 1 image slice\n fraction = max(0.01, 1 - recall)\n for name, data in self.data:\n if data is not None:\n data.fg_batch_fraction = fraction\n self.logger(\"[FGBatchBalancer] Setting FG fraction for %s \"\n \"to: %.4f - Now %s/%s\" % (name,\n fraction,\n data.n_fg_slices,\n data.batch_size))\n\n\nclass MeanReduceLogArrays(Callback):\n \"\"\"\n On epoch end, goes through the log and replaces any array entries with\n their mean value.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def on_epoch_end(self, epoch, logs={}):\n for key, value in logs.items():\n if isinstance(value, (np.ndarray, list)):\n logs[key] = np.mean(value)\n\n\nclass PrintLayerWeights(Callback):\n \"\"\"\n Print the weights of a specified layer every some epoch or batch.\n \"\"\"\n def __init__(self, layer, every=10, first=10, per_epoch=False, logger=None):\n \"\"\"\n Args:\n layer: A tf.keras layer\n every: Print the weights every 'every' batch or epoch if\n per_epoch=True\n first: Print the first 'first' elements of each weight matrix\n per_epoch: Print after 'every' epoch instead of batch\n logger: An instance of a MultiPlanar Logger that prints to screen\n and/or file\n \"\"\"\n super().__init__()\n if isinstance(layer, int):\n self.layer = self.model.layers[layer]\n else:\n self.layer = layer\n self.first = first\n self.every = every\n self.logger = logger or ScreenLogger()\n\n self.per_epoch = per_epoch\n if per_epoch:\n # Apply on every epoch instead of per batches\n self.on_epoch_begin = self.on_batch_begin\n self.on_batch_begin = lambda *args, **kwargs: None\n self.log()\n\n def log(self):\n self.logger(\"PrintLayerWeights Callback\")\n self.logger(\"Layer: \", self.layer)\n self.logger(\"Every: \", self.every)\n self.logger(\"First: \", self.first)\n self.logger(\"Per epoch: \", self.per_epoch)\n\n def on_batch_begin(self, batch, logs=None):\n if batch % self.every:\n return\n weights = self.layer.get_weights()\n self.logger(\"Weights for layer '%s'\" % self.layer)\n self.logger(\"Weights:\\n%s\" % weights[0].ravel()[:self.first])\n try:\n self.logger(\"Baises:\\n%s\" % weights[1].ravel()[:self.first])\n except IndexError:\n pass\n\n\nclass SaveOutputAs2DImage(Callback):\n \"\"\"\n Save random 2D slices from the output of a given layer during training.\n \"\"\"\n def __init__(self, layer, sequence, model, out_dir, every=10, logger=None):\n \"\"\"\n Args:\n layer: A tf.keras layer\n sequence: A MultiPlanar.sequence object from which batches are\n sampled and pushed through the graph to output of layer\n model: A tf.keras model object\n out_dir: Path to directory (existing or non-existing) in which\n images will be stored\n every: Perform this operation every 'every' batches\n \"\"\"\n super().__init__()\n self.every = every\n self.seq = sequence\n self.layer = layer\n self.epoch = None\n self.model = model\n self.logger = logger or ScreenLogger()\n\n self.out_dir = out_dir\n if not os.path.exists(out_dir):\n os.makedirs(self.out_dir)\n self.log()\n\n def log(self):\n self.logger(\"Save Output as 2D Image Callback\")\n self.logger(\"Layer: \", self.layer)\n self.logger(\"Every: \", self.every)\n\n def on_epoch_begin(self, epoch, logs=None):\n self.epoch = epoch\n\n def on_batch_end(self, batch, logs=None):\n if batch % self.every:\n return\n\n # Get output of layer\n self.model.predict_on_batch()\n sess = tf.keras.backend.get_session()\n X, _, _ = self.seq[0]\n outs = 
sess.run([self.layer.output], feed_dict={self.model.input: X})[0]\n if isinstance(outs, list):\n outs = outs[0]\n\n for i, (model_in, layer_out) in enumerate(zip(X, outs)):\n fig = plt.figure(figsize=(12, 6))\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n\n # Plot model input and layer outputs on each ax\n chl1, axis, slice = imshow(ax1, model_in)\n chl2, _, _ = imshow(ax2, layer_out, axis=axis, slice=slice)\n\n # Set labels and save figure\n ax1.set_title(\"Model input - Channel %i - Axis %i - Slice %i\"\n % (chl1, axis,slice), size=22)\n ax2.set_title(\"Layer output - Channel %i - Axis %i - Slice %i\"\n % (chl2, axis, slice), size=22)\n\n fig.tight_layout()\n fig.savefig(os.path.join(self.out_dir, \"epoch_%i_batch_%i_im_%i\" %\n (self.epoch, batch, i)))\n plt.close(fig)\n\n\nclass SavePredictionImages(Callback):\n \"\"\"\n Save images after each epoch of training of the model on a batch of\n training and a batch of validation data sampled from sequence objects.\n\n Saves the input image with ground truth overlay as well as the predicted\n label masks.\n \"\"\"\n def __init__(self, train_data, val_data, outdir='images'):\n \"\"\"\n Args:\n train_data: A mpunet.sequence object from which training\n data can be sampled via the __getitem__ method.\n val_data: A mpunet.sequence object from which validation\n data can be sampled via the __getitem__ method.\n outdir: Path to directory (existing or non-existing) in which\n images will be stored.\n \"\"\"\n super().__init__()\n\n self.train_data = train_data\n self.val_data = val_data\n self.save_path = os.path.abspath(os.path.join(outdir, \"pred_images_at_epoch\"))\n\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path)\n\n def pred_and_save(self, data, subdir):\n # Get a random batch\n X, y, _ = data[np.random.randint(len(data), dtype=np.int64)]\n\n # Predict on the batch\n pred = self.model.predict(X)\n\n subdir = os.path.join(self.save_path, subdir)\n if not os.path.exists(subdir):\n os.mkdir(subdir)\n\n # Plot each sample in the batch\n for i, (im, lab, p) in enumerate(zip(X, y, pred)):\n fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(12, 6))\n lab = lab.reshape(im.shape[:-1] + (lab.shape[-1],))\n p = p.reshape(im.shape[:-1] + (p.shape[-1],))\n # Imshow ground truth on ax2\n # This function will determine which channel, axis and slice to\n # show and return so that we can use them for the other 2 axes\n chnl, axis, slice = imshow_with_label_overlay(ax2, im, lab, lab_alpha=1.0)\n\n # Imshow pred on ax3\n imshow_with_label_overlay(ax3, im, p, lab_alpha=1.0,\n channel=chnl, axis=axis, slice=slice)\n\n # Imshow raw image on ax1\n # Chose the same slice, channel and axis as above\n im = im[..., chnl]\n im = np.moveaxis(im, axis, 0)\n if slice is not None:\n # Only for 3D imges\n im = im[slice]\n ax1.imshow(im, cmap=\"gray\")\n\n # Set labels\n ax1.set_title(\"Image\", size=18)\n ax2.set_title(\"True labels\", size=18)\n ax3.set_title(\"Prediction\", size=18)\n\n fig.tight_layout()\n with np.testing.suppress_warnings() as sup:\n sup.filter(UserWarning)\n fig.savefig(os.path.join(subdir, str(i) + \".png\"))\n plt.close(fig.number)\n\n def on_epoch_end(self, epoch, logs={}):\n self.pred_and_save(self.train_data, \"train_%s\" % epoch)\n if self.val_data is not None:\n self.pred_and_save(self.val_data, \"val_%s\" % epoch)\n"
] | [
[
"matplotlib.pyplot.figure",
"numpy.moveaxis",
"matplotlib.pyplot.subplots",
"tensorflow.keras.backend.get_session",
"matplotlib.pyplot.close",
"matplotlib.use",
"numpy.testing.suppress_warnings",
"numpy.mean"
]
] |
yux1991/PyRHEED | [
"b39ad03651c92e3649069919ae48b1e5158cd3dd"
] | [
"source/graph_3D_surface.py"
] | [
"from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom PyQt5 import QtCore, QtGui, QtWidgets, QtDataVisualization\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\nclass Graph(QtWidgets.QWidget):\n\n SHOW_2D_CONTOUR_SIGNAL = QtCore.pyqtSignal(str,bool,float,float,float,float,int,str)\n\n def __init__(self):\n super(Graph,self).__init__()\n\n def run_3D_graph(self,path):\n self.graphPath = path\n self.graph = SurfaceGraph()\n self.container = QtWidgets.QWidget.createWindowContainer(self.graph)\n self.screenSize = self.graph.screen().size()\n self.container.setMinimumSize(self.screenSize.width()/2, self.screenSize.height()/2)\n self.container.setMaximumSize(self.screenSize)\n self.container.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding)\n self.container.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.mainVLayout = QtWidgets.QVBoxLayout(self)\n self.hLayout = QtWidgets.QHBoxLayout()\n self.vLayout = QtWidgets.QVBoxLayout()\n self.hLayout.addWidget(self.container,1)\n self.vLayout.setAlignment(QtCore.Qt.AlignTop)\n self.hLayout.addLayout(self.vLayout)\n self.mainVLayout.addLayout(self.hLayout)\n self.setWindowTitle(\"3D Surface\")\n self.setWindowModality(QtCore.Qt.WindowModal)\n\n self.chooseGraph = QtWidgets.QGroupBox(\"Choose Graph\")\n self.chooseGraph.setStyleSheet('QGroupBox::title {color:blue;}')\n self.chooseGraphGrid = QtWidgets.QGridLayout(self.chooseGraph)\n self.chooseSourceLabel = QtWidgets.QLabel(\"The path of the graph is:\\n\"+self.graphPath)\n self.chooseSourceLabel.setAlignment(QtCore.Qt.AlignTop)\n self.chooseSourceLabel.setFixedWidth(250)\n self.chooseSourceLabel.setFixedHeight(75)\n self.chooseSourceLabel.setWordWrap(True)\n self.chooseSourceButton = QtWidgets.QPushButton(\"Browse\")\n self.chooseSourceButton.clicked.connect(self.choose_graph)\n self.chooseGraphGrid.addWidget(self.chooseSourceLabel,0,0)\n self.chooseGraphGrid.addWidget(self.chooseSourceButton,1,0)\n\n self.plotOptions = QtWidgets.QGroupBox(\"Contour Plot Options\")\n self.plotOptions.setStyleSheet('QGroupBox::title {color:blue;}')\n self.plotOptionsVBox = QtWidgets.QVBoxLayout(self.plotOptions)\n self.plotOptionsGrid = QtWidgets.QGridLayout()\n self.colormapLabel = QtWidgets.QLabel(\"Colormap\")\n self.colormap = QtWidgets.QComboBox()\n self.colormap.addItem(\"jet\",\"jet\")\n self.colormap.addItem(\"hsv\",\"hsv\")\n self.colormap.addItem(\"rainbow\",\"rainbow\")\n self.colormap.addItem(\"nipy_spectral\",\"nipy_spectral\")\n self.levelMinLabel = QtWidgets.QLabel(\"Level Min ({})\".format(0.0))\n self.levelMinSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.levelMinSlider.setMinimum(0)\n self.levelMinSlider.setMaximum(100)\n self.levelMinSlider.setValue(0)\n self.levelMinSlider.valueChanged.connect(self.refresh_level_min)\n self.levelMaxLabel = QtWidgets.QLabel(\"Level Max ({})\".format(1.0))\n self.levelMaxSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.levelMaxSlider.setMinimum(0)\n self.levelMaxSlider.setMaximum(100)\n self.levelMaxSlider.setValue(100)\n self.levelMaxSlider.valueChanged.connect(self.refresh_level_max)\n self.radiusMinLabel = QtWidgets.QLabel(\"Radius Min ({})\".format(0.0))\n self.radiusMinSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.radiusMinSlider.setMinimum(0)\n self.radiusMinSlider.setMaximum(1000)\n self.radiusMinSlider.setValue(0)\n 
self.radiusMinSlider.valueChanged.connect(self.refresh_radius_min)\n self.radiusMaxLabel = QtWidgets.QLabel(\"Radius Max ({})\".format(10.0))\n self.radiusMaxSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.radiusMaxSlider.setMinimum(0)\n self.radiusMaxSlider.setMaximum(1000)\n self.radiusMaxSlider.setValue(1000)\n self.radiusMaxSlider.valueChanged.connect(self.refresh_radius_max)\n self.numberOfContourLevelsLabel = QtWidgets.QLabel(\"Number of Contour Levels ({})\".format(50))\n self.numberOfContourLevelsLabel.setFixedWidth(160)\n self.numberOfContourLevelsSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.numberOfContourLevelsSlider.setMinimum(5)\n self.numberOfContourLevelsSlider.setMaximum(100)\n self.numberOfContourLevelsSlider.setValue(50)\n self.numberOfContourLevelsSlider.valueChanged.connect(self.refresh_number_of_contour_levels)\n self.show2DContourButton = QtWidgets.QPushButton(\"Show 2D Contour\")\n self.show2DContourButton.clicked.connect(self.show_2D_contour_button_pressed)\n self.show2DContourButton.setEnabled(False)\n self.plotOptionsGrid.addWidget(self.colormapLabel,0,0)\n self.plotOptionsGrid.addWidget(self.colormap,0,1)\n self.plotOptionsGrid.addWidget(self.levelMinLabel,1,0)\n self.plotOptionsGrid.addWidget(self.levelMinSlider,1,1)\n self.plotOptionsGrid.addWidget(self.levelMaxLabel,2,0)\n self.plotOptionsGrid.addWidget(self.levelMaxSlider,2,1)\n self.plotOptionsGrid.addWidget(self.radiusMinLabel,3,0)\n self.plotOptionsGrid.addWidget(self.radiusMinSlider,3,1)\n self.plotOptionsGrid.addWidget(self.radiusMaxLabel,4,0)\n self.plotOptionsGrid.addWidget(self.radiusMaxSlider,4,1)\n self.plotOptionsGrid.addWidget(self.numberOfContourLevelsLabel,5,0)\n self.plotOptionsGrid.addWidget(self.numberOfContourLevelsSlider,5,1)\n self.plotOptionsVBox.addLayout(self.plotOptionsGrid)\n self.plotOptionsVBox.addWidget(self.show2DContourButton)\n\n self.themeList = QtWidgets.QComboBox(self)\n self.themeList.addItem(\"Qt\")\n self.themeList.addItem(\"Primary Colors\")\n self.themeList.addItem(\"Digia\")\n self.themeList.addItem(\"Stone Moss\")\n self.themeList.addItem(\"Army Blue\")\n self.themeList.addItem(\"Retro\")\n self.themeList.addItem(\"Ebony\")\n self.themeList.addItem(\"Isabelle\")\n\n self.colorGroupBox = QtWidgets.QGroupBox(\"3D Surface Colormap\")\n self.colorGroupBox.setStyleSheet('QGroupBox::title {color:blue;}')\n\n self.grBtoY = QtGui.QLinearGradient(0,0,1,100)\n self.grBtoY.setColorAt(1.0,QtCore.Qt.black)\n self.grBtoY.setColorAt(0.67,QtCore.Qt.blue)\n self.grBtoY.setColorAt(0.33,QtCore.Qt.red)\n self.grBtoY.setColorAt(0.0,QtCore.Qt.yellow)\n self.pm = QtGui.QPixmap(50,100)\n self.pmp = QtGui.QPainter(self.pm)\n self.pmp.setBrush(QtGui.QBrush(self.grBtoY))\n self.pmp.setPen(QtCore.Qt.NoPen)\n self.pmp.drawRect(0,0,50,100)\n self.pmp.end()\n self.gradientBtoYPB = QtWidgets.QPushButton(self)\n self.gradientBtoYPB.setIcon(QtGui.QIcon(self.pm))\n self.gradientBtoYPB.setIconSize(QtCore.QSize(50,100))\n self.gradientBtoYPB.setEnabled(False)\n\n self.grGtoR = QtGui.QLinearGradient(0,0,1,100)\n self.grGtoR.setColorAt(1.0,QtCore.Qt.darkGreen)\n self.grGtoR.setColorAt(0.5,QtCore.Qt.yellow)\n self.grGtoR.setColorAt(0.2,QtCore.Qt.red)\n self.grGtoR.setColorAt(0.0,QtCore.Qt.darkRed)\n self.pm2 = QtGui.QPixmap(50,100)\n self.pmp2 = QtGui.QPainter(self.pm2)\n self.pmp2.setBrush(QtGui.QBrush(self.grGtoR))\n self.pmp2.drawRect(0,0,50,100)\n self.pmp2.end()\n self.gradientGtoRPB = QtWidgets.QPushButton(self)\n self.gradientGtoRPB.setIcon(QtGui.QIcon(self.pm2))\n 
self.gradientGtoRPB.setIconSize(QtCore.QSize(50,100))\n self.gradientGtoRPB.setEnabled(False)\n\n self.grBtoR = QtGui.QLinearGradient(0,0,1,100)\n self.grBtoR.setColorAt(1.0, QtCore.Qt.darkBlue)\n self.grBtoR.setColorAt(0.95, QtCore.Qt.blue)\n self.grBtoR.setColorAt(0.9, QtCore.Qt.darkCyan)\n self.grBtoR.setColorAt(0.8, QtCore.Qt.cyan)\n self.grBtoR.setColorAt(0.6, QtCore.Qt.green)\n self.grBtoR.setColorAt(0.2, QtCore.Qt.yellow)\n self.grBtoR.setColorAt(0.0, QtCore.Qt.red)\n self.pm3 = QtGui.QPixmap(50,100)\n self.pmp3 = QtGui.QPainter(self.pm3)\n self.pmp3.setBrush(QtGui.QBrush(self.grBtoR))\n self.pmp3.drawRect(0,0,50,100)\n self.pmp3.end()\n self.gradientBtoRPB = QtWidgets.QPushButton(self)\n self.gradientBtoRPB.setIcon(QtGui.QIcon(self.pm3))\n self.gradientBtoRPB.setIconSize(QtCore.QSize(50,100))\n self.gradientBtoRPB.setEnabled(False)\n\n self.colorHBox = QtWidgets.QHBoxLayout()\n self.colorHBox.addWidget(self.gradientBtoYPB)\n self.colorHBox.addWidget(self.gradientGtoRPB)\n self.colorHBox.addWidget(self.gradientBtoRPB)\n self.colorGroupBox.setLayout(self.colorHBox)\n\n self.statusBar = QtWidgets.QGroupBox(\"Log\")\n self.statusBar.setStyleSheet('QGroupBox::title {color:blue;}')\n self.statusGrid = QtWidgets.QGridLayout(self.statusBar)\n self.statusBar.setFixedHeight(150)\n self.statusBar.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Fixed)\n self.logBox = QtWidgets.QTextEdit(QtCore.QTime.currentTime().toString(\"hh:mm:ss\")+ \\\n \"\\u00A0\\u00A0\\u00A0\\u00A0Initialized!\")\n self.logCursor = QtGui.QTextCursor(self.logBox.document())\n self.logCursor.movePosition(QtGui.QTextCursor.End)\n self.logBox.setTextCursor(self.logCursor)\n self.logBox.ensureCursorVisible()\n self.logBox.setAlignment(QtCore.Qt.AlignTop)\n self.logBox.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.logBoxScroll = QtWidgets.QScrollArea()\n self.logBoxScroll.setWidget(self.logBox)\n self.logBoxScroll.setWidgetResizable(True)\n self.logBoxScroll.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.statusGrid.addWidget(self.logBoxScroll,0,0)\n\n self.vLayout.addWidget(self.chooseGraph)\n self.vLayout.addWidget(self.plotOptions)\n themeLabel = QtWidgets.QLabel(\"Theme\")\n themeLabel.setStyleSheet(\"QLabel {color:blue;}\")\n self.vLayout.addWidget(themeLabel)\n self.vLayout.addWidget(self.themeList)\n self.vLayout.addWidget(self.colorGroupBox)\n self.mainVLayout.addWidget(self.statusBar)\n\n self.show()\n desktopRect = QtWidgets.QApplication.desktop().availableGeometry(self)\n center = desktopRect.center()\n self.move(center.x()-self.width()*0.5,center.y()-self.height()*0.5)\n\n self.themeList.currentIndexChanged.connect(self.graph.change_theme)\n self.gradientBtoYPB.pressed.connect(self.graph.set_black_to_yellow_gradient)\n self.gradientGtoRPB.pressed.connect(self.graph.set_green_to_red_gradient)\n self.gradientBtoRPB.pressed.connect(self.graph.set_blue_to_red_gradient)\n self.SHOW_2D_CONTOUR_SIGNAL.connect(self.show_2d_contour)\n self.graph.LOG_MESSAGE.connect(self.update_log)\n self.themeList.setCurrentIndex(3)\n\n if not path=='':\n self.graph.fill_two_dimensional_mapping_proxy(path)\n self.graph.enable_two_dimensional_mapping_model(True)\n self.show2DContourButton.setEnabled(True)\n self.gradientBtoYPB.setEnabled(True)\n self.gradientGtoRPB.setEnabled(True)\n self.gradientBtoRPB.setEnabled(True)\n\n def choose_graph(self):\n path = QtWidgets.QFileDialog.getOpenFileName(None,\"choose the graph\",self.graphPath)\n self.graphPath = path[0]\n self.graphPathExtension = 
os.path.splitext(self.graphPath)[1]\n if not self.graphPathExtension == \".txt\":\n self.raise_error('[Error: wrong file type] Please choose a *.txt file')\n self.update_log('[Error: wrong file type] Please choose a *.txt file')\n else:\n self.chooseSourceLabel.setText(\"The path of the graph is:\\n\"+self.graphPath)\n self.update_log(\"Loading DataArray...\")\n QtCore.QCoreApplication.processEvents()\n self.graph.fill_two_dimensional_mapping_proxy(self.graphPath)\n self.graph.enable_two_dimensional_mapping_model(True)\n self.show2DContourButton.setEnabled(True)\n self.gradientBtoYPB.setEnabled(True)\n self.gradientGtoRPB.setEnabled(True)\n self.gradientBtoRPB.setEnabled(True)\n\n def refresh_level_min(self):\n self.levelMinLabel.setText(\"Level Min ({})\".format(self.levelMinSlider.value()/100))\n if self.levelMinSlider.value() > self.levelMaxSlider.value():\n self.levelMaxSlider.setValue(self.levelMinSlider.value())\n\n def refresh_level_max(self):\n self.levelMaxLabel.setText(\"Level Max ({})\".format(self.levelMaxSlider.value()/100))\n if self.levelMinSlider.value() > self.levelMaxSlider.value():\n self.levelMinSlider.setValue(self.levelMaxSlider.value())\n\n def refresh_radius_min(self):\n self.radiusMinLabel.setText(\"Radius Min ({})\".format(self.radiusMinSlider.value()/100))\n if self.radiusMinSlider.value() > self.radiusMaxSlider.value():\n self.radiusMaxSlider.setValue(self.radiusMinSlider.value())\n\n def refresh_radius_max(self):\n self.radiusMaxLabel.setText(\"Radius Max ({})\".format(self.radiusMaxSlider.value()/100))\n if self.radiusMinSlider.value() > self.radiusMaxSlider.value():\n self.radiusMinSlider.setValue(self.radiusMaxSlider.value())\n\n def refresh_number_of_contour_levels(self):\n self.numberOfContourLevelsLabel.setText(\"Number of Contour Levels ({})\".format(self.numberOfContourLevelsSlider.value()))\n\n def show_2D_contour_button_pressed(self):\n self.update_log(\"Showing contour plot...\")\n QtCore.QCoreApplication.processEvents()\n self.SHOW_2D_CONTOUR_SIGNAL.emit(self.graphPath,True,self.levelMinSlider.value()/100,self.levelMaxSlider.value()/100,\\\n self.radiusMinSlider.value()/100,self.radiusMaxSlider.value()/100,\\\n self.numberOfContourLevelsSlider.value(),self.colormap.currentText())\n\n def show_2d_contour(self,path, insideGraph3D = False, min=0.0, max=1.0, radius_min=0, radius_max=10, number_of_levels=50, colormap='jet'):\n window = QtWidgets.QDialog()\n layout = QtWidgets.QVBoxLayout(window)\n figure = plt.figure()\n canvas = FigureCanvas(figure)\n toolbar = NavigationToolbar(canvas,window)\n figure.clear()\n if not path == None:\n if os.path.splitext(path)[1] == '.txt':\n radius,theta,intensity = self.convert_to_RTI(path)\n levels = np.linspace(min,max,number_of_levels)\n ax = figure.add_subplot(111,polar = True)\n ax.contourf(theta,radius,intensity,levels=levels,cmap=colormap)\n ax.set_ylim(radius_min,radius_max)\n canvas.draw()\n else:\n self.raise_error('[Error: wrong file type] Please choose a *.txt file')\n else:\n self.raise_error('[Error: no file] Please choose a valid file first')\n layout.addWidget(toolbar)\n layout.addWidget(canvas)\n window.setWindowTitle(\"2D Contour\")\n window.show()\n if insideGraph3D:\n window.finished.connect(self.contour_plot_finished)\n\n def contour_plot_finished(self):\n self.update_log('Contour plot ended.')\n\n def update_log(self,msg):\n self.logBox.append(QtCore.QTime.currentTime().toString(\"hh:mm:ss\")+\"\\u00A0\\u00A0\\u00A0\\u00A0\"+msg)\n\n def convert_to_RTI(self,path):\n raw_data = np.loadtxt(path)\n 
if np.amin(raw_data[:,0])<0:\n data = np.empty(raw_data.shape)\n data[:,0] = np.abs(raw_data[:,0])\n data[:,1] = np.where(raw_data[:,0]>0,0,180)+raw_data[:,1]\n data[:,2] = raw_data[:,2]\n else:\n data = raw_data\n df = pd.DataFrame(data,columns = ['radius','theta','intensity'])\n table = df.pivot_table(values = 'intensity',index='radius',columns='theta')\n return table.index.tolist(),[a/180*math.pi for a in table.columns.tolist()],table\n\n def raise_error(self,message):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Warning)\n msg.setText(message)\n msg.setWindowTitle(\"Error\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Ok)\n msg.setEscapeButton(QtWidgets.QMessageBox.Close)\n msg.exec()\n\n def raise_attention(self,information):\n info = QtWidgets.QMessageBox()\n info.setIcon(QtWidgets.QMessageBox.Information)\n info.setText(information)\n info.setWindowTitle(\"Information\")\n info.setStandardButtons(QtWidgets.QMessageBox.Ok)\n info.setEscapeButton(QtWidgets.QMessageBox.Close)\n info.exec()\n\n\nclass SurfaceGraph(QtDataVisualization.Q3DSurface):\n\n LOG_MESSAGE = QtCore.pyqtSignal(str)\n\n def __init__(self):\n super(SurfaceGraph,self).__init__()\n self.twoDimensionalMappingProxy = QtDataVisualization.QSurfaceDataProxy()\n self.twoDimensionalMappingSeries = QtDataVisualization.QSurface3DSeries(self.twoDimensionalMappingProxy)\n\n def convert_to_data_array(self,path):\n data = np.loadtxt(path)\n df = pd.DataFrame(data,columns = ['radius','theta','intensity'])\n table = df.pivot_table(values = 'intensity',index='radius',columns='theta')\n radius = table.index.tolist()\n theta = table.columns.tolist()\n self.sampleMin,self.sampleMax = -max(radius),max(radius)\n dataArray = []\n for i in range(table.shape[0]):\n newRow=[]\n for j in range(table.shape[1]):\n item = QtDataVisualization.QSurfaceDataItem()\n X,Y,Z = radius[i]*np.cos(theta[j]/180*math.pi),\\\n radius[i]*np.sin(theta[j]/180*math.pi),\\\n table.loc[radius[i],theta[j]]\n item.setPosition(QtGui.QVector3D(X,Z,Y))\n newRow.append(item)\n dataArray.append(newRow)\n return dataArray\n\n def fill_two_dimensional_mapping_proxy(self,path):\n dataArray = self.convert_to_data_array(path)\n self.twoDimensionalMappingProxy.resetArray(dataArray)\n self.LOG_MESSAGE.emit(\"DataArray Loaded!\")\n\n def enable_two_dimensional_mapping_model(self,enable):\n if enable:\n for series in self.seriesList():\n self.removeSeries(series)\n self.twoDimensionalMappingSeries.setDrawMode(QtDataVisualization.QSurface3DSeries.DrawSurface)\n self.twoDimensionalMappingSeries.setFlatShadingEnabled(True)\n self.axisX().setLabelFormat(\"%.2f\")\n self.axisZ().setLabelFormat(\"%.2f\")\n self.axisX().setRange(self.sampleMin,self.sampleMax)\n self.axisY().setRange(0,1)\n self.axisZ().setRange(self.sampleMin,self.sampleMax)\n self.axisX().setTitle(\"Kx (\\u212B\\u207B\\u00B9)\")\n self.axisX().setTitleVisible(True)\n self.axisZ().setTitle(\"Ky (\\u212B\\u207B\\u00B9)\")\n self.axisZ().setTitleVisible(True)\n self.axisY().setTitle(\"Normalized Intensity (arb. 
units)\")\n self.axisY().setTitleVisible(True)\n self.axisX().setLabelAutoRotation(30)\n self.axisY().setLabelAutoRotation(90)\n self.axisZ().setLabelAutoRotation(30)\n self.addSeries(self.twoDimensionalMappingSeries)\n\n def change_theme(self, theme):\n self.activeTheme().setType(QtDataVisualization.Q3DTheme.Theme(theme))\n\n def set_black_to_yellow_gradient(self):\n self.gr = QtGui.QLinearGradient()\n self.gr.setColorAt(0.0, QtCore.Qt.black)\n self.gr.setColorAt(0.33, QtCore.Qt.blue)\n self.gr.setColorAt(0.67, QtCore.Qt.red)\n self.gr.setColorAt(1.0, QtCore.Qt.yellow)\n self.seriesList()[0].setBaseGradient(self.gr)\n self.seriesList()[0].setColorStyle(QtDataVisualization.Q3DTheme.ColorStyleRangeGradient)\n\n def set_green_to_red_gradient(self):\n self.gr = QtGui.QLinearGradient()\n self.gr.setColorAt(0.0, QtCore.Qt.darkGreen)\n self.gr.setColorAt(0.5, QtCore.Qt.yellow)\n self.gr.setColorAt(0.8, QtCore.Qt.red)\n self.gr.setColorAt(1.0, QtCore.Qt.darkRed)\n self.seriesList()[0].setBaseGradient(self.gr)\n self.seriesList()[0].setColorStyle(QtDataVisualization.Q3DTheme.ColorStyleRangeGradient)\n\n def set_blue_to_red_gradient(self):\n self.gr = QtGui.QLinearGradient()\n self.gr.setColorAt(0.0, QtCore.Qt.darkBlue)\n self.gr.setColorAt(0.05, QtCore.Qt.blue)\n self.gr.setColorAt(0.1, QtCore.Qt.darkCyan)\n self.gr.setColorAt(0.2, QtCore.Qt.cyan)\n self.gr.setColorAt(0.4, QtCore.Qt.green)\n self.gr.setColorAt(0.8, QtCore.Qt.yellow)\n self.gr.setColorAt(1.0, QtCore.Qt.red)\n self.seriesList()[0].setBaseGradient(self.gr)\n self.seriesList()[0].setColorStyle(QtDataVisualization.Q3DTheme.ColorStyleRangeGradient)\n\n def raise_error(self,message):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Warning)\n msg.setText(message)\n msg.setWindowTitle(\"Error\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Ok)\n msg.setEscapeButton(QtWidgets.QMessageBox.Close)\n msg.exec()\n\n def raise_attention(self,information):\n info = QtWidgets.QMessageBox()\n info.setIcon(QtWidgets.QMessageBox.Information)\n info.setText(information)\n info.setWindowTitle(\"Information\")\n info.setStandardButtons(QtWidgets.QMessageBox.Ok)\n info.setEscapeButton(QtWidgets.QMessageBox.Close)\n info.exec()\n\n"
] | [
[
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"numpy.empty",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.abs",
"numpy.cos",
"numpy.where",
"numpy.amin",
"numpy.sin",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.linspace",
"numpy.loadtxt"
]
] |
Turoad/CLRNet | [
"51e082db12973943bddefd76fd0d431fcb3350ff",
"51e082db12973943bddefd76fd0d431fcb3350ff"
] | [
"clrnet/models/necks/fpn.py",
"clrnet/datasets/llamas.py"
] | [
"import warnings\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmcv.cnn import ConvModule\nfrom ..registry import NECKS\n\n\[email protected]_module\nclass FPN(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n extra_convs_on_inputs=True,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n attention=False,\n act_cfg=None,\n upsample_cfg=dict(mode='nearest'),\n init_cfg=dict(type='Xavier',\n layer='Conv2d',\n distribution='uniform'),\n cfg=None):\n super(FPN, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.attention = attention\n self.relu_before_extra_convs = relu_before_extra_convs\n self.no_norm_on_lateral = no_norm_on_lateral\n self.upsample_cfg = upsample_cfg.copy()\n\n if end_level == -1:\n self.backbone_end_level = self.num_ins\n assert num_outs >= self.num_ins - start_level\n else:\n # if end_level < inputs, no extra level is allowed\n self.backbone_end_level = end_level\n assert end_level <= len(in_channels)\n assert num_outs == end_level - start_level\n self.start_level = start_level\n self.end_level = end_level\n self.add_extra_convs = add_extra_convs\n assert isinstance(add_extra_convs, (str, bool))\n if isinstance(add_extra_convs, str):\n # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'\n assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')\n elif add_extra_convs: # True\n if extra_convs_on_inputs:\n # TODO: deprecate `extra_convs_on_inputs`\n warnings.simplefilter('once')\n warnings.warn(\n '\"extra_convs_on_inputs\" will be deprecated in v2.9.0,'\n 'Please use \"add_extra_convs\"', DeprecationWarning)\n self.add_extra_convs = 'on_input'\n else:\n self.add_extra_convs = 'on_output'\n\n self.lateral_convs = nn.ModuleList()\n self.fpn_convs = nn.ModuleList()\n\n for i in range(self.start_level, self.backbone_end_level):\n l_conv = ConvModule(\n in_channels[i],\n out_channels,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n act_cfg=act_cfg,\n inplace=False)\n fpn_conv = ConvModule(out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n\n self.lateral_convs.append(l_conv)\n self.fpn_convs.append(fpn_conv)\n\n # add extra conv layers (e.g., RetinaNet)\n extra_levels = num_outs - self.backbone_end_level + self.start_level\n if self.add_extra_convs and extra_levels >= 1:\n for i in range(extra_levels):\n if i == 0 and self.add_extra_convs == 'on_input':\n in_channels = self.in_channels[self.backbone_end_level - 1]\n else:\n in_channels = out_channels\n extra_fpn_conv = ConvModule(in_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n self.fpn_convs.append(extra_fpn_conv)\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n assert len(inputs) >= len(self.in_channels)\n\n if len(inputs) > len(self.in_channels):\n for _ in range(len(inputs) - len(self.in_channels)):\n del inputs[0]\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n # build top-down path\n used_backbone_levels = len(laterals)\n for i in range(used_backbone_levels - 1, 0, -1):\n # In some 
cases, fixing `scale factor` (e.g. 2) is preferred, but\n # it cannot co-exist with `size` in `F.interpolate`.\n if 'scale_factor' in self.upsample_cfg:\n laterals[i - 1] += F.interpolate(laterals[i],\n **self.upsample_cfg)\n else:\n prev_shape = laterals[i - 1].shape[2:]\n laterals[i - 1] += F.interpolate(laterals[i],\n size=prev_shape,\n **self.upsample_cfg)\n\n # build outputs\n # part 1: from original levels\n outs = [\n self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n ]\n # part 2: add extra levels\n if self.num_outs > len(outs):\n # use max pool to get more levels on top of outputs\n # (e.g., Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n if self.add_extra_convs == 'on_input':\n extra_source = inputs[self.backbone_end_level - 1]\n elif self.add_extra_convs == 'on_lateral':\n extra_source = laterals[-1]\n elif self.add_extra_convs == 'on_output':\n extra_source = outs[-1]\n else:\n raise NotImplementedError\n outs.append(self.fpn_convs[used_backbone_levels](extra_source))\n for i in range(used_backbone_levels + 1, self.num_outs):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return tuple(outs)\n",
"import os\nimport pickle as pkl\nimport cv2\n\nfrom .registry import DATASETS\nimport numpy as np\nfrom tqdm import tqdm\nfrom .base_dataset import BaseDataset\n\nTRAIN_LABELS_DIR = 'labels/train'\nTEST_LABELS_DIR = 'labels/valid'\nTEST_IMGS_DIR = 'color_images/test'\nSPLIT_DIRECTORIES = {'train': 'labels/train', 'val': 'labels/valid'}\nfrom clrnet.utils.llamas_utils import get_horizontal_values_for_four_lanes\nimport clrnet.utils.llamas_metric as llamas_metric\n\n\[email protected]_module\nclass LLAMAS(BaseDataset):\n def __init__(self, data_root, split='train', processes=None, cfg=None):\n self.split = split\n self.data_root = data_root\n super().__init__(data_root, split, processes, cfg)\n if split != 'test' and split not in SPLIT_DIRECTORIES.keys():\n raise Exception('Split `{}` does not exist.'.format(split))\n if split != 'test':\n self.labels_dir = os.path.join(self.data_root,\n SPLIT_DIRECTORIES[split])\n\n self.data_infos = []\n self.load_annotations()\n\n def get_img_heigth(self, _):\n return self.cfg.ori_img_h\n\n def get_img_width(self, _):\n return self.cfg.ori_img_w\n\n def get_metrics(self, lanes, _):\n # Placeholders\n return [0] * len(lanes), [0] * len(lanes), [1] * len(lanes), [\n 1\n ] * len(lanes)\n\n def get_img_path(self, json_path):\n # /foo/bar/test/folder/image_label.ext --> test/folder/image_label.ext\n base_name = '/'.join(json_path.split('/')[-3:])\n image_path = os.path.join(\n 'color_images', base_name.replace('.json', '_color_rect.png'))\n return image_path\n\n def get_img_name(self, json_path):\n base_name = (json_path.split('/')[-1]).replace('.json',\n '_color_rect.png')\n return base_name\n\n def get_json_paths(self):\n json_paths = []\n for root, _, files in os.walk(self.labels_dir):\n for file in files:\n if file.endswith(\".json\"):\n json_paths.append(os.path.join(root, file))\n return json_paths\n\n def load_annotations(self):\n # the labels are not public for the test set yet\n if self.split == 'test':\n imgs_dir = os.path.join(self.data_root, TEST_IMGS_DIR)\n self.data_infos = [{\n 'img_path':\n os.path.join(root, file),\n 'img_name':\n os.path.join(TEST_IMGS_DIR,\n root.split('/')[-1], file),\n 'lanes': [],\n 'relative_path':\n os.path.join(root.split('/')[-1], file)\n } for root, _, files in os.walk(imgs_dir) for file in files\n if file.endswith('.png')]\n self.data_infos = sorted(self.data_infos,\n key=lambda x: x['img_path'])\n return\n\n # Waiting for the dataset to load is tedious, let's cache it\n os.makedirs('cache', exist_ok=True)\n cache_path = 'cache/llamas_{}.pkl'.format(self.split)\n if os.path.exists(cache_path):\n with open(cache_path, 'rb') as cache_file:\n self.data_infos = pkl.load(cache_file)\n self.max_lanes = max(\n len(anno['lanes']) for anno in self.data_infos)\n return\n\n self.max_lanes = 0\n print(\"Searching annotation files...\")\n json_paths = self.get_json_paths()\n print('{} annotations found.'.format(len(json_paths)))\n\n for json_path in tqdm(json_paths):\n lanes = get_horizontal_values_for_four_lanes(json_path)\n lanes = [[(x, y) for x, y in zip(lane, range(self.cfg.ori_img_h))\n if x >= 0] for lane in lanes]\n lanes = [lane for lane in lanes if len(lane) > 0]\n lanes = [list(set(lane))\n for lane in lanes] # remove duplicated points\n lanes = [lane for lane in lanes\n if len(lane) > 2] # remove lanes with less than 2 points\n\n lanes = [sorted(lane, key=lambda x: x[1])\n for lane in lanes] # sort by y\n lanes.sort(key=lambda lane: lane[0][0])\n mask_path = json_path.replace('.json', '.png')\n\n # generate seg 
labels\n seg = np.zeros((717, 1276, 3))\n for i, lane in enumerate(lanes):\n for j in range(0, len(lane) - 1):\n cv2.line(seg, (round(lane[j][0]), lane[j][1]),\n (round(lane[j + 1][0]), lane[j + 1][1]),\n (i + 1, i + 1, i + 1),\n thickness=15)\n\n cv2.imwrite(mask_path, seg)\n\n relative_path = self.get_img_path(json_path)\n img_path = os.path.join(self.data_root, relative_path)\n self.max_lanes = max(self.max_lanes, len(lanes))\n self.data_infos.append({\n 'img_path': img_path,\n 'img_name': relative_path,\n 'mask_path': mask_path,\n 'lanes': lanes,\n 'relative_path': relative_path\n })\n\n with open(cache_path, 'wb') as cache_file:\n pkl.dump(self.data_infos, cache_file)\n\n def assign_class_to_lanes(self, lanes):\n return {\n label: value\n for label, value in zip(['l0', 'l1', 'r0', 'r1'], lanes)\n }\n\n def get_prediction_string(self, pred):\n ys = np.arange(300, 717, 1) / (self.cfg.ori_img_h - 1)\n out = []\n for lane in pred:\n xs = lane(ys)\n valid_mask = (xs >= 0) & (xs < 1)\n xs = xs * (self.cfg.ori_img_w - 1)\n lane_xs = xs[valid_mask]\n lane_ys = ys[valid_mask] * (self.cfg.ori_img_h - 1)\n lane_xs, lane_ys = lane_xs[::-1], lane_ys[::-1]\n lane_str = ' '.join([\n '{:.5f} {:.5f}'.format(x, y) for x, y in zip(lane_xs, lane_ys)\n ])\n if lane_str != '':\n out.append(lane_str)\n\n return '\\n'.join(out)\n\n def evaluate(self, predictions, output_basedir):\n print('Generating prediction output...')\n for idx, pred in enumerate(predictions):\n relative_path = self.data_infos[idx]['relative_path']\n output_filename = '/'.join(relative_path.split('/')[-2:]).replace(\n '_color_rect.png', '.lines.txt')\n output_filepath = os.path.join(output_basedir, output_filename)\n os.makedirs(os.path.dirname(output_filepath), exist_ok=True)\n output = self.get_prediction_string(pred)\n with open(output_filepath, 'w') as out_file:\n out_file.write(output)\n if self.split == 'test':\n return None\n result = llamas_metric.eval_predictions(output_basedir,\n self.labels_dir,\n iou_thresholds=np.linspace(0.5, 0.95, 10),\n unofficial=False)\n return result[0.5]['F1']\n"
] | [
[
"torch.nn.functional.relu",
"torch.nn.ModuleList",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d"
],
[
"numpy.arange",
"numpy.linspace",
"numpy.zeros"
]
] |
DarioMarzella/pdb2sql | [
"64d5d4f27fb67a0ff41221bcd27667237048bb3f"
] | [
"pdb2sql/StructureSimilarity.py"
] | [
"import warnings\nimport numpy as np\nfrom .pdb2sqlcore import pdb2sql\nfrom .interface import interface\nfrom .superpose import get_trans_vect, get_rotation_matrix, superpose_selection\n\nfrom . import transform\nimport os\nimport pickle\n\n\nclass StructureSimilarity(object):\n\n def __init__(self, decoy, ref, verbose=False, enforce_residue_matching=True):\n \"\"\"Compute structure similarity between two structures.\n\n This class allows to compute the i-RMSD, L-RMSD, Fnat and DockQ\n score of a given conformation.\n This can be a replacement for ProFIT.\n Note that the calculation of the zones are done by the class\n itself and does not require any extra input.\n\n Note:\n 1. The decoy and pdb must have consistent residue numbering.\n 2. The lzone files here are different with those from ProFit.\n lzone: here need only zone residues for fitting, no need\n of residue for rms calculation. RMS residues are\n automatically assumed as the other chain,\n Be careful with ProFit zone files that contain RZONE/RATOMS.\n 3. Missing residues/atoms will be ignored.\n\n Args:\n decoy : pdb file or sql database of the decoy conformation\n ref : pdb file or sql database of the reference conformation\n verbose (bool) : verbosity option\n\n Examples:\n >>> from pdb2sql import StructureSimilarity\n >>> decoy = '1AK4_5w.pdb'\n >>> ref = '1AK4.pdb'\n >>> sim = StructureSimilarity(decoy,ref)\n >>> irmsd_fast = sim.compute_irmsd_fast(method='svd',\n ... izone='1AK4.izone')\n >>> irmsd = sim.compute_irmsd_pdb2sql(method='svd',\n ... izone='1AK4.izone')\n >>> lrmsd_fast = sim.compute_lrmsd_fast(method='svd',\n ... lzone='1AK4.lzone',check=True)\n >>> lrmsd = sim.compute_lrmsd_pdb2sql(exportpath=None,\n ... method='svd')\n >>> Fnat = sim.compute_fnat_pdb2sql()\n >>> Fnat_fast = sim.compute_fnat_fast(\n ... ref_pairs='1AK4.ref_pairs')\n >>> dockQ = sim.compute_DockQScore(Fnat_fast,\n ... 
lrmsd_fast,irmsd_fast)\n \"\"\"\n\n self.decoy = decoy\n self.ref = ref\n self.verbose = verbose\n self.origin = [0., 0., 0.]\n self.enforce_residue_matching = enforce_residue_matching\n \n def __repr__(self):\n return f'{self.__module__}.{self.__class__.__name__}({self.decoy}, {self.ref}, {self.verbose})'\n\n def check_residues(self):\n \"\"\"Check if the residue numbering matches.\"\"\"\n\n res_ref = pdb2sql(self.ref).get_residues()\n res_dec = pdb2sql(self.decoy).get_residues()\n\n if res_ref != res_dec:\n print('Residues are different in the reference and decoy')\n print('Residues found in %s and not in %s' %\n (self.ref, self.decoy))\n print(set(res_ref).difference(set(res_dec)))\n print('Residues found in %s and not in %s' %\n (self.decoy, self.ref))\n print(set(res_dec).difference(set(res_ref))) \n \n if self.enforce_residue_matching == True:\n raise ValueError(\n 'Residue numbering not identical in ref and decoy\\n Set enforce_residue_matching=False to bypass this error.') \n else:\n warns.Warning('Residue numbering not identical in ref and decoy.')\n\n ##########################################################################\n #\n # FAST ROUTINE TO COMPUTE THE L-RMSD\n # Require the precalculation of the lzone\n # A dedicated routine is implemented to comoute the lzone\n # if lzone is not given in argument the routine will compute them automatically\n #\n ##########################################################################\n\n # compute the L-RMSD\n def compute_lrmsd_fast(self, lzone=None, method='svd', check=True, name=['C', 'CA', 'N', 'O']):\n \"\"\"Fast routine to compute the L-RMSD.\n\n L-RMSD is computed by aligning the longest chain of the decoy to\n the one of the reference and computing the RMSD of the shortest\n chain between decoy and reference. By default, both fitting and\n rms calculation use only backbone atoms. See reference:\n\n DockQ: A Quality Measure for Protein-Protein Docking Models\n https://doi.org/10.1371/journal.pone.0161879\n\n Args:\n lzone (None, optional): name of the file containing the zone\n definition. If None the file will be calculated first.\n method (str, optional): Method to align the fragments,\n 'svd' or 'quaternion'.\n check (bool, optional): Check if the sequences are aligned\n and fix it if not. Defaults to True.\n name (list, optional): atom name to include in the zone.\n Defaults to ['C', 'CA', 'N', 'O']\n\n Returns:\n float: L-RMSD value of the conformation\n\n See also:\n :meth:`compute_lrmsd_pdb2sql`\n \"\"\"\n\n # create/read the lzone file\n if lzone is None:\n resData = self.compute_lzone(save_file=False)\n elif not os.path.isfile(lzone):\n resData = self.compute_lzone(\n save_file=True, filename=lzone)\n else:\n resData = self.read_zone(lzone)\n\n if check or self.enforce_residue_matching:\n\n # Note:\n # 1. 
get_data_zone_backbone returns in_zone and not_in_zone\n # here the in_zone defines the zone for fitting,\n # and not_in_zone defines the zone for rms calculation.\n\n self.check_residues()\n\n data_decoy_long, data_decoy_short = self.get_data_zone_backbone(\n self.decoy, resData, return_not_in_zone=True, name=name)\n\n data_ref_long, data_ref_short = self.get_data_zone_backbone(\n self.ref, resData, return_not_in_zone=True, name=name)\n\n atom_long = data_ref_long.intersection(data_decoy_long)\n xyz_decoy_long = self._get_xyz(self.decoy, atom_long)\n xyz_ref_long = self._get_xyz(self.ref, atom_long)\n\n atom_short = data_ref_short.intersection(data_decoy_short)\n xyz_decoy_short = self._get_xyz(self.decoy, atom_short)\n xyz_ref_short = self._get_xyz(self.ref, atom_short)\n\n # extract the xyz\n else:\n\n xyz_decoy_long, xyz_decoy_short = self.get_xyz_zone_backbone(\n self.decoy, resData, return_not_in_zone=True, name=name)\n\n xyz_ref_long, xyz_ref_short = self.get_xyz_zone_backbone(\n self.ref, resData, return_not_in_zone=True, name=name)\n\n xyz_decoy_short = superpose_selection(\n xyz_decoy_short, xyz_decoy_long, xyz_ref_long, method)\n\n # compute the RMSD\n return self.get_rmsd(xyz_decoy_short, xyz_ref_short)\n\n # compute the lzone file\n def compute_lzone(self, save_file=True, filename=None):\n \"\"\"Compute the zone for L-RMSD calculation.\n\n Note:\n It only provides the zone of long chain(s) which is used for\n fitting. The zone used for calculating RMSD is defined in\n the function `compute_lrmsd_fast`.\n\n Args:\n save_file (bool, optional): save the zone file\n filename (str, optional): name of the file\n\n Returns:\n dict: definition of the zone.\n \"\"\"\n sql_ref = pdb2sql(self.ref)\n chains = list(sql_ref.get_chains())\n if len(chains) != 2:\n raise ValueError(\n 'exactly two chains are needed for lrmsd calculation but we found %d' % len(chains), chains)\n\n nA = len(sql_ref.get('x,y,z', chainID=chains[0]))\n nB = len(sql_ref.get('x,y,z', chainID=chains[1]))\n\n # detect which chain is the longest\n long_chain = chains[0]\n if nA < nB:\n long_chain = chains[1]\n\n # extract data about the residue\n data_test = [\n tuple(data) for data in sql_ref.get(\n 'chainID,resSeq',\n chainID=long_chain)]\n\n data_test = sorted(set(data_test))\n\n # close the sql\n sql_ref._close()\n\n if save_file:\n if filename is None:\n f = open(self.ref.split('.')[0] + '.lzone', 'w')\n else:\n f = open(filename, 'w')\n for res in data_test:\n chain = res[0]\n num = res[1]\n f.write('zone %s%d-%s%d\\n' % (chain, num, chain, num))\n f.close()\n\n resData = {}\n for res in data_test:\n chain = res[0]\n num = res[1]\n\n if chain not in resData.keys():\n resData[chain] = []\n resData[chain].append(num)\n\n return resData\n\n ##########################################################################\n #\n # FAST ROUTINE TO COMPUTE THE I-RMSD\n # Require the precalculation of the izone\n # A dedicated routine is implemented to comoute the izone\n # if izone is not given in argument the routine will compute them automatcally\n #\n ##########################################################################\n\n def compute_irmsd_fast(self, izone=None, method='svd',\n cutoff=10, check=True):\n \"\"\"Fast method to compute the i-rmsd.\n\n i-RMSD is computed by selecting the backbone atoms of reference\n interface that is defined as any pair of heavy atoms from two\n chains within 10Å of each other.\n Align these backbone atoms as best as possible with their\n coutner part in the decoy and compute the RMSD. 
See reference:\n\n DockQ: A Quality Measure for Protein-Protein Docking Models\n https://doi.org/10.1371/journal.pone.0161879\n\n Args:\n izone (None, optional): file name of the zone.\n if None the zones will be calculated automatically.\n method (str, optional): Method to align the fragments,\n 'svd' or 'quaternion'.\n cutoff (float, optional): cutoff for the contact atoms\n check (bool, optional): Check if the sequences are aligned\n and fix it if not. Should be True.\n\n Returns:\n float: i-RMSD value of the conformation\n\n See also:\n :meth:`compute_irmsd_pdb2sql`\n \"\"\"\n\n # read the izone file\n if izone is None:\n resData = self.compute_izone(cutoff, save_file=False)\n elif not os.path.isfile(izone):\n resData = self.compute_izone(\n cutoff, save_file=True, filename=izone)\n else:\n resData = self.read_zone(izone)\n\n if check or self.enforce_residue_matching:\n\n self.check_residues()\n\n data_decoy = self.get_data_zone_backbone(\n self.decoy, resData, return_not_in_zone=False)\n data_ref = self.get_data_zone_backbone(\n self.ref, resData, return_not_in_zone=False)\n\n atom_common = data_ref.intersection(data_decoy)\n xyz_contact_decoy = self._get_xyz(self.decoy, atom_common)\n xyz_contact_ref = self._get_xyz(self.ref, atom_common)\n\n # extract the xyz\n else:\n xyz_contact_decoy = self.get_xyz_zone_backbone(\n self.decoy, resData)\n xyz_contact_ref = self.get_xyz_zone_backbone(\n self.ref, resData)\n\n # superpose the fragments\n xyz_contact_decoy = superpose_selection(xyz_contact_decoy,\n xyz_contact_decoy,\n xyz_contact_ref, method)\n\n # return the RMSD\n return self.get_rmsd(xyz_contact_decoy, xyz_contact_ref)\n\n def compute_izone(self, cutoff=10.0, save_file=True, filename=None):\n \"\"\"Compute the zones for i-rmsd calculationss.\n\n Args:\n cutoff (float, optional): cutoff for the contact atoms\n save_file (bool, optional): svae file containing the zone\n filename (str, optional): filename\n\n Returns:\n dict: i-zone definition\n \"\"\"\n\n sql_ref = interface(self.ref)\n chains = list(sql_ref.get_chains())\n if len(chains) != 2:\n raise ValueError(\n 'exactly two chains are needed for irmsd calculation but we found %d' % len(chains), chains)\n\n contact_ref = sql_ref.get_contact_atoms(\n cutoff=cutoff, extend_to_residue=True, chain1=chains[0], chain2=chains[1])\n\n index_contact_ref = []\n for _, v in contact_ref.items():\n index_contact_ref += v\n\n # get the xyz and atom identifier of the decoy contact atoms\n data_test = [tuple(data) for data in sql_ref.get(\n 'chainID,resSeq',\n rowID=index_contact_ref,\n name=sql_ref.backbone_atoms)]\n\n data_test = sorted(set(data_test))\n\n # close the sql\n sql_ref._close()\n\n if save_file:\n\n if filename is None:\n f = open(self.ref.split('.')[0] + '.izone', 'w')\n else:\n f = open(filename, 'w')\n\n for res in data_test:\n chain = res[0]\n num = res[1]\n f.write('zone %s%d-%s%d\\n' % (chain, num, chain, num))\n f.close()\n\n resData = {}\n for res in data_test:\n chain = res[0]\n num = res[1]\n\n if chain not in resData.keys():\n resData[chain] = []\n resData[chain].append(num)\n return resData\n\n ##########################################################################\n #\n # ROUTINE TO COMPUTE THE fnat QUICKLY\n #\n ##########################################################################\n\n def compute_fnat_fast(self, cutoff=5):\n \"\"\"Fast method to cmpute the FNAT of the conformation.\n\n Fnat is the fraction of reference interface contacts preserved\n in the interface of decoy. 
The interface is defined as any pair\n of heavy atoms from two chains within 5Å of each other.\n\n Args:\n cutoff (int, optional): cutoff for the contact atoms\n\n Returns:\n float: FNAT value\n\n Raises:\n ValueError: if the decoy file is not found\n\n See also:\n :meth:`compute_fnat_pdb2sql`\n \"\"\"\n # compute ref residue pairs\n residue_pairs_ref = self.compute_residue_pairs_ref(\n cutoff, save_file=False)\n\n # create a dict of the decoy data\n data_decoy = pdb2sql.read_pdb(self.decoy)\n\n # read the decoy data\n residue_xyz = {}\n residue_name = {}\n for line in data_decoy:\n\n if line.startswith('ATOM'):\n\n chainID = line[21]\n if chainID == ' ':\n chainID = line[72]\n\n resSeq = int(line[22:26])\n resName = line[17:20].strip()\n name = line[12:16].strip()\n\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n\n key = (chainID, resSeq, resName)\n\n if key not in residue_xyz.keys():\n residue_xyz[key] = []\n residue_name[key] = []\n\n # if name in ['CA','C','N','O']\n # exclude Hydrogen\n if name[0] != 'H':\n residue_xyz[key].append([x, y, z])\n residue_name[key].append(name)\n\n # loop over the residue pairs of the ref\n nCommon, nTotal = 0, 0\n for resA, resB_list in residue_pairs_ref.items():\n if resA in residue_xyz.keys():\n xyzA = residue_xyz[resA]\n for resB in resB_list:\n if resB in residue_xyz.keys():\n xyzB = residue_xyz[resB]\n dist_min = np.min(np.array(\n [np.sqrt(np.sum((np.array(p1) - np.array(p2))**2))\n for p1 in xyzA for p2 in xyzB]))\n if dist_min <= cutoff:\n nCommon += 1\n nTotal += 1\n else:\n msg = f'\\t FNAT: not find residue: {resA}'\n warnings.warn(msg)\n\n # normalize\n return round(nCommon / nTotal, 6)\n\n # compute the residue pair of the reference\n def compute_residue_pairs_ref(\n self,\n cutoff=5.0,\n save_file=True,\n filename=None):\n \"\"\"Compute the residue pair on the reference conformation.\n\n Args:\n cutoff (float, optional): cutoff for the contact atoms\n save_file (bool, optional): save the file containing the\n residue pairs\n filename (None, optional): filename\n\n Returns:\n dict: defintition of the residue pairs\n \"\"\"\n\n sql_ref = interface(self.ref)\n chains = list(sql_ref.get_chains())\n if len(chains) != 2:\n raise ValueError(\n 'exactly two chains are needed for fnat calculation but we found %d' % len(chains), chains)\n residue_pairs_ref = sql_ref.get_contact_residues(\n cutoff=cutoff, return_contact_pairs=True, excludeH=True,\n chain1=chains[0], chain2=chains[1])\n sql_ref._close()\n\n if save_file:\n if filename is None:\n f = open(\n self.ref.split('.')[0] +\n 'residue_contact_pairs.pckl',\n 'wb')\n else:\n f = open(filename, 'wb')\n\n # save as pickle\n pickle.dump(residue_pairs_ref, f)\n f.close()\n\n return residue_pairs_ref\n\n ##########################################################################\n #\n # ROUTINE TO COMPUTE THE L-RMSD USING PDB2SQL\n # DOES NOT REQUIRE THE PRECALCULATION OF ANYTHONG\n # CAN OUTPUT THE SUPERIMPOSED STRUCTURES\n # MUCH SLOWER THAN THE FAST ROUTINES BUT EASIER TO USE\n #\n ##########################################################################\n\n # compute the L-RMSD\n def compute_lrmsd_pdb2sql(self, exportpath=None, method='svd', **kwargs):\n \"\"\"Slow routine to compute the L-RMSD.\n\n L-RMSD is computed by aligning the longest chain of the decoy to\n the one of the reference and computing the RMSD of the shortest\n chain between decoy and reference. Both fitting and rms calculation\n use only backbone atoms. 
See reference:\n\n DockQ: A Quality Measure for Protein-Protein Docking Models\n https://doi.org/10.1371/journal.pone.0161879\n\n Args:\n exportpath (str, optional): file name where the aligned pdbs\n are exported.\n method (str, optional): Method to align the fragments,\n 'svd' or 'quaternion'.\n\n Kwargs: selection keywords used in the pdb2sql.get() method :\n 'rowID', 'serial', 'name', 'altLoc',\n 'resName', 'resSeq', 'iCode',\n 'x', 'y', 'z', 'occ', 'temp', 'element', 'model'\n\n\n Returns:\n float: L-RMSD value of the conformation\n\n See also:\n :meth:`compute_lrmsd_fast`\n \"\"\"\n backbone = ['CA', 'C', 'N', 'O']\n if 'name' not in kwargs:\n kwargs['name'] = backbone\n\n if 'chainID' in kwargs:\n raise ValueError(\n 'do not specify chainID in compute_lrmsd_pdb2sql')\n\n # create the sql\n sql_decoy = pdb2sql(self.decoy, sqlfile='decoy.db')\n sql_ref = pdb2sql(self.ref, sqlfile='ref.db')\n\n # get the chains\n chains_decoy = sql_decoy.get_chains()\n chains_ref = sql_ref.get_chains()\n\n if chains_decoy != chains_ref:\n raise ValueError(\n 'Chains are different in decoy and reference structure')\n\n chain1 = chains_decoy[0]\n chain2 = chains_decoy[1]\n\n # extract the pos of chains A\n xyz_decoy_A = np.array(\n sql_decoy.get('x,y,z', chainID=chain1, **kwargs))\n xyz_ref_A = np.array(sql_ref.get(\n 'x,y,z', chainID=chain1, **kwargs))\n\n # extract the pos of chains B\n xyz_decoy_B = np.array(\n sql_decoy.get('x,y,z', chainID=chain2, **kwargs))\n xyz_ref_B = np.array(sql_ref.get(\n 'x,y,z', chainID=chain2, **kwargs))\n\n # check the lengthes\n if len(xyz_decoy_A) != len(xyz_ref_A):\n xyz_decoy_A, xyz_ref_A = self.get_identical_atoms(\n sql_decoy, sql_ref, chain1, **kwargs)\n\n if len(xyz_decoy_B) != len(xyz_ref_B):\n xyz_decoy_B, xyz_ref_B = self.get_identical_atoms(\n sql_decoy, sql_ref, **kwargs)\n\n # detect which chain is the longest\n nA, nB = len(xyz_decoy_A), len(xyz_decoy_B)\n if nA > nB:\n xyz_decoy_long = xyz_decoy_A\n xyz_ref_long = xyz_ref_A\n\n xyz_decoy_short = xyz_decoy_B\n xyz_ref_short = xyz_ref_B\n\n else:\n xyz_decoy_long = xyz_decoy_B\n xyz_ref_long = xyz_ref_B\n\n xyz_decoy_short = xyz_decoy_A\n xyz_ref_short = xyz_ref_A\n\n # get the translation so that both A chains are centered\n tr_decoy = get_trans_vect(xyz_decoy_long)\n tr_ref = get_trans_vect(xyz_ref_long)\n\n # translate everything for 1\n xyz_decoy_short += tr_decoy\n xyz_decoy_long += tr_decoy\n\n # translate everuthing for 2\n xyz_ref_short += tr_ref\n xyz_ref_long += tr_ref\n\n # get the ideal rotation matrix\n # to superimpose the A chains\n U = get_rotation_matrix(\n xyz_decoy_long, xyz_ref_long, method=method)\n\n # rotate the entire fragment\n xyz_decoy_short = transform.rotate(\n xyz_decoy_short, U, center=self.origin)\n\n # compute the RMSD\n lrmsd = self.get_rmsd(xyz_decoy_short, xyz_ref_short)\n\n # export the pdb for verifiactions\n if exportpath is not None:\n\n # extract the pos of the dimer\n xyz_decoy = np.array(sql_decoy.get('x,y,z'))\n xyz_ref = np.array(sql_ref.get('x,y,z'))\n\n # translate\n xyz_ref += tr_ref\n xyz_decoy += tr_decoy\n\n # rotate decoy\n xyz_decoy = transform.rotate(\n xyz_decoy, U, center=self.origin)\n\n # update the sql database\n sql_decoy.update_column('x', xyz_decoy[:, 0])\n sql_decoy.update_column('y', xyz_decoy[:, 1])\n sql_decoy.update_column('z', xyz_decoy[:, 2])\n\n sql_ref.update_column('x', xyz_ref[:, 0])\n sql_ref.update_column('y', xyz_ref[:, 1])\n sql_ref.update_column('z', xyz_ref[:, 2])\n\n # export\n sql_decoy.exportpdb(exportpath + 
'/lrmsd_decoy.pdb')\n sql_ref.exportpdb(exportpath + '/lrmsd_ref.pdb')\n\n # close the db\n sql_decoy._close()\n sql_ref._close()\n\n return lrmsd\n\n # RETURN THE ATOMS THAT ARE SHARED BY THE TWO DB\n # FOR A GIVEN CHAINID\n @staticmethod\n def get_identical_atoms(db1, db2, chain, **kwargs):\n \"\"\"Return that atoms shared by both databse for a specific chain.\n\n Args:\n db1 (TYPE): pdb2sql database of the first conformation\n db2 (TYPE): pdb2sql database of the 2nd conformation\n chain (str): chain name\n\n Kwargs: selection keywords used in the pdb2sql.get() method :\n 'rowID', 'serial', 'name', 'altLoc',\n 'resName', 'chainID', 'resSeq', 'iCode',\n 'x', 'y', 'z', 'occ', 'temp', 'element', 'model'\n\n Returns:\n list, list: list of xyz for both database\n \"\"\"\n\n # get data\n data1 = db1.get('chainID,resSeq,name',\n chainID=chain, **kwargs)\n data2 = db2.get('chainID,resSeq,name',\n chainID=chain, **kwargs)\n\n # tuplify\n data1 = [tuple(d1) for d1 in data1]\n data2 = [tuple(d2) for d2 in data2]\n\n # get the intersection\n shared_data = list(set(data1).intersection(data2))\n\n # get the xyz\n xyz1, xyz2 = [], []\n for data in shared_data:\n query = 'SELECT x,y,z from ATOM WHERE chainID=? AND resSeq=? and name=?'\n xyz1.append(list(list(db1.c.execute(query, data))[0]))\n xyz2.append(list(list(db2.c.execute(query, data))[0]))\n\n return xyz1, xyz2\n\n ##########################################################################\n #\n # ROUTINE TO COMPUTE THE I-RMSD USING PDB2SQL\n # DOES NOT REQUIRE THE PRECALCULATION OF ANYTHiNG\n # BUT CAN READ AN IZONE FILE AS WELL\n # CAN OUTPUT THE SUPERIMPOSED STRUCTURES\n # MUCH SLOWER THAN THE FAST ROUTINES BUT EASIER TO USE\n #\n ##########################################################################\n\n def compute_irmsd_pdb2sql(\n self,\n cutoff=10,\n method='svd',\n izone=None,\n exportpath=None):\n \"\"\"Slow method to compute the i-rmsd.\n\n i-RMSD is computed by selecting the backbone atoms of reference\n interface that is defined as any pair of heavy atoms from two\n chains within 10Å of each other.\n Align these backbone atoms as best as possible with their\n coutner part in the decoy and compute the RMSD. 
See reference:\n\n DockQ: A Quality Measure for Protein-Protein Docking Models\n https://doi.org/10.1371/journal.pone.0161879\n\n Args:\n izone (None, optional): file name of the zone.\n if None the zones will be calculated first.\n method (str, optional): Method to align the fragments,\n 'svd' or 'quaternion'.\n cutoff (float, optional): cutoff for the contact atoms\n exportpath (str, optional): file name where the aligned pdbs\n are exported.\n\n Returns:\n float: i-RMSD value of the conformation\n\n See also:\n :meth:`compute_irmsd_fast`\n \"\"\"\n\n # create thes sql\n sql_decoy = interface(self.decoy)\n sql_ref = interface(self.ref)\n\n # get the chains\n chains_decoy = sql_decoy.get_chains()\n chains_ref = sql_ref.get_chains()\n\n if chains_decoy != chains_ref:\n raise ValueError(\n 'Chains are different in decoy and reference structure')\n\n # get the contact atoms\n if izone is None:\n\n contact_ref = sql_ref.get_contact_atoms(\n cutoff=cutoff,\n extend_to_residue=True,\n chain1=chains_ref[0],\n chain2=chains_ref[1])\n\n index_contact_ref = []\n for v in contact_ref.values():\n index_contact_ref += v\n index_contact_ref = sql_ref.get(\n 'rowID', rowID=index_contact_ref, name=sql_ref.backbone_atoms)\n else:\n index_contact_ref = self.get_izone_rowID(\n sql_ref, izone, return_only_backbone_atoms=True)\n\n # get the xyz and atom identifier of the decoy contact atoms\n xyz_contact_ref = sql_ref.get(\n 'x,y,z', rowID=index_contact_ref)\n data_contact_ref = sql_ref.get(\n 'chainID,resSeq,resName,name',\n rowID=index_contact_ref)\n\n # get the xyz and atom indeitifier of the reference\n xyz_decoy = sql_decoy.get('x,y,z')\n data_decoy = sql_decoy.get('chainID,resSeq,resName,name')\n\n # loop through the ref label\n # check if the atom is in the decoy\n # if yes -> add xyz to xyz_contact_decoy\n # if no -> remove the corresponding to xyz_contact_ref\n xyz_contact_decoy = []\n index_contact_decoy = []\n clean_ref = False\n for iat, atom in enumerate(data_contact_ref):\n\n try:\n index = data_decoy.index(atom)\n index_contact_decoy.append(index)\n xyz_contact_decoy.append(xyz_decoy[index])\n except Exception:\n xyz_contact_ref[iat] = None\n index_contact_ref[iat] = None\n clean_ref = True\n\n # clean the xyz\n if clean_ref:\n xyz_contact_ref = [\n xyz for xyz in xyz_contact_ref if xyz is not None]\n index_contact_ref = [\n ind for ind in index_contact_ref if ind is not None]\n\n # check that we still have atoms in both chains\n chain_decoy = list(\n set(sql_decoy.get('chainID', rowID=index_contact_decoy)))\n chain_ref = list(\n set(sql_ref.get('chainID', rowID=index_contact_ref)))\n\n if len(chain_decoy) < 1 or len(chain_ref) < 1:\n raise ValueError(\n 'Error in i-rmsd: only one chain represented in one chain')\n\n # get the translation so that both A chains are centered\n tr_decoy = get_trans_vect(xyz_contact_decoy)\n tr_ref = get_trans_vect(xyz_contact_ref)\n\n # translate everything\n xyz_contact_decoy += tr_decoy\n xyz_contact_ref += tr_ref\n\n # get the ideql rotation matrix\n # to superimpose the A chains\n rot_mat = get_rotation_matrix(\n xyz_contact_decoy,\n xyz_contact_ref,\n method=method)\n\n # rotate the entire fragment\n xyz_contact_decoy = transform.rotate(\n xyz_contact_decoy, rot_mat, center=self.origin)\n\n # compute the RMSD\n irmsd = self.get_rmsd(xyz_contact_decoy, xyz_contact_ref)\n\n # export the pdb for verifiactions\n if exportpath is not None:\n\n # update the sql database\n sql_decoy.update_xyz(\n xyz_contact_decoy, rowID=index_contact_decoy)\n sql_ref.update_xyz(\n 
xyz_contact_ref, rowID=index_contact_ref)\n\n sql_decoy.exportpdb(\n exportpath + '/irmsd_decoy.pdb',\n rowID=index_contact_decoy)\n sql_ref.exportpdb(\n exportpath + '/irmsd_ref.pdb',\n rowID=index_contact_ref)\n\n # close the db\n sql_decoy._close()\n sql_ref._close()\n\n return irmsd\n\n # get the rowID of all the atoms\n def get_izone_rowID(self, sql, izone, return_only_backbone_atoms=True):\n \"\"\"Compute the index of the izone atoms.\n\n Args:\n sql (pdb2sql): database of the conformation\n izone (str): filename to store the zone\n return_only_backbone_atoms (bool, optional): Returns only\n the backbone atoms\n\n Returns:\n lis(int): index of the atoms in the zone\n\n Raises:\n FileNotFoundError: if the izone file is not found\n \"\"\"\n # read the file\n if not os.path.isfile(izone):\n raise FileNotFoundError('i-zone file not found', izone)\n\n with open(izone, 'r') as f:\n data = f.readlines()\n\n # get the data out of it\n resData = {}\n for line in data:\n\n res = line.split()[1].split('-')[0]\n chainID, resSeq = res[0], int(res[1:])\n\n if chainID not in resData.keys():\n resData[chainID] = []\n\n resData[chainID].append(resSeq)\n\n # get the rowID\n index_contact = []\n\n for chainID, resSeq in resData.items():\n if return_only_backbone_atoms:\n index_contact += sql.get('rowID',\n chainID=chainID,\n resSeq=resSeq,\n name=['C',\n 'CA',\n 'N',\n 'O'])\n else:\n index_contact += sql.get('rowID',\n chainID=chainID, resSeq=resSeq)\n\n return index_contact\n\n ##########################################################################\n #\n # ROUTINE TO COMPUTE THE fnat USING PDB2SQL\n #\n ##########################################################################\n\n def compute_fnat_pdb2sql(self, cutoff=5.0):\n \"\"\"Slow method to compute the FNAT of the conformation.\n\n Fnat is the fraction of reference interface contacts preserved\n in the interface of decoy. 
The interface is defined as any pair\n of heavy atoms from two chains within 5Å of each other.\n\n Args:\n cutoff (int, optional): cutoff for the contact atoms\n\n Returns:\n float: FNAT value\n\n See also:\n :meth:`compute_fnat_fast`\n \"\"\"\n\n # create the sql\n sql_decoy = interface(self.decoy, fix_chainID=True)\n sql_ref = interface(self.ref, fix_chainID=True)\n chains = list(sql_ref.get_chains())\n if len(chains) != 2:\n raise ValueError(\n 'exactly two chains are needed for irmsd calculation but we found %d' % len(chains), chains)\n\n # get the contact atoms\n residue_pairs_decoy = sql_decoy.get_contact_residues(\n cutoff=cutoff, return_contact_pairs=True, excludeH=True,\n chain1=chains[0], chain2=chains[1])\n residue_pairs_ref = sql_ref.get_contact_residues(\n cutoff=cutoff, return_contact_pairs=True, excludeH=True,\n chain1=chains[0], chain2=chains[1])\n\n # form the pair data\n data_pair_decoy = []\n for resA, resB_list in residue_pairs_decoy.items():\n data_pair_decoy += [(resA, resB) for resB in resB_list]\n\n # form the pair data\n data_pair_ref = []\n for resA, resB_list in residue_pairs_ref.items():\n data_pair_ref += [(resA, resB) for resB in resB_list]\n\n # find the umber of residue that ref and decoys hace in common\n nCommon = len(\n set(data_pair_ref).intersection(data_pair_decoy))\n\n # normalize\n fnat = nCommon / len(data_pair_ref)\n\n sql_decoy._close()\n sql_ref._close()\n\n return round(fnat, 6)\n\n ##########################################################################\n #\n # HELPER ROUTINES TO HANDLE THE ZONE FILES\n #\n ##########################################################################\n @staticmethod\n def get_xyz_zone_backbone(pdb_file, resData, return_not_in_zone=False, name=['C', 'CA', 'N', 'O']):\n \"\"\"Get the xyz of zone backbone atoms.\n\n Args:\n pdb_file (str): filename containing the pdb of the molecule\n resData (dict): information about the zone residues\n return_not_in_zone (bool, optional): Do we return the\n backbone atoms not in the zone and the chains used\n in the zone.\n\n Returns:\n list(float): XYZ of of backbone atoms in the zone.\n \"\"\"\n\n # read the ref file\n data = pdb2sql.read_pdb(pdb_file)\n\n # get the xyz of the\n xyz_in_zone = []\n xyz_not_in_zone = []\n\n for line in data:\n if line.startswith('ATOM'):\n chainID = line[21]\n if chainID == ' ':\n chainID = line[72]\n\n resSeq = int(line[22:26])\n atname = line[12:16].strip()\n\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n\n if atname in name:\n if chainID in resData.keys():\n if resSeq in resData[chainID]:\n xyz_in_zone.append([x, y, z])\n else:\n xyz_not_in_zone.append([x, y, z])\n\n if return_not_in_zone:\n return xyz_in_zone, xyz_not_in_zone\n\n else:\n return xyz_in_zone\n\n @staticmethod\n def get_data_zone_backbone(pdb_file, resData, return_not_in_zone=False, name=['C', 'CA', 'N', 'O']):\n \"\"\"Get the data (chainID, resSeq, name) of backbone atoms in the zone.\n\n Args:\n pdb_file (str): filename containing the pdb of the molecule\n resData (dict): information about the zone residues\n return_not_in_zone (bool, optional): Do we return the atoms\n not in the zone and the chains used in the zone\n\n Returns:\n set(float): data of the backbone atoms in the zone\n \"\"\"\n # read the ref file\n data = pdb2sql.read_pdb(pdb_file)\n\n # get the xyz of the\n data_in_zone = []\n data_not_in_zone = []\n\n for line in data:\n\n if line.startswith('ATOM'):\n\n chainID = line[21]\n if chainID == ' ':\n chainID = line[72]\n\n resSeq = 
int(line[22:26])\n atname = line[12:16].strip()\n\n if atname in name:\n if chainID in resData.keys():\n if resSeq in resData[chainID]:\n data_in_zone.append((chainID, resSeq, atname))\n else:\n data_not_in_zone.append((chainID, resSeq, atname))\n\n if return_not_in_zone:\n return set(data_in_zone), set(data_not_in_zone)\n\n else:\n return set(data_in_zone)\n\n @staticmethod\n def read_zone(zone_file):\n \"\"\"Read the zone file.\n\n Args:\n zone_file (str): name of the file\n\n Returns:\n dict: Info about the residues in the zone\n\n Raises:\n FileNotFoundError: if the zone file is not found\n \"\"\"\n # read the izone file\n if not os.path.isfile(zone_file):\n raise FileNotFoundError('zone file not found', zone_file)\n\n with open(zone_file, 'r') as f:\n data = f.readlines()\n\n # get the data out of it\n resData = {}\n for line in data:\n # line = zone A4-A4 for positive resNum\n # or line = zone A-4-A-4 for negative resNum\n # that happens for example in 2OUL\n\n # split the line\n res = line.split()[1].split('-')\n\n # if the resnum was positive\n # we have e.g res = [A4,A4]\n if len(res) == 2:\n res = res[0]\n chainID, resSeq = res[0], int(res[1:])\n\n # if the resnum was negative was negtive\n # we have e.g res = [A,4,A,4]\n elif len(res) == 4:\n chainID, resSeq = res[0], -int(res[1])\n\n if chainID not in resData.keys():\n resData[chainID] = []\n\n resData[chainID].append(resSeq)\n\n return resData\n\n @staticmethod\n def _get_xyz(pdb_file, index):\n \"\"\"Get xyz using (chainID, resSeq, name) index.\n\n Args:\n pdb_file(file): pdb file or data\n index(set): set of index represeneted with (chainID, resSeq, name)\n\n Returns:\n list: list of xyz\n \"\"\"\n data = pdb2sql.read_pdb(pdb_file)\n xyz = []\n\n for line in data:\n if line.startswith('ATOM'):\n chainID = line[21]\n if chainID == ' ':\n chainID = line[72]\n\n resSeq = int(line[22:26])\n name = line[12:16].strip()\n\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n\n if (chainID, resSeq, name) in index:\n xyz.append([x, y, z])\n\n return xyz\n\n ##########################################################################\n #\n # CAPRI categories and DockQ score\n #\n ##########################################################################\n @staticmethod\n def compute_CapriClass(fnat, lrmsd, irmsd, system='protein-protein'):\n \"\"\"Compute CAPRI ranking classes.\n\n Note:\n Criteria of CAPRI classes:\n https://doi.org/10.1371/journal.pone.0161879\n https://doi.org/10.1002/prot.21804\n The protocol for classifying predicted model into the four CAPRI\n categories should start with those defining incorrect predictions.\n\n Args:\n fnat(float): fnat\n lrmsd(float): ligand rmsd\n irmsd(float ): interface rmsd\n system (str): the type of complex system.\n Defaults to 'protein-protein'.\n\n Returns:\n str: CAPRI rank class, i.e. 
high, medium, acceptable or incorrect.\n \"\"\"\n\n if system == 'protein-protein':\n if fnat < 0.1 or (lrmsd > 10.0 and irmsd > 4.0):\n label = 'incorrect'\n elif 0.1 <= fnat < 0.3 and (lrmsd <= 10.0 or irmsd <= 4.0) or \\\n (fnat >= 0.3 and lrmsd > 5.0 and irmsd > 2.0):\n label = 'acceptable'\n elif 0.3 <= fnat < 0.5 and (lrmsd <= 5.0 or irmsd <= 2.0) or \\\n (fnat >= 0.5 and lrmsd > 1.0 and irmsd > 1.0):\n label = 'medium'\n elif fnat >= 0.5 and (lrmsd <= 1.0 or irmsd <= 1.0):\n label = 'high'\n else:\n warnings.warn(\n f'Invalid complex type {system} for CAPRI class calculation')\n\n return label\n\n # compute the DockQ score from the different elements\n @staticmethod\n def compute_DockQScore(fnat, lrmsd, irmsd, d1=8.5, d2=1.5):\n \"\"\"Compute the DockQ Score.\n\n Args:\n Fnat (float): Fnat value\n lrmsd (float): lrmsd value\n irmsd (float): irmsd value\n d1 (float, optional): first coefficient for the DockQ\n calculations\n d2 (float, optional): second coefficient for the DockQ\n calculations\n\n Returns:\n float: dockQ value\n \"\"\"\n\n def scale_rms(rms, d):\n return(1. / (1 + (rms / d)**2))\n\n dockq = 1. / 3 * \\\n (fnat + scale_rms(lrmsd, d1) + scale_rms(irmsd, d2))\n return round(dockq, 6)\n\n ##########################################################################\n #\n # clahses\n #\n ##########################################################################\n\n @staticmethod\n def compute_clashes(pdb, chain1='A', chain2='B'):\n \"\"\"Compute number of atomic clashes.\n\n Note:\n Clashes were defined as contacts between nonhydrogen atoms\n separated by <3.0Å. Structural models where number of clashes\n was 2 SD away from the average are excluded for assessment in\n CAPRI. see ref: https://doi.org/10.1002/prot.10393\n\n Args:\n pdb(file): pdb file or data\n chain1 (str): first chain ID. Defaults to 'A'.\n chain2 (str): second chain ID. Defaults to 'B'.\n\n Returns:\n int: number of atomic clashes.\n \"\"\"\n db = interface(pdb)\n atom_contact_pairs = db.get_contact_atoms(\n cutoff=3.0, excludeH=True,\n return_contact_pairs = True,\n chain1=chain1, chain2=chain2)\n db._close()\n nclash = 0\n for v in atom_contact_pairs.values():\n nclash += len(v)\n return nclash\n\n ##########################################################################\n #\n # ROUTINES TO ACTUALY SUPERPOSE THE MOLECULES\n #\n ##########################################################################\n\n # compute the RMSD of two sets of points\n @staticmethod\n def get_rmsd(P, Q):\n \"\"\"compute the RMSD.\n\n Args:\n P (np.array(nx3)): position of the points in the first\n molecule\n Q (np.array(nx3)): position of the points in the second\n molecule\n\n Returns:\n float: RMSD value\n \"\"\"\n n = len(P)\n return round(np.sqrt(1. / n * np.sum((P - Q)**2)), 3)\n"
] | [
[
"numpy.array",
"numpy.sum"
]
] |
romiosarkar6991/tfx-romio | [
"0703c1dd037c676e1d438c2e5ce831decfc9eed9",
"0703c1dd037c676e1d438c2e5ce831decfc9eed9"
] | [
"tfx/components/schema_gen/executor_test.py",
"tfx/examples/iris/iris_pipeline_beam.py"
] | [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.schema_gen.executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom tfx.components.schema_gen import executor\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import io_utils\n\n\nclass ExecutorTest(tf.test.TestCase):\n\n def setUp(self):\n super(ExecutorTest, self).setUp()\n\n self.source_data_dir = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), 'testdata')\n\n self.train_stats_artifact = standard_artifacts.ExampleStatistics(\n split='train')\n self.train_stats_artifact.uri = os.path.join(self.source_data_dir,\n 'statistics_gen/train/')\n\n self.output_data_dir = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n\n self.schema_output = standard_artifacts.Schema()\n self.schema_output.uri = os.path.join(self.output_data_dir, 'schema_output')\n\n self.schema = standard_artifacts.Schema()\n self.schema.uri = os.path.join(self.source_data_dir, 'fixed_schema/')\n\n self.expected_schema = standard_artifacts.Schema()\n self.expected_schema.uri = os.path.join(self.source_data_dir, 'schema_gen/')\n\n self.input_dict = {\n 'stats': [self.train_stats_artifact],\n 'schema': None\n }\n self.output_dict = {\n 'output': [self.schema_output],\n }\n self.exec_properties = {'infer_feature_shape': False}\n\n def _assertSchemaEqual(self, expected_schema, actual_schema):\n schema_reader = io_utils.SchemaReader()\n expected_schema_proto = schema_reader.read(\n os.path.join(expected_schema.uri, executor._DEFAULT_FILE_NAME))\n actual_schema_proto = schema_reader.read(\n os.path.join(actual_schema.uri, executor._DEFAULT_FILE_NAME))\n self.assertProtoEquals(expected_schema_proto, actual_schema_proto)\n\n def testDoWithStatistics(self):\n schema_gen_executor = executor.Executor()\n schema_gen_executor.Do(self.input_dict, self.output_dict,\n self.exec_properties)\n self.assertNotEqual(0, len(tf.gfile.ListDirectory(self.schema_output.uri)))\n self._assertSchemaEqual(self.expected_schema, self.schema_output)\n\n def testDoWithSchema(self):\n self.input_dict['schema'] = [self.schema]\n self.input_dict.pop('stats')\n schema_gen_executor = executor.Executor()\n schema_gen_executor.Do(self.input_dict, self.output_dict,\n self.exec_properties)\n self.assertNotEqual(0, len(tf.gfile.ListDirectory(self.schema_output.uri)))\n self._assertSchemaEqual(self.schema, self.schema_output)\n\n def testDoWithNonExistentSchema(self):\n non_existent_schema = standard_artifacts.Schema()\n non_existent_schema.uri = '/path/to/non_existent/schema'\n\n self.input_dict['schema'] = [non_existent_schema]\n self.input_dict.pop('stats')\n\n with self.assertRaises(ValueError):\n schema_gen_executor = executor.Executor()\n schema_gen_executor.Do(self.input_dict, self.output_dict,\n self.exec_properties)\n\n\nif __name__ == 
'__main__':\n tf.test.main()\n",
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom typing import Text\nfrom tfx.components.evaluator.component import Evaluator\nfrom tfx.components.example_gen.csv_example_gen.component import CsvExampleGen\nfrom tfx.components.example_validator.component import ExampleValidator\nfrom tfx.components.model_validator.component import ModelValidator\nfrom tfx.components.pusher.component import Pusher\nfrom tfx.components.schema_gen.component import SchemaGen\nfrom tfx.components.statistics_gen.component import StatisticsGen\nfrom tfx.components.trainer.component import Trainer\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.beam.beam_dag_runner import BeamDagRunner\nfrom tfx.proto import pusher_pb2\nfrom tfx.proto import trainer_pb2\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris'\n\n# This example assumes that Iris flowers data is stored in ~/iris/data and the\n# utility function is in ~/iris. Feel free to customize as needed.\n_iris_root = os.path.join(os.environ['HOME'], 'iris')\n_data_root = os.path.join(_iris_root, 'data')\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n_module_file = os.path.join(_iris_root, 'iris_utils.py')\n# Path which can be listened to by the model server. Pusher will output the\n# trained model here.\n_serving_model_dir = os.path.join(_iris_root, 'serving_model', _pipeline_name)\n\n# Directory and data locations. 
This example assumes all of the flowers\n# example code and metadata library is relative to $HOME, but you can store\n# these files anywhere on your local filesystem.\n_tfx_root = os.path.join(os.environ['HOME'], 'tfx')\n_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)\n# Sqlite ML-metadata db path.\n_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,\n 'metadata.db')\n\n\ndef _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,\n module_file: Text, serving_model_dir: Text,\n metadata_path: Text) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(data_root)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema'])\n\n # Uses user-provided Python function that implements a model using TF-Learn.\n trainer = Trainer(\n module_file=module_file,\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=10000),\n eval_args=trainer_pb2.EvalArgs(num_steps=5000))\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model_exports=trainer.outputs['model'])\n\n # Performs quality validation of a candidate model (compared to a baseline).\n model_validator = ModelValidator(\n examples=example_gen.outputs['examples'], model=trainer.outputs['model'])\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_validator.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=serving_model_dir)))\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, trainer,\n model_analyzer, model_validator, pusher\n ],\n enable_cache=True,\n metadata_connection_config=metadata.sqlite_metadata_connection_config(\n metadata_path),\n additional_pipeline_args={},\n )\n\n\n# To run this pipeline from the python CLI:\n# $python iris_pipeline.py\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n BeamDagRunner().run(\n _create_pipeline(\n pipeline_name=_pipeline_name,\n pipeline_root=_pipeline_root,\n data_root=_data_root,\n module_file=_module_file,\n serving_model_dir=_serving_model_dir,\n metadata_path=_metadata_path))\n"
] | [
[
"tensorflow.gfile.ListDirectory",
"tensorflow.test.main"
],
[
"tensorflow.logging.set_verbosity"
]
] |